diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index b94899f37e..ebbbb1a2a2 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -22,3 +22,7 @@ ecd1e05f5725832c2d5dfdc53f4c4100bf763284 c3bd8eb1214cbebbc92c7958b80aa06913bce3ba 488ded0c8d9e43deef531ad174937982b41f8e4b 26e888965d08486aeed7ebb3ef934ceb1a38cd6f + +# A commit which ran Python Black on all Python files. +# https://gem5-review.googlesource.com/c/public/gem5/+/47024 +787204c92d876dd81357b75aede52d8ef5e053d3 diff --git a/.gitignore b/.gitignore index 90a6bb2515..229a0d5ae9 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,4 @@ m5out configs/example/memcheck.cfg configs/dram/lowp_sweep.cfg .pyenv +.vscode diff --git a/.mailmap b/.mailmap index 3cc78251f3..49c438d3eb 100644 --- a/.mailmap +++ b/.mailmap @@ -1,37 +1,43 @@ -ARM gem5 Developers Abdul Mutaal Ahmad +adarshpatil +Adrià Armejach Adrià Armejach Adrian Herrera Adrien Pesle -Adrià Armejach Adrià Armejach Akash Bagdia Akash Bagdia Alec Roelke Alec Roelke +Alexander Klimov Alexandru Dutu Alexandru +Alex Richardson Ali Jafri -Ali Saidi Ali Saidi Ali Saidi Ali Saidi +Ali Saidi Ali Saidi Ali Saidi Ali Saidi +Alistair Delva Amin Farmahini Anders Handler -Andrea Mondelli Andrea Mondelli +Andrea Mondelli Andrea Mondelli +Andrea Mondelli Andrea Mondelli Andrea Pellegrini -Andreas Hansson Andreas Hansson Andreas Hansson Andreas Hansson Andreas Hansson Andreas Hansson +Andreas Hansson Andreas Hansson Andreas Hansson Andreas Hansson -Andreas Sandberg Andreas Sandberg Andreas Sandberg Andreas Sandberg +Andreas Sandberg Andreas Sandberg Andreas Sandberg Andreas Sandberg Andrew Bardsley Andrew Bardsley Andrew Lukefahr Andrew Schultz Andriani Mappoura -Ani Udipi +Angie Lee Anis Peysieux +Ani Udipi Anouk Van Laer -Arthur Perais +ARM gem5 Developers +Arthur Perais Arthur Perais +Arun Rodrigues Ashkan Tousi -Austin Harris -Richard D. 
Strong +Austin Harris Austin Harris Avishai Tvila Ayaz Akram Bagus Hanindhito @@ -41,80 +47,108 @@ Binh Pham Bjoern A. Zeeb Blake Hechtman Blake Hechtman Blake Hechtman Blake Hechtman ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) -Bobby R. Bruce +Bobby R. Bruce Bobby Bruce Boris Shingarov Boris Shingarov Brad Beckmann Brad Beckmann Brad Beckmann Brad Beckmann ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) Brad Danofsky Bradley Wang Bradley +Brandon Potter BKP Brandon Potter bpotter Brandon Potter Brandon Potter -Brandon Potter BKP Brian Grayson Cagdas Dirik cdirik +Carlos Falquez Chander Sudanthi Chander Sudanthi Chander Sudanthi Chander Sudanthi +Charles Jamieson +CHEN Meng Chen Zou +Chia-You Chen +Chow, Marcus Chris Adeniyi-Jones -Chris Emmons Chris Emmons Chris Emmons Chris Emmons +Chris Emmons Chris Emmons +Chris January Christian Menard Christian Menard -Christoph Pfister Christopher Torng +Christoph Pfister Chuan Zhu Chun-Chen Hsu Chun-Chen TK Hsu Ciro Santilli Clint Smullen +Cui Jin Cui Jin Curtis Dunham +Daecheol You Dam Sunwoo Dan Gibson Daniel Carvalho Daniel Daniel Carvalho Daniel R. Carvalho +Daniel Gerzhoy Daniel Johnson Daniel Sanchez +Davide Basilio Bartolini David Guillen-Fandos David Guillen David Guillen-Fandos David Guillen Fandos David Hashe David Hashe David Oehmke +David Schall +Derek Christ Derek Hower -Deyaun Guo Deyuan Guo Deyaun Guo Deyuan Guo ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) +Deyaun Guo Deyuan Guo Dibakar Gope Dibakar Gope ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) +Dimitrios Chasapis Djordje Kovacevic Djordje Kovacevic -Dongxue Zhang Doğukan Korkmaztürk +Dongxue Zhang Dylan Johnson Earl Ou +eavivi +Éder F. 
Zulian Edmund Grimley Evans +Eduardo José Gómez Hernández +Eliot Moss Emilio Castillo Emilio Castillo Emilio Castillo Emilio Castillo ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) +Emily Brickey Erfan Azarkhish +Erhu Eric Van Hensbergen Eric Van Hensbergen +Eric Ye Erik Hallnor Erik Tomusk Faissal Sleiman Faissal Sleiman Fernando Endo +Franklin He Gabe Black Gabe Black Gabe Black Gabe Black +Gabe Loh gloh Gabor Dozsa +Gabriel Busnot +gauravjain14 Gedare Bloom Gedare Bloom Gene Wu Gene WU Gene WU Gene Wu -Geoffrey Blake Geoffrey Blake Geoffrey Blake Geoffrey Blake +Geoffrey Blake Geoffrey Blake Georg Kotheimer Giacomo Gabrielli Giacomo Gabrielli Giacomo Travaglini Glenn Bergmans +GWDx Hamid Reza Khaleghzadeh Hamid Reza Khaleghzadeh ext:(%2C%20Lluc%20Alvarez%20%3Clluc.alvarez%40bsc.es%3E%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) +handsomeliu Hanhwi Jang Hoa Nguyen Hongil Yoon Hsuan Hsu +huangjs Hussein Elnawawy Ian Jiang IanJiangICT Ilias Vougioukas +Iru Cai Isaac Richter Isaac Sánchez Barrera Ivan Pizarro @@ -123,104 +157,152 @@ Jairo Balart Jakub Jermar James Clarkson Jan-Peter Larsson -Jason Lowe-Power Jason Lowe-Power +Jan Vrany +Jarvis Jia +Jasjeet Rangi Jason Lowe-Power Jason Lowe-Power -Jason Lowe-Power Jason Power -Jason Lowe-Power Jason Power +Jason Lowe-Power Jason Lowe-Power Jason Lowe-Power Jason Power ext:(%2C%20Joel%20Hestness%20%3Chestness%40cs.wisc.edu%3E) +Jason Lowe-Power Jason Power +Jason Lowe-Power Jason Power +Jason Yu Javier Bueno Hedo Javier Bueno Javier Cano-Cano +Javier Garcia Hernandez Javier Setoain Jayneel Gandhi Jennifer Treichler -Jieming Yin +Jerin Joy +Jiajie Chen +Jiasen Huang +Jiasen +Jiayi Huang +jiegec +Jieming Yin jiemingyin Jing Qu JingQuJQ Jiuyue Ma Joe Gross Joe Gross +Joel Hestness Joel Hestness ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) Joel Hestness Joel Hestness Joel Hestness Joel Hestness -Joel Hestness Joel Hestness ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E) +Joël Porquet-Lupine John 
Alsop John Kalamatianos jkalamat Jordi Vaquero Jose Marinho +Juan M. Cebrian Jui-min Lee +kai.ren Kai Ren Kanishk Sugand Karthik Sangaiah +Kaustav Goswami +Kelly Nguyen Ke Meng Kevin Brodsky Kevin Lim +Kevin Loughlin Khalique Koan-Sin Tan Korey Sewell Krishnendra Nathella Krishnendra Nathella +ksco +kunpai +Kyle Roarty Kyle Roarty +Laura Hinman Lena Olson Lena Olson Lena Olson Lena Olson Lisa Hsu Lisa Hsu Lluc Alvarez Lluís Vilanova Lluis Vilanova +Lukas Steiner +Luming Wang +m5test Mahyar Samani +Majid Jalili Malek Musleh Nilay Vaish ext:(%2C%20Malek%20Musleh%20%3Cmalek.musleh%40gmail.com%3E) Marc Mari Barcelo -Marc Orr Marc Orr Marco Balboni Marco Elver Marco Elver +Marc Orr Marc Orr +Marjan Fariborz marjanfariborz +Mark Hildebrand +Marton Erdos +Maryam Babaie Matt DeVuyst -Matt Evans Matt Evans -Matt Horsnell Matt Horsnell -Matt Horsnell Matt Horsnell -Matt Horsnell Matt Horsnell -Matt Poremba Matt Poremba Matteo Andreozzi Matteo Andreozzi Matteo M. Fusi +Matt Evans Matt Evans Matthew Poremba Matthew Poremba -Matt Sinclair Matthew Sinclair Matthias Hille Matthias Jung +Matthias Jung +Matt Horsnell Matt Horsnell +Matt Horsnell Matt Horsnell +Matt Horsnell Matt Horsnell +Matt Poremba Matt Poremba +Matt Sinclair Matthew Sinclair +Matt Sinclair Matt Sinclair Maurice Becker Maxime Martinasso -Maximilian Stein +Maximilian Stein Maximilian Stein Maximilien Breughe Maximilien Breughe +Melissa Jost Michael Adler +Michael Boyer Michael LeBeane Michael LeBeane Michael LeBeane mlebeane Michael Levenhagen -Michiel Van Tol Michiel W. van Tol Michiel Van Tol Michiel van Tol +Michiel Van Tol Michiel W. 
van Tol Miguel Serrano +Mike Upton Miles Kaufmann -Min Kyu Jeong Min Kyu Jeong Mingyuan -Mitch Hayenga Mitch Hayenga -Mitch Hayenga Mitch Hayenga -Mitch Hayenga Mitch Hayenga -Mitch Hayenga Mitch Hayenga ext:(%2C%20Amin%20Farmahini%20%3Caminfar%40gmail.com%3E) +Min Kyu Jeong Min Kyu Jeong Mitch Hayenga Mitchell Hayenga +Mitch Hayenga Mitch Hayenga ext:(%2C%20Amin%20Farmahini%20%3Caminfar%40gmail.com%3E) +Mitch Hayenga Mitch Hayenga +Mitch Hayenga Mitch Hayenga +Mitch Hayenga Mitch Hayenga Mohammad Alian Monir Mozumder Moyang Wang Mrinmoy Ghosh Mrinmoy Ghosh -Nathan Binkert Nathan Binkert +Muhammad Sarmad Saeed +Nadia Etemadi Nathanael Premillieu Nathanael Premillieu +Nathanael Premillieu Nathanael Premillieu Nathanael Premillieu Nathanael Premillieu Nathanael Premillieu Nathanael Premillieu Nathanael Premillieu Nathanael Premillieu +Nathan Binkert Nathan Binkert Nayan Deshmukh Neha Agarwal +Neil Natekar Nicholas Lindsay +Nicolas Boichat Nicolas Derumigny Nicolas Zea Nikos Nikoleris Nikos Nikoleris +Nilay Vaish ext:(%2C%20Timothy%20Jones%20%3Ctimothy.jones%40cl.cam.ac.uk%3E) Nils Asmussen Nils Asmussen +Noah Katz +ntampouratzis Nuwan Jayasena Ola Jeppsson Omar Naji +Onur Kayiran Pablo Prieto +paikunal Palle Lyckegaard Pau Cabre Paul Rosenfeld Paul Rosenfeld Paul Rosenfeld Paul Rosenfeld Peter Enns Pierre-Yves Péneau +Peter +Peter Yuen +Philip Metzler +Pierre Ayoub Pin-Yen Lin Po-Hao Su Polina Dudnik Polina Dudnik @@ -229,23 +311,26 @@ Pouya Fotouhi Pouya Fotouhi Prakash Ramrakhyani Prakash Ramrakhani Prakash Ramrakhyani Prakash Ramrakhyani Pritha Ghoshal +Quentin Forcioli Radhika Jagtap Radhika Jagtap Rahul Thakur Reiley Jeapaul -Rekai Gonzalez-Alberquilla Rekai -Rekai Gonzalez-Alberquilla Rekai Gonzalez Alberquilla Rekai Gonzalez-Alberquilla Rekai Gonzalez Alberquilla +Rekai Gonzalez-Alberquilla Rekai Gonzalez Alberquilla Rekai Gonzalez-Alberquilla Rekai Gonzalez-Alberquilla +Rekai Gonzalez-Alberquilla Rekai Rene de Jong Ricardo Alves +Richard Cooper +Richard D. 
Strong Richard Strong Richard Strong Richard Strong Richard Strong Richard Strong Rick Strong Rico Amslinger Riken Gohil Rizwana Begum -Robert Scheffel Robert Robert Kovacsics +Robert Scheffel Robert Rohit Kurup Ron Dreslinski Ronald Dreslinski Ruben Ayrapetyan @@ -253,20 +338,27 @@ Rune Holm Ruslan Bukin Ruslan Bukin ext:(%2C%20Zhang%20Guoye) Rutuja Oza Ryan Gambord +sacak32 +Sampad Mohapatra Samuel Grayson -Sandipan Das +Samuel Stark +Sandipan Das <31861871+sandip4n@users.noreply.github.com> +Sandipan Das Sandipan Das <31861871+sandip4n@users.noreply.github.com> Santi Galan -Sascha Bischoff Sascha Bischoff Sascha Bischoff Sascha Bischoff +Sascha Bischoff Sascha Bischoff Sean McGoogan Sean Wilson Sergei Trofimov Severin Wischmann Severin Wischmann ext:(%2C%20Ioannis%20Ilkos%20%3Cioannis.ilkos09%40imperial.ac.uk%3E) Shawn Rosti Sherif Elhabbal +Shivani Parekh +Shivani Siddhesh Poyarekar Somayeh Sardashti Sooraj Puthoor +Sooraj Puthoor Sophiane Senni Soumyaroop Roy Srikant Bharadwaj @@ -275,13 +367,14 @@ Stanislaw Czerniawski Stephan Diestelhorst Stephan Diestelhorst Stephen Hines Steve Raasch -Steve Reinhardt Steve Reinhardt -Steve Reinhardt Steve Reinhardt -Steve Reinhardt Steve Reinhardt Steve Reinhardt Steve Reinhardt ext:(%2C%20Nilay%20Vaish%20%3Cnilay%40cs.wisc.edu%3E%2C%20Ali%20Saidi%20%3CAli.Saidi%40ARM.com%3E) +Steve Reinhardt Steve Reinhardt +Steve Reinhardt Steve Reinhardt +Steve Reinhardt Steve Reinhardt Stian Hvatum Sudhanshu Jha Sujay Phadke +Sungkeun Kim Swapnil Haria Swapnil Haria Taeho Kgil Tao Zhang @@ -290,45 +383,50 @@ Tiago Mück Tiago Muck Tim Harris Timothy Hayes Timothy M. Jones Timothy Jones -Timothy M. Jones Nilay Vaish ext:(%2C%20Timothy%20Jones%20%3Ctimothy.jones%40cl.cam.ac.uk%3E) Timothy M. Jones Timothy M. Jones Timothy M. Jones Timothy M. 
Jones Tom Jablin Tommaso Marinelli +Tom Rollet +Tong Shen Tony Gutierrez Anthony Gutierrez -Tuan Ta Tuan Ta -Tushar Krishna Tushar Krishna +Travis Boraten +Trivikram Reddy tv-reddy +Tuan Ta Tuan Ta Tuan Ta Tushar Krishna Tushar Krishna +Tushar Krishna Tushar Krishna Umesh Bhaskar Uri Wiener Victor Garcia Vilas Sridharan -Vince Weaver Vincentius Robby +Vince Weaver +vramadas95 +vsoria Wade Walker +Wei-Han Chen Weiping Liao +Wende Tan Wendy Elsasser -William Wang William Wang William Wang William Wang +William Wang William Wang Willy Wolff +Wing Li Xiangyu Dong -Xianwei Zhang +Xianwei Zhang Xianwei Zhang Xiaoyu Ma Xin Ouyang +Xiongfei Yasuko Eckert -Yi Xiang +Yen-lin Lai Yifei Liu -Yu-hsin Wang +yiwkd2 +Yi Xiang Yuan Yao Yuetsu Kodama yuetsu.kodama +Yu-hsin Wang Zhang Zheng +Zhantong Qiu +Zhengrong Wang seanzw +zhongchengyong Zicong Wang -Éder F. Zulian -Gabe Loh gloh -jiegec -m5test -Marjan Fariborz marjanfariborz -Mike Upton -seanzw -Trivikram Reddy tv-reddy - diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..8cbc6afdb7 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,89 @@ +# Copyright (c) 2022 Arm Limited +# All rights reserved. +# +# The license below extends only to copyright in the software and shall +# not be construed as granting a license to any other intellectual +# property including but not limited to intellectual property relating +# to a hardware implementation of the functionality of the software +# licensed hereunder. You may use the software subject to the license +# terms below provided that you ensure that this notice is replicated +# unmodified and in its entirety in all distributions of the software, +# modified or unmodified, in source code or in binary form. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +minimum_pre_commit_version: "2.18" + +default_language_version: + python: python3 + +exclude: | + (?x)^( + ext/.*| + build/.*| + src/systemc/ext/.*| + src/systemc/tests/.*/.*| + src/python/m5/ext/pyfdt/.*| + tests/.*/ref/.* + )$ + +default_stages: [commit] + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-json + - id: check-yaml + - id: check-added-large-files + - id: mixed-line-ending + args: [--fix=lf] + - id: check-case-conflict +- repo: https://github.com/psf/black + rev: 22.6.0 + hooks: + - id: black +- repo: local + hooks: + - id: gem5-style-checker + name: gem5 style checker + entry: util/git-pre-commit.py + always_run: true + exclude: ".*" + language: system + description: 'The gem5 style checker hook.' + - id: gem5-commit-msg-checker + name: gem5 commit msg checker + entry: ext/git-commit-msg + language: system + stages: [commit-msg] + description: 'The gem5 commit message checker hook.' + - id: gerrit-commit-msg-job + name: gerrit commit message job + entry: util/gerrit-commit-msg-hook + language: system + stages: [commit-msg] + description: 'Adds Change-ID to the commit message. Needed by Gerrit.' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 189b63fb48..ae771d3ffb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -136,6 +136,37 @@ rebasing and git, see the [git book]. [git book]: https://git-scm.com/book/en/v2/Git-Branching-Rebasing + +Setting up pre-commit +--------------------- + +To help ensure the gem5 style guide is maintained, we use [pre-commit]( +https://pre-commit.com) to run checks on changes to be contributed. + +To setup pre-commit, run the following in your gem5 directory to install the +pre-commit and commit message hooks. + +```sh +pip install pre-commit +pre-commit install -t pre-commit -t commit-msg +``` + +The hooks are also automatically installed when gem5 is compiled. 
+ +When you run a `git commit` command the pre-commit hook will run checks on your +committed code. The commit will be blocked if a check fails. + +The same checks are run as part of Gerrit's CI tests (those required to obtain +a Verified label, necessary for a change to be accepted to the develop branch). +Therefore setting up pre-commit in your local gem5 development environment is +recommended. + +You can automatically format files to pass the pre-commit tests by running: + +```sh +pre-commit run --files +``` + Requirements for change descriptions ------------------------------------ To help reviewers and future contributors more easily understand and track diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 2353a96a67..931be695ba 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -1,3 +1,121 @@ +# Version 22.1.0.0 + +This release has 500 contributions from 48 unique contributors and marks our second major release of 2022. +This release incorporates several new features, improvements, and bug fixes for the computer architecture reserach community. + +See below for more details! + +## New features and improvements + +- The gem5 binary can now be compiled to include multiple ISA targets. +A compilation of gem5 which includes all gem5 ISAs can be created using: `scons build/ALL/gem5.opt`. +This will use the Ruby `MESI_Two_Level` cache coherence protocol by default, to use other protocols: `scons build/ALL/gem5.opt PROTOCOL=`. +The classic cache system may continue to be used regardless as to which Ruby cache coherence protocol is compiled. +- The `m5` Python module now includes functions to set exit events are particular simululation ticks: + - *setMaxTick(tick)* : Used to to specify the maximum simulation tick. + - *getMaxTick()* : Used to obtain the maximum simulation tick value. + - *getTicksUntilMax()*: Used to get the number of ticks remaining until the maximum tick is reached. 
+ - *scheduleTickExitFromCurrent(tick)* : Used to schedule an exit exit event a specified number of ticks in the future. + - *scheduleTickExitAbsolute(tick)* : Used to schedule an exit event as a specified tick. +- We now include the `RiscvMatched` board as part of the gem5 stdlib. +This board is modeled after the [HiFive Unmatched board](https://www.sifive.com/boards/hifive-unmatched) and may be used to emulate its behavior. +See "configs/example/gem5_library/riscv-matched-fs.py" and "configs/example/gem5_library/riscv-matched-hello.py" for examples using this board. +- An API for [SimPoints](https://doi.org/10.1145/885651.781076) has been added. +SimPoints can substantially improve gem5 Simulation time by only simulating representative parts of a simulation then extrapolating statistical data accordingly. +Examples of using SimPoints with gem5 can be found in "configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py" and "configs/example/gem5_library/checkpoints/simpoints-se-restore.py". +- "Workloads" have been introduced to gem5. +Workloads have been incorporated into the gem5 Standard library. +They can be used specify the software to be run on a simulated system that come complete with input parameters and any other dependencies necessary to run a simuation on the target hardware. +At the level of the gem5 configuration script a user may specify a workload via a board's `set_workload` function. +For example, `set_workload(Workload("x86-ubuntu-18.04-boot"))` sets the board to use the "x86-ubuntu-18.04-boot" workload. +This workload specifies a boot consisting of the Linux 5.4.49 kernel then booting an Ubunutu 18.04 disk image, to exit upon booting. +Workloads are agnostic to underlying gem5 design and, via the gem5-resources infrastructure, will automatically retrieve all necessary kernels, disk-images, etc., necessary to execute. 
+Examples of using gem5 Workloads can be found in "configs/example/gem5_library/x86-ubuntu-ruby.py" and "configs/example/gem5_library/riscv-ubuntu-run.py". +- To aid gem5 developers, we have incorporated [pre-commit](https://pre-commit.com) checks into gem5. +These checks automatically enforce the gem5 style guide on Python files and a subset of other requirements (such as line length) on altered code prior to a `git commit`. +Users may install pre-commit by running `./util/pre-commit-install.sh`. +Passing these checks is a requirement to submit code to gem5 so installation is strongly advised. +- A multiprocessing module has been added. +This allows for multiple simulations to be run from a single gem5 execution via a single gem5 configuration script. +Example of usage found [in this commit message](https://gem5-review.googlesource.com/c/public/gem5/+/63432). +**Note: This feature is still in development. +While functional, it'll be subject to subtantial changes in future releases of gem5**. +- The stdlib's `ArmBoard` now supports Ruby caches. +- Due to numerious fixes and improvements, Ubuntu 22.04 can be booted as a gem5 workload, both in FS and SE mode. +- Substantial improvements have been made to gem5's GDB capabilities. +- The `HBM2Stack` has been added to the gem5 stdlib as a memory component. +- The `MinorCPU` has been fully incorporated into the gem5 Standard Library. +- We now allow for full-system simulation of GPU applications. +The introduction of GPU FS mode allows for the same use-cases as SE mode but reduces the requirement of specific host environments or usage of a Docker container. +The GPU FS mode also has improved simulated speed by functionally simulating memory copies, and provides an easier update path for gem5 developers. +An X86 host and KVM are required to run GPU FS mode. + +## API (user facing) changes + +- The default CPU Vendor String has been updated to `HygonGenuine`. 
+This is due to newer versions of GLIBC being more strict about checking current system's supported features. +The previous value, `M5 Simulator`, is not recognized as a valid vendor string and therefore GLIBC returns an error. +- [The stdlib's `_connect_things` funciton call has been moved from the `AbstractBoard`'s constructor to be run as board pre-instantiation process](https://gem5-review.googlesource.com/c/public/gem5/+/65051). +This is to overcome instances where stdlib components (memory, processor, and cache hierarhcy) require Board information known only after its construction. +**This change breaks cases where a user utilizes the stdlib `AbstractBoard` but does not use the stdlib `Simulator` module. This can be fixed by adding the `_pre_instantiate` function before `m5.instantiate`**. +An exception has been added which explains this fix, if this error occurs. +- The setting of checkpoints has been moved from the stdlib's "set_workload" functions to the `Simulator` module. +Setting of checkpoints via the stdlib's "set_workload" functions is now deprecated and will be removed in future releases of gem5. +- The gem5 namespace `Trace` has been renamed `trace` to conform to the gem5 style guide. +- Due to the allowing of multiple ISAs per gem5 build, the `TARGET_ISA` variable has been replaced with `USE_$(ISA)` variables. +For example, if a build contains both the X86 and ARM ISAs the `USE_X86` and `USE_ARM` variables will be set. + +## Big Fixes + +- Several compounding bugs were causing bugs with floating point operations within gem5 simulations. +These have been fixed. +- Certain emulated syscalls were behaving incorrectly when using RISC-V due to incorrect `open(2)` flag values. +These values have been fixed. +- The GIVv3 List register mapping has been fixed. +- Access permissions for GICv3 cpu registers have been fixed. +- In previous releases of gem5 the `sim_quantum` value was set for all cores when using the Standard Library. 
+This caused issues when setting exit events at a particular tick as it resulted in the exit being off by `sim_quantum`. +As such, the `sim_quantum` value is only when using KVM cores. +- PCI ranges in `VExpress_GEM5_Foundation` fixed. +- The `SwitchableProcessor` processor has been fixed to allow switching to a KVM core. +Previously the `SwitchableProcessor` only allowed a user to switch from a KVM core to a non-KVM core. +- The Standard Library has been fixed to permit multicore simulations in SE mode. +- [A bug was fixed in the rcr X86 instruction](https://gem5.atlassian.net/browse/GEM5-1265). + +## Build related changes + +- gem5 can now be compiled with Scons 4 build system. +- gem5 can now be compiled with Clang version 14 (minimum Clang version 6). +- gem5 can now be compiled with GCC Version 12 (minimum GCC version 7). + + +## Other minor updates + +- The gem5 stdlib examples in "configs/example/gem5_library" have been updated to, where appropriate, use the stdlib's Simulator module. +These example configurations can be used for reference as to how `Simulator` module may be utilized in gem5. +- Granulated SGPR computation has been added for gfx9 gpu-compute. +- The stdlib statistics have been improved: + - A `get_simstats` function has been added to access statistics from the `Simulator` module. + - Statistics can be printed: `print(simstats.board.core.some_integer)`. +- GDB ports are now specified for each workload, as opposed to per-simulation run. +- The `m5` utility has been expanded to include "workbegin" and "workend" annotations. +This can be added with `m5 workbegin` and `m5 workend`. +- A `PrivateL1SharedL2CacheHierarchy` has been added to the Standard Library. +- A `GEM5_USE_PROXY` environment variable has been added. +This allows users to specify a socks5 proxy server to use when obtaining gem5 resources and the resources.json file. +It uses the format `:`. +- The fastmodel support has been improved to function with Linux Kernel 5.x. 
+- The `set_se_binary_workload` function now allows for the passing of input parameters to a binary workload. +- A functional CHI cache hierarchy has been added to the gem5 Standard Library: "src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py". +- The RISC-V K extension has been added. +It includes the following instructions: + - Zbkx: xperm8, xperm4 + - Zknd: aes64ds, aes64dsm, aes64im, aes64ks1i, aes64ks2 + - Zkne: aes64es, aes64esm, aes64ks1i, aes64ks2 + - Zknh: sha256sig0, sha256sig1, sha256sum0, sha256sum1, sha512sig0, sha512sig1, sha512sum0, sha512sum1 + - Zksed: sm4ed, sm4ks + - Zksh: sm3p0, sm3p1 + # Version 22.0.0.2 **[HOTFIX]** This hotfix contains a set of critical fixes to be applied to gem5 v22.0. diff --git a/SConstruct b/SConstruct index f1f1c64f07..e8107ea2c7 100755 --- a/SConstruct +++ b/SConstruct @@ -319,7 +319,10 @@ def config_embedded_python(env): if conf.TryAction(f'@{python_config} --embed')[0]: cmd.append('--embed') - def flag_filter(env, cmd_output): + def flag_filter(env, cmd_output, unique=True): + # Since this function does not use the `unique` param, one should not + # pass any value to this param. + assert(unique==True) flags = cmd_output.split() prefixes = ('-l', '-L', '-I') is_useful = lambda x: any(x.startswith(prefix) for prefix in prefixes) @@ -417,7 +420,6 @@ for variant_path in variant_paths: conf.CheckLinkFlag('-Wl,--threads') conf.CheckLinkFlag( '-Wl,--thread-count=%d' % GetOption('num_jobs')) - else: error('\n'.join(( "Don't know what compiler options to use for your compiler.", diff --git a/TESTING.md b/TESTING.md index 88d1f29571..2273e31ea7 100644 --- a/TESTING.md +++ b/TESTING.md @@ -15,7 +15,7 @@ be built through SCons. To build and run all the unit tests: ```shell -scons build/NULL/unittests.opt +scons build/ALL/unittests.opt ``` All unit tests should be run prior to posting a patch to @@ -25,20 +25,20 @@ To compile and run just one set of tests (e.g. 
those declared within `src/base/bitunion.test.cc`): ```shell -scons build/NULL/base/bitunion.test.opt -./build/NULL/base/bitunion.test.opt +scons build/ALL/base/bitunion.test.opt +./build/ALL/base/bitunion.test.opt ``` To list the available test functions from a test file: ```shell -./build/NULL/base/bitunion.test.opt --gtest_list_tests +./build/ALL/base/bitunion.test.opt --gtest_list_tests ``` To run a specific test function (e.g., BitUnionData.NormalBitfield): ```shell -./build/NULL/base/bitunion.test.opt --gtest_filter=BitUnionData.NormalBitfield +./build/ALL/base/bitunion.test.opt --gtest_filter=BitUnionData.NormalBitfield ``` # Running system-level tests @@ -246,10 +246,9 @@ maintainer (see MAINTAINERS).* ## Running Tests in Parallel Whimsy has support for parallel testing baked in. This system supports -running multiple suites at the same time on the same computer. To run +running multiple suites at the same time on the same computer. To run suites in parallel, supply the `-t ` flag to the run command. For example, to run up to three test suites at the same time:: ./main.py run --skip-build -t 3 - diff --git a/build_opts/ALL b/build_opts/ALL new file mode 100644 index 0000000000..6e5ede2d50 --- /dev/null +++ b/build_opts/ALL @@ -0,0 +1,7 @@ +USE_ARM_ISA = True +USE_MIPS_ISA = True +USE_POWER_ISA = True +USE_RISCV_ISA = True +USE_SPARC_ISA = True +USE_X86_ISA = True +PROTOCOL = 'MESI_Two_Level' diff --git a/build_opts/ARM b/build_opts/ARM index 5b7da10e22..8c30c21e5a 100644 --- a/build_opts/ARM +++ b/build_opts/ARM @@ -1,2 +1,2 @@ -TARGET_ISA = 'arm' +USE_ARM_ISA = True PROTOCOL = 'CHI' diff --git a/build_opts/ARM_MESI_Three_Level b/build_opts/ARM_MESI_Three_Level index 2ca31b64cc..3057bec0c4 100644 --- a/build_opts/ARM_MESI_Three_Level +++ b/build_opts/ARM_MESI_Three_Level @@ -1,5 +1,5 @@ # Copyright (c) 2019 ARM Limited # All rights reserved. 
-TARGET_ISA = 'arm' +USE_ARM_ISA = True PROTOCOL = 'MESI_Three_Level' diff --git a/build_opts/ARM_MESI_Three_Level_HTM b/build_opts/ARM_MESI_Three_Level_HTM index 703398d786..7f80c4eee2 100644 --- a/build_opts/ARM_MESI_Three_Level_HTM +++ b/build_opts/ARM_MESI_Three_Level_HTM @@ -1,5 +1,5 @@ # Copyright (c) 2019 ARM Limited # All rights reserved. -TARGET_ISA = 'arm' +USE_ARM_ISA = True PROTOCOL = 'MESI_Three_Level_HTM' diff --git a/build_opts/ARM_MOESI_hammer b/build_opts/ARM_MOESI_hammer index bd5c63f0d2..5322fd96f2 100644 --- a/build_opts/ARM_MOESI_hammer +++ b/build_opts/ARM_MOESI_hammer @@ -1,5 +1,5 @@ # Copyright (c) 2019 ARM Limited # All rights reserved. -TARGET_ISA = 'arm' +USE_ARM_ISA = True PROTOCOL = 'MOESI_hammer' diff --git a/build_opts/GCN3_X86 b/build_opts/GCN3_X86 index b39690812e..aca2f62878 100644 --- a/build_opts/GCN3_X86 +++ b/build_opts/GCN3_X86 @@ -1,4 +1,4 @@ PROTOCOL = 'GPU_VIPER' -TARGET_ISA = 'x86' +USE_X86_ISA = True TARGET_GPU_ISA = 'gcn3' BUILD_GPU = True diff --git a/build_opts/Garnet_standalone b/build_opts/Garnet_standalone index fd730c3f48..2351c5221d 100644 --- a/build_opts/Garnet_standalone +++ b/build_opts/Garnet_standalone @@ -1,2 +1,2 @@ -TARGET_ISA = 'null' +USE_NULL_ISA = True PROTOCOL = 'Garnet_standalone' diff --git a/build_opts/MIPS b/build_opts/MIPS index 26cb23c393..382e10163a 100644 --- a/build_opts/MIPS +++ b/build_opts/MIPS @@ -1,2 +1,2 @@ -TARGET_ISA = 'mips' +USE_MIPS_ISA = True PROTOCOL = 'MI_example' diff --git a/build_opts/NULL b/build_opts/NULL index b749729fbe..51e287a080 100644 --- a/build_opts/NULL +++ b/build_opts/NULL @@ -1,2 +1,2 @@ -TARGET_ISA = 'null' +USE_NULL_ISA = True PROTOCOL='MI_example' diff --git a/build_opts/NULL_MESI_Two_Level b/build_opts/NULL_MESI_Two_Level index 09147b2250..bafb199592 100644 --- a/build_opts/NULL_MESI_Two_Level +++ b/build_opts/NULL_MESI_Two_Level @@ -1,2 +1,2 @@ -TARGET_ISA = 'null' +USE_NULL_ISA = True PROTOCOL = 'MESI_Two_Level' diff --git 
a/build_opts/NULL_MOESI_CMP_directory b/build_opts/NULL_MOESI_CMP_directory index 466a268c9d..3346964a6b 100644 --- a/build_opts/NULL_MOESI_CMP_directory +++ b/build_opts/NULL_MOESI_CMP_directory @@ -1,2 +1,2 @@ -TARGET_ISA = 'null' +USE_NULL_ISA = True PROTOCOL='MOESI_CMP_directory' diff --git a/build_opts/NULL_MOESI_CMP_token b/build_opts/NULL_MOESI_CMP_token index 0cd030503d..4ea9e70536 100644 --- a/build_opts/NULL_MOESI_CMP_token +++ b/build_opts/NULL_MOESI_CMP_token @@ -1,2 +1,2 @@ -TARGET_ISA = 'null' +USE_NULL_ISA = True PROTOCOL='MOESI_CMP_token' diff --git a/build_opts/NULL_MOESI_hammer b/build_opts/NULL_MOESI_hammer index 39ebcae641..e91b78dddb 100644 --- a/build_opts/NULL_MOESI_hammer +++ b/build_opts/NULL_MOESI_hammer @@ -1,2 +1,2 @@ -TARGET_ISA = 'null' +USE_NULL_ISA = True PROTOCOL='MOESI_hammer' diff --git a/build_opts/POWER b/build_opts/POWER index 35772a4795..207356c0be 100644 --- a/build_opts/POWER +++ b/build_opts/POWER @@ -1,2 +1,2 @@ -TARGET_ISA = 'power' +USE_POWER_ISA = True PROTOCOL = 'MI_example' diff --git a/build_opts/RISCV b/build_opts/RISCV index 0bd069d489..22097b0b3e 100644 --- a/build_opts/RISCV +++ b/build_opts/RISCV @@ -1,2 +1,2 @@ -TARGET_ISA = 'riscv' +USE_RISCV_ISA = True PROTOCOL = 'MI_example' diff --git a/build_opts/SPARC b/build_opts/SPARC index 98acfe2406..22dec5f867 100644 --- a/build_opts/SPARC +++ b/build_opts/SPARC @@ -1,2 +1,2 @@ -TARGET_ISA = 'sparc' +USE_SPARC_ISA = True PROTOCOL = 'MI_example' diff --git a/build_opts/VEGA_X86 b/build_opts/VEGA_X86 index 11e8232129..437b048ce7 100644 --- a/build_opts/VEGA_X86 +++ b/build_opts/VEGA_X86 @@ -1,4 +1,4 @@ PROTOCOL = 'GPU_VIPER' -TARGET_ISA = 'x86' +USE_X86_ISA = True TARGET_GPU_ISA = 'vega' BUILD_GPU = True diff --git a/build_opts/X86 b/build_opts/X86 index 72b200acaa..259325b92e 100644 --- a/build_opts/X86 +++ b/build_opts/X86 @@ -1,3 +1,3 @@ -TARGET_ISA = 'x86' +USE_X86_ISA = True PROTOCOL = 'MESI_Two_Level' NUMBER_BITS_PER_SET = '128' diff --git 
a/build_opts/X86_MESI_Two_Level b/build_opts/X86_MESI_Two_Level index 72b200acaa..259325b92e 100644 --- a/build_opts/X86_MESI_Two_Level +++ b/build_opts/X86_MESI_Two_Level @@ -1,3 +1,3 @@ -TARGET_ISA = 'x86' +USE_X86_ISA = True PROTOCOL = 'MESI_Two_Level' NUMBER_BITS_PER_SET = '128' diff --git a/build_opts/X86_MI_example b/build_opts/X86_MI_example index 483cf0486a..71bc9a5f3a 100644 --- a/build_opts/X86_MI_example +++ b/build_opts/X86_MI_example @@ -1,2 +1,2 @@ -TARGET_ISA = 'x86' +USE_X86_ISA = True PROTOCOL = 'MI_example' diff --git a/build_opts/X86_MOESI_AMD_Base b/build_opts/X86_MOESI_AMD_Base index 261bedb925..f8f2ce7c8d 100644 --- a/build_opts/X86_MOESI_AMD_Base +++ b/build_opts/X86_MOESI_AMD_Base @@ -1,2 +1,2 @@ PROTOCOL = 'MOESI_AMD_Base' -TARGET_ISA = 'x86' +USE_X86_ISA = True diff --git a/build_tools/blob.py b/build_tools/blob.py index 3d93c45cc8..b3d2d0f0e6 100644 --- a/build_tools/blob.py +++ b/build_tools/blob.py @@ -26,16 +26,17 @@ import array import functools + def bytesToCppArray(code, symbol, data): - ''' + """ Output an array of bytes to a code formatter as a c++ array declaration. - ''' - code('const std::uint8_t ${symbol}[] = {') + """ + code("const std::uint8_t ${symbol}[] = {") code.indent() step = 16 for i in range(0, len(data), step): - x = array.array('B', data[i:i+step]) - strs = map(lambda i: f'{i},', x) + x = array.array("B", data[i : i + step]) + strs = map(lambda i: f"{i},", x) code(functools.reduce(lambda x, y: x + y, strs)) code.dedent() - code('};') + code("};") diff --git a/build_tools/code_formatter.py b/build_tools/code_formatter.py index 374e8cc37e..a2651c9dd0 100644 --- a/build_tools/code_formatter.py +++ b/build_tools/code_formatter.py @@ -1,3 +1,15 @@ +# Copyright (c) 2022 Arm Limited +# All rights reserved. 
+# +# The license below extends only to copyright in the software and shall +# not be construed as granting a license to any other intellectual +# property including but not limited to intellectual property relating +# to a hardware implementation of the functionality of the software +# licensed hereunder. You may use the software subject to the license +# terms below provided that you ensure that this notice is replicated +# unmodified and in its entirety in all distributions of the software, +# modified or unmodified, in source code or in binary form. +# # Copyright (c) 2006-2009 Nathan Binkert # All rights reserved. # @@ -33,6 +45,7 @@ import inspect import os import re + class lookup(object): def __init__(self, formatter, frame, *args, **kwargs): self.frame = frame @@ -52,10 +65,10 @@ class lookup(object): if item in self.kwargs: return self.kwargs[item] - if item == '__file__': + if item == "__file__": return self.frame.f_code.co_filename - if item == '__line__': + if item == "__line__": return self.frame.f_lineno if self.formatter.locals and item in self.frame.f_locals: @@ -77,6 +90,7 @@ class lookup(object): pass raise IndexError("Could not find '%s'" % item) + class code_formatter_meta(type): pattern = r""" (?: @@ -90,44 +104,48 @@ class code_formatter_meta(type): %(delim)s(?P) # ill-formed delimiter exprs ) """ + def __init__(cls, name, bases, dct): super(code_formatter_meta, cls).__init__(name, bases, dct) - if 'pattern' in dct: + if "pattern" in dct: pat = cls.pattern else: # tuple expansion to ensure strings are proper length - lb,rb = cls.braced - lb1,lb2,rb2,rb1 = cls.double_braced + lb, rb = cls.braced + lb1, lb2, rb2, rb1 = cls.double_braced pat = code_formatter_meta.pattern % { - 'delim' : re.escape(cls.delim), - 'ident' : cls.ident, - 'pos' : cls.pos, - 'lb' : re.escape(lb), - 'rb' : re.escape(rb), - 'ldb' : re.escape(lb1+lb2), - 'rdb' : re.escape(rb2+rb1), - } + "delim": re.escape(cls.delim), + "ident": cls.ident, + "pos": cls.pos, + "lb": 
re.escape(lb), + "rb": re.escape(rb), + "ldb": re.escape(lb1 + lb2), + "rdb": re.escape(rb2 + rb1), + } cls.pattern = re.compile(pat, re.VERBOSE | re.DOTALL | re.MULTILINE) + class code_formatter(object, metaclass=code_formatter_meta): - delim = r'$' - ident = r'[_A-z]\w*' - pos = r'[0-9]+' - braced = r'{}' - double_braced = r'{{}}' + delim = r"$" + ident = r"[_A-z]\w*" + pos = r"[0-9]+" + braced = r"{}" + double_braced = r"{{}}" globals = True locals = True fix_newlines = True + def __init__(self, *args, **kwargs): self._data = [] self._dict = {} self._indent_level = 0 self._indent_spaces = 4 - self.globals = kwargs.pop('globals', type(self).globals) - self.locals = kwargs.pop('locals', type(self).locals) - self._fix_newlines = \ - kwargs.pop('fix_newlines', type(self).fix_newlines) + self.globals = kwargs.pop("globals", type(self).globals) + self.locals = kwargs.pop("locals", type(self).locals) + self._fix_newlines = kwargs.pop( + "fix_newlines", type(self).fix_newlines + ) if args: self.__call__(args) @@ -159,38 +177,44 @@ class code_formatter(object, metaclass=code_formatter_meta): # Add a comment to inform which file generated the generated file # to make it easier to backtrack and modify generated code frame = inspect.currentframe().f_back - if re.match('\.(cc|hh|c|h)', extension) is not None: - f.write(f'''/** + if re.match(r"^\.(cc|hh|c|h)$", extension) is not None: + f.write( + f"""/** * DO NOT EDIT THIS FILE! * File automatically generated by * {frame.f_code.co_filename}:{frame.f_lineno} */ -''') - elif re.match('\.py', extension) is not None: - f.write(f'''# +""" + ) + elif re.match(r"^\.py$", extension) is not None: + f.write( + f"""# # DO NOT EDIT THIS FILE! 
# File automatically generated by # {frame.f_code.co_filename}:{frame.f_lineno} # -''') - elif re.match('\.html', extension) is not None: - f.write(f''' -''') +""" + ) for data in self._data: f.write(data) f.close() def __str__(self): - data = ''.join(self._data) - self._data = [ data ] + data = "".join(self._data) + self._data = [data] return data def __getitem__(self, item): @@ -219,21 +243,21 @@ class code_formatter(object, metaclass=code_formatter_meta): self._data.append(data) return - initial_newline = not self._data or self._data[-1] == '\n' + initial_newline = not self._data or self._data[-1] == "\n" for line in data.splitlines(): if line: if self._indent_level: - self._data.append(' ' * self._indent_level) + self._data.append(" " * self._indent_level) self._data.append(line) if line or not initial_newline: - self._data.append('\n') + self._data.append("\n") initial_newline = False def __call__(self, *args, **kwargs): if not args: - self._data.append('\n') + self._data.append("\n") return format = args[0] @@ -242,51 +266,56 @@ class code_formatter(object, metaclass=code_formatter_meta): frame = inspect.currentframe().f_back l = lookup(self, frame, *args, **kwargs) + def convert(match): - ident = match.group('lone') + ident = match.group("lone") # check for a lone identifier if ident: - indent = match.group('indent') # must be spaces - lone = '%s' % (l[ident], ) + indent = match.group("indent") # must be spaces + lone = "%s" % (l[ident],) def indent_lines(gen): for line in gen: yield indent yield line - return ''.join(indent_lines(lone.splitlines(True))) + + return "".join(indent_lines(lone.splitlines(True))) # check for an identifier, braced or not - ident = match.group('ident') or match.group('b_ident') + ident = match.group("ident") or match.group("b_ident") if ident is not None: - return '%s' % (l[ident], ) + return "%s" % (l[ident],) # check for a positional parameter, braced or not - pos = match.group('pos') or match.group('b_pos') + pos = 
match.group("pos") or match.group("b_pos") if pos is not None: pos = int(pos) if pos > len(args): - raise ValueError \ - ('Positional parameter #%d not found in pattern' % pos, - code_formatter.pattern) - return '%s' % (args[int(pos)], ) + raise ValueError( + "Positional parameter #%d not found in pattern" % pos, + code_formatter.pattern, + ) + return "%s" % (args[int(pos)],) # check for a double braced expression - eval_expr = match.group('eval') + eval_expr = match.group("eval") if eval_expr is not None: result = eval(eval_expr, {}, l) - return '%s' % (result, ) + return "%s" % (result,) # check for an escaped delimiter - if match.group('escaped') is not None: - return '$' + if match.group("escaped") is not None: + return "$" # At this point, we have to match invalid - if match.group('invalid') is None: + if match.group("invalid") is None: # didn't match invalid! - raise ValueError('Unrecognized named group in pattern', - code_formatter.pattern) + raise ValueError( + "Unrecognized named group in pattern", + code_formatter.pattern, + ) - i = match.start('invalid') + i = match.start("invalid") if i == 0: colno = 1 lineno = 1 @@ -295,52 +324,64 @@ class code_formatter(object, metaclass=code_formatter_meta): colno = i - sum(len(z) for z in lines) lineno = len(lines) - raise ValueError('Invalid format string: line %d, col %d' % - (lineno, colno)) + raise ValueError( + "Invalid format string: line %d, col %d" % (lineno, colno) + ) d = code_formatter.pattern.sub(convert, format) self._append(d) -__all__ = [ "code_formatter" ] -if __name__ == '__main__': +__all__ = ["code_formatter"] + +if __name__ == "__main__": from .code_formatter import code_formatter + f = code_formatter() class Foo(dict): def __init__(self, **kwargs): self.update(kwargs) + def __getattr__(self, attr): return self[attr] x = "this is a test" - l = [ [Foo(x=[Foo(y=9)])] ] + l = [[Foo(x=[Foo(y=9)])]] y = code_formatter() - y(''' + y( + """ { this_is_a_test(); } -''') - f(' $y') - 
f('''$__file__:$__line__ -{''') +""" + ) + f(" $y") + f( + """$__file__:$__line__ +{""" + ) f("${{', '.join(str(x) for x in range(4))}}") - f('${x}') - f('$x') + f("${x}") + f("$x") f.indent() for i in range(5): - f('$x') - f('$i') - f('$0', "zero") - f('$1 $0', "zero", "one") - f('${0}', "he went") - f('${0}asdf', "he went") + f("$x") + f("$i") + f("$0", "zero") + f("$1 $0", "zero", "one") + f("${0}", "he went") + f("${0}asdf", "he went") f.dedent() - f(''' + f( + """ ${{l[0][0]["x"][0].y}} } -''', 1, 9) +""", + 1, + 9, + ) - print(f, end=' ') + print(f, end=" ") diff --git a/build_tools/cxx_config_cc.py b/build_tools/cxx_config_cc.py index c4a2d8957f..a908aa8c17 100644 --- a/build_tools/cxx_config_cc.py +++ b/build_tools/cxx_config_cc.py @@ -46,8 +46,8 @@ import importer from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('modpath', help='module the simobject belongs to') -parser.add_argument('cxx_config_cc', help='cxx config cc file to generate') +parser.add_argument("modpath", help="module the simobject belongs to") +parser.add_argument("cxx_config_cc", help="cxx config cc file to generate") args = parser.parse_args() @@ -63,22 +63,25 @@ import m5.params code = code_formatter() -entry_class = 'CxxConfigDirectoryEntry_%s' % sim_object_name -param_class = '%sCxxConfigParams' % sim_object_name +entry_class = "CxxConfigDirectoryEntry_%s" % sim_object_name +param_class = "%sCxxConfigParams" % sim_object_name + def cxx_bool(b): - return 'true' if b else 'false' + return "true" if b else "false" + code('#include "params/%s.hh"' % sim_object_name) for param in sim_object._params.values(): if isSimObjectClass(param.ptype): - code('#include "%s"' % param.ptype._value_dict['cxx_header']) + code('#include "%s"' % param.ptype._value_dict["cxx_header"]) code('#include "params/%s.hh"' % param.ptype.__name__) else: param.ptype.cxx_ini_predecls(code) -code('''#include "${{sim_object._value_dict['cxx_header']}}" +code( + """#include 
"${{sim_object._value_dict['cxx_header']}}" #include "base/str.hh" #include "cxx_config/${sim_object_name}.hh" @@ -87,34 +90,39 @@ namespace gem5 ${param_class}::DirectoryEntry::DirectoryEntry() { -''') +""" +) code.indent() for param in sim_object._params.values(): is_vector = isinstance(param, m5.params.VectorParamDesc) is_simobj = issubclass(param.ptype, m5.SimObject.SimObject) - code('parameters["%s"] = new ParamDesc("%s", %s, %s);' % - (param.name, param.name, cxx_bool(is_vector), - cxx_bool(is_simobj))); + code( + 'parameters["%s"] = new ParamDesc("%s", %s, %s);' + % (param.name, param.name, cxx_bool(is_vector), cxx_bool(is_simobj)) + ) for port in sim_object._ports.values(): is_vector = isinstance(port, m5.params.VectorPort) - is_requestor = port.role == 'GEM5 REQUESTOR' + is_requestor = port.role == "GEM5 REQUESTOR" - code('ports["%s"] = new PortDesc("%s", %s, %s);' % - (port.name, port.name, cxx_bool(is_vector), - cxx_bool(is_requestor))) + code( + 'ports["%s"] = new PortDesc("%s", %s, %s);' + % (port.name, port.name, cxx_bool(is_vector), cxx_bool(is_requestor)) + ) code.dedent() -code('''} +code( + """} bool ${param_class}::setSimObject(const std::string &name, SimObject *simObject) { bool ret = true; if (false) { -''') +""" +) code.indent() for param in sim_object._params.values(): @@ -124,14 +132,17 @@ for param in sim_object._params.values(): if is_simobj and not is_vector: code('} else if (name == "${{param.name}}") {') code.indent() - code('this->${{param.name}} = ' - 'dynamic_cast<${{param.ptype.cxx_type}}>(simObject);') - code('if (simObject && !this->${{param.name}})') - code(' ret = false;') + code( + "this->${{param.name}} = " + "dynamic_cast<${{param.ptype.cxx_type}}>(simObject);" + ) + code("if (simObject && !this->${{param.name}})") + code(" ret = false;") code.dedent() code.dedent() -code(''' +code( + """ } else { ret = false; } @@ -146,7 +157,8 @@ ${param_class}::setSimObjectVector(const std::string &name, bool ret = true; if (false) { 
-''') +""" +) code.indent() for param in sim_object._params.values(): @@ -156,23 +168,28 @@ for param in sim_object._params.values(): if is_simobj and is_vector: code('} else if (name == "${{param.name}}") {') code.indent() - code('this->${{param.name}}.clear();') - code('for (auto i = simObjects.begin(); ' - 'ret && i != simObjects.end(); i ++)') - code('{') + code("this->${{param.name}}.clear();") + code( + "for (auto i = simObjects.begin(); " + "ret && i != simObjects.end(); i ++)" + ) + code("{") code.indent() - code('${{param.ptype.cxx_type}} object = ' - 'dynamic_cast<${{param.ptype.cxx_type}}>(*i);') - code('if (*i && !object)') - code(' ret = false;') - code('else') - code(' this->${{param.name}}.push_back(object);') + code( + "${{param.ptype.cxx_type}} object = " + "dynamic_cast<${{param.ptype.cxx_type}}>(*i);" + ) + code("if (*i && !object)") + code(" ret = false;") + code("else") + code(" this->${{param.name}}.push_back(object);") code.dedent() - code('}') + code("}") code.dedent() code.dedent() -code(''' +code( + """ } else { ret = false; } @@ -193,7 +210,8 @@ ${param_class}::setParam(const std::string &name, bool ret = true; if (false) { -''') +""" +) code.indent() for param in sim_object._params.values(): @@ -203,12 +221,14 @@ for param in sim_object._params.values(): if not is_simobj and not is_vector: code('} else if (name == "${{param.name}}") {') code.indent() - param.ptype.cxx_ini_parse(code, - 'value', 'this->%s' % param.name, 'ret =') + param.ptype.cxx_ini_parse( + code, "value", "this->%s" % param.name, "ret =" + ) code.dedent() code.dedent() -code(''' +code( + """ } else { ret = false; } @@ -223,7 +243,8 @@ ${param_class}::setParamVector(const std::string &name, bool ret = true; if (false) { -''') +""" +) code.indent() for param in sim_object._params.values(): @@ -233,22 +254,23 @@ for param in sim_object._params.values(): if not is_simobj and is_vector: code('} else if (name == "${{param.name}}") {') code.indent() - 
code('${{param.name}}.clear();') - code('for (auto i = values.begin(); ' - 'ret && i != values.end(); i ++)') - code('{') + code("${{param.name}}.clear();") + code( + "for (auto i = values.begin(); " "ret && i != values.end(); i ++)" + ) + code("{") code.indent() - code('${{param.ptype.cxx_type}} elem;') - param.ptype.cxx_ini_parse(code, - '*i', 'elem', 'ret =') - code('if (ret)') - code(' this->${{param.name}}.push_back(elem);') + code("${{param.ptype.cxx_type}} elem;") + param.ptype.cxx_ini_parse(code, "*i", "elem", "ret =") + code("if (ret)") + code(" this->${{param.name}}.push_back(elem);") code.dedent() - code('}') + code("}") code.dedent() code.dedent() -code(''' +code( + """ } else { ret = false; } @@ -263,15 +285,17 @@ ${param_class}::setPortConnectionCount(const std::string &name, bool ret = true; if (false) { -''') +""" +) code.indent() for port in sim_object._ports.values(): code('} else if (name == "${{port.name}}") {') - code(' this->port_${{port.name}}_connection_count = count;') + code(" this->port_${{port.name}}_connection_count = count;") code.dedent() -code(''' +code( + """ } else { ret = false; } @@ -282,18 +306,21 @@ code(''' SimObject * ${param_class}::simObjectCreate() { -''') +""" +) code.indent() -if hasattr(sim_object, 'abstract') and sim_object.abstract: - code('return nullptr;') +if hasattr(sim_object, "abstract") and sim_object.abstract: + code("return nullptr;") else: - code('return this->create();') + code("return this->create();") code.dedent() -code('''} +code( + """} } // namespace gem5 -''') +""" +) code.write(args.cxx_config_cc) diff --git a/build_tools/cxx_config_hh.py b/build_tools/cxx_config_hh.py index 652c488668..55828e37b7 100644 --- a/build_tools/cxx_config_hh.py +++ b/build_tools/cxx_config_hh.py @@ -46,8 +46,8 @@ import importer from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('modpath', help='module the simobject belongs to') -parser.add_argument('cxx_config_hh', help='cxx 
config header file to generate') +parser.add_argument("modpath", help="module the simobject belongs to") +parser.add_argument("cxx_config_hh", help="cxx config header file to generate") args = parser.parse_args() @@ -60,10 +60,11 @@ sim_object = getattr(module, sim_object_name) code = code_formatter() -entry_class = 'CxxConfigDirectoryEntry_%s' % sim_object_name -param_class = '%sCxxConfigParams' % sim_object_name +entry_class = "CxxConfigDirectoryEntry_%s" % sim_object_name +param_class = "%sCxxConfigParams" % sim_object_name -code('''#include "params/${sim_object_name}.hh" +code( + """#include "params/${sim_object_name}.hh" #include "sim/cxx_config.hh" @@ -110,6 +111,7 @@ class ${param_class} : public CxxConfigParams, public ${sim_object_name}Params }; } // namespace gem5 -''') +""" +) code.write(args.cxx_config_hh) diff --git a/build_tools/debugflaghh.py b/build_tools/debugflaghh.py index fc86cb0dc5..2e861e2790 100644 --- a/build_tools/debugflaghh.py +++ b/build_tools/debugflaghh.py @@ -44,35 +44,41 @@ parser = argparse.ArgumentParser() parser.add_argument("hh", help="the path of the debug flag header file") parser.add_argument("name", help="the name of the debug flag") parser.add_argument("desc", help="a description of the debug flag") -parser.add_argument("fmt", - help="whether the flag is a format flag (True or False)") -parser.add_argument("components", - help="components of a compound flag, if applicable, joined with :") +parser.add_argument( + "fmt", help="whether the flag is a format flag (True or False)" +) +parser.add_argument( + "components", + help="components of a compound flag, if applicable, joined with :", +) args = parser.parse_args() fmt = args.fmt.lower() -if fmt == 'true': +if fmt == "true": fmt = True -elif fmt == 'false': +elif fmt == "false": fmt = False else: print(f'Unrecognized "FMT" value {fmt}', file=sys.stderr) sys.exit(1) -components = args.components.split(':') if args.components else [] +components = args.components.split(":") if 
args.components else [] code = code_formatter() -code(''' +code( + """ #ifndef __DEBUG_${{args.name}}_HH__ #define __DEBUG_${{args.name}}_HH__ #include "base/compiler.hh" // For namespace deprecation #include "base/debug.hh" -''') +""" +) for flag in components: code('#include "debug/${flag}.hh"') -code(''' +code( + """ namespace gem5 { @@ -82,14 +88,16 @@ namespace debug namespace unions { -''') +""" +) # Use unions to prevent debug flags from being destructed. It's the # responsibility of the programmer to handle object destruction for members # of the union. We purposefully leave that destructor empty so that we can # use debug flags even in the destructors of other objects. if components: - code(''' + code( + """ inline union ${{args.name}} { ~${{args.name}}() {} @@ -100,9 +108,11 @@ inline union ${{args.name}} } }; } ${{args.name}}; -''') +""" + ) else: - code(''' + code( + """ inline union ${{args.name}} { ~${{args.name}}() {} @@ -110,18 +120,21 @@ inline union ${{args.name}} "${{args.name}}", "${{args.desc}}", ${{"true" if fmt else "false"}} }; } ${{args.name}}; -''') +""" + ) -code(''' +code( + """ } // namespace unions -inline constexpr const auto& ${{args.name}} = +inline constexpr const auto& ${{args.name}} = ::gem5::debug::unions::${{args.name}}.${{args.name}}; } // namespace debug } // namespace gem5 #endif // __DEBUG_${{args.name}}_HH__ -''') +""" +) code.write(args.hh) diff --git a/build_tools/enum_cc.py b/build_tools/enum_cc.py index c706ffe31f..476e49d750 100644 --- a/build_tools/enum_cc.py +++ b/build_tools/enum_cc.py @@ -46,17 +46,18 @@ import importer from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('modpath', help='module the enum belongs to') -parser.add_argument('enum_cc', help='enum cc file to generate') -parser.add_argument('use_python', - help='whether python is enabled in gem5 (True or False)') +parser.add_argument("modpath", help="module the enum belongs to") 
+parser.add_argument("enum_cc", help="enum cc file to generate") +parser.add_argument( + "use_python", help="whether python is enabled in gem5 (True or False)" +) args = parser.parse_args() use_python = args.use_python.lower() -if use_python == 'true': +if use_python == "true": use_python = True -elif use_python == 'false': +elif use_python == "false": use_python = False else: print(f'Unrecognized "use_python" value {use_python}', file=sys.stderr) @@ -75,41 +76,46 @@ wrapper_name = enum.wrapper_name file_name = enum.__name__ name = enum.__name__ if enum.enum_name is None else enum.enum_name -code('''#include "base/compiler.hh" +code( + """#include "base/compiler.hh" #include "enums/$file_name.hh" namespace gem5 { -''') +""" +) if enum.wrapper_is_struct: - code('const char *${wrapper_name}::${name}Strings' - '[Num_${name}] =') + code("const char *${wrapper_name}::${name}Strings" "[Num_${name}] =") else: if enum.is_class: - code('''\ + code( + """\ const char *${name}Strings[static_cast(${name}::Num_${name})] = -''') +""" + ) else: - code('''GEM5_DEPRECATED_NAMESPACE(Enums, enums); + code( + """GEM5_DEPRECATED_NAMESPACE(Enums, enums); namespace enums -{''') +{""" + ) code.indent(1) - code('const char *${name}Strings[Num_${name}] =') + code("const char *${name}Strings[Num_${name}] =") -code('{') +code("{") code.indent(1) for val in enum.vals: code('"$val",') code.dedent(1) -code('};') +code("};") if not enum.wrapper_is_struct and not enum.is_class: code.dedent(1) - code('} // namespace enums') + code("} // namespace enums") -code('} // namespace gem5') +code("} // namespace gem5") if use_python: @@ -118,7 +124,8 @@ if use_python: enum_name = enum.__name__ if enum.enum_name is None else enum.enum_name wrapper_name = enum_name if enum.is_class else enum.wrapper_name - code('''#include "pybind11/pybind11.h" + code( + """#include "pybind11/pybind11.h" #include "pybind11/stl.h" #include @@ -133,7 +140,8 @@ module_init(py::module_ &m_internal) { py::module_ m = 
m_internal.def_submodule("enum_${name}"); -''') +""" + ) if enum.is_class: code('py::enum_<${enum_name}>(m, "enum_${name}")') else: @@ -145,16 +153,18 @@ module_init(py::module_ &m_internal) code('.value("${val}", ${wrapper_name}::${val})') code('.value("Num_${name}", ${wrapper_name}::Num_${enum_name})') if not enum.is_class: - code('.export_values()') - code(';') + code(".export_values()") + code(";") code.dedent() - code('}') + code("}") code.dedent() - code(''' + code( + """ static EmbeddedPyBind embed_enum("enum_${name}", module_init); } // namespace gem5 - ''') + """ + ) code.write(args.enum_cc) diff --git a/build_tools/enum_hh.py b/build_tools/enum_hh.py index 2c4a7bb2ce..a5b9f42cba 100644 --- a/build_tools/enum_hh.py +++ b/build_tools/enum_hh.py @@ -46,8 +46,8 @@ import importer from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('modpath', help='module the enum belongs to') -parser.add_argument('enum_hh', help='enum header file to generate') +parser.add_argument("modpath", help="module the enum belongs to") +parser.add_argument("enum_hh", help="enum header file to generate") args = parser.parse_args() @@ -64,53 +64,61 @@ code = code_formatter() # Note that we wrap the enum in a class/struct to act as a namespace, # so that the enum strings can be brief w/o worrying about collisions. 
wrapper_name = enum.wrapper_name -wrapper = 'struct' if enum.wrapper_is_struct else 'namespace' +wrapper = "struct" if enum.wrapper_is_struct else "namespace" name = enum.__name__ if enum.enum_name is None else enum.enum_name -idem_macro = '__ENUM__%s__%s__' % (wrapper_name, name) +idem_macro = "__ENUM__%s__%s__" % (wrapper_name, name) -code('''\ +code( + """\ #ifndef $idem_macro #define $idem_macro namespace gem5 { -''') +""" +) if enum.is_class: - code('''\ + code( + """\ enum class $name { -''') +""" + ) else: - code('''\ + code( + """\ $wrapper $wrapper_name { enum $name { -''') +""" + ) code.indent(1) code.indent(1) for val in enum.vals: - code('$val = ${{enum.map[val]}},') -code('Num_$name = ${{len(enum.vals)}}') + code("$val = ${{enum.map[val]}},") +code("Num_$name = ${{len(enum.vals)}}") code.dedent(1) -code('};') +code("};") if enum.is_class: - code('''\ + code( + """\ extern const char *${name}Strings[static_cast(${name}::Num_${name})]; -''') +""" + ) elif enum.wrapper_is_struct: - code('static const char *${name}Strings[Num_${name}];') + code("static const char *${name}Strings[Num_${name}];") else: - code('extern const char *${name}Strings[Num_${name}];') + code("extern const char *${name}Strings[Num_${name}];") if not enum.is_class: code.dedent(1) - code('}; // $wrapper_name') + code("}; // $wrapper_name") code() -code('} // namespace gem5') +code("} // namespace gem5") code() -code('#endif // $idem_macro') +code("#endif // $idem_macro") code.write(args.enum_hh) diff --git a/build_tools/grammar.py b/build_tools/grammar.py index 9aba746260..6ac638bcd0 100644 --- a/build_tools/grammar.py +++ b/build_tools/grammar.py @@ -29,73 +29,77 @@ import os import ply.lex import ply.yacc + class ParseError(Exception): def __init__(self, message, token=None): Exception.__init__(self, message) self.token = token + class Grammar(object): def setupLexerFactory(self, **kwargs): - if 'module' in kwargs: + if "module" in kwargs: raise AttributeError("module is an illegal 
attribute") self.lex_kwargs = kwargs def setupParserFactory(self, **kwargs): - if 'module' in kwargs: + if "module" in kwargs: raise AttributeError("module is an illegal attribute") - if 'output' in kwargs: - dir,tab = os.path.split(output) - if not tab.endswith('.py'): - raise AttributeError('The output file must end with .py') - kwargs['outputdir'] = dir - kwargs['tabmodule'] = tab[:-3] + if "output" in kwargs: + dir, tab = os.path.split(output) + if not tab.endswith(".py"): + raise AttributeError("The output file must end with .py") + kwargs["outputdir"] = dir + kwargs["tabmodule"] = tab[:-3] self.yacc_kwargs = kwargs def __getattr__(self, attr): - if attr == 'lexers': + if attr == "lexers": self.lexers = [] return self.lexers - if attr == 'lex_kwargs': + if attr == "lex_kwargs": self.setupLexerFactory() return self.lex_kwargs - if attr == 'yacc_kwargs': + if attr == "yacc_kwargs": self.setupParserFactory() return self.yacc_kwargs - if attr == 'lex': + if attr == "lex": self.lex = ply.lex.lex(module=self, **self.lex_kwargs) return self.lex - if attr == 'yacc': + if attr == "yacc": self.yacc = ply.yacc.yacc(module=self, **self.yacc_kwargs) return self.yacc - if attr == 'current_lexer': + if attr == "current_lexer": if not self.lexers: return None return self.lexers[-1][0] - if attr == 'current_source': + if attr == "current_source": if not self.lexers: - return '' + return "" return self.lexers[-1][1] - if attr == 'current_line': + if attr == "current_line": if not self.lexers: return -1 return self.current_lexer.lineno raise AttributeError( - "'%s' object has no attribute '%s'" % (type(self), attr)) + "'%s' object has no attribute '%s'" % (type(self), attr) + ) - def parse_string(self, data, source='', debug=None, tracking=0): + def parse_string(self, data, source="", debug=None, tracking=0): if not isinstance(data, str): raise AttributeError( - "argument must be a string, was '%s'" % type(f)) + "argument must be a string, was '%s'" % type(f) + ) lexer = 
self.lex.clone() lexer.input(data) @@ -114,24 +118,32 @@ class Grammar(object): def parse_file(self, f, **kwargs): if isinstance(f, str): source = f - f = open(f, 'r') + f = open(f, "r") elif isinstance(f, file): source = f.name else: raise AttributeError( - "argument must be either a string or file, was '%s'" % type(f)) + "argument must be either a string or file, was '%s'" % type(f) + ) return self.parse_string(f.read(), source, **kwargs) def p_error(self, t): if t: - msg = "Syntax error at %s:%d:%d\n>>%s<<" % \ - (self.current_source, t.lineno, t.lexpos + 1, t.value) + msg = "Syntax error at %s:%d:%d\n>>%s<<" % ( + self.current_source, + t.lineno, + t.lexpos + 1, + t.value, + ) else: - msg = "Syntax error at end of %s" % (self.current_source, ) + msg = "Syntax error at end of %s" % (self.current_source,) raise ParseError(msg, t) def t_error(self, t): - msg = "Illegal character %s @ %d:%d" % \ - (repr(t.value[0]), t.lineno, t.lexpos) + msg = "Illegal character %s @ %d:%d" % ( + repr(t.value[0]), + t.lineno, + t.lexpos, + ) raise ParseError(msg, t) diff --git a/build_tools/infopy.py b/build_tools/infopy.py index a58cf3967b..4f15f24f98 100644 --- a/build_tools/infopy.py +++ b/build_tools/infopy.py @@ -42,8 +42,8 @@ import sys from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('info_py', help='info.py file path') -parser.add_argument('files', help='file to include in info.py', nargs='*') +parser.add_argument("info_py", help="info.py file path") +parser.add_argument("files", help="file to include in info.py", nargs="*") args = parser.parse_args() @@ -52,8 +52,8 @@ code = code_formatter() for source in args.files: src = os.path.basename(source) - with open(source, 'r') as f: - data = ''.join(f) - code('${src} = ${{repr(data)}}') + with open(source, "r") as f: + data = "".join(f) + code("${src} = ${{repr(data)}}") code.write(args.info_py) diff --git a/build_tools/marshal.py b/build_tools/marshal.py index 
9c2964b477..18afe2ca52 100644 --- a/build_tools/marshal.py +++ b/build_tools/marshal.py @@ -67,16 +67,17 @@ if len(sys.argv) < 4: _, cpp, python, modpath, abspath = sys.argv -with open(python, 'r') as f: +with open(python, "r") as f: src = f.read() -compiled = compile(src, python, 'exec') +compiled = compile(src, python, "exec") marshalled = marshal.dumps(compiled) compressed = zlib.compress(marshalled) code = code_formatter() -code('''\ +code( + """\ #include "python/embedded.hh" namespace gem5 @@ -84,14 +85,16 @@ namespace gem5 namespace { -''') +""" +) -bytesToCppArray(code, 'embedded_module_data', compressed) +bytesToCppArray(code, "embedded_module_data", compressed) # The name of the EmbeddedPython object doesn't matter since it's in an # anonymous namespace, and it's constructor takes care of installing it into a # global list. -code(''' +code( + """ EmbeddedPython embedded_module_info( "${abspath}", "${modpath}", @@ -101,6 +104,7 @@ EmbeddedPython embedded_module_info( } // anonymous namespace } // namespace gem5 -''') +""" +) code.write(cpp) diff --git a/build_tools/sim_object_param_struct_cc.py b/build_tools/sim_object_param_struct_cc.py index 1b72e3cb41..0384809456 100644 --- a/build_tools/sim_object_param_struct_cc.py +++ b/build_tools/sim_object_param_struct_cc.py @@ -46,17 +46,18 @@ import importer from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('modpath', help='module the simobject belongs to') -parser.add_argument('param_cc', help='parameter cc file to generate') -parser.add_argument('use_python', - help='whether python is enabled in gem5 (True or False)') +parser.add_argument("modpath", help="module the simobject belongs to") +parser.add_argument("param_cc", help="parameter cc file to generate") +parser.add_argument( + "use_python", help="whether python is enabled in gem5 (True or False)" +) args = parser.parse_args() use_python = args.use_python.lower() -if use_python == 'true': +if use_python == 
"true": use_python = True -elif use_python == 'false': +elif use_python == "false": use_python = False else: print(f'Unrecognized "use_python" value {use_python}', file=sys.stderr) @@ -64,7 +65,7 @@ else: basename = os.path.basename(args.param_cc) no_ext = os.path.splitext(basename)[0] -sim_object_name = '_'.join(no_ext.split('_')[1:]) +sim_object_name = "_".join(no_ext.split("_")[1:]) importer.install() module = importlib.import_module(args.modpath) @@ -80,14 +81,16 @@ py_class_name = sim_object.pybind_class # the object itself, not including inherited params (which # will also be inherited from the base class's param struct # here). Sort the params based on their key -params = list(map(lambda k_v: k_v[1], - sorted(sim_object._params.local.items()))) +params = list( + map(lambda k_v: k_v[1], sorted(sim_object._params.local.items())) +) ports = sim_object._ports.local # only include pybind if python is enabled in the build if use_python: - code('''#include "pybind11/pybind11.h" + code( + """#include "pybind11/pybind11.h" #include "pybind11/stl.h" #include @@ -99,9 +102,11 @@ if use_python: #include "${{sim_object.cxx_header}}" -''') +""" + ) else: - code(''' + code( + """ #include #include "base/compiler.hh" @@ -109,13 +114,15 @@ else: #include "${{sim_object.cxx_header}}" -''') +""" + ) # only include the python params code if python is enabled. 
if use_python: for param in params: param.pybind_predecls(code) - code('''namespace py = pybind11; + code( + """namespace py = pybind11; namespace gem5 { @@ -124,39 +131,48 @@ static void module_init(py::module_ &m_internal) { py::module_ m = m_internal.def_submodule("param_${sim_object}"); -''') +""" + ) code.indent() if sim_object._base: - code('py::class_<${sim_object}Params, ' \ - '${{sim_object._base.type}}Params, ' \ - 'std::unique_ptr<${{sim_object}}Params, py::nodelete>>(' \ - 'm, "${sim_object}Params")') + code( + "py::class_<${sim_object}Params, " + "${{sim_object._base.type}}Params, " + "std::unique_ptr<${{sim_object}}Params, py::nodelete>>(" + 'm, "${sim_object}Params")' + ) else: - code('py::class_<${sim_object}Params, ' \ - 'std::unique_ptr<${sim_object}Params, py::nodelete>>(' \ - 'm, "${sim_object}Params")') + code( + "py::class_<${sim_object}Params, " + "std::unique_ptr<${sim_object}Params, py::nodelete>>(" + 'm, "${sim_object}Params")' + ) code.indent() - if not hasattr(sim_object, 'abstract') or not sim_object.abstract: - code('.def(py::init<>())') + if not hasattr(sim_object, "abstract") or not sim_object.abstract: + code(".def(py::init<>())") code('.def("create", &${sim_object}Params::create)') - param_exports = sim_object.cxx_param_exports + [ - PyBindProperty(k) - for k, v in sorted(sim_object._params.local.items()) - ] + [ - PyBindProperty(f"port_{port.name}_connection_count") - for port in ports.values() - ] + param_exports = ( + sim_object.cxx_param_exports + + [ + PyBindProperty(k) + for k, v in sorted(sim_object._params.local.items()) + ] + + [ + PyBindProperty(f"port_{port.name}_connection_count") + for port in ports.values() + ] + ) for exp in param_exports: exp.export(code, f"{sim_object}Params") - code(';') + code(";") code() code.dedent() bases = [] - if 'cxx_base' in sim_object._value_dict: + if "cxx_base" in sim_object._value_dict: # If the c++ base class implied by python inheritance was # overridden, use that value. 
if sim_object.cxx_base: @@ -170,32 +186,39 @@ py::module_ m = m_internal.def_submodule("param_${sim_object}"); if bases: base_str = ", ".join(bases) - code('py::class_<${{sim_object.cxx_class}}, ${base_str}, ' \ - 'std::unique_ptr<${{sim_object.cxx_class}}, py::nodelete>>(' \ - 'm, "${py_class_name}")') + code( + "py::class_<${{sim_object.cxx_class}}, ${base_str}, " + "std::unique_ptr<${{sim_object.cxx_class}}, py::nodelete>>(" + 'm, "${py_class_name}")' + ) else: - code('py::class_<${{sim_object.cxx_class}}, ' \ - 'std::unique_ptr<${{sim_object.cxx_class}}, py::nodelete>>(' \ - 'm, "${py_class_name}")') + code( + "py::class_<${{sim_object.cxx_class}}, " + "std::unique_ptr<${{sim_object.cxx_class}}, py::nodelete>>(" + 'm, "${py_class_name}")' + ) code.indent() for exp in sim_object.cxx_exports: exp.export(code, sim_object.cxx_class) - code(';') + code(";") code.dedent() code() code.dedent() - code('}') + code("}") code() - code('static EmbeddedPyBind ' - 'embed_obj("${0}", module_init, "${1}");', - sim_object, sim_object._base.type if sim_object._base else "") + code( + "static EmbeddedPyBind " 'embed_obj("${0}", module_init, "${1}");', + sim_object, + sim_object._base.type if sim_object._base else "", + ) code() - code('} // namespace gem5') + code("} // namespace gem5") # include the create() methods whether or not python is enabled. 
-if not hasattr(sim_object, 'abstract') or not sim_object.abstract: - if 'type' in sim_object.__dict__: - code(''' +if not hasattr(sim_object, "abstract") or not sim_object.abstract: + if "type" in sim_object.__dict__: + code( + """ namespace gem5 { @@ -268,6 +291,7 @@ Dummy${sim_object}Shunt<${{sim_object.cxx_class}}>::Params::create() const } } // namespace gem5 -''') +""" + ) code.write(args.param_cc) diff --git a/build_tools/sim_object_param_struct_hh.py b/build_tools/sim_object_param_struct_hh.py index 261ac9bf75..bf37da2a07 100644 --- a/build_tools/sim_object_param_struct_hh.py +++ b/build_tools/sim_object_param_struct_hh.py @@ -46,8 +46,8 @@ import importer from code_formatter import code_formatter parser = argparse.ArgumentParser() -parser.add_argument('modpath', help='module the simobject belongs to') -parser.add_argument('param_hh', help='parameter header file to generate') +parser.add_argument("modpath", help="module the simobject belongs to") +parser.add_argument("param_hh", help="parameter header file to generate") args = parser.parse_args() @@ -67,8 +67,9 @@ code = code_formatter() # the object itself, not including inherited params (which # will also be inherited from the base class's param struct # here). Sort the params based on their key -params = list(map(lambda k_v: k_v[1], - sorted(sim_object._params.local.items()))) +params = list( + map(lambda k_v: k_v[1], sorted(sim_object._params.local.items())) +) ports = sim_object._ports.local try: ptypes = [p.ptype for p in params] @@ -79,41 +80,44 @@ except: warned_about_nested_templates = False + class CxxClass(object): def __init__(self, sig, template_params=[]): # Split the signature into its constituent parts. This could # potentially be done with regular expressions, but # it's simple enough to pick appart a class signature # manually. - parts = sig.split('<', 1) + parts = sig.split("<", 1) base = parts[0] t_args = [] if len(parts) > 1: # The signature had template arguments. 
- text = parts[1].rstrip(' \t\n>') - arg = '' + text = parts[1].rstrip(" \t\n>") + arg = "" # Keep track of nesting to avoid splitting on ","s embedded # in the arguments themselves. depth = 0 for c in text: - if c == '<': + if c == "<": depth = depth + 1 if depth > 0 and not warned_about_nested_templates: warned_about_nested_templates = True - print('Nested template argument in cxx_class.' - ' This feature is largely untested and ' - ' may not work.') - elif c == '>': + print( + "Nested template argument in cxx_class." + " This feature is largely untested and " + " may not work." + ) + elif c == ">": depth = depth - 1 - elif c == ',' and depth == 0: + elif c == "," and depth == 0: t_args.append(arg.strip()) - arg = '' + arg = "" else: arg = arg + c if arg: t_args.append(arg.strip()) # Split the non-template part on :: boundaries. - class_path = base.split('::') + class_path = base.split("::") # The namespaces are everything except the last part of the class path. self.namespaces = class_path[:-1] @@ -125,7 +129,7 @@ class CxxClass(object): # Iterate through the template arguments and their values. This # will likely break if parameter packs are used. for arg, param in zip(t_args, template_params): - type_keys = ('class', 'typename') + type_keys = ("class", "typename") # If a parameter is a type, parse it recursively. Otherwise # assume it's a constant, and store it verbatim. if any(param.strip().startswith(kw) for kw in type_keys): @@ -140,21 +144,24 @@ class CxxClass(object): arg.declare(code) # Re-open the target namespace. for ns in self.namespaces: - code('namespace $ns {') + code("namespace $ns {") # If this is a class template... if self.template_params: code('template <${{", ".join(self.template_params)}}>') # The actual class declaration. - code('class ${{self.name}};') + code("class ${{self.name}};") # Close the target namespaces. 
for ns in reversed(self.namespaces): - code('} // namespace $ns') + code("} // namespace $ns") -code('''\ + +code( + """\ #ifndef __PARAMS__${sim_object}__ #define __PARAMS__${sim_object}__ -''') +""" +) # The base SimObject has a couple of params that get @@ -162,10 +169,12 @@ code('''\ # the normal Param mechanism; we slip them in here (needed # predecls now, actual declarations below) if sim_object == SimObject: - code('''#include ''') + code("""#include """) -cxx_class = CxxClass(sim_object._value_dict['cxx_class'], - sim_object._value_dict['cxx_template_params']) +cxx_class = CxxClass( + sim_object._value_dict["cxx_class"], + sim_object._value_dict["cxx_template_params"], +) # A forward class declaration is sufficient since we are just # declaring a pointer. @@ -186,27 +195,29 @@ for ptype in ptypes: code('#include "enums/${{ptype.__name__}}.hh"') code() -code('namespace gem5') -code('{') -code('') +code("namespace gem5") +code("{") +code("") # now generate the actual param struct code("struct ${sim_object}Params") if sim_object._base: code(" : public ${{sim_object._base.type}}Params") code("{") -if not hasattr(sim_object, 'abstract') or not sim_object.abstract: - if 'type' in sim_object.__dict__: +if not hasattr(sim_object, "abstract") or not sim_object.abstract: + if "type" in sim_object.__dict__: code(" ${{sim_object.cxx_type}} create() const;") code.indent() if sim_object == SimObject: - code(''' + code( + """ SimObjectParams() {} virtual ~SimObjectParams() {} std::string name; - ''') + """ + ) for param in params: param.cxx_decl(code) @@ -214,11 +225,11 @@ for port in ports.values(): port.cxx_decl(code) code.dedent() -code('};') +code("};") code() -code('} // namespace gem5') +code("} // namespace gem5") code() -code('#endif // __PARAMS__${sim_object}__') +code("#endif // __PARAMS__${sim_object}__") code.write(args.param_hh) diff --git a/configs/common/Benchmarks.py b/configs/common/Benchmarks.py index 591c044671..c90e78ed61 100644 --- 
a/configs/common/Benchmarks.py +++ b/configs/common/Benchmarks.py @@ -28,9 +28,11 @@ from common.SysPaths import script, disk, binary from os import environ as env from m5.defines import buildEnv + class SysConfig: - def __init__(self, script=None, mem=None, disks=None, rootdev=None, - os_type='linux'): + def __init__( + self, script=None, mem=None, disks=None, rootdev=None, os_type="linux" + ): self.scriptname = script self.disknames = disks self.memsize = mem @@ -41,13 +43,13 @@ class SysConfig: if self.scriptname: return script(self.scriptname) else: - return '' + return "" def mem(self): if self.memsize: return self.memsize else: - return '128MB' + return "128MB" def disks(self): if self.disknames: @@ -59,72 +61,117 @@ class SysConfig: if self.root: return self.root else: - return '/dev/sda1' + return "/dev/sda1" def os_type(self): return self.ostype + # Benchmarks are defined as a key in a dict which is a list of SysConfigs # The first defined machine is the test system, the others are driving systems Benchmarks = { - 'PovrayBench': [SysConfig('povray-bench.rcS', '512MB', ['povray.img'])], - 'PovrayAutumn': [SysConfig('povray-autumn.rcS', '512MB', ['povray.img'])], - - 'NetperfStream': [SysConfig('netperf-stream-client.rcS'), - SysConfig('netperf-server.rcS')], - 'NetperfStreamUdp': [SysConfig('netperf-stream-udp-client.rcS'), - SysConfig('netperf-server.rcS')], - 'NetperfUdpLocal': [SysConfig('netperf-stream-udp-local.rcS')], - 'NetperfStreamNT': [SysConfig('netperf-stream-nt-client.rcS'), - SysConfig('netperf-server.rcS')], - 'NetperfMaerts': [SysConfig('netperf-maerts-client.rcS'), - SysConfig('netperf-server.rcS')], - 'SurgeStandard': [SysConfig('surge-server.rcS', '512MB'), - SysConfig('surge-client.rcS', '256MB')], - 'SurgeSpecweb': [SysConfig('spec-surge-server.rcS', '512MB'), - SysConfig('spec-surge-client.rcS', '256MB')], - 'Nhfsstone': [SysConfig('nfs-server-nhfsstone.rcS', '512MB'), - SysConfig('nfs-client-nhfsstone.rcS')], - 'Nfs': 
[SysConfig('nfs-server.rcS', '900MB'), - SysConfig('nfs-client-dbench.rcS')], - 'NfsTcp': [SysConfig('nfs-server.rcS', '900MB'), - SysConfig('nfs-client-tcp.rcS')], - 'IScsiInitiator': [SysConfig('iscsi-client.rcS', '512MB'), - SysConfig('iscsi-server.rcS', '512MB')], - 'IScsiTarget': [SysConfig('iscsi-server.rcS', '512MB'), - SysConfig('iscsi-client.rcS', '512MB')], - 'Validation': [SysConfig('iscsi-server.rcS', '512MB'), - SysConfig('iscsi-client.rcS', '512MB')], - 'Ping': [SysConfig('ping-server.rcS',), - SysConfig('ping-client.rcS')], - - 'ValAccDelay': [SysConfig('devtime.rcS', '512MB')], - 'ValAccDelay2': [SysConfig('devtimewmr.rcS', '512MB')], - 'ValMemLat': [SysConfig('micro_memlat.rcS', '512MB')], - 'ValMemLat2MB': [SysConfig('micro_memlat2mb.rcS', '512MB')], - 'ValMemLat8MB': [SysConfig('micro_memlat8mb.rcS', '512MB')], - 'ValMemLat': [SysConfig('micro_memlat8.rcS', '512MB')], - 'ValTlbLat': [SysConfig('micro_tlblat.rcS', '512MB')], - 'ValSysLat': [SysConfig('micro_syscall.rcS', '512MB')], - 'ValCtxLat': [SysConfig('micro_ctx.rcS', '512MB')], - 'ValStream': [SysConfig('micro_stream.rcS', '512MB')], - 'ValStreamScale': [SysConfig('micro_streamscale.rcS', '512MB')], - 'ValStreamCopy': [SysConfig('micro_streamcopy.rcS', '512MB')], - - 'MutexTest': [SysConfig('mutex-test.rcS', '128MB')], - 'ArmAndroid-GB': [SysConfig('null.rcS', '256MB', - ['ARMv7a-Gingerbread-Android.SMP.mouse.nolock.clean.img'], - None, 'android-gingerbread')], - 'bbench-gb': [SysConfig('bbench-gb.rcS', '256MB', - ['ARMv7a-Gingerbread-Android.SMP.mouse.nolock.img'], - None, 'android-gingerbread')], - 'ArmAndroid-ICS': [SysConfig('null.rcS', '256MB', - ['ARMv7a-ICS-Android.SMP.nolock.clean.img'], - None, 'android-ics')], - 'bbench-ics': [SysConfig('bbench-ics.rcS', '256MB', - ['ARMv7a-ICS-Android.SMP.nolock.img'], - None, 'android-ics')] + "PovrayBench": [SysConfig("povray-bench.rcS", "512MB", ["povray.img"])], + "PovrayAutumn": [SysConfig("povray-autumn.rcS", "512MB", ["povray.img"])], + 
"NetperfStream": [ + SysConfig("netperf-stream-client.rcS"), + SysConfig("netperf-server.rcS"), + ], + "NetperfStreamUdp": [ + SysConfig("netperf-stream-udp-client.rcS"), + SysConfig("netperf-server.rcS"), + ], + "NetperfUdpLocal": [SysConfig("netperf-stream-udp-local.rcS")], + "NetperfStreamNT": [ + SysConfig("netperf-stream-nt-client.rcS"), + SysConfig("netperf-server.rcS"), + ], + "NetperfMaerts": [ + SysConfig("netperf-maerts-client.rcS"), + SysConfig("netperf-server.rcS"), + ], + "SurgeStandard": [ + SysConfig("surge-server.rcS", "512MB"), + SysConfig("surge-client.rcS", "256MB"), + ], + "SurgeSpecweb": [ + SysConfig("spec-surge-server.rcS", "512MB"), + SysConfig("spec-surge-client.rcS", "256MB"), + ], + "Nhfsstone": [ + SysConfig("nfs-server-nhfsstone.rcS", "512MB"), + SysConfig("nfs-client-nhfsstone.rcS"), + ], + "Nfs": [ + SysConfig("nfs-server.rcS", "900MB"), + SysConfig("nfs-client-dbench.rcS"), + ], + "NfsTcp": [ + SysConfig("nfs-server.rcS", "900MB"), + SysConfig("nfs-client-tcp.rcS"), + ], + "IScsiInitiator": [ + SysConfig("iscsi-client.rcS", "512MB"), + SysConfig("iscsi-server.rcS", "512MB"), + ], + "IScsiTarget": [ + SysConfig("iscsi-server.rcS", "512MB"), + SysConfig("iscsi-client.rcS", "512MB"), + ], + "Validation": [ + SysConfig("iscsi-server.rcS", "512MB"), + SysConfig("iscsi-client.rcS", "512MB"), + ], + "Ping": [SysConfig("ping-server.rcS"), SysConfig("ping-client.rcS")], + "ValAccDelay": [SysConfig("devtime.rcS", "512MB")], + "ValAccDelay2": [SysConfig("devtimewmr.rcS", "512MB")], + "ValMemLat": [SysConfig("micro_memlat.rcS", "512MB")], + "ValMemLat2MB": [SysConfig("micro_memlat2mb.rcS", "512MB")], + "ValMemLat8MB": [SysConfig("micro_memlat8mb.rcS", "512MB")], + "ValMemLat": [SysConfig("micro_memlat8.rcS", "512MB")], + "ValTlbLat": [SysConfig("micro_tlblat.rcS", "512MB")], + "ValSysLat": [SysConfig("micro_syscall.rcS", "512MB")], + "ValCtxLat": [SysConfig("micro_ctx.rcS", "512MB")], + "ValStream": [SysConfig("micro_stream.rcS", "512MB")], + 
"ValStreamScale": [SysConfig("micro_streamscale.rcS", "512MB")], + "ValStreamCopy": [SysConfig("micro_streamcopy.rcS", "512MB")], + "MutexTest": [SysConfig("mutex-test.rcS", "128MB")], + "ArmAndroid-GB": [ + SysConfig( + "null.rcS", + "256MB", + ["ARMv7a-Gingerbread-Android.SMP.mouse.nolock.clean.img"], + None, + "android-gingerbread", + ) + ], + "bbench-gb": [ + SysConfig( + "bbench-gb.rcS", + "256MB", + ["ARMv7a-Gingerbread-Android.SMP.mouse.nolock.img"], + None, + "android-gingerbread", + ) + ], + "ArmAndroid-ICS": [ + SysConfig( + "null.rcS", + "256MB", + ["ARMv7a-ICS-Android.SMP.nolock.clean.img"], + None, + "android-ics", + ) + ], + "bbench-ics": [ + SysConfig( + "bbench-ics.rcS", + "256MB", + ["ARMv7a-ICS-Android.SMP.nolock.img"], + None, + "android-ics", + ) + ], } benchs = list(Benchmarks.keys()) diff --git a/configs/common/CacheConfig.py b/configs/common/CacheConfig.py index 61c6a304d7..63ffe6765c 100644 --- a/configs/common/CacheConfig.py +++ b/configs/common/CacheConfig.py @@ -42,9 +42,13 @@ import m5 from m5.objects import * +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa + from common.Caches import * from common import ObjectList + def _get_hwp(hwp_option): if hwp_option == None: return NULL @@ -52,23 +56,25 @@ def _get_hwp(hwp_option): hwpClass = ObjectList.hwp_list.get(hwp_option) return hwpClass() + def _get_cache_opts(level, options): opts = {} - size_attr = '{}_size'.format(level) + size_attr = "{}_size".format(level) if hasattr(options, size_attr): - opts['size'] = getattr(options, size_attr) + opts["size"] = getattr(options, size_attr) - assoc_attr = '{}_assoc'.format(level) + assoc_attr = "{}_assoc".format(level) if hasattr(options, assoc_attr): - opts['assoc'] = getattr(options, assoc_attr) + opts["assoc"] = getattr(options, assoc_attr) - prefetcher_attr = '{}_hwp_type'.format(level) + prefetcher_attr = "{}_hwp_type".format(level) if hasattr(options, prefetcher_attr): - opts['prefetcher'] = _get_hwp(getattr(options, 
prefetcher_attr)) + opts["prefetcher"] = _get_hwp(getattr(options, prefetcher_attr)) return opts + def config_cache(options, system): if options.external_memory_system and (options.caches or options.l2cache): print("External caches and internal caches are exclusive options.\n") @@ -84,10 +90,12 @@ def config_cache(options, system): print("O3_ARM_v7a_3 is unavailable. Did you compile the O3 model?") sys.exit(1) - dcache_class, icache_class, l2_cache_class, walk_cache_class = \ - core.O3_ARM_v7a_DCache, core.O3_ARM_v7a_ICache, \ - core.O3_ARM_v7aL2, \ - None + dcache_class, icache_class, l2_cache_class, walk_cache_class = ( + core.O3_ARM_v7a_DCache, + core.O3_ARM_v7a_ICache, + core.O3_ARM_v7aL2, + None, + ) elif options.cpu_type == "HPI": try: import cores.arm.HPI as core @@ -95,13 +103,21 @@ def config_cache(options, system): print("HPI is unavailable.") sys.exit(1) - dcache_class, icache_class, l2_cache_class, walk_cache_class = \ - core.HPI_DCache, core.HPI_ICache, core.HPI_L2, None + dcache_class, icache_class, l2_cache_class, walk_cache_class = ( + core.HPI_DCache, + core.HPI_ICache, + core.HPI_L2, + None, + ) else: - dcache_class, icache_class, l2_cache_class, walk_cache_class = \ - L1_DCache, L1_ICache, L2Cache, None + dcache_class, icache_class, l2_cache_class, walk_cache_class = ( + L1_DCache, + L1_ICache, + L2Cache, + None, + ) - if buildEnv['TARGET_ISA'] in ['x86', 'riscv']: + if get_runtime_isa() in [ISA.X86, ISA.RISCV]: walk_cache_class = PageTableWalkerCache # Set the cache line size of the system @@ -118,10 +134,11 @@ def config_cache(options, system): # Provide a clock for the L2 and the L1-to-L2 bus here as they # are not connected using addTwoLevelCacheHierarchy. Use the # same clock as the CPUs. 
- system.l2 = l2_cache_class(clk_domain=system.cpu_clk_domain, - **_get_cache_opts('l2', options)) + system.l2 = l2_cache_class( + clk_domain=system.cpu_clk_domain, **_get_cache_opts("l2", options) + ) - system.tol2bus = L2XBar(clk_domain = system.cpu_clk_domain) + system.tol2bus = L2XBar(clk_domain=system.cpu_clk_domain) system.l2.cpu_side = system.tol2bus.mem_side_ports system.l2.mem_side = system.membus.cpu_side_ports @@ -130,8 +147,8 @@ def config_cache(options, system): for i in range(options.num_cpus): if options.caches: - icache = icache_class(**_get_cache_opts('l1i', options)) - dcache = dcache_class(**_get_cache_opts('l1d', options)) + icache = icache_class(**_get_cache_opts("l1i", options)) + dcache = dcache_class(**_get_cache_opts("l1d", options)) # If we have a walker cache specified, instantiate two # instances here @@ -159,8 +176,9 @@ def config_cache(options, system): # When connecting the caches, the clock is also inherited # from the CPU in question - system.cpu[i].addPrivateSplitL1Caches(icache, dcache, - iwalkcache, dwalkcache) + system.cpu[i].addPrivateSplitL1Caches( + icache, dcache, iwalkcache, dwalkcache + ) if options.memchecker: # The mem_side ports of the caches haven't been connected yet. @@ -174,47 +192,56 @@ def config_cache(options, system): # on these names. For simplicity, we would advise configuring # it to use this naming scheme; if this isn't possible, change # the names below. 
- if buildEnv['TARGET_ISA'] in ['x86', 'arm', 'riscv']: + if get_runtime_isa() in [ISA.X86, ISA.ARM, ISA.RISCV]: system.cpu[i].addPrivateSplitL1Caches( - ExternalCache("cpu%d.icache" % i), - ExternalCache("cpu%d.dcache" % i), - ExternalCache("cpu%d.itb_walker_cache" % i), - ExternalCache("cpu%d.dtb_walker_cache" % i)) + ExternalCache("cpu%d.icache" % i), + ExternalCache("cpu%d.dcache" % i), + ExternalCache("cpu%d.itb_walker_cache" % i), + ExternalCache("cpu%d.dtb_walker_cache" % i), + ) else: system.cpu[i].addPrivateSplitL1Caches( - ExternalCache("cpu%d.icache" % i), - ExternalCache("cpu%d.dcache" % i)) + ExternalCache("cpu%d.icache" % i), + ExternalCache("cpu%d.dcache" % i), + ) system.cpu[i].createInterruptController() if options.l2cache: system.cpu[i].connectAllPorts( system.tol2bus.cpu_side_ports, - system.membus.cpu_side_ports, system.membus.mem_side_ports) + system.membus.cpu_side_ports, + system.membus.mem_side_ports, + ) elif options.external_memory_system: system.cpu[i].connectUncachedPorts( - system.membus.cpu_side_ports, system.membus.mem_side_ports) + system.membus.cpu_side_ports, system.membus.mem_side_ports + ) else: system.cpu[i].connectBus(system.membus) return system + # ExternalSlave provides a "port", but when that port connects to a cache, # the connecting CPU SimObject wants to refer to its "cpu_side". # The 'ExternalCache' class provides this adaptation by rewriting the name, # eliminating distracting changes elsewhere in the config code. 
class ExternalCache(ExternalSlave): def __getattr__(cls, attr): - if (attr == "cpu_side"): + if attr == "cpu_side": attr = "port" return super(ExternalSlave, cls).__getattr__(attr) def __setattr__(cls, attr, value): - if (attr == "cpu_side"): + if attr == "cpu_side": attr = "port" return super(ExternalSlave, cls).__setattr__(attr, value) + def ExternalCacheFactory(port_type): def make(name): - return ExternalCache(port_data=name, port_type=port_type, - addr_ranges=[AllMemory]) + return ExternalCache( + port_data=name, port_type=port_type, addr_ranges=[AllMemory] + ) + return make diff --git a/configs/common/Caches.py b/configs/common/Caches.py index 1468b953c7..e25d16ca1e 100644 --- a/configs/common/Caches.py +++ b/configs/common/Caches.py @@ -39,6 +39,8 @@ from m5.defines import buildEnv from m5.objects import * +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa # Base implementations of L1, L2, IO and TLB-walker caches. There are # used in the regressions and also as base components in the @@ -46,6 +48,7 @@ from m5.objects import * # starting point, and specific parameters can be overridden in the # specific instantiations. 
+ class L1Cache(Cache): assoc = 2 tag_latency = 2 @@ -54,14 +57,17 @@ class L1Cache(Cache): mshrs = 4 tgts_per_mshr = 20 + class L1_ICache(L1Cache): is_read_only = True # Writeback clean lines as well writeback_clean = True + class L1_DCache(L1Cache): pass + class L2Cache(Cache): assoc = 8 tag_latency = 20 @@ -71,26 +77,28 @@ class L2Cache(Cache): tgts_per_mshr = 12 write_buffers = 8 + class IOCache(Cache): assoc = 8 tag_latency = 50 data_latency = 50 response_latency = 50 mshrs = 20 - size = '1kB' + size = "1kB" tgts_per_mshr = 12 + class PageTableWalkerCache(Cache): assoc = 2 tag_latency = 2 data_latency = 2 response_latency = 2 mshrs = 10 - size = '1kB' + size = "1kB" tgts_per_mshr = 12 # the x86 table walker actually writes to the table-walker cache - if buildEnv['TARGET_ISA'] in ['x86', 'riscv']: + if get_runtime_isa() in [ISA.X86, ISA.RISCV]: is_read_only = False else: is_read_only = True diff --git a/configs/common/CpuConfig.py b/configs/common/CpuConfig.py index d34143cbf4..1672d43343 100644 --- a/configs/common/CpuConfig.py +++ b/configs/common/CpuConfig.py @@ -36,6 +36,7 @@ from m5 import fatal import m5.objects + def config_etrace(cpu_cls, cpu_list, options): if issubclass(cpu_cls, m5.objects.DerivO3CPU): # Assign the same file name to all cpus for now. This must be @@ -45,17 +46,21 @@ def config_etrace(cpu_cls, cpu_list, options): # file names. Set the dependency window size equal to the cpu it # is attached to. cpu.traceListener = m5.objects.ElasticTrace( - instFetchTraceFile = options.inst_trace_file, - dataDepTraceFile = options.data_trace_file, - depWindowSize = 3 * cpu.numROBEntries) + instFetchTraceFile=options.inst_trace_file, + dataDepTraceFile=options.data_trace_file, + depWindowSize=3 * cpu.numROBEntries, + ) # Make the number of entries in the ROB, LQ and SQ very # large so that there are no stalls due to resource # limitation as such stalls will get captured in the trace # as compute delay. 
For replay, ROB, LQ and SQ sizes are # modelled in the Trace CPU. - cpu.numROBEntries = 512; - cpu.LQEntries = 128; - cpu.SQEntries = 128; + cpu.numROBEntries = 512 + cpu.LQEntries = 128 + cpu.SQEntries = 128 else: - fatal("%s does not support data dependency tracing. Use a CPU model of" - " type or inherited from DerivO3CPU.", cpu_cls) + fatal( + "%s does not support data dependency tracing. Use a CPU model of" + " type or inherited from DerivO3CPU.", + cpu_cls, + ) diff --git a/configs/common/FSConfig.py b/configs/common/FSConfig.py index febe146ade..5da951c93b 100644 --- a/configs/common/FSConfig.py +++ b/configs/common/FSConfig.py @@ -39,69 +39,87 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import m5 +import m5.defines from m5.objects import * from m5.util import * from common.Benchmarks import * from common import ObjectList # Populate to reflect supported os types per target ISA -os_types = { 'mips' : [ 'linux' ], - 'riscv' : [ 'linux' ], # TODO that's a lie - 'sparc' : [ 'linux' ], - 'x86' : [ 'linux' ], - 'arm' : [ 'linux', - 'android-gingerbread', - 'android-ics', - 'android-jellybean', - 'android-kitkat', - 'android-nougat', ], - } +os_types = set() +if m5.defines.buildEnv["USE_ARM_ISA"]: + os_types.update( + [ + "linux", + "android-gingerbread", + "android-ics", + "android-jellybean", + "android-kitkat", + "android-nougat", + ] + ) +if m5.defines.buildEnv["USE_MIPS_ISA"]: + os_types.add("linux") +if m5.defines.buildEnv["USE_POWER_ISA"]: + os_types.add("linux") +if m5.defines.buildEnv["USE_RISCV_ISA"]: + os_types.add("linux") # TODO that's a lie +if m5.defines.buildEnv["USE_SPARC_ISA"]: + os_types.add("linux") +if m5.defines.buildEnv["USE_X86_ISA"]: + os_types.add("linux") + class CowIdeDisk(IdeDisk): - image = CowDiskImage(child=RawDiskImage(read_only=True), - read_only=False) + image = CowDiskImage(child=RawDiskImage(read_only=True), read_only=False) def childImage(self, ci): self.image.child.image_file = ci + class 
MemBus(SystemXBar): badaddr_responder = BadAddr() default = Self.badaddr_responder.pio + def attach_9p(parent, bus): viopci = PciVirtIO() viopci.vio = VirtIO9PDiod() - viodir = os.path.realpath(os.path.join(m5.options.outdir, '9p')) - viopci.vio.root = os.path.join(viodir, 'share') - viopci.vio.socketPath = os.path.join(viodir, 'socket') + viodir = os.path.realpath(os.path.join(m5.options.outdir, "9p")) + viopci.vio.root = os.path.join(viodir, "share") + viopci.vio.socketPath = os.path.join(viodir, "socket") os.makedirs(viopci.vio.root, exist_ok=True) if os.path.exists(viopci.vio.socketPath): os.remove(viopci.vio.socketPath) parent.viopci = viopci parent.attachPciDevice(viopci, bus) + def fillInCmdline(mdesc, template, **kwargs): - kwargs.setdefault('rootdev', mdesc.rootdev()) - kwargs.setdefault('mem', mdesc.mem()) - kwargs.setdefault('script', mdesc.script()) + kwargs.setdefault("rootdev", mdesc.rootdev()) + kwargs.setdefault("mem", mdesc.mem()) + kwargs.setdefault("script", mdesc.script()) return template % kwargs + def makeCowDisks(disk_paths): disks = [] for disk_path in disk_paths: - disk = CowIdeDisk(driveID='device0') - disk.childImage(disk_path); + disk = CowIdeDisk(driveID="device0") + disk.childImage(disk_path) disks.append(disk) return disks + def makeSparcSystem(mem_mode, mdesc=None, cmdline=None): # Constants from iob.cc and uart8250.cc iob_man_addr = 0x9800000000 uart_pio_size = 8 class CowMmDisk(MmDisk): - image = CowDiskImage(child=RawDiskImage(read_only=True), - read_only=False) + image = CowDiskImage( + child=RawDiskImage(read_only=True), read_only=False + ) def childImage(self, ci): self.image.child.image_file = ci @@ -113,12 +131,14 @@ def makeSparcSystem(mem_mode, mdesc=None, cmdline=None): self.readfile = mdesc.script() self.iobus = IOXBar() self.membus = MemBus() - self.bridge = Bridge(delay='50ns') + self.bridge = Bridge(delay="50ns") self.t1000 = T1000() self.t1000.attachOnChipIO(self.membus) self.t1000.attachIO(self.iobus) - 
self.mem_ranges = [AddrRange(Addr('1MB'), size = '64MB'), - AddrRange(Addr('2GB'), size ='256MB')] + self.mem_ranges = [ + AddrRange(Addr("1MB"), size="64MB"), + AddrRange(Addr("2GB"), size="256MB"), + ] self.bridge.mem_side_port = self.iobus.cpu_side_ports self.bridge.cpu_side_port = self.membus.mem_side_ports self.disk0 = CowMmDisk() @@ -128,36 +148,47 @@ def makeSparcSystem(mem_mode, mdesc=None, cmdline=None): # The puart0 and hvuart are placed on the IO bus, so create ranges # for them. The remaining IO range is rather fragmented, so poke # holes for the iob and partition descriptors etc. - self.bridge.ranges = \ - [ - AddrRange(self.t1000.puart0.pio_addr, - self.t1000.puart0.pio_addr + uart_pio_size - 1), - AddrRange(self.disk0.pio_addr, - self.t1000.fake_jbi.pio_addr + - self.t1000.fake_jbi.pio_size - 1), - AddrRange(self.t1000.fake_clk.pio_addr, - iob_man_addr - 1), - AddrRange(self.t1000.fake_l2_1.pio_addr, - self.t1000.fake_ssi.pio_addr + - self.t1000.fake_ssi.pio_size - 1), - AddrRange(self.t1000.hvuart.pio_addr, - self.t1000.hvuart.pio_addr + uart_pio_size - 1) - ] + self.bridge.ranges = [ + AddrRange( + self.t1000.puart0.pio_addr, + self.t1000.puart0.pio_addr + uart_pio_size - 1, + ), + AddrRange( + self.disk0.pio_addr, + self.t1000.fake_jbi.pio_addr + self.t1000.fake_jbi.pio_size - 1, + ), + AddrRange(self.t1000.fake_clk.pio_addr, iob_man_addr - 1), + AddrRange( + self.t1000.fake_l2_1.pio_addr, + self.t1000.fake_ssi.pio_addr + self.t1000.fake_ssi.pio_size - 1, + ), + AddrRange( + self.t1000.hvuart.pio_addr, + self.t1000.hvuart.pio_addr + uart_pio_size - 1, + ), + ] workload = SparcFsWorkload() # ROM for OBP/Reset/Hypervisor - self.rom = SimpleMemory(image_file=binary('t1000_rom.bin'), - range=AddrRange(0xfff0000000, size='8MB')) + self.rom = SimpleMemory( + image_file=binary("t1000_rom.bin"), + range=AddrRange(0xFFF0000000, size="8MB"), + ) # nvram - self.nvram = SimpleMemory(image_file=binary('nvram1'), - range=AddrRange(0x1f11000000, size='8kB')) + 
self.nvram = SimpleMemory( + image_file=binary("nvram1"), range=AddrRange(0x1F11000000, size="8kB") + ) # hypervisor description - self.hypervisor_desc = SimpleMemory(image_file=binary('1up-hv.bin'), - range=AddrRange(0x1f12080000, size='8kB')) + self.hypervisor_desc = SimpleMemory( + image_file=binary("1up-hv.bin"), + range=AddrRange(0x1F12080000, size="8kB"), + ) # partition description - self.partition_desc = SimpleMemory(image_file=binary('1up-md.bin'), - range=AddrRange(0x1f12000000, size='8kB')) + self.partition_desc = SimpleMemory( + image_file=binary("1up-md.bin"), + range=AddrRange(0x1F12000000, size="8kB"), + ) self.rom.port = self.membus.mem_side_ports self.nvram.port = self.membus.mem_side_ports @@ -170,10 +201,20 @@ def makeSparcSystem(mem_mode, mdesc=None, cmdline=None): return self -def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, - dtb_filename=None, bare_metal=False, cmdline=None, - external_memory="", ruby=False, - vio_9p=None, bootloader=None): + +def makeArmSystem( + mem_mode, + machine_type, + num_cpus=1, + mdesc=None, + dtb_filename=None, + bare_metal=False, + cmdline=None, + external_memory="", + ruby=False, + vio_9p=None, + bootloader=None, +): assert machine_type pci_devices = [] @@ -187,7 +228,7 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, self.readfile = mdesc.script() self.iobus = IOXBar() if not ruby: - self.bridge = Bridge(delay='50ns') + self.bridge = Bridge(delay="50ns") self.bridge.mem_side_port = self.iobus.cpu_side_ports self.membus = MemBus() self.membus.badaddr_responder.warn_access = "warn" @@ -227,13 +268,17 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, self.mem_ranges.append(AddrRange(region.start, size=size_remain)) size_remain = 0 break - warn("Memory size specified spans more than one region. Creating" \ - " another memory controller for that range.") + warn( + "Memory size specified spans more than one region. 
Creating" + " another memory controller for that range." + ) if size_remain > 0: - fatal("The currently selected ARM platforms doesn't support" \ - " the amount of DRAM you've selected. Please try" \ - " another platform") + fatal( + "The currently selected ARM platforms doesn't support" + " the amount of DRAM you've selected. Please try" + " another platform" + ) if bare_metal: # EOT character on UART will end the simulation @@ -245,16 +290,19 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, if dtb_filename: workload.dtb_filename = binary(dtb_filename) - workload.machine_type = \ + workload.machine_type = ( machine_type if machine_type in ArmMachineType.map else "DTOnly" + ) # Ensure that writes to the UART actually go out early in the boot if not cmdline: - cmdline = 'earlyprintk=pl011,0x1c090000 console=ttyAMA0 ' + \ - 'lpj=19988480 norandmaps rw loglevel=8 ' + \ - 'mem=%(mem)s root=%(rootdev)s' + cmdline = ( + "earlyprintk=pl011,0x1c090000 console=ttyAMA0 " + + "lpj=19988480 norandmaps rw loglevel=8 " + + "mem=%(mem)s root=%(rootdev)s" + ) - if hasattr(self.realview.gic, 'cpu_addr'): + if hasattr(self.realview.gic, "cpu_addr"): self.gic_cpu_addr = self.realview.gic.cpu_addr # This check is for users who have previously put 'android' in @@ -263,30 +311,37 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, # behavior has been replaced with a more explicit option per # the error message below. The disk can have any name now and # doesn't need to include 'android' substring. - if (mdesc.disks() and - os.path.split(mdesc.disks()[0])[-1].lower().count('android')): - if 'android' not in mdesc.os_type(): - fatal("It looks like you are trying to boot an Android " \ - "platform. 
To boot Android, you must specify " \ - "--os-type with an appropriate Android release on " \ - "the command line.") + if mdesc.disks() and os.path.split(mdesc.disks()[0])[-1].lower().count( + "android" + ): + if "android" not in mdesc.os_type(): + fatal( + "It looks like you are trying to boot an Android " + "platform. To boot Android, you must specify " + "--os-type with an appropriate Android release on " + "the command line." + ) # android-specific tweaks - if 'android' in mdesc.os_type(): + if "android" in mdesc.os_type(): # generic tweaks cmdline += " init=/init" # release-specific tweaks - if 'kitkat' in mdesc.os_type(): - cmdline += " androidboot.hardware=gem5 qemu=1 qemu.gles=0 " + \ - "android.bootanim=0 " - elif 'nougat' in mdesc.os_type(): - cmdline += " androidboot.hardware=gem5 qemu=1 qemu.gles=0 " + \ - "android.bootanim=0 " + \ - "vmalloc=640MB " + \ - "android.early.fstab=/fstab.gem5 " + \ - "androidboot.selinux=permissive " + \ - "video=Virtual-1:1920x1080-16" + if "kitkat" in mdesc.os_type(): + cmdline += ( + " androidboot.hardware=gem5 qemu=1 qemu.gles=0 " + + "android.bootanim=0 " + ) + elif "nougat" in mdesc.os_type(): + cmdline += ( + " androidboot.hardware=gem5 qemu=1 qemu.gles=0 " + + "android.bootanim=0 " + + "vmalloc=640MB " + + "android.early.fstab=/fstab.gem5 " + + "androidboot.selinux=permissive " + + "video=Virtual-1:1920x1080-16" + ) workload.command_line = fillInCmdline(mdesc, cmdline) @@ -296,14 +351,17 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, if external_memory: # I/O traffic enters iobus - self.external_io = ExternalMaster(port_data="external_io", - port_type=external_memory) + self.external_io = ExternalMaster( + port_data="external_io", port_type=external_memory + ) self.external_io.port = self.iobus.cpu_side_ports # Ensure iocache only receives traffic destined for (actual) memory. 
- self.iocache = ExternalSlave(port_data="iocache", - port_type=external_memory, - addr_ranges=self.mem_ranges) + self.iocache = ExternalSlave( + port_data="iocache", + port_type=external_memory, + addr_ranges=self.mem_ranges, + ) self.iocache.port = self.iobus.mem_side_ports # Let system_port get to nvmem and nothing else. @@ -313,10 +371,11 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, # Attach off-chip devices self.realview.attachIO(self.iobus) elif ruby: - self._dma_ports = [ ] - self._mem_ports = [ ] - self.realview.attachOnChipIO(self.iobus, - dma_ports=self._dma_ports, mem_ports=self._mem_ports) + self._dma_ports = [] + self._mem_ports = [] + self.realview.attachOnChipIO( + self.iobus, dma_ports=self._dma_ports, mem_ports=self._mem_ports + ) self.realview.attachIO(self.iobus, dma_ports=self._dma_ports) else: self.realview.attachOnChipIO(self.membus, self.bridge) @@ -325,8 +384,8 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, for dev in pci_devices: self.realview.attachPciDevice( - dev, self.iobus, - dma_ports=self._dma_ports if ruby else None) + dev, self.iobus, dma_ports=self._dma_ports if ruby else None + ) self.terminal = Terminal() self.vncserver = VncServer() @@ -338,10 +397,12 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, self.system_port = self.membus.cpu_side_ports if ruby: - if buildEnv['PROTOCOL'] == 'MI_example' and num_cpus > 1: - fatal("The MI_example protocol cannot implement Load/Store " - "Exclusive operations. Multicore ARM systems configured " - "with the MI_example protocol will not work properly.") + if buildEnv["PROTOCOL"] == "MI_example" and num_cpus > 1: + fatal( + "The MI_example protocol cannot implement Load/Store " + "Exclusive operations. Multicore ARM systems configured " + "with the MI_example protocol will not work properly." 
+ ) return self @@ -349,8 +410,9 @@ def makeArmSystem(mem_mode, machine_type, num_cpus=1, mdesc=None, def makeLinuxMipsSystem(mem_mode, mdesc=None, cmdline=None): class BaseMalta(Malta): ethernet = NSGigE(pci_bus=0, pci_dev=1, pci_func=0) - ide = IdeController(disks=Parent.disks, - pci_func=0, pci_dev=0, pci_bus=0) + ide = IdeController( + disks=Parent.disks, pci_func=0, pci_dev=0, pci_bus=0 + ) self = System() if not mdesc: @@ -359,8 +421,8 @@ def makeLinuxMipsSystem(mem_mode, mdesc=None, cmdline=None): self.readfile = mdesc.script() self.iobus = IOXBar() self.membus = MemBus() - self.bridge = Bridge(delay='50ns') - self.mem_ranges = [AddrRange('1GB')] + self.bridge = Bridge(delay="50ns") + self.mem_ranges = [AddrRange("1GB")] self.bridge.mem_side_port = self.iobus.cpu_side_ports self.bridge.cpu_side_port = self.membus.mem_side_ports self.disks = makeCowDisks(mdesc.disks()) @@ -370,35 +432,38 @@ def makeLinuxMipsSystem(mem_mode, mdesc=None, cmdline=None): self.malta.ide.dma = self.iobus.cpu_side_ports self.malta.ethernet.pio = self.iobus.mem_side_ports self.malta.ethernet.dma = self.iobus.cpu_side_ports - self.simple_disk = SimpleDisk(disk=RawDiskImage( - image_file = mdesc.disks()[0], read_only = True)) + self.simple_disk = SimpleDisk( + disk=RawDiskImage(image_file=mdesc.disks()[0], read_only=True) + ) self.mem_mode = mem_mode self.terminal = Terminal() - self.console = binary('mips/console') + self.console = binary("mips/console") if not cmdline: - cmdline = 'root=/dev/hda1 console=ttyS0' + cmdline = "root=/dev/hda1 console=ttyS0" self.workload = KernelWorkload(command_line=fillInCmdline(mdesc, cmdline)) self.system_port = self.membus.cpu_side_ports return self + def x86IOAddress(port): IO_address_space_base = 0x8000000000000000 return IO_address_space_base + port + def connectX86ClassicSystem(x86_sys, numCPUs): # Constants similar to x86_traits.hh IO_address_space_base = 0x8000000000000000 - pci_config_address_space_base = 0xc000000000000000 - 
interrupts_address_space_base = 0xa000000000000000 - APIC_range_size = 1 << 12; + pci_config_address_space_base = 0xC000000000000000 + interrupts_address_space_base = 0xA000000000000000 + APIC_range_size = 1 << 12 x86_sys.membus = MemBus() # North Bridge x86_sys.iobus = IOXBar() - x86_sys.bridge = Bridge(delay='50ns') + x86_sys.bridge = Bridge(delay="50ns") x86_sys.bridge.mem_side_port = x86_sys.iobus.cpu_side_ports x86_sys.bridge.cpu_side_port = x86_sys.membus.mem_side_ports # Allow the bridge to pass through: @@ -407,30 +472,30 @@ def connectX86ClassicSystem(x86_sys, numCPUs): # 2) the bridge to pass through the IO APIC (two pages, already contained in 1), # 3) everything in the IO address range up to the local APIC, and # 4) then the entire PCI address space and beyond. - x86_sys.bridge.ranges = \ - [ + x86_sys.bridge.ranges = [ AddrRange(0xC0000000, 0xFFFF0000), - AddrRange(IO_address_space_base, - interrupts_address_space_base - 1), - AddrRange(pci_config_address_space_base, - Addr.max) - ] + AddrRange(IO_address_space_base, interrupts_address_space_base - 1), + AddrRange(pci_config_address_space_base, Addr.max), + ] # Create a bridge from the IO bus to the memory bus to allow access to # the local APIC (two pages) - x86_sys.apicbridge = Bridge(delay='50ns') + x86_sys.apicbridge = Bridge(delay="50ns") x86_sys.apicbridge.cpu_side_port = x86_sys.iobus.mem_side_ports x86_sys.apicbridge.mem_side_port = x86_sys.membus.cpu_side_ports - x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base, - interrupts_address_space_base + - numCPUs * APIC_range_size - - 1)] + x86_sys.apicbridge.ranges = [ + AddrRange( + interrupts_address_space_base, + interrupts_address_space_base + numCPUs * APIC_range_size - 1, + ) + ] # connect the io bus x86_sys.pc.attachIO(x86_sys.iobus) x86_sys.system_port = x86_sys.membus.cpu_side_ports + def connectX86RubySystem(x86_sys): # North Bridge x86_sys.iobus = IOXBar() @@ -444,7 +509,7 @@ def connectX86RubySystem(x86_sys): def 
makeX86System(mem_mode, numCPUs=1, mdesc=None, workload=None, Ruby=False): self = System() - self.m5ops_base = 0xffff0000 + self.m5ops_base = 0xFFFF0000 if workload is None: workload = X86FsWorkload() @@ -461,17 +526,22 @@ def makeX86System(mem_mode, numCPUs=1, mdesc=None, workload=None, Ruby=False): # On the PC platform, the memory region 0xC0000000-0xFFFFFFFF is reserved # for various devices. Hence, if the physical memory size is greater than # 3GB, we need to split it into two parts. - excess_mem_size = \ - convert.toMemorySize(mdesc.mem()) - convert.toMemorySize('3GB') + excess_mem_size = convert.toMemorySize(mdesc.mem()) - convert.toMemorySize( + "3GB" + ) if excess_mem_size <= 0: self.mem_ranges = [AddrRange(mdesc.mem())] else: - warn("Physical memory size specified is %s which is greater than " \ - "3GB. Twice the number of memory controllers would be " \ - "created." % (mdesc.mem())) + warn( + "Physical memory size specified is %s which is greater than " + "3GB. Twice the number of memory controllers would be " + "created." 
% (mdesc.mem()) + ) - self.mem_ranges = [AddrRange('3GB'), - AddrRange(Addr('4GB'), size = excess_mem_size)] + self.mem_ranges = [ + AddrRange("3GB"), + AddrRange(Addr("4GB"), size=excess_mem_size), + ] # Platform self.pc = Pc() @@ -496,78 +566,78 @@ def makeX86System(mem_mode, numCPUs=1, mdesc=None, workload=None, Ruby=False): madt_records = [] for i in range(numCPUs): bp = X86IntelMPProcessor( - local_apic_id = i, - local_apic_version = 0x14, - enable = True, - bootstrap = (i == 0)) + local_apic_id=i, + local_apic_version=0x14, + enable=True, + bootstrap=(i == 0), + ) base_entries.append(bp) - lapic = X86ACPIMadtLAPIC( - acpi_processor_id=i, - apic_id=i, - flags=1) + lapic = X86ACPIMadtLAPIC(acpi_processor_id=i, apic_id=i, flags=1) madt_records.append(lapic) io_apic = X86IntelMPIOAPIC( - id = numCPUs, - version = 0x11, - enable = True, - address = 0xfec00000) + id=numCPUs, version=0x11, enable=True, address=0xFEC00000 + ) self.pc.south_bridge.io_apic.apic_id = io_apic.id base_entries.append(io_apic) - madt_records.append(X86ACPIMadtIOAPIC(id=io_apic.id, - address=io_apic.address, int_base=0)) + madt_records.append( + X86ACPIMadtIOAPIC(id=io_apic.id, address=io_apic.address, int_base=0) + ) # In gem5 Pc::calcPciConfigAddr(), it required "assert(bus==0)", # but linux kernel cannot config PCI device if it was not connected to # PCI bus, so we fix PCI bus id to 0, and ISA bus id to 1. 
- pci_bus = X86IntelMPBus(bus_id = 0, bus_type='PCI ') + pci_bus = X86IntelMPBus(bus_id=0, bus_type="PCI ") base_entries.append(pci_bus) - isa_bus = X86IntelMPBus(bus_id = 1, bus_type='ISA ') + isa_bus = X86IntelMPBus(bus_id=1, bus_type="ISA ") base_entries.append(isa_bus) - connect_busses = X86IntelMPBusHierarchy(bus_id=1, - subtractive_decode=True, parent_bus=0) + connect_busses = X86IntelMPBusHierarchy( + bus_id=1, subtractive_decode=True, parent_bus=0 + ) ext_entries.append(connect_busses) pci_dev4_inta = X86IntelMPIOIntAssignment( - interrupt_type = 'INT', - polarity = 'ConformPolarity', - trigger = 'ConformTrigger', - source_bus_id = 0, - source_bus_irq = 0 + (4 << 2), - dest_io_apic_id = io_apic.id, - dest_io_apic_intin = 16) + interrupt_type="INT", + polarity="ConformPolarity", + trigger="ConformTrigger", + source_bus_id=0, + source_bus_irq=0 + (4 << 2), + dest_io_apic_id=io_apic.id, + dest_io_apic_intin=16, + ) base_entries.append(pci_dev4_inta) pci_dev4_inta_madt = X86ACPIMadtIntSourceOverride( - bus_source = pci_dev4_inta.source_bus_id, - irq_source = pci_dev4_inta.source_bus_irq, - sys_int = pci_dev4_inta.dest_io_apic_intin, - flags = 0 - ) + bus_source=pci_dev4_inta.source_bus_id, + irq_source=pci_dev4_inta.source_bus_irq, + sys_int=pci_dev4_inta.dest_io_apic_intin, + flags=0, + ) madt_records.append(pci_dev4_inta_madt) + def assignISAInt(irq, apicPin): assign_8259_to_apic = X86IntelMPIOIntAssignment( - interrupt_type = 'ExtInt', - polarity = 'ConformPolarity', - trigger = 'ConformTrigger', - source_bus_id = 1, - source_bus_irq = irq, - dest_io_apic_id = io_apic.id, - dest_io_apic_intin = 0) + interrupt_type="ExtInt", + polarity="ConformPolarity", + trigger="ConformTrigger", + source_bus_id=1, + source_bus_irq=irq, + dest_io_apic_id=io_apic.id, + dest_io_apic_intin=0, + ) base_entries.append(assign_8259_to_apic) assign_to_apic = X86IntelMPIOIntAssignment( - interrupt_type = 'INT', - polarity = 'ConformPolarity', - trigger = 'ConformTrigger', - 
source_bus_id = 1, - source_bus_irq = irq, - dest_io_apic_id = io_apic.id, - dest_io_apic_intin = apicPin) + interrupt_type="INT", + polarity="ConformPolarity", + trigger="ConformTrigger", + source_bus_id=1, + source_bus_irq=irq, + dest_io_apic_id=io_apic.id, + dest_io_apic_intin=apicPin, + ) base_entries.append(assign_to_apic) # acpi assign_to_apic_acpi = X86ACPIMadtIntSourceOverride( - bus_source = 1, - irq_source = irq, - sys_int = apicPin, - flags = 0 - ) + bus_source=1, irq_source=irq, sys_int=apicPin, flags=0 + ) madt_records.append(assign_to_apic_acpi) + assignISAInt(0, 2) assignISAInt(1, 1) for i in range(3, 15): @@ -575,64 +645,78 @@ def makeX86System(mem_mode, numCPUs=1, mdesc=None, workload=None, Ruby=False): workload.intel_mp_table.base_entries = base_entries workload.intel_mp_table.ext_entries = ext_entries - madt = X86ACPIMadt(local_apic_address=0, - records=madt_records, oem_id='madt') + madt = X86ACPIMadt( + local_apic_address=0, records=madt_records, oem_id="madt" + ) workload.acpi_description_table_pointer.rsdt.entries.append(madt) workload.acpi_description_table_pointer.xsdt.entries.append(madt) - workload.acpi_description_table_pointer.oem_id = 'gem5' - workload.acpi_description_table_pointer.rsdt.oem_id='gem5' - workload.acpi_description_table_pointer.xsdt.oem_id='gem5' + workload.acpi_description_table_pointer.oem_id = "gem5" + workload.acpi_description_table_pointer.rsdt.oem_id = "gem5" + workload.acpi_description_table_pointer.xsdt.oem_id = "gem5" return self -def makeLinuxX86System(mem_mode, numCPUs=1, mdesc=None, Ruby=False, - cmdline=None): + +def makeLinuxX86System( + mem_mode, numCPUs=1, mdesc=None, Ruby=False, cmdline=None +): # Build up the x86 system and then specialize it for Linux self = makeX86System(mem_mode, numCPUs, mdesc, X86FsLinux(), Ruby) # We assume below that there's at least 1MB of memory. We'll require 2 # just to avoid corner cases. 
phys_mem_size = sum([r.size() for r in self.mem_ranges]) - assert(phys_mem_size >= 0x200000) - assert(len(self.mem_ranges) <= 2) + assert phys_mem_size >= 0x200000 + assert len(self.mem_ranges) <= 2 - entries = \ - [ + entries = [ # Mark the first megabyte of memory as reserved - X86E820Entry(addr = 0, size = '639kB', range_type = 1), - X86E820Entry(addr = 0x9fc00, size = '385kB', range_type = 2), + X86E820Entry(addr=0, size="639kB", range_type=1), + X86E820Entry(addr=0x9FC00, size="385kB", range_type=2), # Mark the rest of physical memory as available - X86E820Entry(addr = 0x100000, - size = '%dB' % (self.mem_ranges[0].size() - 0x100000), - range_type = 1), - ] + X86E820Entry( + addr=0x100000, + size="%dB" % (self.mem_ranges[0].size() - 0x100000), + range_type=1, + ), + ] # Mark [mem_size, 3GB) as reserved if memory less than 3GB, which force # IO devices to be mapped to [0xC0000000, 0xFFFF0000). Requests to this # specific range can pass though bridge to iobus. if len(self.mem_ranges) == 1: - entries.append(X86E820Entry(addr = self.mem_ranges[0].size(), - size='%dB' % (0xC0000000 - self.mem_ranges[0].size()), - range_type=2)) + entries.append( + X86E820Entry( + addr=self.mem_ranges[0].size(), + size="%dB" % (0xC0000000 - self.mem_ranges[0].size()), + range_type=2, + ) + ) # Reserve the last 16kB of the 32-bit address space for the m5op interface - entries.append(X86E820Entry(addr=0xFFFF0000, size='64kB', range_type=2)) + entries.append(X86E820Entry(addr=0xFFFF0000, size="64kB", range_type=2)) # In case the physical memory is greater than 3GB, we split it into two # parts and add a separate e820 entry for the second part. This entry # starts at 0x100000000, which is the first address after the space # reserved for devices. 
if len(self.mem_ranges) == 2: - entries.append(X86E820Entry(addr = 0x100000000, - size = '%dB' % (self.mem_ranges[1].size()), range_type = 1)) + entries.append( + X86E820Entry( + addr=0x100000000, + size="%dB" % (self.mem_ranges[1].size()), + range_type=1, + ) + ) self.workload.e820_table.entries = entries # Command line if not cmdline: - cmdline = 'earlyprintk=ttyS0 console=ttyS0 lpj=7999923 root=/dev/hda1' + cmdline = "earlyprintk=ttyS0 console=ttyS0 lpj=7999923 root=/dev/hda1" self.workload.command_line = fillInCmdline(mdesc, cmdline) return self + def makeBareMetalRiscvSystem(mem_mode, mdesc=None, cmdline=None): self = System() if not mdesc: @@ -646,7 +730,7 @@ def makeBareMetalRiscvSystem(mem_mode, mdesc=None, cmdline=None): self.iobus = IOXBar() self.membus = MemBus() - self.bridge = Bridge(delay='50ns') + self.bridge = Bridge(delay="50ns") self.bridge.mem_side_port = self.iobus.cpu_side_ports self.bridge.cpu_side_port = self.membus.mem_side_ports # Sv39 has 56 bit physical addresses; use the upper 8 bit for the IO space @@ -656,16 +740,17 @@ def makeBareMetalRiscvSystem(mem_mode, mdesc=None, cmdline=None): self.system_port = self.membus.cpu_side_ports return self + def makeDualRoot(full_system, testSystem, driveSystem, dumpfile): - self = Root(full_system = full_system) + self = Root(full_system=full_system) self.testsys = testSystem self.drivesys = driveSystem self.etherlink = EtherLink() - if hasattr(testSystem, 'realview'): + if hasattr(testSystem, "realview"): self.etherlink.int0 = Parent.testsys.realview.ethernet.interface self.etherlink.int1 = Parent.drivesys.realview.ethernet.interface - elif hasattr(testSystem, 'tsunami'): + elif hasattr(testSystem, "tsunami"): self.etherlink.int0 = Parent.testsys.tsunami.ethernet.interface self.etherlink.int1 = Parent.drivesys.tsunami.ethernet.interface else: @@ -678,31 +763,35 @@ def makeDualRoot(full_system, testSystem, driveSystem, dumpfile): return self -def makeDistRoot(testSystem, - rank, - size, - 
server_name, - server_port, - sync_repeat, - sync_start, - linkspeed, - linkdelay, - dumpfile): - self = Root(full_system = True) +def makeDistRoot( + testSystem, + rank, + size, + server_name, + server_port, + sync_repeat, + sync_start, + linkspeed, + linkdelay, + dumpfile, +): + self = Root(full_system=True) self.testsys = testSystem - self.etherlink = DistEtherLink(speed = linkspeed, - delay = linkdelay, - dist_rank = rank, - dist_size = size, - server_name = server_name, - server_port = server_port, - sync_start = sync_start, - sync_repeat = sync_repeat) + self.etherlink = DistEtherLink( + speed=linkspeed, + delay=linkdelay, + dist_rank=rank, + dist_size=size, + server_name=server_name, + server_port=server_port, + sync_start=sync_start, + sync_repeat=sync_repeat, + ) - if hasattr(testSystem, 'realview'): + if hasattr(testSystem, "realview"): self.etherlink.int0 = Parent.testsys.realview.ethernet.interface - elif hasattr(testSystem, 'tsunami'): + elif hasattr(testSystem, "tsunami"): self.etherlink.int0 = Parent.testsys.tsunami.ethernet.interface else: fatal("Don't know how to connect DistEtherLink to this system") diff --git a/configs/common/FileSystemConfig.py b/configs/common/FileSystemConfig.py index f60bf232dc..066eb9a811 100644 --- a/configs/common/FileSystemConfig.py +++ b/configs/common/FileSystemConfig.py @@ -48,21 +48,25 @@ from os.path import join as joinpath from os.path import isdir from shutil import rmtree, copyfile + def hex_mask(terms): dec_mask = reduce(operator.or_, [2**i for i in terms], 0) return "%08x" % dec_mask + def file_append(path, contents): - with open(joinpath(*path), 'a') as f: + with open(joinpath(*path), "a") as f: f.write(str(contents)) + def replace_tree(path): if isdir(path): rmtree(path) mkdir(path) -def config_filesystem(system, options = None): - """ This function parses the system object to create the pseudo file system + +def config_filesystem(system, options=None): + """This function parses the system object to create 
the pseudo file system @param system: The system to create the config for @param options: An optional argument which contains an Options.py options object. This is useful if when use se.py and will set the L2 cache @@ -79,167 +83,200 @@ def config_filesystem(system, options = None): These files are created in the `fs` directory in the outdir path. """ - fsdir = joinpath(m5.options.outdir, 'fs') + fsdir = joinpath(m5.options.outdir, "fs") replace_tree(fsdir) # Set up /proc - procdir = joinpath(fsdir, 'proc') + procdir = joinpath(fsdir, "proc") mkdir(procdir) try: - cpus = \ - [obj for obj in system.descendants() if isinstance(obj, BaseCPU)] + cpus = [ + obj for obj in system.descendants() if isinstance(obj, BaseCPU) + ] except NameError: # BaseCPU is not defined for the NULL ISA cpus = [] cpu_clock = 0 - if hasattr(options, 'cpu_clock'): + if hasattr(options, "cpu_clock"): cpu_clock = toFrequency(options.cpu_clock) / mega l2_size = 0 - if hasattr(options, 'l2_size'): + if hasattr(options, "l2_size"): l2_size = toMemorySize(options.l2_size) / kibi - for i,cpu in enumerate(cpus): - one_cpu = 'processor : {proc}\n' + \ - 'vendor_id : Generic\n' + \ - 'cpu family : 0\n' + \ - 'model : 0\n' + \ - 'model name : Generic\n' + \ - 'stepping : 0\n' + \ - 'cpu MHz : {clock:0.3f}\n' + \ - 'cache size: : {l2_size}K\n' + \ - 'physical id : 0\n' + \ - 'siblings : {num_cpus}\n' + \ - 'core id : {proc}\n' + \ - 'cpu cores : {num_cpus}\n' + \ - 'fpu : yes\n' + \ - 'fpu exception : yes\n' + \ - 'cpuid level : 1\n' + \ - 'wp : yes\n' + \ - 'flags : fpu\n' + \ - 'cache alignment : {cacheline_size}\n' + \ - '\n' - one_cpu = one_cpu.format(proc = i, num_cpus = len(cpus), - # Note: it would be nice to use cpu.clock, but it hasn't - # been finalized yet since m5.instantiate() isn't done. 
- clock = cpu_clock, - # Note: this assumes the L2 is private to each core - l2_size = l2_size, - cacheline_size=system.cache_line_size.getValue()) - file_append((procdir, 'cpuinfo'), one_cpu) + for i, cpu in enumerate(cpus): + one_cpu = ( + "processor : {proc}\n" + + "vendor_id : Generic\n" + + "cpu family : 0\n" + + "model : 0\n" + + "model name : Generic\n" + + "stepping : 0\n" + + "cpu MHz : {clock:0.3f}\n" + + "cache size: : {l2_size}K\n" + + "physical id : 0\n" + + "siblings : {num_cpus}\n" + + "core id : {proc}\n" + + "cpu cores : {num_cpus}\n" + + "fpu : yes\n" + + "fpu exception : yes\n" + + "cpuid level : 1\n" + + "wp : yes\n" + + "flags : fpu\n" + + "cache alignment : {cacheline_size}\n" + + "\n" + ) + one_cpu = one_cpu.format( + proc=i, + num_cpus=len(cpus), + # Note: it would be nice to use cpu.clock, but it hasn't + # been finalized yet since m5.instantiate() isn't done. + clock=cpu_clock, + # Note: this assumes the L2 is private to each core + l2_size=l2_size, + cacheline_size=system.cache_line_size.getValue(), + ) + file_append((procdir, "cpuinfo"), one_cpu) - file_append((procdir, 'stat'), 'cpu 0 0 0 0 0 0 0\n') + file_append((procdir, "stat"), "cpu 0 0 0 0 0 0 0\n") for i in range(len(cpus)): - file_append((procdir, 'stat'), 'cpu%d 0 0 0 0 0 0 0\n' % i) + file_append((procdir, "stat"), "cpu%d 0 0 0 0 0 0 0\n" % i) # Set up /sys - sysdir = joinpath(fsdir, 'sys') + sysdir = joinpath(fsdir, "sys") mkdir(sysdir) # Set up /sys/devices/system/cpu - cpudir = joinpath(sysdir, 'devices', 'system', 'cpu') + cpudir = joinpath(sysdir, "devices", "system", "cpu") makedirs(cpudir, exist_ok=True) - file_append((cpudir, 'online'), '0-%d' % (len(cpus) - 1)) - file_append((cpudir, 'possible'), '0-%d' % (len(cpus) - 1)) + file_append((cpudir, "online"), "0-%d" % (len(cpus) - 1)) + file_append((cpudir, "possible"), "0-%d" % (len(cpus) - 1)) # Set up /tmp - tmpdir = joinpath(fsdir, 'tmp') + tmpdir = joinpath(fsdir, "tmp") replace_tree(tmpdir) system.redirect_paths = 
_redirect_paths(options) # Setting the interpreter path. This is used to load the # guest dynamic linker itself from the elf file. - interp = getattr(options, 'interp_dir', None) + interp = getattr(options, "interp_dir", None) if interp: from m5.core import setInterpDir + setInterpDir(interp) - print("Setting the interpreter path to:", interp, - "\nFor dynamically linked applications you might still " - "need to setup the --redirects so that libraries are " - "found\n") + print( + "Setting the interpreter path to:", + interp, + "\nFor dynamically linked applications you might still " + "need to setup the --redirects so that libraries are " + "found\n", + ) + def register_node(cpu_list, mem, node_number): - nodebasedir = joinpath(m5.options.outdir, 'fs', 'sys', 'devices', - 'system', 'node') + nodebasedir = joinpath( + m5.options.outdir, "fs", "sys", "devices", "system", "node" + ) - nodedir = joinpath(nodebasedir,'node%d' % node_number) + nodedir = joinpath(nodebasedir, "node%d" % node_number) makedirs(nodedir, exist_ok=True) - file_append((nodedir, 'cpumap'), hex_mask(cpu_list)) - file_append((nodedir, 'meminfo'), - 'Node %d MemTotal: %dkB' % (node_number, - toMemorySize(str(mem))/kibi)) + file_append((nodedir, "cpumap"), hex_mask(cpu_list)) + file_append( + (nodedir, "meminfo"), + "Node %d MemTotal: %dkB" + % (node_number, toMemorySize(str(mem)) / kibi), + ) -def register_cpu(physical_package_id, core_siblings, - core_id, thread_siblings): - cpudir = joinpath(m5.options.outdir, 'fs', 'sys', 'devices', 'system', - 'cpu', 'cpu%d' % core_id) - makedirs(joinpath(cpudir, 'topology'), exist_ok=True) - makedirs(joinpath(cpudir, 'cache')) +def register_cpu(physical_package_id, core_siblings, core_id, thread_siblings): + cpudir = joinpath( + m5.options.outdir, + "fs", + "sys", + "devices", + "system", + "cpu", + "cpu%d" % core_id, + ) + + makedirs(joinpath(cpudir, "topology"), exist_ok=True) + makedirs(joinpath(cpudir, "cache")) + + file_append((cpudir, "online"), "1") + 
file_append( + (cpudir, "topology", "physical_package_id"), physical_package_id + ) + file_append((cpudir, "topology", "core_siblings"), hex_mask(core_siblings)) + file_append((cpudir, "topology", "core_id"), core_id) + file_append( + (cpudir, "topology", "thread_siblings"), hex_mask(thread_siblings) + ) - file_append((cpudir, 'online'), '1') - file_append((cpudir, 'topology', 'physical_package_id'), - physical_package_id) - file_append((cpudir, 'topology', 'core_siblings'), - hex_mask(core_siblings)) - file_append((cpudir, 'topology', 'core_id'), core_id) - file_append((cpudir, 'topology', 'thread_siblings'), - hex_mask(thread_siblings)) def register_cache(level, idu_type, size, line_size, assoc, cpus): - fsdir = joinpath(m5.options.outdir, 'fs') + fsdir = joinpath(m5.options.outdir, "fs") for i in cpus: - cachedir = joinpath(fsdir, 'sys', 'devices', 'system', 'cpu', - 'cpu%d' % i, 'cache') + cachedir = joinpath( + fsdir, "sys", "devices", "system", "cpu", "cpu%d" % i, "cache" + ) j = 0 - while isdir(joinpath(cachedir, 'index%d' % j)): + while isdir(joinpath(cachedir, "index%d" % j)): j += 1 - indexdir = joinpath(cachedir, 'index%d' % j) + indexdir = joinpath(cachedir, "index%d" % j) makedirs(indexdir, exist_ok=True) - file_append((indexdir, 'level'), level) - file_append((indexdir, 'type'), idu_type) - file_append((indexdir, 'size'), "%dK" % (toMemorySize(size)/kibi)) - file_append((indexdir, 'coherency_line_size'), line_size) + file_append((indexdir, "level"), level) + file_append((indexdir, "type"), idu_type) + file_append((indexdir, "size"), "%dK" % (toMemorySize(size) / kibi)) + file_append((indexdir, "coherency_line_size"), line_size) # Since cache size = number of indices * associativity * block size num_sets = toMemorySize(size) / int(assoc) * int(line_size) - file_append((indexdir, 'number_of_sets'), num_sets) - file_append((indexdir, 'physical_line_partition'), '1') - file_append((indexdir, 'shared_cpu_map'), hex_mask(cpus)) - file_append((indexdir, 
'shared_cpu_list'), - ','.join(str(cpu) for cpu in cpus)) + file_append((indexdir, "number_of_sets"), num_sets) + file_append((indexdir, "physical_line_partition"), "1") + file_append((indexdir, "shared_cpu_map"), hex_mask(cpus)) + file_append( + (indexdir, "shared_cpu_list"), ",".join(str(cpu) for cpu in cpus) + ) + def _redirect_paths(options): # Redirect filesystem syscalls from src to the first matching dests - redirect_paths = [RedirectPath(app_path = "/proc", - host_paths = ["%s/fs/proc" % m5.options.outdir]), - RedirectPath(app_path = "/sys", - host_paths = ["%s/fs/sys" % m5.options.outdir]), - RedirectPath(app_path = "/tmp", - host_paths = ["%s/fs/tmp" % m5.options.outdir])] + redirect_paths = [ + RedirectPath( + app_path="/proc", host_paths=["%s/fs/proc" % m5.options.outdir] + ), + RedirectPath( + app_path="/sys", host_paths=["%s/fs/sys" % m5.options.outdir] + ), + RedirectPath( + app_path="/tmp", host_paths=["%s/fs/tmp" % m5.options.outdir] + ), + ] # Setting the redirect paths so that the guest dynamic linker # can point to the proper /lib collection (e.g. 
to load libc) - redirects = getattr(options, 'redirects', []) + redirects = getattr(options, "redirects", []) for redirect in redirects: app_path, host_path = redirect.split("=") redirect_paths.append( - RedirectPath(app_path = app_path, host_paths = [ host_path ])) + RedirectPath(app_path=app_path, host_paths=[host_path]) + ) - chroot = getattr(options, 'chroot', None) + chroot = getattr(options, "chroot", None) if chroot: redirect_paths.append( RedirectPath( - app_path = "/", - host_paths = ["%s" % os.path.expanduser(chroot)])) + app_path="/", host_paths=["%s" % os.path.expanduser(chroot)] + ) + ) return redirect_paths diff --git a/configs/common/GPUTLBConfig.py b/configs/common/GPUTLBConfig.py index 740c748d05..b70d6c5516 100644 --- a/configs/common/GPUTLBConfig.py +++ b/configs/common/GPUTLBConfig.py @@ -34,10 +34,12 @@ import m5 from m5.objects import * + def TLB_constructor(options, level, gpu_ctrl=None, full_system=False): if full_system: - constructor_call = "VegaGPUTLB(\ + constructor_call = ( + "VegaGPUTLB(\ gpu_device = gpu_ctrl, \ size = options.L%(level)dTLBentries, \ assoc = options.L%(level)dTLBassoc, \ @@ -48,9 +50,12 @@ def TLB_constructor(options, level, gpu_ctrl=None, full_system=False): clk_domain = SrcClockDomain(\ clock = options.gpu_clock,\ voltage_domain = VoltageDomain(\ - voltage = options.gpu_voltage)))" % locals() + voltage = options.gpu_voltage)))" + % locals() + ) else: - constructor_call = "X86GPUTLB(size = options.L%(level)dTLBentries, \ + constructor_call = ( + "X86GPUTLB(size = options.L%(level)dTLBentries, \ assoc = options.L%(level)dTLBassoc, \ hitLatency = options.L%(level)dAccessLatency,\ missLatency2 = options.L%(level)dMissLatency,\ @@ -59,13 +64,17 @@ def TLB_constructor(options, level, gpu_ctrl=None, full_system=False): clk_domain = SrcClockDomain(\ clock = options.gpu_clock,\ voltage_domain = VoltageDomain(\ - voltage = options.gpu_voltage)))" % locals() + voltage = options.gpu_voltage)))" + % locals() + ) return 
constructor_call + def Coalescer_constructor(options, level, full_system): if full_system: - constructor_call = "VegaTLBCoalescer(probesPerCycle = \ + constructor_call = ( + "VegaTLBCoalescer(probesPerCycle = \ options.L%(level)dProbesPerCycle, \ tlb_level = %(level)d ,\ coalescingWindow = options.L%(level)dCoalescingWindow,\ @@ -73,30 +82,47 @@ def Coalescer_constructor(options, level, full_system): clk_domain = SrcClockDomain(\ clock = options.gpu_clock,\ voltage_domain = VoltageDomain(\ - voltage = options.gpu_voltage)))" % locals() + voltage = options.gpu_voltage)))" + % locals() + ) else: - constructor_call = "TLBCoalescer(probesPerCycle = \ + constructor_call = ( + "TLBCoalescer(probesPerCycle = \ options.L%(level)dProbesPerCycle, \ coalescingWindow = options.L%(level)dCoalescingWindow,\ disableCoalescing = options.L%(level)dDisableCoalescing,\ clk_domain = SrcClockDomain(\ clock = options.gpu_clock,\ voltage_domain = VoltageDomain(\ - voltage = options.gpu_voltage)))" % locals() + voltage = options.gpu_voltage)))" + % locals() + ) return constructor_call -def create_TLB_Coalescer(options, my_level, my_index, tlb_name, - coalescer_name, gpu_ctrl=None, full_system=False): + +def create_TLB_Coalescer( + options, + my_level, + my_index, + tlb_name, + coalescer_name, + gpu_ctrl=None, + full_system=False, +): # arguments: options, TLB level, number of private structures for this # Level, TLB name and Coalescer name for i in range(my_index): tlb_name.append( - eval(TLB_constructor(options, my_level, gpu_ctrl, full_system))) + eval(TLB_constructor(options, my_level, gpu_ctrl, full_system)) + ) coalescer_name.append( - eval(Coalescer_constructor(options, my_level, full_system))) + eval(Coalescer_constructor(options, my_level, full_system)) + ) -def config_tlb_hierarchy(options, system, shader_idx, gpu_ctrl=None, - full_system=False): + +def config_tlb_hierarchy( + options, system, shader_idx, gpu_ctrl=None, full_system=False +): n_cu = options.num_compute_units if 
options.TLB_config == "perLane": @@ -111,36 +137,50 @@ def config_tlb_hierarchy(options, system, shader_idx, gpu_ctrl=None, print("Bad option for TLB Configuration.") sys.exit(1) - #------------------------------------------------------------------------- + # ------------------------------------------------------------------------- # A visual representation of the TLB hierarchy # for ease of configuration # < Modify here the width and the number of levels if you want a different # configuration > # width is the number of TLBs of the given type (i.e., D-TLB, I-TLB etc) # for this level - L1 = [{'name': 'sqc', 'width': options.num_sqc, 'TLBarray': [], - 'CoalescerArray': []}, - {'name': 'scalar', 'width' : options.num_scalar_cache, - 'TLBarray': [], 'CoalescerArray': []}, - {'name': 'l1', 'width': num_TLBs, 'TLBarray': [], - 'CoalescerArray': []}] + L1 = [ + { + "name": "sqc", + "width": options.num_sqc, + "TLBarray": [], + "CoalescerArray": [], + }, + { + "name": "scalar", + "width": options.num_scalar_cache, + "TLBarray": [], + "CoalescerArray": [], + }, + { + "name": "l1", + "width": num_TLBs, + "TLBarray": [], + "CoalescerArray": [], + }, + ] - L2 = [{'name': 'l2', 'width': 1, 'TLBarray': [], 'CoalescerArray': []}] - L3 = [{'name': 'l3', 'width': 1, 'TLBarray': [], 'CoalescerArray': []}] + L2 = [{"name": "l2", "width": 1, "TLBarray": [], "CoalescerArray": []}] + L3 = [{"name": "l3", "width": 1, "TLBarray": [], "CoalescerArray": []}] TLB_hierarchy = [L1, L2, L3] - #------------------------------------------------------------------------- + # ------------------------------------------------------------------------- # Create the hiearchy # Call the appropriate constructors and add objects to the system for i in range(len(TLB_hierarchy)): hierarchy_level = TLB_hierarchy[i] - level = i+1 + level = i + 1 for TLB_type in hierarchy_level: - TLB_index = TLB_type['width'] - TLB_array = TLB_type['TLBarray'] - Coalescer_array = TLB_type['CoalescerArray'] + TLB_index = 
TLB_type["width"] + TLB_array = TLB_type["TLBarray"] + Coalescer_array = TLB_type["CoalescerArray"] # If the sim calls for a fixed L1 TLB size across CUs, # override the TLB entries option if options.tot_L1TLB_size: @@ -148,71 +188,96 @@ def config_tlb_hierarchy(options, system, shader_idx, gpu_ctrl=None, if options.L1TLBassoc > options.L1TLBentries: options.L1TLBassoc = options.L1TLBentries # call the constructors for the TLB and the Coalescer - create_TLB_Coalescer(options, level, TLB_index,\ - TLB_array, Coalescer_array, gpu_ctrl, full_system) + create_TLB_Coalescer( + options, + level, + TLB_index, + TLB_array, + Coalescer_array, + gpu_ctrl, + full_system, + ) - system_TLB_name = TLB_type['name'] + '_tlb' - system_Coalescer_name = TLB_type['name'] + '_coalescer' + system_TLB_name = TLB_type["name"] + "_tlb" + system_Coalescer_name = TLB_type["name"] + "_coalescer" # add the different TLB levels to the system # Modify here if you want to make the TLB hierarchy a child of # the shader. - exec('system.%s = TLB_array' % system_TLB_name) - exec('system.%s = Coalescer_array' % system_Coalescer_name) + exec("system.%s = TLB_array" % system_TLB_name) + exec("system.%s = Coalescer_array" % system_Coalescer_name) - #=========================================================== + # =========================================================== # Specify the TLB hierarchy (i.e., port connections) # All TLBs but the last level TLB need to have a memSidePort - #=========================================================== + # =========================================================== # Each TLB is connected with its Coalescer through a single port. # There is a one-to-one mapping of TLBs to Coalescers at a given level # This won't be modified no matter what the hierarchy looks like. 
for i in range(len(TLB_hierarchy)): hierarchy_level = TLB_hierarchy[i] - level = i+1 + level = i + 1 for TLB_type in hierarchy_level: - name = TLB_type['name'] - for index in range(TLB_type['width']): - exec('system.%s_coalescer[%d].mem_side_ports[0] = \ - system.%s_tlb[%d].cpu_side_ports[0]' % \ - (name, index, name, index)) + name = TLB_type["name"] + for index in range(TLB_type["width"]): + exec( + "system.%s_coalescer[%d].mem_side_ports[0] = \ + system.%s_tlb[%d].cpu_side_ports[0]" + % (name, index, name, index) + ) # Connect the cpuSidePort of all the coalescers in level 1 # < Modify here if you want a different configuration > for TLB_type in L1: - name = TLB_type['name'] - num_TLBs = TLB_type['width'] - if name == 'l1': # L1 D-TLBs + name = TLB_type["name"] + num_TLBs = TLB_type["width"] + if name == "l1": # L1 D-TLBs tlb_per_cu = num_TLBs // n_cu for cu_idx in range(n_cu): if tlb_per_cu: for tlb in range(tlb_per_cu): - exec('system.cpu[%d].CUs[%d].translation_port[%d] = \ - system.l1_coalescer[%d].cpu_side_ports[%d]' % \ - (shader_idx, cu_idx, tlb, - cu_idx*tlb_per_cu+tlb, 0)) + exec( + "system.cpu[%d].CUs[%d].translation_port[%d] = \ + system.l1_coalescer[%d].cpu_side_ports[%d]" + % ( + shader_idx, + cu_idx, + tlb, + cu_idx * tlb_per_cu + tlb, + 0, + ) + ) else: - exec('system.cpu[%d].CUs[%d].translation_port[%d] = \ - system.l1_coalescer[%d].cpu_side_ports[%d]' % \ - (shader_idx, cu_idx, tlb_per_cu, - cu_idx / (n_cu / num_TLBs), - cu_idx % (n_cu / num_TLBs))) - elif name == 'sqc': # I-TLB + exec( + "system.cpu[%d].CUs[%d].translation_port[%d] = \ + system.l1_coalescer[%d].cpu_side_ports[%d]" + % ( + shader_idx, + cu_idx, + tlb_per_cu, + cu_idx / (n_cu / num_TLBs), + cu_idx % (n_cu / num_TLBs), + ) + ) + elif name == "sqc": # I-TLB for index in range(n_cu): sqc_tlb_index = index / options.cu_per_sqc sqc_tlb_port_id = index % options.cu_per_sqc - exec('system.cpu[%d].CUs[%d].sqc_tlb_port = \ - system.sqc_coalescer[%d].cpu_side_ports[%d]' % \ - (shader_idx, 
index, sqc_tlb_index, sqc_tlb_port_id)) - elif name == 'scalar': # Scalar D-TLB + exec( + "system.cpu[%d].CUs[%d].sqc_tlb_port = \ + system.sqc_coalescer[%d].cpu_side_ports[%d]" + % (shader_idx, index, sqc_tlb_index, sqc_tlb_port_id) + ) + elif name == "scalar": # Scalar D-TLB for index in range(n_cu): scalar_tlb_index = index / options.cu_per_scalar_cache scalar_tlb_port_id = index % options.cu_per_scalar_cache - exec('system.cpu[%d].CUs[%d].scalar_tlb_port = \ - system.scalar_coalescer[%d].cpu_side_ports[%d]' % \ - (shader_idx, index, scalar_tlb_index, - scalar_tlb_port_id)) + exec( + "system.cpu[%d].CUs[%d].scalar_tlb_port = \ + system.scalar_coalescer[%d].cpu_side_ports[%d]" + % (shader_idx, index, scalar_tlb_index, scalar_tlb_port_id) + ) # Connect the memSidePorts of all the TLBs with the # cpuSidePorts of the Coalescers of the next level @@ -220,23 +285,28 @@ def config_tlb_hierarchy(options, system, shader_idx, gpu_ctrl=None, # L1 <-> L2 l2_coalescer_index = 0 for TLB_type in L1: - name = TLB_type['name'] - for index in range(TLB_type['width']): - exec('system.%s_tlb[%d].mem_side_ports[0] = \ - system.l2_coalescer[0].cpu_side_ports[%d]' % \ - (name, index, l2_coalescer_index)) + name = TLB_type["name"] + for index in range(TLB_type["width"]): + exec( + "system.%s_tlb[%d].mem_side_ports[0] = \ + system.l2_coalescer[0].cpu_side_ports[%d]" + % (name, index, l2_coalescer_index) + ) l2_coalescer_index += 1 # L2 <-> L3 - system.l2_tlb[0].mem_side_ports[0] = \ - system.l3_coalescer[0].cpu_side_ports[0] + system.l2_tlb[0].mem_side_ports[0] = system.l3_coalescer[0].cpu_side_ports[ + 0 + ] # L3 TLB Vega page table walker to memory for full system only if full_system: for TLB_type in L3: - name = TLB_type['name'] - for index in range(TLB_type['width']): - exec('system._dma_ports.append(system.%s_tlb[%d].walker)' % \ - (name, index)) + name = TLB_type["name"] + for index in range(TLB_type["width"]): + exec( + "system._dma_ports.append(system.%s_tlb[%d].walker)" + % 
(name, index) + ) return system diff --git a/configs/common/GPUTLBOptions.py b/configs/common/GPUTLBOptions.py index 3a1f9ad37e..1a77a2c192 100644 --- a/configs/common/GPUTLBOptions.py +++ b/configs/common/GPUTLBOptions.py @@ -27,77 +27,105 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. + def tlb_options(parser): - #=================================================================== + # =================================================================== # TLB Configuration - #=================================================================== + # =================================================================== parser.add_argument( - "--TLB-config", type=str, default="perCU", - help="Options are: perCU (default), mono, 2CU, or perLane") + "--TLB-config", + type=str, + default="perCU", + help="Options are: perCU (default), mono, 2CU, or perLane", + ) - #=================================================================== + # =================================================================== # L1 TLB Options (D-TLB, I-TLB, Dispatcher-TLB) - #=================================================================== + # =================================================================== parser.add_argument("--L1TLBentries", type=int, default="32") parser.add_argument("--L1TLBassoc", type=int, default="32") - parser.add_argument("--L1AccessLatency", type=int, default="1", - help="latency in gpu cycles") - parser.add_argument("--L1MissLatency", type=int, default="750", - help="latency (in gpu cycles) of a page walk, " - "if this is a last level TLB") + parser.add_argument( + "--L1AccessLatency", + type=int, + default="1", + help="latency in gpu cycles", + ) + parser.add_argument( + "--L1MissLatency", + type=int, + default="750", + help="latency (in gpu cycles) of a page walk, " + "if this is a last level TLB", + ) parser.add_argument("--L1MaxOutstandingReqs", type=int, default="64") 
parser.add_argument("--L1AccessDistanceStat", action="store_true") parser.add_argument("--tot-L1TLB-size", type=int, default="0") - #=================================================================== + # =================================================================== # L2 TLB Options - #=================================================================== + # =================================================================== parser.add_argument("--L2TLBentries", type=int, default="4096") parser.add_argument("--L2TLBassoc", type=int, default="32") - parser.add_argument("--L2AccessLatency", type=int, default="69", - help="latency in gpu cycles") - parser.add_argument("--L2MissLatency", type=int, default="750", - help="latency (in gpu cycles) of a page walk, " - "if this is a last level TLB") + parser.add_argument( + "--L2AccessLatency", + type=int, + default="69", + help="latency in gpu cycles", + ) + parser.add_argument( + "--L2MissLatency", + type=int, + default="750", + help="latency (in gpu cycles) of a page walk, " + "if this is a last level TLB", + ) parser.add_argument("--L2MaxOutstandingReqs", type=int, default="64") parser.add_argument("--L2AccessDistanceStat", action="store_true") - #=================================================================== + # =================================================================== # L3 TLB Options - #=================================================================== + # =================================================================== parser.add_argument("--L3TLBentries", type=int, default="8192") parser.add_argument("--L3TLBassoc", type=int, default="32") - parser.add_argument("--L3AccessLatency", type=int, default="150", - help="latency in gpu cycles") - parser.add_argument("--L3MissLatency", type=int, default="750", - help="latency (in gpu cycles) of a page walk") + parser.add_argument( + "--L3AccessLatency", + type=int, + default="150", + help="latency in gpu cycles", + ) + parser.add_argument( + 
"--L3MissLatency", + type=int, + default="750", + help="latency (in gpu cycles) of a page walk", + ) parser.add_argument("--L3MaxOutstandingReqs", type=int, default="64") parser.add_argument("--L3AccessDistanceStat", action="store_true") - #=================================================================== + # =================================================================== # L1 TLBCoalescer Options - #=================================================================== + # =================================================================== parser.add_argument("--L1ProbesPerCycle", type=int, default="2") parser.add_argument("--L1CoalescingWindow", type=int, default="1") parser.add_argument("--L1DisableCoalescing", action="store_true") - #=================================================================== + # =================================================================== # L2 TLBCoalescer Options - #=================================================================== + # =================================================================== parser.add_argument("--L2ProbesPerCycle", type=int, default="2") parser.add_argument("--L2CoalescingWindow", type=int, default="1") parser.add_argument("--L2DisableCoalescing", action="store_true") - #=================================================================== + # =================================================================== # L3 TLBCoalescer Options - #=================================================================== + # =================================================================== parser.add_argument("--L3ProbesPerCycle", type=int, default="2") parser.add_argument("--L3CoalescingWindow", type=int, default="1") diff --git a/configs/common/HMC.py b/configs/common/HMC.py index b12bd0a553..f8321f356b 100644 --- a/configs/common/HMC.py +++ b/configs/common/HMC.py @@ -129,159 +129,303 @@ from m5.util import * def add_options(parser): # *****************************CROSSBAR PARAMETERS************************* 
# Flit size of the main interconnect [1] - parser.add_argument("--xbar-width", default=32, action="store", type=int, - help="Data width of the main XBar (Bytes)") + parser.add_argument( + "--xbar-width", + default=32, + action="store", + type=int, + help="Data width of the main XBar (Bytes)", + ) # Clock frequency of the main interconnect [1] # This crossbar, is placed on the logic-based of the HMC and it has its # own voltage and clock domains, different from the DRAM dies or from the # host. - parser.add_argument("--xbar-frequency", default='1GHz', type=str, - help="Clock Frequency of the main XBar") + parser.add_argument( + "--xbar-frequency", + default="1GHz", + type=str, + help="Clock Frequency of the main XBar", + ) # Arbitration latency of the HMC XBar [1] - parser.add_argument("--xbar-frontend-latency", default=1, action="store", - type=int, help="Arbitration latency of the XBar") + parser.add_argument( + "--xbar-frontend-latency", + default=1, + action="store", + type=int, + help="Arbitration latency of the XBar", + ) # Latency to forward a packet via the interconnect [1](two levels of FIFOs # at the input and output of the inteconnect) - parser.add_argument("--xbar-forward-latency", default=2, action="store", - type=int, help="Forward latency of the XBar") + parser.add_argument( + "--xbar-forward-latency", + default=2, + action="store", + type=int, + help="Forward latency of the XBar", + ) # Latency to forward a response via the interconnect [1](two levels of # FIFOs at the input and output of the inteconnect) - parser.add_argument("--xbar-response-latency", default=2, action="store", - type=int, help="Response latency of the XBar") + parser.add_argument( + "--xbar-response-latency", + default=2, + action="store", + type=int, + help="Response latency of the XBar", + ) # number of cross which connects 16 Vaults to serial link[7] - parser.add_argument("--number-mem-crossbar", default=4, action="store", - type=int, help="Number of crossbar in HMC") + 
parser.add_argument( + "--number-mem-crossbar", + default=4, + action="store", + type=int, + help="Number of crossbar in HMC", + ) # *****************************SERIAL LINK PARAMETERS********************** # Number of serial links controllers [1] - parser.add_argument("--num-links-controllers", default=4, action="store", - type=int, help="Number of serial links") + parser.add_argument( + "--num-links-controllers", + default=4, + action="store", + type=int, + help="Number of serial links", + ) # Number of packets (not flits) to store at the request side of the serial # link. This number should be adjusted to achive required bandwidth - parser.add_argument("--link-buffer-size-req", default=10, action="store", - type=int, help="Number of packets to buffer at the\ - request side of the serial link") + parser.add_argument( + "--link-buffer-size-req", + default=10, + action="store", + type=int, + help="Number of packets to buffer at the\ + request side of the serial link", + ) # Number of packets (not flits) to store at the response side of the serial # link. 
This number should be adjusted to achive required bandwidth - parser.add_argument("--link-buffer-size-rsp", default=10, action="store", - type=int, help="Number of packets to buffer at the\ - response side of the serial link") + parser.add_argument( + "--link-buffer-size-rsp", + default=10, + action="store", + type=int, + help="Number of packets to buffer at the\ + response side of the serial link", + ) # Latency of the serial link composed by SER/DES latency (1.6ns [4]) plus # the PCB trace latency (3ns Estimated based on [5]) - parser.add_argument("--link-latency", default='4.6ns', type=str, - help="Latency of the serial links") + parser.add_argument( + "--link-latency", + default="4.6ns", + type=str, + help="Latency of the serial links", + ) # Clock frequency of the each serial link(SerDes) [1] - parser.add_argument("--link-frequency", default='10GHz', type=str, - help="Clock Frequency of the serial links") + parser.add_argument( + "--link-frequency", + default="10GHz", + type=str, + help="Clock Frequency of the serial links", + ) # Clock frequency of serial link Controller[6] # clk_hmc[Mhz]= num_lanes_per_link * lane_speed [Gbits/s] / # data_path_width * 10^6 # clk_hmc[Mhz]= 16 * 10 Gbps / 256 * 10^6 = 625 Mhz - parser.add_argument("--link-controller-frequency", default='625MHz', - type=str, help="Clock Frequency of the link\ - controller") + parser.add_argument( + "--link-controller-frequency", + default="625MHz", + type=str, + help="Clock Frequency of the link\ + controller", + ) # Latency of the serial link controller to process the packets[1][6] # (ClockDomain = 625 Mhz ) # used here for calculations only - parser.add_argument("--link-ctrl-latency", default=4, action="store", - type=int, help="The number of cycles required for the\ - controller to process the packet") + parser.add_argument( + "--link-ctrl-latency", + default=4, + action="store", + type=int, + help="The number of cycles required for the\ + controller to process the packet", + ) # 
total_ctrl_latency = link_ctrl_latency + link_latency # total_ctrl_latency = 4(Cycles) * 1.6 ns + 4.6 ns - parser.add_argument("--total-ctrl-latency", default='11ns', type=str, - help="The latency experienced by every packet\ - regardless of size of packet") + parser.add_argument( + "--total-ctrl-latency", + default="11ns", + type=str, + help="The latency experienced by every packet\ + regardless of size of packet", + ) # Number of parallel lanes in each serial link [1] - parser.add_argument("--num-lanes-per-link", default=16, action="store", - type=int, help="Number of lanes per each link") + parser.add_argument( + "--num-lanes-per-link", + default=16, + action="store", + type=int, + help="Number of lanes per each link", + ) # Number of serial links [1] - parser.add_argument("--num-serial-links", default=4, action="store", - type=int, help="Number of serial links") + parser.add_argument( + "--num-serial-links", + default=4, + action="store", + type=int, + help="Number of serial links", + ) # speed of each lane of serial link - SerDes serial interface 10 Gb/s - parser.add_argument("--serial-link-speed", default=10, action="store", - type=int, help="Gbs/s speed of each lane of serial\ - link") + parser.add_argument( + "--serial-link-speed", + default=10, + action="store", + type=int, + help="Gbs/s speed of each lane of serial\ + link", + ) # address range for each of the serial links - parser.add_argument("--serial-link-addr-range", default='1GB', type=str, - help="memory range for each of the serial links.\ - Default: 1GB") + parser.add_argument( + "--serial-link-addr-range", + default="1GB", + type=str, + help="memory range for each of the serial links.\ + Default: 1GB", + ) # *****************************PERFORMANCE MONITORING********************* # The main monitor behind the HMC Controller - parser.add_argument("--enable-global-monitor", action="store_true", - help="The main monitor behind the HMC Controller") + parser.add_argument( + "--enable-global-monitor", 
+ action="store_true", + help="The main monitor behind the HMC Controller", + ) # The link performance monitors - parser.add_argument("--enable-link-monitor", action="store_true", - help="The link monitors") + parser.add_argument( + "--enable-link-monitor", action="store_true", help="The link monitors" + ) # link aggregator enable - put a cross between buffers & links - parser.add_argument("--enable-link-aggr", action="store_true", help="The\ - crossbar between port and Link Controller") + parser.add_argument( + "--enable-link-aggr", + action="store_true", + help="The\ + crossbar between port and Link Controller", + ) - parser.add_argument("--enable-buff-div", action="store_true", - help="Memory Range of Buffer is ivided between total\ - range") + parser.add_argument( + "--enable-buff-div", + action="store_true", + help="Memory Range of Buffer is ivided between total\ + range", + ) # *****************************HMC ARCHITECTURE ************************** # Memory chunk for 16 vault - numbers of vault / number of crossbars - parser.add_argument("--mem-chunk", default=4, action="store", type=int, - help="Chunk of memory range for each cross bar in\ - arch 0") + parser.add_argument( + "--mem-chunk", + default=4, + action="store", + type=int, + help="Chunk of memory range for each cross bar in\ + arch 0", + ) # size of req buffer within crossbar, used for modelling extra latency # when the reuqest go to non-local vault - parser.add_argument("--xbar-buffer-size-req", default=10, action="store", - type=int, help="Number of packets to buffer at the\ - request side of the crossbar") + parser.add_argument( + "--xbar-buffer-size-req", + default=10, + action="store", + type=int, + help="Number of packets to buffer at the\ + request side of the crossbar", + ) # size of response buffer within crossbar, used for modelling extra latency # when the response received from non-local vault - parser.add_argument("--xbar-buffer-size-resp", default=10, action="store", - type=int, 
help="Number of packets to buffer at the\ - response side of the crossbar") + parser.add_argument( + "--xbar-buffer-size-resp", + default=10, + action="store", + type=int, + help="Number of packets to buffer at the\ + response side of the crossbar", + ) # HMC device architecture. It affects the HMC host controller as well - parser.add_argument("--arch", type=str, choices=["same", "distributed", - "mixed"], default="distributed", help="same: HMC with\ + parser.add_argument( + "--arch", + type=str, + choices=["same", "distributed", "mixed"], + default="distributed", + help="same: HMC with\ 4 links, all with same range.\ndistributed: HMC with\ 4 links with distributed range.\nmixed: mixed with\ - same and distributed range.\nDefault: distributed") + same and distributed range.\nDefault: distributed", + ) # HMC device - number of vaults - parser.add_argument("--hmc-dev-num-vaults", default=16, action="store", - type=int, help="number of independent vaults within\ + parser.add_argument( + "--hmc-dev-num-vaults", + default=16, + action="store", + type=int, + help="number of independent vaults within\ the HMC device. Note: each vault has a memory\ - controller (valut controller)\nDefault: 16") + controller (valut controller)\nDefault: 16", + ) # HMC device - vault capacity or size - parser.add_argument("--hmc-dev-vault-size", default='256MB', type=str, - help="vault storage capacity in bytes. Default:\ - 256MB") - parser.add_argument("--mem-type", type=str, choices=["HMC_2500_1x32"], - default="HMC_2500_1x32", help="type of HMC memory to\ - use. Default: HMC_2500_1x32") - parser.add_argument("--mem-channels", default=1, action="store", type=int, - help="Number of memory channels") - parser.add_argument("--mem-ranks", default=1, action="store", type=int, - help="Number of ranks to iterate across") - parser.add_argument("--burst-length", default=256, action="store", - type=int, help="burst length in bytes. 
Note: the\ + parser.add_argument( + "--hmc-dev-vault-size", + default="256MB", + type=str, + help="vault storage capacity in bytes. Default:\ + 256MB", + ) + parser.add_argument( + "--mem-type", + type=str, + choices=["HMC_2500_1x32"], + default="HMC_2500_1x32", + help="type of HMC memory to\ + use. Default: HMC_2500_1x32", + ) + parser.add_argument( + "--mem-channels", + default=1, + action="store", + type=int, + help="Number of memory channels", + ) + parser.add_argument( + "--mem-ranks", + default=1, + action="store", + type=int, + help="Number of ranks to iterate across", + ) + parser.add_argument( + "--burst-length", + default=256, + action="store", + type=int, + help="burst length in bytes. Note: the\ cache line size will be set to this value.\nDefault:\ - 256") + 256", + ) # configure HMC host controller @@ -292,8 +436,8 @@ def config_hmc_host_ctrl(opt, system): # Create additional crossbar for arch1 if opt.arch == "distributed" or opt.arch == "mixed": - clk = '100GHz' - vd = VoltageDomain(voltage='1V') + clk = "100GHz" + vd = VoltageDomain(voltage="1V") # Create additional crossbar for arch1 system.membus = NoncoherentXBar(width=8) system.membus.badaddr_responder = BadAddr() @@ -310,42 +454,50 @@ def config_hmc_host_ctrl(opt, system): # Memmory ranges of serial link for arch-0. Same as the ranges of vault # controllers (4 vaults to 1 serial link) if opt.arch == "same": - ser_ranges = [AddrRange(0, (4*slar)-1) for i in - range(opt.num_serial_links)] + ser_ranges = [ + AddrRange(0, (4 * slar) - 1) for i in range(opt.num_serial_links) + ] # Memmory ranges of serial link for arch-1. 
Distributed range accross # links if opt.arch == "distributed": - ser_ranges = [AddrRange(i*slar, ((i+1)*slar)-1) for i in - range(opt.num_serial_links)] + ser_ranges = [ + AddrRange(i * slar, ((i + 1) * slar) - 1) + for i in range(opt.num_serial_links) + ] # Memmory ranges of serial link for arch-2 'Mixed' address distribution # over links if opt.arch == "mixed": - ser_range0 = AddrRange(0, (1*slar)-1) - ser_range1 = AddrRange(1*slar, 2*slar-1) - ser_range2 = AddrRange(0, (4*slar)-1) - ser_range3 = AddrRange(0, (4*slar)-1) + ser_range0 = AddrRange(0, (1 * slar) - 1) + ser_range1 = AddrRange(1 * slar, 2 * slar - 1) + ser_range2 = AddrRange(0, (4 * slar) - 1) + ser_range3 = AddrRange(0, (4 * slar) - 1) ser_ranges = [ser_range0, ser_range1, ser_range2, ser_range3] # Serial link Controller with 16 SerDes links at 10 Gbps with serial link # ranges w.r.t to architecture - sl = [SerialLink(ranges=ser_ranges[i], - req_size=opt.link_buffer_size_req, - resp_size=opt.link_buffer_size_rsp, - num_lanes=opt.num_lanes_per_link, - link_speed=opt.serial_link_speed, - delay=opt.total_ctrl_latency) for i in - range(opt.num_serial_links)] + sl = [ + SerialLink( + ranges=ser_ranges[i], + req_size=opt.link_buffer_size_req, + resp_size=opt.link_buffer_size_rsp, + num_lanes=opt.num_lanes_per_link, + link_speed=opt.serial_link_speed, + delay=opt.total_ctrl_latency, + ) + for i in range(opt.num_serial_links) + ] system.hmc_host.seriallink = sl # enable global monitor if opt.enable_global_monitor: - system.hmc_host.lmonitor = [CommMonitor() for i in - range(opt.num_serial_links)] + system.hmc_host.lmonitor = [ + CommMonitor() for i in range(opt.num_serial_links) + ] # set the clock frequency for serial link for i in range(opt.num_serial_links): clk = opt.link_controller_frequency - vd = VoltageDomain(voltage='1V') + vd = VoltageDomain(voltage="1V") scd = SrcClockDomain(clock=clk, voltage_domain=vd) system.hmc_host.seriallink[i].clk_domain = scd @@ -387,8 +539,10 @@ def config_hmc_dev(opt, 
system, hmc_host): # create memory ranges for the vault controllers arv = convert.toMemorySize(opt.hmc_dev_vault_size) - addr_ranges_vaults = [AddrRange(i*arv, ((i+1)*arv-1)) for i in - range(opt.hmc_dev_num_vaults)] + addr_ranges_vaults = [ + AddrRange(i * arv, ((i + 1) * arv - 1)) + for i in range(opt.hmc_dev_num_vaults) + ] system.mem_ranges = addr_ranges_vaults if opt.enable_link_monitor: @@ -396,29 +550,36 @@ def config_hmc_dev(opt, system, hmc_host): system.hmc_dev.lmonitor = lm # 4 HMC Crossbars located in its logic-base (LoB) - xb = [NoncoherentXBar(width=opt.xbar_width, - frontend_latency=opt.xbar_frontend_latency, - forward_latency=opt.xbar_forward_latency, - response_latency=opt.xbar_response_latency) for i in - range(opt.number_mem_crossbar)] + xb = [ + NoncoherentXBar( + width=opt.xbar_width, + frontend_latency=opt.xbar_frontend_latency, + forward_latency=opt.xbar_forward_latency, + response_latency=opt.xbar_response_latency, + ) + for i in range(opt.number_mem_crossbar) + ] system.hmc_dev.xbar = xb for i in range(opt.number_mem_crossbar): clk = opt.xbar_frequency - vd = VoltageDomain(voltage='1V') + vd = VoltageDomain(voltage="1V") scd = SrcClockDomain(clock=clk, voltage_domain=vd) system.hmc_dev.xbar[i].clk_domain = scd # Attach 4 serial link to 4 crossbar/s for i in range(opt.num_serial_links): if opt.enable_link_monitor: - system.hmc_host.seriallink[i].mem_side_port = \ - system.hmc_dev.lmonitor[i].cpu_side_port - system.hmc_dev.lmonitor[i].mem_side_port = \ - system.hmc_dev.xbar[i].cpu_side_ports + system.hmc_host.seriallink[ + i + ].mem_side_port = system.hmc_dev.lmonitor[i].cpu_side_port + system.hmc_dev.lmonitor[i].mem_side_port = system.hmc_dev.xbar[ + i + ].cpu_side_ports else: - system.hmc_host.seriallink[i].mem_side_port = \ - system.hmc_dev.xbar[i].cpu_side_ports + system.hmc_host.seriallink[i].mem_side_port = system.hmc_dev.xbar[ + i + ].cpu_side_ports # Connecting xbar with each other for request arriving at the wrong xbar, # then it 
will be forward to correct xbar. Bridge is used to connect xbars @@ -426,9 +587,13 @@ def config_hmc_dev(opt, system, hmc_host): numx = len(system.hmc_dev.xbar) # create a list of buffers - system.hmc_dev.buffers = [Bridge(req_size=opt.xbar_buffer_size_req, - resp_size=opt.xbar_buffer_size_resp) - for i in range(numx*(opt.mem_chunk-1))] + system.hmc_dev.buffers = [ + Bridge( + req_size=opt.xbar_buffer_size_req, + resp_size=opt.xbar_buffer_size_resp, + ) + for i in range(numx * (opt.mem_chunk - 1)) + ] # Buffer iterator it = iter(list(range(len(system.hmc_dev.buffers)))) @@ -446,14 +611,18 @@ def config_hmc_dev(opt, system, hmc_host): # Change the default values for ranges of bridge system.hmc_dev.buffers[index].ranges = system.mem_ranges[ - j * int(opt.mem_chunk): - (j + 1) * int(opt.mem_chunk)] + j * int(opt.mem_chunk) : (j + 1) * int(opt.mem_chunk) + ] # Connect the bridge between corssbars - system.hmc_dev.xbar[i].mem_side_ports = \ - system.hmc_dev.buffers[index].cpu_side_port - system.hmc_dev.buffers[index].mem_side_port = \ - system.hmc_dev.xbar[j].cpu_side_ports + system.hmc_dev.xbar[ + i + ].mem_side_ports = system.hmc_dev.buffers[ + index + ].cpu_side_port + system.hmc_dev.buffers[ + index + ].mem_side_port = system.hmc_dev.xbar[j].cpu_side_ports else: # Don't connect the xbar to itself pass @@ -462,37 +631,49 @@ def config_hmc_dev(opt, system, hmc_host): # can only direct traffic to it local vaults if opt.arch == "mixed": system.hmc_dev.buffer30 = Bridge(ranges=system.mem_ranges[0:4]) - system.hmc_dev.xbar[3].mem_side_ports = \ - system.hmc_dev.buffer30.cpu_side_port - system.hmc_dev.buffer30.mem_side_port = \ - system.hmc_dev.xbar[0].cpu_side_ports + system.hmc_dev.xbar[ + 3 + ].mem_side_ports = system.hmc_dev.buffer30.cpu_side_port + system.hmc_dev.buffer30.mem_side_port = system.hmc_dev.xbar[ + 0 + ].cpu_side_ports system.hmc_dev.buffer31 = Bridge(ranges=system.mem_ranges[4:8]) - system.hmc_dev.xbar[3].mem_side_ports = \ - 
system.hmc_dev.buffer31.cpu_side_port - system.hmc_dev.buffer31.mem_side_port = \ - system.hmc_dev.xbar[1].cpu_side_ports + system.hmc_dev.xbar[ + 3 + ].mem_side_ports = system.hmc_dev.buffer31.cpu_side_port + system.hmc_dev.buffer31.mem_side_port = system.hmc_dev.xbar[ + 1 + ].cpu_side_ports system.hmc_dev.buffer32 = Bridge(ranges=system.mem_ranges[8:12]) - system.hmc_dev.xbar[3].mem_side_ports = \ - system.hmc_dev.buffer32.cpu_side_port - system.hmc_dev.buffer32.mem_side_port = \ - system.hmc_dev.xbar[2].cpu_side_ports + system.hmc_dev.xbar[ + 3 + ].mem_side_ports = system.hmc_dev.buffer32.cpu_side_port + system.hmc_dev.buffer32.mem_side_port = system.hmc_dev.xbar[ + 2 + ].cpu_side_ports system.hmc_dev.buffer20 = Bridge(ranges=system.mem_ranges[0:4]) - system.hmc_dev.xbar[2].mem_side_ports = \ - system.hmc_dev.buffer20.cpu_side_port - system.hmc_dev.buffer20.mem_side_port = \ - system.hmc_dev.xbar[0].cpu_side_ports + system.hmc_dev.xbar[ + 2 + ].mem_side_ports = system.hmc_dev.buffer20.cpu_side_port + system.hmc_dev.buffer20.mem_side_port = system.hmc_dev.xbar[ + 0 + ].cpu_side_ports system.hmc_dev.buffer21 = Bridge(ranges=system.mem_ranges[4:8]) - system.hmc_dev.xbar[2].mem_side_ports = \ - system.hmc_dev.buffer21.cpu_side_port - system.hmc_dev.buffer21.mem_side_port = \ - system.hmc_dev.xbar[1].cpu_side_ports + system.hmc_dev.xbar[ + 2 + ].mem_side_ports = system.hmc_dev.buffer21.cpu_side_port + system.hmc_dev.buffer21.mem_side_port = system.hmc_dev.xbar[ + 1 + ].cpu_side_ports system.hmc_dev.buffer23 = Bridge(ranges=system.mem_ranges[12:16]) - system.hmc_dev.xbar[2].mem_side_ports = \ - system.hmc_dev.buffer23.cpu_side_port - system.hmc_dev.buffer23.mem_side_port = \ - system.hmc_dev.xbar[3].cpu_side_ports + system.hmc_dev.xbar[ + 2 + ].mem_side_ports = system.hmc_dev.buffer23.cpu_side_port + system.hmc_dev.buffer23.mem_side_port = system.hmc_dev.xbar[ + 3 + ].cpu_side_ports diff --git a/configs/common/MemConfig.py b/configs/common/MemConfig.py index 
332fd6b28a..baa0d233af 100644 --- a/configs/common/MemConfig.py +++ b/configs/common/MemConfig.py @@ -37,8 +37,8 @@ import m5.objects from common import ObjectList from common import HMC -def create_mem_intf(intf, r, i, intlv_bits, intlv_size, - xor_low_bit): + +def create_mem_intf(intf, r, i, intlv_bits, intlv_size, xor_low_bit): """ Helper function for creating a single memoy controller from the given options. This function is invoked multiple times in config_mem function @@ -46,6 +46,7 @@ def create_mem_intf(intf, r, i, intlv_bits, intlv_size, """ import math + intlv_low_bit = int(math.log(intlv_size, 2)) # Use basic hashing for the channel selection, and preferably use @@ -53,7 +54,7 @@ def create_mem_intf(intf, r, i, intlv_bits, intlv_size, # the details of the caches here, make an educated guess. 4 MByte # 4-way associative with 64 byte cache lines is 6 offset bits and # 14 index bits. - if (xor_low_bit): + if xor_low_bit: xor_high_bit = xor_low_bit + intlv_bits - 1 else: xor_high_bit = 0 @@ -67,13 +68,15 @@ def create_mem_intf(intf, r, i, intlv_bits, intlv_size, # If the channel bits are appearing after the column # bits, we need to add the appropriate number of bits # for the row buffer size - if interface.addr_mapping.value == 'RoRaBaChCo': + if interface.addr_mapping.value == "RoRaBaChCo": # This computation only really needs to happen # once, but as we rely on having an instance we # end up having to repeat it for each and every # one - rowbuffer_size = interface.device_rowbuffer_size.value * \ - interface.devices_per_rank.value + rowbuffer_size = ( + interface.device_rowbuffer_size.value + * interface.devices_per_rank.value + ) intlv_low_bit = int(math.log(rowbuffer_size, 2)) @@ -83,7 +86,7 @@ def create_mem_intf(intf, r, i, intlv_bits, intlv_size, # If the channel bits are appearing after the low order # address bits (buffer bits), we need to add the appropriate # number of bits for the buffer size - if interface.addr_mapping.value == 'RoRaBaChCo': + 
if interface.addr_mapping.value == "RoRaBaChCo": # This computation only really needs to happen # once, but as we rely on having an instance we # end up having to repeat it for each and every @@ -94,14 +97,17 @@ def create_mem_intf(intf, r, i, intlv_bits, intlv_size, # We got all we need to configure the appropriate address # range - interface.range = m5.objects.AddrRange(r.start, size = r.size(), - intlvHighBit = \ - intlv_low_bit + intlv_bits - 1, - xorHighBit = xor_high_bit, - intlvBits = intlv_bits, - intlvMatch = i) + interface.range = m5.objects.AddrRange( + r.start, + size=r.size(), + intlvHighBit=intlv_low_bit + intlv_bits - 1, + xorHighBit=xor_high_bit, + intlvBits=intlv_bits, + intlvMatch=i, + ) return interface + def config_mem(options, system): """ Create the memory controllers based on the options and attach them. @@ -125,8 +131,9 @@ def config_mem(options, system): # Optional options opt_tlm_memory = getattr(options, "tlm_memory", None) - opt_external_memory_system = getattr(options, "external_memory_system", - None) + opt_external_memory_system = getattr( + options, "external_memory_system", None + ) opt_elastic_trace_en = getattr(options, "elastic_trace_en", False) opt_mem_ranks = getattr(options, "mem_ranks", None) opt_nvm_ranks = getattr(options, "nvm_ranks", None) @@ -149,15 +156,18 @@ def config_mem(options, system): port_type="tlm_slave", port_data=opt_tlm_memory, port=system.membus.mem_side_ports, - addr_ranges=system.mem_ranges) + addr_ranges=system.mem_ranges, + ) system.workload.addr_check = False return if opt_external_memory_system: subsystem.external_memory = m5.objects.ExternalSlave( port_type=opt_external_memory_system, - port_data="init_mem0", port=xbar.mem_side_ports, - addr_ranges=system.mem_ranges) + port_data="init_mem0", + port=xbar.mem_side_ports, + addr_ranges=system.mem_ranges, + ) subsystem.workload.addr_check = False return @@ -165,8 +175,9 @@ def config_mem(options, system): import math from m5.util import fatal + 
intlv_bits = int(math.log(nbr_mem_ctrls, 2)) - if 2 ** intlv_bits != nbr_mem_ctrls: + if 2**intlv_bits != nbr_mem_ctrls: fatal("Number of memory channels must be a power of 2") if opt_mem_type: @@ -178,8 +189,10 @@ def config_mem(options, system): mem_ctrls = [] if opt_elastic_trace_en and not issubclass(intf, m5.objects.SimpleMemory): - fatal("When elastic trace is enabled, configure mem-type as " - "simple-mem.") + fatal( + "When elastic trace is enabled, configure mem-type as " + "simple-mem." + ) # The default behaviour is to interleave memory channels on 128 # byte granularity, or cache line granularity if larger than 128 @@ -199,13 +212,16 @@ def config_mem(options, system): for i in range(nbr_mem_ctrls): if opt_mem_type and (not opt_nvm_type or range_iter % 2 != 0): # Create the DRAM interface - dram_intf = create_mem_intf(intf, r, i, - intlv_bits, intlv_size, opt_xor_low_bit) + dram_intf = create_mem_intf( + intf, r, i, intlv_bits, intlv_size, opt_xor_low_bit + ) # Set the number of ranks based on the command-line # options if it was explicitly set - if issubclass(intf, m5.objects.DRAMInterface) and \ - opt_mem_ranks: + if ( + issubclass(intf, m5.objects.DRAMInterface) + and opt_mem_ranks + ): dram_intf.ranks_per_channel = opt_mem_ranks # Enable low-power DRAM states if option is set @@ -213,9 +229,11 @@ def config_mem(options, system): dram_intf.enable_dram_powerdown = opt_dram_powerdown if opt_elastic_trace_en: - dram_intf.latency = '1ns' - print("For elastic trace, over-riding Simple Memory " - "latency to 1ns.") + dram_intf.latency = "1ns" + print( + "For elastic trace, over-riding Simple Memory " + "latency to 1ns." 
+ ) # Create the controller that will drive the interface mem_ctrl = dram_intf.controller() @@ -223,13 +241,16 @@ def config_mem(options, system): mem_ctrls.append(mem_ctrl) elif opt_nvm_type and (not opt_mem_type or range_iter % 2 == 0): - nvm_intf = create_mem_intf(n_intf, r, i, - intlv_bits, intlv_size, opt_xor_low_bit) + nvm_intf = create_mem_intf( + n_intf, r, i, intlv_bits, intlv_size, opt_xor_low_bit + ) # Set the number of ranks based on the command-line # options if it was explicitly set - if issubclass(n_intf, m5.objects.NVMInterface) and \ - opt_nvm_ranks: + if ( + issubclass(n_intf, m5.objects.NVMInterface) + and opt_nvm_ranks + ): nvm_intf.ranks_per_channel = opt_nvm_ranks # Create a controller if not sharing a channel with DRAM @@ -244,13 +265,13 @@ def config_mem(options, system): # hook up NVM interface when channel is shared with DRAM + NVM for i in range(len(nvm_intfs)): - mem_ctrls[i].nvm = nvm_intfs[i]; + mem_ctrls[i].nvm = nvm_intfs[i] # Connect the controller to the xbar port for i in range(len(mem_ctrls)): if opt_mem_type == "HMC_2500_1x32": # Connect the controllers to the membus - mem_ctrls[i].port = xbar[i//4].mem_side_ports + mem_ctrls[i].port = xbar[i // 4].mem_side_ports # Set memory device size. There is an independent controller # for each vault. All vaults are same size. mem_ctrls[i].dram.device_size = options.hmc_dev_vault_size diff --git a/configs/common/ObjectList.py b/configs/common/ObjectList.py index 685dbc1403..ce529677e7 100644 --- a/configs/common/ObjectList.py +++ b/configs/common/ObjectList.py @@ -34,18 +34,20 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from gem5.runtime import get_supported_isas import m5.objects import m5.internal.params import inspect import sys from textwrap import TextWrapper + class ObjectList(object): - """ Creates a list of objects that are sub-classes of a given class. 
""" + """Creates a list of objects that are sub-classes of a given class.""" def _is_obj_class(self, cls): """Determine if a class is a a sub class of the provided base class - that can be instantiated. + that can be instantiated. """ # We can't use the normal inspect.isclass because the ParamFactory @@ -63,16 +65,20 @@ class ObjectList(object): sub_cls = self._sub_classes[real_name] return sub_cls except KeyError: - print("{} is not a valid sub-class of {}.".format(name, \ - self.base_cls)) + print( + "{} is not a valid sub-class of {}.".format( + name, self.base_cls + ) + ) raise def print(self): """Print a list of available sub-classes and aliases.""" print("Available {} classes:".format(self.base_cls)) - doc_wrapper = TextWrapper(initial_indent="\t\t", - subsequent_indent="\t\t") + doc_wrapper = TextWrapper( + initial_indent="\t\t", subsequent_indent="\t\t" + ) for name, cls in list(self._sub_classes.items()): print("\t{}".format(name)) @@ -117,6 +123,7 @@ class ObjectList(object): self._aliases = {} self._add_aliases(aliases) + class CPUList(ObjectList): def _is_obj_class(self, cls): """Determine if a class is a CPU that can be instantiated""" @@ -124,35 +131,42 @@ class CPUList(ObjectList): # We can't use the normal inspect.isclass because the ParamFactory # and ProxyFactory classes have a tendency to confuse it. try: - return super(CPUList, self)._is_obj_class(cls) and \ - not issubclass(cls, m5.objects.CheckerCPU) + return super(CPUList, self)._is_obj_class(cls) and not issubclass( + cls, m5.objects.CheckerCPU + ) except (TypeError, AttributeError): return False def _add_objects(self): super(CPUList, self)._add_objects() - from m5.defines import buildEnv from importlib import import_module - for package in [ "generic", buildEnv['TARGET_ISA']]: + + for isa in { + "generic", + } | {isa.name.lower() for isa in get_supported_isas()}: try: - package = import_module(".cores." + package, - package=__name__.rpartition('.')[0]) + package = import_module( + ".cores." 
+ isa, package=__name__.rpartition(".")[0] + ) except ImportError: # No timing models for this ISA continue - for mod_name, module in \ - inspect.getmembers(package, inspect.ismodule): - for name, cls in inspect.getmembers(module, - self._is_obj_class): + for mod_name, module in inspect.getmembers( + package, inspect.ismodule + ): + for name, cls in inspect.getmembers( + module, self._is_obj_class + ): self._sub_classes[name] = cls + class EnumList(ObjectList): - """ Creates a list of possible values for a given enum class. """ + """Creates a list of possible values for a given enum class.""" def _add_objects(self): - """ Add all enum values to the ObjectList """ + """Add all enum values to the ObjectList""" self._sub_classes = {} for (key, value) in list(self.base_cls.__members__.items()): # All Enums have a value Num_NAME at the end which we @@ -160,31 +174,37 @@ class EnumList(ObjectList): if not key.startswith("Num_"): self._sub_classes[key] = value -rp_list = ObjectList(getattr(m5.objects, 'BaseReplacementPolicy', None)) -bp_list = ObjectList(getattr(m5.objects, 'BranchPredictor', None)) -cpu_list = CPUList(getattr(m5.objects, 'BaseCPU', None)) -hwp_list = ObjectList(getattr(m5.objects, 'BasePrefetcher', None)) -indirect_bp_list = ObjectList(getattr(m5.objects, 'IndirectPredictor', None)) -mem_list = ObjectList(getattr(m5.objects, 'AbstractMemory', None)) -dram_addr_map_list = EnumList(getattr(m5.internal.params, 'enum_AddrMap', - None)) + +rp_list = ObjectList(getattr(m5.objects, "BaseReplacementPolicy", None)) +bp_list = ObjectList(getattr(m5.objects, "BranchPredictor", None)) +cpu_list = CPUList(getattr(m5.objects, "BaseCPU", None)) +hwp_list = ObjectList(getattr(m5.objects, "BasePrefetcher", None)) +indirect_bp_list = ObjectList(getattr(m5.objects, "IndirectPredictor", None)) +mem_list = ObjectList(getattr(m5.objects, "AbstractMemory", None)) +dram_addr_map_list = EnumList( + getattr(m5.internal.params, "enum_AddrMap", None) +) # Platform aliases. 
The platforms listed here might not be compiled, # we make sure they exist before we add them to the platform list. -_platform_aliases_all = [ - ("VExpress_GEM5", "VExpress_GEM5_V1"), - ] -platform_list = ObjectList(getattr(m5.objects, 'Platform', None), \ - _platform_aliases_all) +_platform_aliases_all = [("VExpress_GEM5", "VExpress_GEM5_V1")] +platform_list = ObjectList( + getattr(m5.objects, "Platform", None), _platform_aliases_all +) + def _subclass_tester(name): sub_class = getattr(m5.objects, name, None) def tester(cls): - return sub_class is not None and cls is not None and \ - issubclass(cls, sub_class) + return ( + sub_class is not None + and cls is not None + and issubclass(cls, sub_class) + ) return tester + is_kvm_cpu = _subclass_tester("BaseKvmCPU") is_noncaching_cpu = _subclass_tester("NonCachingSimpleCPU") diff --git a/configs/common/Options.py b/configs/common/Options.py index a63cc7b089..81d7791285 100644 --- a/configs/common/Options.py +++ b/configs/common/Options.py @@ -97,6 +97,7 @@ class ListPlatform(argparse.Action): ObjectList.platform_list.print() sys.exit(0) + # Add the very basic options that work also in the case of the no ISA # being used, and consequently no CPUs, but rather various types of # testers and traffic generators. 
@@ -104,41 +105,77 @@ class ListPlatform(argparse.Action): def addNoISAOptions(parser): parser.add_argument("-n", "--num-cpus", type=int, default=1) - parser.add_argument("--sys-voltage", action="store", type=str, - default='1.0V', - help="""Top-level voltage for blocks running at system - power supply""") - parser.add_argument("--sys-clock", action="store", type=str, - default='1GHz', - help="""Top-level clock for blocks running at system - speed""") + parser.add_argument( + "--sys-voltage", + action="store", + type=str, + default="1.0V", + help="""Top-level voltage for blocks running at system + power supply""", + ) + parser.add_argument( + "--sys-clock", + action="store", + type=str, + default="1GHz", + help="""Top-level clock for blocks running at system + speed""", + ) # Memory Options - parser.add_argument("--list-mem-types", - action=ListMem, nargs=0, - help="List available memory types") - parser.add_argument("--mem-type", default="DDR3_1600_8x8", - choices=ObjectList.mem_list.get_names(), - help="type of memory to use") - parser.add_argument("--mem-channels", type=int, default=1, - help="number of memory channels") - parser.add_argument("--mem-ranks", type=int, default=None, - help="number of memory ranks per channel") parser.add_argument( - "--mem-size", action="store", type=str, default="512MB", - help="Specify the physical memory size (single memory)") - parser.add_argument("--enable-dram-powerdown", action="store_true", - help="Enable low-power states in DRAMInterface") - parser.add_argument("--mem-channels-intlv", type=int, default=0, - help="Memory channels interleave") + "--list-mem-types", + action=ListMem, + nargs=0, + help="List available memory types", + ) + parser.add_argument( + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", + ) + parser.add_argument( + "--mem-channels", type=int, default=1, help="number of memory channels" + ) + parser.add_argument( + "--mem-ranks", + 
type=int, + default=None, + help="number of memory ranks per channel", + ) + parser.add_argument( + "--mem-size", + action="store", + type=str, + default="512MB", + help="Specify the physical memory size (single memory)", + ) + parser.add_argument( + "--enable-dram-powerdown", + action="store_true", + help="Enable low-power states in DRAMInterface", + ) + parser.add_argument( + "--mem-channels-intlv", + type=int, + default=0, + help="Memory channels interleave", + ) parser.add_argument("--memchecker", action="store_true") # Cache Options - parser.add_argument("--external-memory-system", type=str, - help="use external ports of this port_type for caches") - parser.add_argument("--tlm-memory", type=str, - help="use external port for SystemC TLM cosimulation") + parser.add_argument( + "--external-memory-system", + type=str, + help="use external ports of this port_type for caches", + ) + parser.add_argument( + "--tlm-memory", + type=str, + help="use external port for SystemC TLM cosimulation", + ) parser.add_argument("--caches", action="store_true") parser.add_argument("--l2cache", action="store_true") parser.add_argument("--num-dirs", type=int, default=1) @@ -158,26 +195,44 @@ def addNoISAOptions(parser): parser.add_argument("--ruby", action="store_true") # Run duration options - parser.add_argument("-m", "--abs-max-tick", type=int, default=m5.MaxTick, - metavar="TICKS", help="Run to absolute simulated tick " - "specified including ticks from a restored checkpoint") parser.add_argument( - "--rel-max-tick", type=int, default=None, metavar="TICKS", + "-m", + "--abs-max-tick", + type=int, + default=m5.MaxTick, + metavar="TICKS", + help="Run to absolute simulated tick " + "specified including ticks from a restored checkpoint", + ) + parser.add_argument( + "--rel-max-tick", + type=int, + default=None, + metavar="TICKS", help="Simulate for specified number of" " ticks relative to the simulation start tick (e.g. 
if " - "restoring a checkpoint)") - parser.add_argument("--maxtime", type=float, default=None, - help="Run to the specified absolute simulated time in " - "seconds") + "restoring a checkpoint)", + ) parser.add_argument( - "-P", "--param", action="append", default=[], + "--maxtime", + type=float, + default=None, + help="Run to the specified absolute simulated time in " "seconds", + ) + parser.add_argument( + "-P", + "--param", + action="append", + default=[], help="Set a SimObject parameter relative to the root node. " "An extended Python multi range slicing syntax can be used " "for arrays. For example: " "'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' " "sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 " "Direct parameters of the root object are not accessible, " - "only parameters of its children.") + "only parameters of its children.", + ) + # Add common options that assume a non-NULL ISA. @@ -187,273 +242,519 @@ def addCommonOptions(parser): addNoISAOptions(parser) # system options - parser.add_argument("--list-cpu-types", - action=ListCpu, nargs=0, - help="List available CPU types") - parser.add_argument("--cpu-type", default="AtomicSimpleCPU", - choices=ObjectList.cpu_list.get_names(), - help="type of cpu to run with") - parser.add_argument("--list-bp-types", - action=ListBp, nargs=0, - help="List available branch predictor types") - parser.add_argument("--list-indirect-bp-types", - action=ListIndirectBP, nargs=0, - help="List available indirect branch predictor types") - parser.add_argument("--bp-type", default=None, - choices=ObjectList.bp_list.get_names(), - help=""" + parser.add_argument( + "--list-cpu-types", + action=ListCpu, + nargs=0, + help="List available CPU types", + ) + parser.add_argument( + "--cpu-type", + default="AtomicSimpleCPU", + choices=ObjectList.cpu_list.get_names(), + help="type of cpu to run with", + ) + parser.add_argument( + "--list-bp-types", + action=ListBp, + nargs=0, + help="List available branch predictor types", + ) + 
parser.add_argument( + "--list-indirect-bp-types", + action=ListIndirectBP, + nargs=0, + help="List available indirect branch predictor types", + ) + parser.add_argument( + "--bp-type", + default=None, + choices=ObjectList.bp_list.get_names(), + help=""" type of branch predictor to run with (if not set, use the default branch predictor of - the selected CPU)""") - parser.add_argument("--indirect-bp-type", default=None, - choices=ObjectList.indirect_bp_list.get_names(), - help="type of indirect branch predictor to run with") + the selected CPU)""", + ) + parser.add_argument( + "--indirect-bp-type", + default=None, + choices=ObjectList.indirect_bp_list.get_names(), + help="type of indirect branch predictor to run with", + ) - parser.add_argument("--list-rp-types", - action=ListRP, nargs=0, - help="List available replacement policy types") + parser.add_argument( + "--list-rp-types", + action=ListRP, + nargs=0, + help="List available replacement policy types", + ) - parser.add_argument("--list-hwp-types", - action=ListHWP, nargs=0, - help="List available hardware prefetcher types") - parser.add_argument("--l1i-hwp-type", default=None, - choices=ObjectList.hwp_list.get_names(), - help=""" + parser.add_argument( + "--list-hwp-types", + action=ListHWP, + nargs=0, + help="List available hardware prefetcher types", + ) + parser.add_argument( + "--l1i-hwp-type", + default=None, + choices=ObjectList.hwp_list.get_names(), + help=""" type of hardware prefetcher to use with the L1 instruction cache. (if not set, use the default prefetcher of - the selected cache)""") - parser.add_argument("--l1d-hwp-type", default=None, - choices=ObjectList.hwp_list.get_names(), - help=""" + the selected cache)""", + ) + parser.add_argument( + "--l1d-hwp-type", + default=None, + choices=ObjectList.hwp_list.get_names(), + help=""" type of hardware prefetcher to use with the L1 data cache. 
(if not set, use the default prefetcher of - the selected cache)""") - parser.add_argument("--l2-hwp-type", default=None, - choices=ObjectList.hwp_list.get_names(), - help=""" + the selected cache)""", + ) + parser.add_argument( + "--l2-hwp-type", + default=None, + choices=ObjectList.hwp_list.get_names(), + help=""" type of hardware prefetcher to use with the L2 cache. (if not set, use the default prefetcher of - the selected cache)""") + the selected cache)""", + ) parser.add_argument("--checker", action="store_true") - parser.add_argument("--cpu-clock", action="store", type=str, - default='2GHz', - help="Clock for blocks running at CPU speed") - parser.add_argument("--smt", action="store_true", default=False, - help=""" + parser.add_argument( + "--cpu-clock", + action="store", + type=str, + default="2GHz", + help="Clock for blocks running at CPU speed", + ) + parser.add_argument( + "--smt", + action="store_true", + default=False, + help=""" Only used if multiple programs are specified. 
If true, then the number of threads per cpu is same as the - number of programs.""") + number of programs.""", + ) parser.add_argument( - "--elastic-trace-en", action="store_true", + "--elastic-trace-en", + action="store_true", help="""Enable capture of data dependency and instruction - fetch traces using elastic trace probe.""") + fetch traces using elastic trace probe.""", + ) # Trace file paths input to trace probe in a capture simulation and input # to Trace CPU in a replay simulation - parser.add_argument("--inst-trace-file", action="store", type=str, - help="""Instruction fetch trace file input to + parser.add_argument( + "--inst-trace-file", + action="store", + type=str, + help="""Instruction fetch trace file input to Elastic Trace probe in a capture simulation and - Trace CPU in a replay simulation""", default="") - parser.add_argument("--data-trace-file", action="store", type=str, - help="""Data dependency trace file input to + Trace CPU in a replay simulation""", + default="", + ) + parser.add_argument( + "--data-trace-file", + action="store", + type=str, + help="""Data dependency trace file input to Elastic Trace probe in a capture simulation and - Trace CPU in a replay simulation""", default="") + Trace CPU in a replay simulation""", + default="", + ) # dist-gem5 options - parser.add_argument("--dist", action="store_true", - help="Parallel distributed gem5 simulation.") parser.add_argument( - "--dist-sync-on-pseudo-op", action="store_true", - help="Use a pseudo-op to start dist-gem5 synchronization.") + "--dist", + action="store_true", + help="Parallel distributed gem5 simulation.", + ) parser.add_argument( - "--is-switch", action="store_true", + "--dist-sync-on-pseudo-op", + action="store_true", + help="Use a pseudo-op to start dist-gem5 synchronization.", + ) + parser.add_argument( + "--is-switch", + action="store_true", help="Select the network switch simulator process for a" - "distributed gem5 run") - parser.add_argument("--dist-rank", default=0, 
action="store", type=int, - help="Rank of this system within the dist gem5 run.") + "distributed gem5 run", + ) parser.add_argument( - "--dist-size", default=0, action="store", type=int, - help="Number of gem5 processes within the dist gem5 run.") + "--dist-rank", + default=0, + action="store", + type=int, + help="Rank of this system within the dist gem5 run.", + ) parser.add_argument( - "--dist-server-name", default="127.0.0.1", action="store", type=str, - help="Name of the message server host\nDEFAULT: localhost") - parser.add_argument("--dist-server-port", - default=2200, - action="store", type=int, - help="Message server listen port\nDEFAULT: 2200") + "--dist-size", + default=0, + action="store", + type=int, + help="Number of gem5 processes within the dist gem5 run.", + ) parser.add_argument( - "--dist-sync-repeat", default="0us", action="store", type=str, + "--dist-server-name", + default="127.0.0.1", + action="store", + type=str, + help="Name of the message server host\nDEFAULT: localhost", + ) + parser.add_argument( + "--dist-server-port", + default=2200, + action="store", + type=int, + help="Message server listen port\nDEFAULT: 2200", + ) + parser.add_argument( + "--dist-sync-repeat", + default="0us", + action="store", + type=str, help="Repeat interval for synchronisation barriers among " - "dist-gem5 processes\nDEFAULT: --ethernet-linkdelay") + "dist-gem5 processes\nDEFAULT: --ethernet-linkdelay", + ) parser.add_argument( - "--dist-sync-start", default="5200000000000t", action="store", + "--dist-sync-start", + default="5200000000000t", + action="store", type=str, help="Time to schedule the first dist synchronisation barrier\n" - "DEFAULT:5200000000000t") - parser.add_argument("--ethernet-linkspeed", default="10Gbps", - action="store", type=str, - help="Link speed in bps\nDEFAULT: 10Gbps") - parser.add_argument("--ethernet-linkdelay", default="10us", - action="store", type=str, - help="Link delay in seconds\nDEFAULT: 10us") + "DEFAULT:5200000000000t", + ) + 
parser.add_argument( + "--ethernet-linkspeed", + default="10Gbps", + action="store", + type=str, + help="Link speed in bps\nDEFAULT: 10Gbps", + ) + parser.add_argument( + "--ethernet-linkdelay", + default="10us", + action="store", + type=str, + help="Link delay in seconds\nDEFAULT: 10us", + ) # Run duration options - parser.add_argument("-I", "--maxinsts", action="store", type=int, - default=None, help="""Total number of instructions to - simulate (default: run forever)""") - parser.add_argument("--work-item-id", action="store", type=int, - help="the specific work id for exit & checkpointing") - parser.add_argument("--num-work-ids", action="store", type=int, - help="Number of distinct work item types") - parser.add_argument("--work-begin-cpu-id-exit", action="store", type=int, - help="exit when work starts on the specified cpu") - parser.add_argument("--work-end-exit-count", action="store", type=int, - help="exit at specified work end count") - parser.add_argument("--work-begin-exit-count", action="store", type=int, - help="exit at specified work begin count") - parser.add_argument("--init-param", action="store", type=int, default=0, - help="""Parameter available in simulation with m5 - initparam""") parser.add_argument( - "--initialize-only", action="store_true", default=False, + "-I", + "--maxinsts", + action="store", + type=int, + default=None, + help="""Total number of instructions to + simulate (default: run forever)""", + ) + parser.add_argument( + "--work-item-id", + action="store", + type=int, + help="the specific work id for exit & checkpointing", + ) + parser.add_argument( + "--num-work-ids", + action="store", + type=int, + help="Number of distinct work item types", + ) + parser.add_argument( + "--work-begin-cpu-id-exit", + action="store", + type=int, + help="exit when work starts on the specified cpu", + ) + parser.add_argument( + "--work-end-exit-count", + action="store", + type=int, + help="exit at specified work end count", + ) + parser.add_argument( 
+ "--work-begin-exit-count", + action="store", + type=int, + help="exit at specified work begin count", + ) + parser.add_argument( + "--init-param", + action="store", + type=int, + default=0, + help="""Parameter available in simulation with m5 + initparam""", + ) + parser.add_argument( + "--initialize-only", + action="store_true", + default=False, help="""Exit after initialization. Do not simulate time. - Useful when gem5 is run as a library.""") + Useful when gem5 is run as a library.""", + ) # Simpoint options - parser.add_argument("--simpoint-profile", action="store_true", - help="Enable basic block profiling for SimPoints") - parser.add_argument("--simpoint-interval", type=int, default=10000000, - help="SimPoint interval in num of instructions") parser.add_argument( - "--take-simpoint-checkpoints", action="store", type=str, - help="") - parser.add_argument("--restore-simpoint-checkpoint", action="store_true", - default=False, - help="restore from a simpoint checkpoint taken with " + - "--take-simpoint-checkpoints") + "--simpoint-profile", + action="store_true", + help="Enable basic block profiling for SimPoints", + ) + parser.add_argument( + "--simpoint-interval", + type=int, + default=10000000, + help="SimPoint interval in num of instructions", + ) + parser.add_argument( + "--take-simpoint-checkpoints", + action="store", + type=str, + help="", + ) + parser.add_argument( + "--restore-simpoint-checkpoint", + action="store_true", + default=False, + help="restore from a simpoint checkpoint taken with " + + "--take-simpoint-checkpoints", + ) # Checkpointing options # Note that performing checkpointing via python script files will override # checkpoint instructions built into binaries. 
parser.add_argument( - "--take-checkpoints", action="store", type=str, - help=" take checkpoints at tick M and every N ticks thereafter") + "--take-checkpoints", + action="store", + type=str, + help=" take checkpoints at tick M and every N ticks thereafter", + ) parser.add_argument( - "--max-checkpoints", action="store", type=int, - help="the maximum number of checkpoints to drop", default=5) + "--max-checkpoints", + action="store", + type=int, + help="the maximum number of checkpoints to drop", + default=5, + ) parser.add_argument( - "--checkpoint-dir", action="store", type=str, - help="Place all checkpoints in this absolute directory") - parser.add_argument("-r", "--checkpoint-restore", action="store", type=int, - help="restore from checkpoint ") - parser.add_argument("--checkpoint-at-end", action="store_true", - help="take a checkpoint at end of run") + "--checkpoint-dir", + action="store", + type=str, + help="Place all checkpoints in this absolute directory", + ) parser.add_argument( - "--work-begin-checkpoint-count", action="store", type=int, - help="checkpoint at specified work begin count") + "-r", + "--checkpoint-restore", + action="store", + type=int, + help="restore from checkpoint ", + ) parser.add_argument( - "--work-end-checkpoint-count", action="store", type=int, - help="checkpoint at specified work end count") + "--checkpoint-at-end", + action="store_true", + help="take a checkpoint at end of run", + ) parser.add_argument( - "--work-cpus-checkpoint-count", action="store", type=int, - help="checkpoint and exit when active cpu count is reached") - parser.add_argument("--restore-with-cpu", action="store", - default="AtomicSimpleCPU", - choices=ObjectList.cpu_list.get_names(), - help="cpu type for restoring from a checkpoint") + "--work-begin-checkpoint-count", + action="store", + type=int, + help="checkpoint at specified work begin count", + ) + parser.add_argument( + "--work-end-checkpoint-count", + action="store", + type=int, + help="checkpoint at 
specified work end count", + ) + parser.add_argument( + "--work-cpus-checkpoint-count", + action="store", + type=int, + help="checkpoint and exit when active cpu count is reached", + ) + parser.add_argument( + "--restore-with-cpu", + action="store", + default="AtomicSimpleCPU", + choices=ObjectList.cpu_list.get_names(), + help="cpu type for restoring from a checkpoint", + ) # CPU Switching - default switch model goes from a checkpoint # to a timing simple CPU with caches to warm up, then to detailed CPU for # data measurement parser.add_argument( - "--repeat-switch", action="store", type=int, default=None, - help="switch back and forth between CPUs with period ") + "--repeat-switch", + action="store", + type=int, + default=None, + help="switch back and forth between CPUs with period ", + ) parser.add_argument( - "-s", "--standard-switch", action="store", type=int, default=None, - help="switch from timing to Detailed CPU after warmup period of ") - parser.add_argument("-p", "--prog-interval", type=str, - help="CPU Progress Interval") + "-s", + "--standard-switch", + action="store", + type=int, + default=None, + help="switch from timing to Detailed CPU after warmup period of ", + ) + parser.add_argument( + "-p", "--prog-interval", type=str, help="CPU Progress Interval" + ) # Fastforwarding and simpoint related materials parser.add_argument( - "-W", "--warmup-insts", action="store", type=int, default=None, - help="Warmup period in total instructions (requires --standard-switch)") + "-W", + "--warmup-insts", + action="store", + type=int, + default=None, + help="Warmup period in total instructions (requires --standard-switch)", + ) parser.add_argument( - "--bench", action="store", type=str, default=None, - help="base names for --take-checkpoint and --checkpoint-restore") + "--bench", + action="store", + type=str, + default=None, + help="base names for --take-checkpoint and --checkpoint-restore", + ) parser.add_argument( - "-F", "--fast-forward", action="store", 
type=str, default=None, - help="Number of instructions to fast forward before switching") + "-F", + "--fast-forward", + action="store", + type=str, + default=None, + help="Number of instructions to fast forward before switching", + ) parser.add_argument( - "-S", "--simpoint", action="store_true", default=False, + "-S", + "--simpoint", + action="store_true", + default=False, help="""Use workload simpoints as an instruction offset for - --checkpoint-restore or --take-checkpoint.""") + --checkpoint-restore or --take-checkpoint.""", + ) parser.add_argument( - "--at-instruction", action="store_true", default=False, + "--at-instruction", + action="store_true", + default=False, help="""Treat value of --checkpoint-restore or --take-checkpoint as a - number of instructions.""") - parser.add_argument("--spec-input", default="ref", - choices=["ref", "test", "train", "smred", "mdred", - "lgred"], - help="Input set size for SPEC CPU2000 benchmarks.") - parser.add_argument("--arm-iset", default="arm", - choices=["arm", "thumb", "aarch64"], - help="ARM instruction set.") + number of instructions.""", + ) parser.add_argument( - "--stats-root", action="append", default=[], + "--spec-input", + default="ref", + choices=["ref", "test", "train", "smred", "mdred", "lgred"], + help="Input set size for SPEC CPU2000 benchmarks.", + ) + parser.add_argument( + "--arm-iset", + default="arm", + choices=["arm", "thumb", "aarch64"], + help="ARM instruction set.", + ) + parser.add_argument( + "--stats-root", + action="append", + default=[], help="If given, dump only stats of objects under the given SimObject. " "SimObjects are identified with Python notation as in: " "system.cpu[0].mmu. All elements of an array can be selected at " "once with: system.cpu[:].mmu. If given multiple times, dump stats " "that are present under any of the roots. If not given, dump all " - "stats. ") + "stats. 
", + ) + parser.add_argument( + "--override-vendor-string", + action="store", + type=str, + default=None, + help="Override vendor string returned by CPUID instruction in X86.", + ) def addSEOptions(parser): # Benchmark options - parser.add_argument("-c", "--cmd", default="", - help="The binary to run in syscall emulation mode.") - parser.add_argument("-o", "--options", default="", - help="""The options to pass to the binary, use " " - around the entire string""") - parser.add_argument("-e", "--env", default="", - help="Initialize workload environment from text file.") - parser.add_argument("-i", "--input", default="", - help="Read stdin from a file.") - parser.add_argument("--output", default="", - help="Redirect stdout to a file.") - parser.add_argument("--errout", default="", - help="Redirect stderr to a file.") - parser.add_argument("--chroot", action="store", type=str, default=None, - help="The chroot option allows a user to alter the " - "search path for processes running in SE mode. " - "Normally, the search path would begin at the " - "root of the filesystem (i.e. /). With chroot, " - "a user can force the process to begin looking at" - "some other location (i.e. /home/user/rand_dir)." - "The intended use is to trick sophisticated " - "software which queries the __HOST__ filesystem " - "for information or functionality. Instead of " - "finding files on the __HOST__ filesystem, the " - "process will find the user's replacment files.") - parser.add_argument("--interp-dir", action="store", type=str, - default=None, - help="The interp-dir option is used for " - "setting the interpreter's path. This will " - "allow to load the guest dynamic linker/loader " - "itself from the elf binary. 
The option points to " - "the parent folder of the guest /lib in the " - "host fs") + parser.add_argument( + "-c", + "--cmd", + default="", + help="The binary to run in syscall emulation mode.", + ) + parser.add_argument( + "-o", + "--options", + default="", + help="""The options to pass to the binary, use " " + around the entire string""", + ) + parser.add_argument( + "-e", + "--env", + default="", + help="Initialize workload environment from text file.", + ) + parser.add_argument( + "-i", "--input", default="", help="Read stdin from a file." + ) + parser.add_argument( + "--output", default="", help="Redirect stdout to a file." + ) + parser.add_argument( + "--errout", default="", help="Redirect stderr to a file." + ) + parser.add_argument( + "--chroot", + action="store", + type=str, + default=None, + help="The chroot option allows a user to alter the " + "search path for processes running in SE mode. " + "Normally, the search path would begin at the " + "root of the filesystem (i.e. /). With chroot, " + "a user can force the process to begin looking at" + "some other location (i.e. /home/user/rand_dir)." + "The intended use is to trick sophisticated " + "software which queries the __HOST__ filesystem " + "for information or functionality. Instead of " + "finding files on the __HOST__ filesystem, the " + "process will find the user's replacment files.", + ) + parser.add_argument( + "--interp-dir", + action="store", + type=str, + default=None, + help="The interp-dir option is used for " + "setting the interpreter's path. This will " + "allow to load the guest dynamic linker/loader " + "itself from the elf binary. The option points to " + "the parent folder of the guest /lib in the " + "host fs", + ) - parser.add_argument("--redirects", action="append", type=str, - default=[], - help="A collection of one or more redirect paths " - "to be used in syscall emulation." - "Usage: gem5.opt [...] 
--redirects /dir1=/path/" - "to/host/dir1 --redirects /dir2=/path/to/host/dir2") - parser.add_argument("--wait-gdb", default=False, action='store_true', - help="Wait for remote GDB to connect.") + parser.add_argument( + "--redirects", + action="append", + type=str, + default=[], + help="A collection of one or more redirect paths " + "to be used in syscall emulation." + "Usage: gem5.opt [...] --redirects /dir1=/path/" + "to/host/dir1 --redirects /dir2=/path/to/host/dir2", + ) + parser.add_argument( + "--wait-gdb", + default=False, + action="store_true", + help="Wait for remote GDB to connect.", + ) def addFSOptions(parser): @@ -461,73 +762,128 @@ def addFSOptions(parser): # Simulation options parser.add_argument( - "--timesync", action="store_true", - help="Prevent simulated time from getting ahead of real time") + "--timesync", + action="store_true", + help="Prevent simulated time from getting ahead of real time", + ) # System options parser.add_argument("--kernel", action="store", type=str) - parser.add_argument("--os-type", action="store", - choices=os_types[str(buildEnv['TARGET_ISA'])], - default="linux", - help="Specifies type of OS to boot") + parser.add_argument( + "--os-type", + action="store", + choices=os_types, + default="linux", + help="Specifies type of OS to boot", + ) parser.add_argument("--script", action="store", type=str) parser.add_argument( - "--frame-capture", action="store_true", + "--frame-capture", + action="store_true", help="Stores changed frame buffers from the VNC server to compressed " - "files in the gem5 output directory") + "files in the gem5 output directory", + ) - if buildEnv['TARGET_ISA'] == "arm": + if buildEnv["USE_ARM_ISA"]: parser.add_argument( - "--bare-metal", action="store_true", - help="Provide the raw system without the linux specific bits") - parser.add_argument("--list-machine-types", - action=ListPlatform, nargs=0, - help="List available platform types") - parser.add_argument("--machine-type", action="store", - 
choices=ObjectList.platform_list.get_names(), - default="VExpress_GEM5_V1") + "--bare-metal", + action="store_true", + help="Provide the raw system without the linux specific bits", + ) parser.add_argument( - "--dtb-filename", action="store", type=str, + "--list-machine-types", + action=ListPlatform, + nargs=0, + help="List available platform types", + ) + parser.add_argument( + "--machine-type", + action="store", + choices=ObjectList.platform_list.get_names(), + default="VExpress_GEM5_V1", + ) + parser.add_argument( + "--dtb-filename", + action="store", + type=str, help="Specifies device tree blob file to use with device-tree-" - "enabled kernels") + "enabled kernels", + ) parser.add_argument( - "--enable-context-switch-stats-dump", action="store_true", + "--enable-context-switch-stats-dump", + action="store_true", help="Enable stats dump at context " - "switches and dump tasks file (required for Streamline)") + "switches and dump tasks file (required for Streamline)", + ) parser.add_argument("--vio-9p", action="store_true", help=vio_9p_help) parser.add_argument( - "--bootloader", action='append', - help="executable file that runs before the --kernel") + "--bootloader", + action="append", + help="executable file that runs before the --kernel", + ) # Benchmark options parser.add_argument( - "--dual", action="store_true", - help="Simulate two systems attached with an ethernet link") + "--dual", + action="store_true", + help="Simulate two systems attached with an ethernet link", + ) parser.add_argument( - "-b", "--benchmark", action="store", type=str, dest="benchmark", - help="Specify the benchmark to run. Available benchmarks: %s" % - DefinedBenchmarks) + "-b", + "--benchmark", + action="store", + type=str, + dest="benchmark", + help="Specify the benchmark to run. 
Available benchmarks: %s" + % DefinedBenchmarks, + ) # Metafile options parser.add_argument( - "--etherdump", action="store", type=str, dest="etherdump", + "--etherdump", + action="store", + type=str, + dest="etherdump", help="Specify the filename to dump a pcap capture of the" - "ethernet traffic") + "ethernet traffic", + ) # Disk Image Options - parser.add_argument("--disk-image", action="append", type=str, - default=[], help="Path to the disk images to use.") - parser.add_argument("--root-device", action="store", type=str, - default=None, help="OS device name for root partition") + parser.add_argument( + "--disk-image", + action="append", + type=str, + default=[], + help="Path to the disk images to use.", + ) + parser.add_argument( + "--root-device", + action="store", + type=str, + default=None, + help="OS device name for root partition", + ) # Command line options - parser.add_argument("--command-line", action="store", type=str, - default=None, - help="Template for the kernel command line.") parser.add_argument( - "--command-line-file", action="store", default=None, type=str, - help="File with a template for the kernel command line") + "--command-line", + action="store", + type=str, + default=None, + help="Template for the kernel command line.", + ) + parser.add_argument( + "--command-line-file", + action="store", + default=None, + type=str, + help="File with a template for the kernel command line", + ) # Debug option - parser.add_argument("--wait-gdb", default=False, action='store_true', - help="Wait for remote GDB to connect.") + parser.add_argument( + "--wait-gdb", + default=False, + action="store_true", + help="Wait for remote GDB to connect.", + ) diff --git a/configs/common/SimpleOpts.py b/configs/common/SimpleOpts.py index fabc8e048b..96c73f57b8 100644 --- a/configs/common/SimpleOpts.py +++ b/configs/common/SimpleOpts.py @@ -44,21 +44,22 @@ from argparse import ArgumentParser # add the args we want to be able to control from the command line parser = 
ArgumentParser() + def add_option(*args, **kwargs): - """Call "add_option" to the global options parser - """ + """Call "add_option" to the global options parser""" if called_parse_args: m5.fatal("Can't add an option after calling SimpleOpts.parse_args") parser.add_argument(*args, **kwargs) + def parse_args(): global called_parse_args called_parse_args = True return parser.parse_args() + def print_help(*args, **kwargs): parser.print_help(*args, **kwargs) - diff --git a/configs/common/Simulation.py b/configs/common/Simulation.py index 24167731a1..731b3fcaa5 100644 --- a/configs/common/Simulation.py +++ b/configs/common/Simulation.py @@ -49,27 +49,28 @@ from m5.defines import buildEnv from m5.objects import * from m5.util import * -addToPath('../common') +addToPath("../common") + def getCPUClass(cpu_type): """Returns the required cpu class and the mode of operation.""" cls = ObjectList.cpu_list.get(cpu_type) return cls, cls.memory_mode() + def setCPUClass(options): """Returns two cpu classes and the initial mode of operation. - Restoring from a checkpoint or fast forwarding through a benchmark - can be done using one type of cpu, and then the actual - simulation can be carried out using another type. This function - returns these two types of cpus and the initial mode of operation - depending on the options provided. + Restoring from a checkpoint or fast forwarding through a benchmark + can be done using one type of cpu, and then the actual + simulation can be carried out using another type. This function + returns these two types of cpus and the initial mode of operation + depending on the options provided. 
""" TmpClass, test_mem_mode = getCPUClass(options.cpu_type) CPUClass = None - if TmpClass.require_caches() and \ - not options.caches and not options.ruby: + if TmpClass.require_caches() and not options.caches and not options.ruby: fatal("%s must be used with caches" % options.cpu_type) if options.checkpoint_restore != None: @@ -79,20 +80,22 @@ def setCPUClass(options): elif options.fast_forward: CPUClass = TmpClass TmpClass = AtomicSimpleCPU - test_mem_mode = 'atomic' + test_mem_mode = "atomic" # Ruby only supports atomic accesses in noncaching mode - if test_mem_mode == 'atomic' and options.ruby: + if test_mem_mode == "atomic" and options.ruby: warn("Memory mode will be changed to atomic_noncaching") - test_mem_mode = 'atomic_noncaching' + test_mem_mode = "atomic_noncaching" return (TmpClass, test_mem_mode, CPUClass) + def setMemClass(options): """Returns a memory controller class.""" return ObjectList.mem_list.get(options.mem_type) + def setWorkCountOptions(system, options): if options.work_item_id != None: system.work_item_id = options.work_item_id @@ -111,6 +114,7 @@ def setWorkCountOptions(system, options): if options.work_cpus_checkpoint_count != None: system.work_cpus_ckpt_count = options.work_cpus_checkpoint_count + def findCptDir(options, cptdir, testsys): """Figures out the directory from which the checkpointed state is read. 
@@ -137,7 +141,7 @@ def findCptDir(options, cptdir, testsys): if options.simpoint: # assume workload 0 has the simpoint if testsys.cpu[0].workload[0].simpoint == 0: - fatal('Unable to find simpoint') + fatal("Unable to find simpoint") inst += int(testsys.cpu[0].workload[0].simpoint) checkpoint_dir = joinpath(cptdir, "cpt.%s.%s" % (options.bench, inst)) @@ -148,8 +152,10 @@ def findCptDir(options, cptdir, testsys): # Restore from SimPoint checkpoints # Assumes that the checkpoint dir names are formatted as follows: dirs = listdir(cptdir) - expr = re.compile('cpt\.simpoint_(\d+)_inst_(\d+)' + - '_weight_([\d\.e\-]+)_interval_(\d+)_warmup_(\d+)') + expr = re.compile( + "cpt\.simpoint_(\d+)_inst_(\d+)" + + "_weight_([\d\.e\-]+)_interval_(\d+)_warmup_(\d+)" + ) cpts = [] for dir in dirs: match = expr.match(dir) @@ -159,7 +165,7 @@ def findCptDir(options, cptdir, testsys): cpt_num = options.checkpoint_restore if cpt_num > len(cpts): - fatal('Checkpoint %d not found', cpt_num) + fatal("Checkpoint %d not found", cpt_num) checkpoint_dir = joinpath(cptdir, cpts[cpt_num - 1]) match = expr.match(cpts[cpt_num - 1]) if match: @@ -176,30 +182,33 @@ def findCptDir(options, cptdir, testsys): if testsys.switch_cpus != None: testsys.switch_cpus[0].simpoint_start_insts = simpoint_start_insts - print("Resuming from SimPoint", end=' ') - print("#%d, start_inst:%d, weight:%f, interval:%d, warmup:%d" % - (index, start_inst, weight_inst, interval_length, warmup_length)) + print("Resuming from SimPoint", end=" ") + print( + "#%d, start_inst:%d, weight:%f, interval:%d, warmup:%d" + % (index, start_inst, weight_inst, interval_length, warmup_length) + ) else: dirs = listdir(cptdir) - expr = re.compile('cpt\.([0-9]+)') + expr = re.compile("cpt\.([0-9]+)") cpts = [] for dir in dirs: match = expr.match(dir) if match: cpts.append(match.group(1)) - cpts.sort(key = lambda a: int(a)) + cpts.sort(key=lambda a: int(a)) cpt_num = options.checkpoint_restore if cpt_num > len(cpts): - fatal('Checkpoint %d 
not found', cpt_num) + fatal("Checkpoint %d not found", cpt_num) cpt_starttick = int(cpts[cpt_num - 1]) checkpoint_dir = joinpath(cptdir, "cpt.%s" % cpts[cpt_num - 1]) return cpt_starttick, checkpoint_dir + def scriptCheckpoints(options, maxtick, cptdir): if options.at_instruction or options.simpoint: checkpoint_inst = int(options.take_checkpoints) @@ -219,8 +228,11 @@ def scriptCheckpoints(options, maxtick, cptdir): exit_cause = exit_event.getCause() if exit_cause == "a thread reached the max instruction count": - m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \ - (options.bench, checkpoint_inst))) + m5.checkpoint( + joinpath( + cptdir, "cpt.%s.%d" % (options.bench, checkpoint_inst) + ) + ) print("Checkpoint written.") else: @@ -242,8 +254,10 @@ def scriptCheckpoints(options, maxtick, cptdir): sim_ticks = when max_checkpoints = options.max_checkpoints - while num_checkpoints < max_checkpoints and \ - exit_cause == "simulate() limit reached": + while ( + num_checkpoints < max_checkpoints + and exit_cause == "simulate() limit reached" + ): if (sim_ticks + period) > maxtick: exit_event = m5.simulate(maxtick - sim_ticks) exit_cause = exit_event.getCause() @@ -260,6 +274,7 @@ def scriptCheckpoints(options, maxtick, cptdir): return exit_event + def benchCheckpoints(options, maxtick, cptdir): exit_event = m5.simulate(maxtick - m5.curTick()) exit_cause = exit_event.getCause() @@ -279,13 +294,18 @@ def benchCheckpoints(options, maxtick, cptdir): return exit_event + # Set up environment for taking SimPoint checkpoints # Expecting SimPoint files generated by SimPoint 3.2 def parseSimpointAnalysisFile(options, testsys): import re - simpoint_filename, weight_filename, interval_length, warmup_length = \ - options.take_simpoint_checkpoints.split(",", 3) + ( + simpoint_filename, + weight_filename, + interval_length, + warmup_length, + ) = options.take_simpoint_checkpoints.split(",", 3) print("simpoint analysis file:", simpoint_filename) print("simpoint weight file:", 
weight_filename) print("interval length:", interval_length) @@ -309,20 +329,19 @@ def parseSimpointAnalysisFile(options, testsys): if m: interval = int(m.group(1)) else: - fatal('unrecognized line in simpoint file!') + fatal("unrecognized line in simpoint file!") line = weight_file.readline() if not line: - fatal('not enough lines in simpoint weight file!') + fatal("not enough lines in simpoint weight file!") m = re.match("([0-9\.e\-]+)\s+(\d+)", line) if m: weight = float(m.group(1)) else: - fatal('unrecognized line in simpoint weight file!') + fatal("unrecognized line in simpoint weight file!") - if (interval * interval_length - warmup_length > 0): - starting_inst_count = \ - interval * interval_length - warmup_length + if interval * interval_length - warmup_length > 0: + starting_inst_count = interval * interval_length - warmup_length actual_warmup_length = warmup_length else: # Not enough room for proper warmup @@ -330,15 +349,20 @@ def parseSimpointAnalysisFile(options, testsys): starting_inst_count = 0 actual_warmup_length = interval * interval_length - simpoints.append((interval, weight, starting_inst_count, - actual_warmup_length)) + simpoints.append( + (interval, weight, starting_inst_count, actual_warmup_length) + ) # Sort SimPoints by starting inst count simpoints.sort(key=lambda obj: obj[2]) for s in simpoints: interval, weight, starting_inst_count, actual_warmup_length = s - print(str(interval), str(weight), starting_inst_count, - actual_warmup_length) + print( + str(interval), + str(weight), + starting_inst_count, + actual_warmup_length, + ) simpoint_start_insts.append(starting_inst_count) print("Total # of simpoints:", len(simpoints)) @@ -346,6 +370,7 @@ def parseSimpointAnalysisFile(options, testsys): return (simpoints, interval_length) + def takeSimpointCheckpoints(simpoints, interval_length, cptdir): num_checkpoints = 0 index = 0 @@ -369,22 +394,34 @@ def takeSimpointCheckpoints(simpoints, interval_length, cptdir): code = exit_event.getCode() if 
exit_cause == "simpoint starting point found": - m5.checkpoint(joinpath(cptdir, - "cpt.simpoint_%02d_inst_%d_weight_%f_interval_%d_warmup_%d" - % (index, starting_inst_count, weight, interval_length, - actual_warmup_length))) - print("Checkpoint #%d written. start inst:%d weight:%f" % - (num_checkpoints, starting_inst_count, weight)) + m5.checkpoint( + joinpath( + cptdir, + "cpt.simpoint_%02d_inst_%d_weight_%f_interval_%d_warmup_%d" + % ( + index, + starting_inst_count, + weight, + interval_length, + actual_warmup_length, + ), + ) + ) + print( + "Checkpoint #%d written. start inst:%d weight:%f" + % (num_checkpoints, starting_inst_count, weight) + ) num_checkpoints += 1 last_chkpnt_inst_count = starting_inst_count else: break index += 1 - print('Exiting @ tick %i because %s' % (m5.curTick(), exit_cause)) + print("Exiting @ tick %i because %s" % (m5.curTick(), exit_cause)) print("%d checkpoints taken" % num_checkpoints) sys.exit(code) + def restoreSimpointCheckpoint(): exit_event = m5.simulate() exit_cause = exit_event.getCause() @@ -401,9 +438,10 @@ def restoreSimpointCheckpoint(): print("Done running SimPoint!") sys.exit(exit_event.getCode()) - print('Exiting @ tick %i because %s' % (m5.curTick(), exit_cause)) + print("Exiting @ tick %i because %s" % (m5.curTick(), exit_cause)) sys.exit(exit_event.getCode()) + def repeatSwitch(testsys, repeat_switch_cpu_list, maxtick, switch_freq): print("starting switch loop") while True: @@ -424,6 +462,7 @@ def repeatSwitch(testsys, repeat_switch_cpu_list, maxtick, switch_freq): exit_event = m5.simulate(maxtick - m5.curTick()) return exit_event + def run(options, root, testsys, cpu_class): if options.checkpoint_dir: cptdir = options.checkpoint_dir @@ -461,9 +500,17 @@ def run(options, root, testsys, cpu_class): for i in range(np): testsys.cpu[i].max_insts_any_thread = options.maxinsts + if options.override_vendor_string is not None: + for i in range(len(testsys.cpu)): + for j in range(len(testsys.cpu[i].isa)): + 
testsys.cpu[i].isa[ + j + ].vendor_string = options.override_vendor_string + if cpu_class: - switch_cpus = [cpu_class(switched_out=True, cpu_id=(i)) - for i in range(np)] + switch_cpus = [ + cpu_class(switched_out=True, cpu_id=(i)) for i in range(np) + ] for i in range(np): if options.fast_forward: @@ -471,8 +518,7 @@ def run(options, root, testsys, cpu_class): switch_cpus[i].system = testsys switch_cpus[i].workload = testsys.cpu[i].workload switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain - switch_cpus[i].progress_interval = \ - testsys.cpu[i].progress_interval + switch_cpus[i].progress_interval = testsys.cpu[i].progress_interval switch_cpus[i].isa = testsys.cpu[i].isa # simulation period if options.maxinsts: @@ -485,9 +531,11 @@ def run(options, root, testsys, cpu_class): switch_cpus[i].branchPred = bpClass() if options.indirect_bp_type: IndirectBPClass = ObjectList.indirect_bp_list.get( - options.indirect_bp_type) - switch_cpus[i].branchPred.indirectBranchPred = \ - IndirectBPClass() + options.indirect_bp_type + ) + switch_cpus[ + i + ].branchPred.indirectBranchPred = IndirectBPClass() switch_cpus[i].createThreads() # If elastic tracing is enabled attach the elastic trace probe @@ -500,16 +548,16 @@ def run(options, root, testsys, cpu_class): if options.repeat_switch: switch_class = getCPUClass(options.cpu_type)[0] - if switch_class.require_caches() and \ - not options.caches: + if switch_class.require_caches() and not options.caches: print("%s: Must be used with caches" % str(switch_class)) sys.exit(1) if not switch_class.support_take_over(): print("%s: CPU switching not supported" % str(switch_class)) sys.exit(1) - repeat_switch_cpus = [switch_class(switched_out=True, \ - cpu_id=(i)) for i in range(np)] + repeat_switch_cpus = [ + switch_class(switched_out=True, cpu_id=(i)) for i in range(np) + ] for i in range(np): repeat_switch_cpus[i].system = testsys @@ -523,24 +571,30 @@ def run(options, root, testsys, cpu_class): if options.checker: 
repeat_switch_cpus[i].addCheckerCpu() + repeat_switch_cpus[i].createThreads() + testsys.repeat_switch_cpus = repeat_switch_cpus if cpu_class: - repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i]) - for i in range(np)] + repeat_switch_cpu_list = [ + (switch_cpus[i], repeat_switch_cpus[i]) for i in range(np) + ] else: - repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i]) - for i in range(np)] + repeat_switch_cpu_list = [ + (testsys.cpu[i], repeat_switch_cpus[i]) for i in range(np) + ] if options.standard_switch: - switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i)) - for i in range(np)] - switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i)) - for i in range(np)] + switch_cpus = [ + TimingSimpleCPU(switched_out=True, cpu_id=(i)) for i in range(np) + ] + switch_cpus_1 = [ + DerivO3CPU(switched_out=True, cpu_id=(i)) for i in range(np) + ] for i in range(np): - switch_cpus[i].system = testsys - switch_cpus_1[i].system = testsys + switch_cpus[i].system = testsys + switch_cpus_1[i].system = testsys switch_cpus[i].workload = testsys.cpu[i].workload switch_cpus_1[i].workload = testsys.cpu[i].workload switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain @@ -557,16 +611,17 @@ def run(options, root, testsys, cpu_class): # Fast forward to a simpoint (warning: time consuming) elif options.simpoint: if testsys.cpu[i].workload[0].simpoint == 0: - fatal('simpoint not found') - testsys.cpu[i].max_insts_any_thread = \ + fatal("simpoint not found") + testsys.cpu[i].max_insts_any_thread = ( testsys.cpu[i].workload[0].simpoint + ) # No distance specified, just switch else: testsys.cpu[i].max_insts_any_thread = 1 # warmup period if options.warmup_insts: - switch_cpus[i].max_insts_any_thread = options.warmup_insts + switch_cpus[i].max_insts_any_thread = options.warmup_insts # simulation period if options.maxinsts: @@ -577,25 +632,29 @@ def run(options, root, testsys, cpu_class): switch_cpus[i].addCheckerCpu() switch_cpus_1[i].addCheckerCpu() + 
switch_cpus[i].createThreads() + switch_cpus_1[i].createThreads() + testsys.switch_cpus = switch_cpus testsys.switch_cpus_1 = switch_cpus_1 - switch_cpu_list = [ - (testsys.cpu[i], switch_cpus[i]) for i in range(np) - ] + switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in range(np)] switch_cpu_list1 = [ (switch_cpus[i], switch_cpus_1[i]) for i in range(np) ] # set the checkpoint in the cpu before m5.instantiate is called - if options.take_checkpoints != None and \ - (options.simpoint or options.at_instruction): + if options.take_checkpoints != None and ( + options.simpoint or options.at_instruction + ): offset = int(options.take_checkpoints) # Set an instruction break point if options.simpoint: for i in range(np): if testsys.cpu[i].workload[0].simpoint == 0: - fatal('no simpoint for testsys.cpu[%d].workload[0]', i) - checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset + fatal("no simpoint for testsys.cpu[%d].workload[0]", i) + checkpoint_inst = ( + int(testsys.cpu[i].workload[0].simpoint) + offset + ) testsys.cpu[i].max_insts_any_thread = checkpoint_inst # used for output below options.take_checkpoints = checkpoint_inst @@ -607,7 +666,9 @@ def run(options, root, testsys, cpu_class): testsys.cpu[i].max_insts_any_thread = offset if options.take_simpoint_checkpoints != None: - simpoints, interval_length = parseSimpointAnalysisFile(options, testsys) + simpoints, interval_length = parseSimpointAnalysisFile( + options, testsys + ) checkpoint_dir = None if options.checkpoint_restore: @@ -640,31 +701,43 @@ def run(options, root, testsys, cpu_class): # the ticks per simulated second maxtick_from_rel += cpt_starttick if options.at_instruction or options.simpoint: - warn("Relative max tick specified with --at-instruction or" \ - " --simpoint\n These options don't specify the " \ - "checkpoint start tick, so assuming\n you mean " \ - "absolute max tick") + warn( + "Relative max tick specified with --at-instruction or" + " --simpoint\n These options 
don't specify the " + "checkpoint start tick, so assuming\n you mean " + "absolute max tick" + ) explicit_maxticks += 1 if options.maxtime: maxtick_from_maxtime = m5.ticks.fromSeconds(options.maxtime) explicit_maxticks += 1 if explicit_maxticks > 1: - warn("Specified multiple of --abs-max-tick, --rel-max-tick, --maxtime."\ - " Using least") + warn( + "Specified multiple of --abs-max-tick, --rel-max-tick, --maxtime." + " Using least" + ) maxtick = min([maxtick_from_abs, maxtick_from_rel, maxtick_from_maxtime]) if options.checkpoint_restore != None and maxtick < cpt_starttick: - fatal("Bad maxtick (%d) specified: " \ - "Checkpoint starts starts from tick: %d", maxtick, cpt_starttick) + fatal( + "Bad maxtick (%d) specified: " + "Checkpoint starts starts from tick: %d", + maxtick, + cpt_starttick, + ) if options.standard_switch or cpu_class: if options.standard_switch: - print("Switch at instruction count:%s" % - str(testsys.cpu[0].max_insts_any_thread)) + print( + "Switch at instruction count:%s" + % str(testsys.cpu[0].max_insts_any_thread) + ) exit_event = m5.simulate() elif cpu_class and options.fast_forward: - print("Switch at instruction count:%s" % - str(testsys.cpu[0].max_insts_any_thread)) + print( + "Switch at instruction count:%s" + % str(testsys.cpu[0].max_insts_any_thread) + ) exit_event = m5.simulate() else: print("Switch at curTick count:%s" % str(10000)) @@ -674,32 +747,37 @@ def run(options, root, testsys, cpu_class): m5.switchCpus(testsys, switch_cpu_list) if options.standard_switch: - print("Switch at instruction count:%d" % - (testsys.switch_cpus[0].max_insts_any_thread)) + print( + "Switch at instruction count:%d" + % (testsys.switch_cpus[0].max_insts_any_thread) + ) - #warmup instruction count may have already been set + # warmup instruction count may have already been set if options.warmup_insts: exit_event = m5.simulate() else: exit_event = m5.simulate(options.standard_switch) print("Switching CPUS @ tick %s" % (m5.curTick())) - print("Simulation 
ends instruction count:%d" % - (testsys.switch_cpus_1[0].max_insts_any_thread)) + print( + "Simulation ends instruction count:%d" + % (testsys.switch_cpus_1[0].max_insts_any_thread) + ) m5.switchCpus(testsys, switch_cpu_list1) # If we're taking and restoring checkpoints, use checkpoint_dir # option only for finding the checkpoints to restore from. This # lets us test checkpointing by restoring from one set of # checkpoints, generating a second set, and then comparing them. - if (options.take_checkpoints or options.take_simpoint_checkpoints) \ - and options.checkpoint_restore: + if ( + options.take_checkpoints or options.take_simpoint_checkpoints + ) and options.checkpoint_restore: if m5.options.outdir: cptdir = m5.options.outdir else: cptdir = getcwd() - if options.take_checkpoints != None : + if options.take_checkpoints != None: # Checkpoints being taken via the command line at and at # subsequent periods of . Checkpoint instructions # received from the benchmark running are ignored and skipped in @@ -722,13 +800,15 @@ def run(options, root, testsys, cpu_class): # If checkpoints are being taken, then the checkpoint instruction # will occur in the benchmark code it self. 
if options.repeat_switch and maxtick > options.repeat_switch: - exit_event = repeatSwitch(testsys, repeat_switch_cpu_list, - maxtick, options.repeat_switch) + exit_event = repeatSwitch( + testsys, repeat_switch_cpu_list, maxtick, options.repeat_switch + ) else: exit_event = benchCheckpoints(options, maxtick, cptdir) - print('Exiting @ tick %i because %s' % - (m5.curTick(), exit_event.getCause())) + print( + "Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause()) + ) if options.checkpoint_at_end: m5.checkpoint(joinpath(cptdir, "cpt.%d")) diff --git a/configs/common/SysPaths.py b/configs/common/SysPaths.py index 762efaf97d..7c0f5bf59b 100644 --- a/configs/common/SysPaths.py +++ b/configs/common/SysPaths.py @@ -29,9 +29,10 @@ import os, sys config_path = os.path.dirname(os.path.abspath(__file__)) config_root = os.path.dirname(config_path) + class PathSearchFunc(object): _sys_paths = None - environment_variable = 'M5_PATH' + environment_variable = "M5_PATH" def __init__(self, subdirs, sys_paths=None): if isinstance(subdirs, str): @@ -46,9 +47,9 @@ class PathSearchFunc(object): else: if self._sys_paths is None: try: - paths = os.environ[self.environment_variable].split(':') + paths = os.environ[self.environment_variable].split(":") except KeyError: - paths = [ '/dist/m5/system', '/n/poolfs/z/dist/m5/system' ] + paths = ["/dist/m5/system", "/n/poolfs/z/dist/m5/system"] # expand '~' and '~user' in paths paths = list(map(os.path.expanduser, paths)) @@ -59,8 +60,10 @@ class PathSearchFunc(object): if not paths: raise IOError( "Can't find system files directory, " - "check your {} environment variable" - .format(self.environment_variable)) + "check your {} environment variable".format( + self.environment_variable + ) + ) self._sys_paths = list(paths) @@ -69,9 +72,13 @@ class PathSearchFunc(object): try: return next(p for p in paths if os.path.exists(p)) except StopIteration: - raise IOError("Can't find file '{}' on {}." 
- .format(filepath, self.environment_variable)) + raise IOError( + "Can't find file '{}' on {}.".format( + filepath, self.environment_variable + ) + ) -disk = PathSearchFunc('disks') -binary = PathSearchFunc('binaries') -script = PathSearchFunc('boot', sys_paths=[config_root]) + +disk = PathSearchFunc("disks") +binary = PathSearchFunc("binaries") +script = PathSearchFunc("boot", sys_paths=[config_root]) diff --git a/configs/common/__init__.py b/configs/common/__init__.py index 9b43643f96..4fe0002684 100644 --- a/configs/common/__init__.py +++ b/configs/common/__init__.py @@ -32,4 +32,3 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/configs/common/cores/arm/HPI.py b/configs/common/cores/arm/HPI.py index 3a11133a5b..c7a8127555 100644 --- a/configs/common/cores/arm/HPI.py +++ b/configs/common/cores/arm/HPI.py @@ -58,41 +58,47 @@ def make_implicant(implicant_string): ret_match <<= 1 shift = True - if char == '_': + if char == "_": shift = False - elif char == '0': + elif char == "0": ret_mask |= 1 - elif char == '1': + elif char == "1": ret_mask |= 1 ret_match |= 1 - elif char == 'x': + elif char == "x": pass else: print("Can't parse implicant character", char) return (ret_mask, ret_match) + # ,----- 36 thumb # | ,--- 35 bigThumb # | |,-- 34 aarch64 -a64_inst = make_implicant('0_01xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') -a32_inst = make_implicant('0_00xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') -t32_inst = make_implicant('1_10xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') -t16_inst = make_implicant('1_00xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') -any_inst = make_implicant('x_xxxx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') +a64_inst = make_implicant("0_01xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") +a32_inst = 
make_implicant("0_00xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") +t32_inst = make_implicant("1_10xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") +t16_inst = make_implicant("1_00xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") +any_inst = make_implicant("x_xxxx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") # | || -any_a64_inst = \ - make_implicant('x_x1xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') -any_non_a64_inst = \ - make_implicant('x_x0xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') +any_a64_inst = make_implicant( + "x_x1xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx" +) +any_non_a64_inst = make_implicant( + "x_x0xx__xxxx_xxxx_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx" +) + def encode_opcode(pattern): def encode(opcode_string): a64_mask, a64_match = pattern mask, match = make_implicant(opcode_string) return (a64_mask | mask), (a64_match | match) + return encode + a64_opcode = encode_opcode(a64_inst) a32_opcode = encode_opcode(a32_inst) t32_opcode = encode_opcode(t32_inst) @@ -100,30 +106,37 @@ t16_opcode = encode_opcode(t16_inst) # These definitions (in some form) should probably be part of TimingExpr + def literal(value): def body(env): ret = TimingExprLiteral() ret.value = value return ret + return body + def bin(op, left, right): def body(env): ret = TimingExprBin() - ret.op = 'timingExpr' + op + ret.op = "timingExpr" + op ret.left = left(env) ret.right = right(env) return ret + return body + def un(op, arg): def body(env): ret = TimingExprUn() - ret.op = 'timingExpr' + op + ret.op = "timingExpr" + op ret.arg = arg(env) return ret + return body + def ref(name): def body(env): if name in env: @@ -133,8 +146,10 @@ def ref(name): print("Invalid expression name", name) ret = TimingExprNull() return ret + return body + def if_expr(cond, true_expr, false_expr): def body(env): ret = TimingExprIf() @@ -142,21 +157,18 @@ def if_expr(cond, true_expr, false_expr): ret.trueExpr = true_expr(env) ret.falseExpr = false_expr(env) return ret + return body -def src(index): + +def 
src_reg(index): def body(env): ret = TimingExprSrcReg() ret.index = index return ret + return body -def int_reg(reg): - def body(env): - ret = TimingExprReadIntReg() - ret.reg = reg(env) - return ret - return body def let(bindings, expr): def body(env): @@ -180,972 +192,1296 @@ def let(bindings, expr): ret.expr = expr(new_env) return ret + return body + def expr_top(expr): return expr([]) + class HPI_DefaultInt(MinorFUTiming): - description = 'HPI_DefaultInt' + description = "HPI_DefaultInt" mask, match = any_non_a64_inst srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_DefaultA64Int(MinorFUTiming): - description = 'HPI_DefaultA64Int' + description = "HPI_DefaultA64Int" mask, match = any_a64_inst # r, l, (c) srcRegsRelativeLats = [2, 2, 2, 0] + class HPI_DefaultMul(MinorFUTiming): - description = 'HPI_DefaultMul' + description = "HPI_DefaultMul" mask, match = any_non_a64_inst # f, f, f, r, l, a? srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 0] + class HPI_DefaultA64Mul(MinorFUTiming): - description = 'HPI_DefaultA64Mul' + description = "HPI_DefaultA64Mul" mask, match = any_a64_inst # a (zr for mul), l, r srcRegsRelativeLats = [0, 0, 0, 0] # extraCommitLat = 1 + class HPI_DefaultVfp(MinorFUTiming): - description = 'HPI_DefaultVfp' + description = "HPI_DefaultVfp" mask, match = any_non_a64_inst # cpsr, z, z, z, cpacr, fpexc, l_lo, r_lo, l_hi, r_hi (from vadd2h) - srcRegsRelativeLats = [5, 5, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 0] + srcRegsRelativeLats = [5, 5, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 0] + class HPI_DefaultA64Vfp(MinorFUTiming): - description = 'HPI_DefaultA64Vfp' + description = "HPI_DefaultA64Vfp" mask, match = any_a64_inst # cpsr, cpacr_el1, fpscr_exc, ... 
srcRegsRelativeLats = [5, 5, 5, 2] + class HPI_FMADD_A64(MinorFUTiming): - description = 'HPI_FMADD_A64' - mask, match = a64_opcode('0001_1111_0x0x_xxxx__0xxx_xxxx_xxxx_xxxx') + description = "HPI_FMADD_A64" + mask, match = a64_opcode("0001_1111_0x0x_xxxx__0xxx_xxxx_xxxx_xxxx") # t # cpsr, cpacr_el1, fpscr_exc, 1, 1, 2, 2, 3, 3, fpscr_exc, d, d, d, d - srcRegsRelativeLats = [5, 5, 5, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0] + srcRegsRelativeLats = [5, 5, 5, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0] + class HPI_FMSUB_D_A64(MinorFUTiming): - description = 'HPI_FMSUB_D_A64' - mask, match = a64_opcode('0001_1111_0x0x_xxxx__1xxx_xxxx_xxxx_xxxx') + description = "HPI_FMSUB_D_A64" + mask, match = a64_opcode("0001_1111_0x0x_xxxx__1xxx_xxxx_xxxx_xxxx") # t # cpsr, cpacr_el1, fpscr_exc, 1, 1, 2, 2, 3, 3, fpscr_exc, d, d, d, d - srcRegsRelativeLats = [5, 5, 5, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0] + srcRegsRelativeLats = [5, 5, 5, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0] + class HPI_FMOV_A64(MinorFUTiming): - description = 'HPI_FMOV_A64' - mask, match = a64_opcode('0001_1110_0x10_0000__0100_00xx_xxxx_xxxx') + description = "HPI_FMOV_A64" + mask, match = a64_opcode("0001_1110_0x10_0000__0100_00xx_xxxx_xxxx") # cpsr, cpacr_el1, fpscr_exc, 1, 1, 2, 2, 3, 3, fpscr_exc, d, d, d, d srcRegsRelativeLats = [5, 5, 5, 0] + class HPI_ADD_SUB_vector_scalar_A64(MinorFUTiming): - description = 'HPI_ADD_SUB_vector_scalar_A64' - mask, match = a64_opcode('01x1_1110_xx1x_xxxx__1000_01xx_xxxx_xxxx') + description = "HPI_ADD_SUB_vector_scalar_A64" + mask, match = a64_opcode("01x1_1110_xx1x_xxxx__1000_01xx_xxxx_xxxx") # cpsr, z, z, z, cpacr, fpexc, l0, r0, l1, r1, l2, r2, l3, r3 (for vadd2h) srcRegsRelativeLats = [5, 5, 5, 4] class HPI_ADD_SUB_vector_vector_A64(MinorFUTiming): - description = 'HPI_ADD_SUB_vector_vector_A64' - mask, match = a64_opcode('0xx0_1110_xx1x_xxxx__1000_01xx_xxxx_xxxx') + description = "HPI_ADD_SUB_vector_vector_A64" + mask, match = a64_opcode("0xx0_1110_xx1x_xxxx__1000_01xx_xxxx_xxxx") # cpsr, z, z, z, 
cpacr, fpexc, l0, r0, l1, r1, l2, r2, l3, r3 (for vadd2h) srcRegsRelativeLats = [5, 5, 5, 4] + class HPI_FDIV_scalar_32_A64(MinorFUTiming): - description = 'HPI_FDIV_scalar_32_A64' - mask, match = a64_opcode('0001_1110_001x_xxxx__0001_10xx_xxxx_xxxx') + description = "HPI_FDIV_scalar_32_A64" + mask, match = a64_opcode("0001_1110_001x_xxxx__0001_10xx_xxxx_xxxx") extraCommitLat = 6 - srcRegsRelativeLats = [0, 0, 0, 20, 4] + srcRegsRelativeLats = [0, 0, 0, 20, 4] + class HPI_FDIV_scalar_64_A64(MinorFUTiming): - description = 'HPI_FDIV_scalar_64_A64' - mask, match = a64_opcode('0001_1110_011x_xxxx__0001_10xx_xxxx_xxxx') + description = "HPI_FDIV_scalar_64_A64" + mask, match = a64_opcode("0001_1110_011x_xxxx__0001_10xx_xxxx_xxxx") extraCommitLat = 15 - srcRegsRelativeLats = [0, 0, 0, 20, 4] + srcRegsRelativeLats = [0, 0, 0, 20, 4] + # CINC CINV CSEL CSET CSETM CSINC CSINC CSINV CSINV CSNEG class HPI_Cxxx_A64(MinorFUTiming): - description = 'HPI_Cxxx_A64' - mask, match = a64_opcode('xx01_1010_100x_xxxx_xxxx__0xxx_xxxx_xxxx') + description = "HPI_Cxxx_A64" + mask, match = a64_opcode("xx01_1010_100x_xxxx_xxxx__0xxx_xxxx_xxxx") srcRegsRelativeLats = [3, 3, 3, 2, 2] + class HPI_DefaultMem(MinorFUTiming): - description = 'HPI_DefaultMem' + description = "HPI_DefaultMem" mask, match = any_non_a64_inst srcRegsRelativeLats = [1, 1, 1, 1, 1, 2] # Assume that LDR/STR take 2 cycles for resolving dependencies # (1 + 1 of the FU) extraAssumedLat = 2 + class HPI_DefaultMem64(MinorFUTiming): - description = 'HPI_DefaultMem64' + description = "HPI_DefaultMem64" mask, match = any_a64_inst srcRegsRelativeLats = [2] # Assume that LDR/STR take 2 cycles for resolving dependencies # (1 + 1 of the FU) extraAssumedLat = 3 + class HPI_DataProcessingMovShiftr(MinorFUTiming): - description = 'HPI_DataProcessingMovShiftr' - mask, match = a32_opcode('xxxx_0001_101x_xxxx__xxxx_xxxx_xxx1_xxxx') + description = "HPI_DataProcessingMovShiftr" + mask, match = 
a32_opcode("xxxx_0001_101x_xxxx__xxxx_xxxx_xxx1_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_DataProcessingMayShift(MinorFUTiming): - description = 'HPI_DataProcessingMayShift' - mask, match = a32_opcode('xxxx_000x_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_DataProcessingMayShift" + mask, match = a32_opcode("xxxx_000x_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 1, 1, 0] + class HPI_DataProcessingNoShift(MinorFUTiming): - description = 'HPI_DataProcessingNoShift' - mask, match = a32_opcode('xxxx_000x_xxxx_xxxx__xxxx_0000_0xx0_xxxx') + description = "HPI_DataProcessingNoShift" + mask, match = a32_opcode("xxxx_000x_xxxx_xxxx__xxxx_0000_0xx0_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_DataProcessingAllowShifti(MinorFUTiming): - description = 'HPI_DataProcessingAllowShifti' - mask, match = a32_opcode('xxxx_000x_xxxx_xxxx__xxxx_xxxx_xxx0_xxxx') + description = "HPI_DataProcessingAllowShifti" + mask, match = a32_opcode("xxxx_000x_xxxx_xxxx__xxxx_xxxx_xxx0_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 1, 1, 0] + class HPI_DataProcessingSuppressShift(MinorFUTiming): - description = 'HPI_DataProcessingSuppressShift' - mask, match = a32_opcode('xxxx_000x_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_DataProcessingSuppressShift" + mask, match = a32_opcode("xxxx_000x_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [] suppress = True + class HPI_DataProcessingSuppressBranch(MinorFUTiming): - description = 'HPI_DataProcessingSuppressBranch' - mask, match = a32_opcode('xxxx_1010_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_DataProcessingSuppressBranch" + mask, match = a32_opcode("xxxx_1010_xxxx_xxxx__xxxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [] suppress = True + class HPI_BFI_T1(MinorFUTiming): - description = 'HPI_BFI_T1' - mask, match = t32_opcode('1111_0x11_0110_xxxx__0xxx_xxxx_xxxx_xxxx') + description = "HPI_BFI_T1" + mask, match = 
t32_opcode("1111_0x11_0110_xxxx__0xxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + class HPI_BFI_A1(MinorFUTiming): - description = 'HPI_BFI_A1' - mask, match = a32_opcode('xxxx_0111_110x_xxxx__xxxx_xxxx_x001_xxxx') + description = "HPI_BFI_A1" + mask, match = a32_opcode("xxxx_0111_110x_xxxx__xxxx_xxxx_x001_xxxx") # f, f, f, dest, src srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + class HPI_CLZ_T1(MinorFUTiming): - description = 'HPI_CLZ_T1' - mask, match = t32_opcode('1111_1010_1011_xxxx__1111_xxxx_1000_xxxx') + description = "HPI_CLZ_T1" + mask, match = t32_opcode("1111_1010_1011_xxxx__1111_xxxx_1000_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_CLZ_A1(MinorFUTiming): - description = 'HPI_CLZ_A1' - mask, match = a32_opcode('xxxx_0001_0110_xxxx__xxxx_xxxx_0001_xxxx') + description = "HPI_CLZ_A1" + mask, match = a32_opcode("xxxx_0001_0110_xxxx__xxxx_xxxx_0001_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_CMN_immediate_A1(MinorFUTiming): - description = 'HPI_CMN_immediate_A1' - mask, match = a32_opcode('xxxx_0011_0111_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_CMN_immediate_A1" + mask, match = a32_opcode("xxxx_0011_0111_xxxx__xxxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0] + class HPI_CMN_register_A1(MinorFUTiming): - description = 'HPI_CMN_register_A1' - mask, match = a32_opcode('xxxx_0001_0111_xxxx__xxxx_xxxx_xxx0_xxxx') + description = "HPI_CMN_register_A1" + mask, match = a32_opcode("xxxx_0001_0111_xxxx__xxxx_xxxx_xxx0_xxxx") srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0] + class HPI_CMP_immediate_A1(MinorFUTiming): - description = 'HPI_CMP_immediate_A1' - mask, match = a32_opcode('xxxx_0011_0101_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_CMP_immediate_A1" + mask, match = a32_opcode("xxxx_0011_0101_xxxx__xxxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0] + class HPI_CMP_register_A1(MinorFUTiming): - description = 'HPI_CMP_register_A1' - mask, match = 
a32_opcode('xxxx_0001_0101_xxxx__xxxx_xxxx_xxx0_xxxx') + description = "HPI_CMP_register_A1" + mask, match = a32_opcode("xxxx_0001_0101_xxxx__xxxx_xxxx_xxx0_xxxx") srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0] + class HPI_MLA_T1(MinorFUTiming): - description = 'HPI_MLA_T1' - mask, match = t32_opcode('1111_1011_0000_xxxx__xxxx_xxxx_0000_xxxx') + description = "HPI_MLA_T1" + mask, match = t32_opcode("1111_1011_0000_xxxx__xxxx_xxxx_0000_xxxx") # z, z, z, a, l?, r? srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_MLA_A1(MinorFUTiming): - description = 'HPI_MLA_A1' - mask, match = a32_opcode('xxxx_0000_001x_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_MLA_A1" + mask, match = a32_opcode("xxxx_0000_001x_xxxx__xxxx_xxxx_1001_xxxx") # z, z, z, a, l?, r? srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_MADD_A64(MinorFUTiming): - description = 'HPI_MADD_A64' - mask, match = a64_opcode('x001_1011_000x_xxxx__0xxx_xxxx_xxxx_xxxx') + description = "HPI_MADD_A64" + mask, match = a64_opcode("x001_1011_000x_xxxx__0xxx_xxxx_xxxx_xxxx") # a, l?, r? srcRegsRelativeLats = [1, 1, 1, 0] extraCommitLat = 1 + class HPI_MLS_T1(MinorFUTiming): - description = 'HPI_MLS_T1' - mask, match = t32_opcode('1111_1011_0000_xxxx__xxxx_xxxx_0001_xxxx') + description = "HPI_MLS_T1" + mask, match = t32_opcode("1111_1011_0000_xxxx__xxxx_xxxx_0001_xxxx") # z, z, z, l?, a, r? srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0] + class HPI_MLS_A1(MinorFUTiming): - description = 'HPI_MLS_A1' - mask, match = a32_opcode('xxxx_0000_0110_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_MLS_A1" + mask, match = a32_opcode("xxxx_0000_0110_xxxx__xxxx_xxxx_1001_xxxx") # z, z, z, l?, a, r? 
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0] + class HPI_MOVT_A1(MinorFUTiming): - description = 'HPI_MOVT_A1' - mask, match = t32_opcode('xxxx_0010_0100_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_MOVT_A1" + mask, match = t32_opcode("xxxx_0010_0100_xxxx__xxxx_xxxx_xxxx_xxxx") + class HPI_MUL_T1(MinorFUTiming): - description = 'HPI_MUL_T1' - mask, match = t16_opcode('0100_0011_01xx_xxxx') + description = "HPI_MUL_T1" + mask, match = t16_opcode("0100_0011_01xx_xxxx") + + class HPI_MUL_T2(MinorFUTiming): - description = 'HPI_MUL_T2' - mask, match = t32_opcode('1111_1011_0000_xxxx_1111_xxxx_0000_xxxx') + description = "HPI_MUL_T2" + mask, match = t32_opcode("1111_1011_0000_xxxx_1111_xxxx_0000_xxxx") + class HPI_PKH_T1(MinorFUTiming): - description = 'HPI_PKH_T1' - mask, match = t32_opcode('1110_1010_110x_xxxx__xxxx_xxxx_xxxx_xxxx') + description = "HPI_PKH_T1" + mask, match = t32_opcode("1110_1010_110x_xxxx__xxxx_xxxx_xxxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 1, 0] + class HPI_PKH_A1(MinorFUTiming): - description = 'HPI_PKH_A1' - mask, match = a32_opcode('xxxx_0110_1000_xxxx__xxxx_xxxx_xx01_xxxx') + description = "HPI_PKH_A1" + mask, match = a32_opcode("xxxx_0110_1000_xxxx__xxxx_xxxx_xx01_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 1, 0] + class HPI_QADD_QSUB_T1(MinorFUTiming): - description = 'HPI_QADD_QSUB_T1' - mask, match = t32_opcode('1111_1010_1000_xxxx__1111_xxxx_10x0_xxxx') + description = "HPI_QADD_QSUB_T1" + mask, match = t32_opcode("1111_1010_1000_xxxx__1111_xxxx_10x0_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + class HPI_QADD_QSUB_A1(MinorFUTiming): - description = 'HPI_QADD_QSUB_A1' - mask, match = a32_opcode('xxxx_0001_00x0_xxxx__xxxx_xxxx_0101_xxxx') + description = "HPI_QADD_QSUB_A1" + mask, match = a32_opcode("xxxx_0001_00x0_xxxx__xxxx_xxxx_0101_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + # T1 QADD16 QADD8 QSUB16 QSUB8 UQADD16 UQADD8 UQSUB16 UQSUB8 class HPI_QADD_ETC_T1(MinorFUTiming): - description = 'HPI_QADD_ETC_T1' - mask, match 
= t32_opcode('1111_1010_1x0x_xxxx__1111_xxxx_0x01_xxxx') + description = "HPI_QADD_ETC_T1" + mask, match = t32_opcode("1111_1010_1x0x_xxxx__1111_xxxx_0x01_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + # A1 QADD16 QADD8 QSAX QSUB16 QSUB8 UQADD16 UQADD8 UQASX UQSAX UQSUB16 UQSUB8 class HPI_QADD_ETC_A1(MinorFUTiming): - description = 'HPI_QADD_ETC_A1' - mask, match = a32_opcode('xxxx_0110_0x10_xxxx__xxxx_xxxx_xxx1_xxxx') + description = "HPI_QADD_ETC_A1" + mask, match = a32_opcode("xxxx_0110_0x10_xxxx__xxxx_xxxx_xxx1_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + class HPI_QASX_QSAX_UQASX_UQSAX_T1(MinorFUTiming): - description = 'HPI_QASX_QSAX_UQASX_UQSAX_T1' - mask, match = t32_opcode('1111_1010_1x10_xxxx__1111_xxxx_0x01_xxxx') + description = "HPI_QASX_QSAX_UQASX_UQSAX_T1" + mask, match = t32_opcode("1111_1010_1x10_xxxx__1111_xxxx_0x01_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 1, 0] + class HPI_QDADD_QDSUB_T1(MinorFUTiming): - description = 'HPI_QDADD_QDSUB_T1' - mask, match = t32_opcode('1111_1010_1000_xxxx__1111_xxxx_10x1_xxxx') + description = "HPI_QDADD_QDSUB_T1" + mask, match = t32_opcode("1111_1010_1000_xxxx__1111_xxxx_10x1_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 1, 0] + class HPI_QDADD_QDSUB_A1(MinorFUTiming): - description = 'HPI_QDADD_QSUB_A1' - mask, match = a32_opcode('xxxx_0001_01x0_xxxx__xxxx_xxxx_0101_xxxx') + description = "HPI_QDADD_QSUB_A1" + mask, match = a32_opcode("xxxx_0001_01x0_xxxx__xxxx_xxxx_0101_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 1, 0] + class HPI_RBIT_A1(MinorFUTiming): - description = 'HPI_RBIT_A1' - mask, match = a32_opcode('xxxx_0110_1111_xxxx__xxxx_xxxx_0011_xxxx') + description = "HPI_RBIT_A1" + mask, match = a32_opcode("xxxx_0110_1111_xxxx__xxxx_xxxx_0011_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 0] + class HPI_REV_REV16_A1(MinorFUTiming): - description = 'HPI_REV_REV16_A1' - mask, match = a32_opcode('xxxx_0110_1011_xxxx__xxxx_xxxx_x011_xxxx') + description = "HPI_REV_REV16_A1" + mask, match = 
a32_opcode("xxxx_0110_1011_xxxx__xxxx_xxxx_x011_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 0] + class HPI_REVSH_A1(MinorFUTiming): - description = 'HPI_REVSH_A1' - mask, match = a32_opcode('xxxx_0110_1111_xxxx__xxxx_xxxx_1011_xxxx') + description = "HPI_REVSH_A1" + mask, match = a32_opcode("xxxx_0110_1111_xxxx__xxxx_xxxx_1011_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 0] + class HPI_ADD_ETC_A1(MinorFUTiming): - description = 'HPI_ADD_ETC_A1' - mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_x001_xxxx') + description = "HPI_ADD_ETC_A1" + mask, match = a32_opcode("xxxx_0110_0xx1_xxxx__xxxx_xxxx_x001_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 2, 0] + class HPI_ADD_ETC_T1(MinorFUTiming): - description = 'HPI_ADD_ETC_A1' - mask, match = t32_opcode('1111_1010_100x_xxxx__1111_xxxx_0xx0_xxxx') + description = "HPI_ADD_ETC_A1" + mask, match = t32_opcode("1111_1010_100x_xxxx__1111_xxxx_0xx0_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 2, 0] + class HPI_SASX_SHASX_UASX_UHASX_A1(MinorFUTiming): - description = 'HPI_SASX_SHASX_UASX_UHASX_A1' - mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_0011_xxxx') + description = "HPI_SASX_SHASX_UASX_UHASX_A1" + mask, match = a32_opcode("xxxx_0110_0xx1_xxxx__xxxx_xxxx_0011_xxxx") srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_SBFX_UBFX_A1(MinorFUTiming): - description = 'HPI_SBFX_UBFX_A1' - mask, match = a32_opcode('xxxx_0111_1x1x_xxxx__xxxx_xxxx_x101_xxxx') + description = "HPI_SBFX_UBFX_A1" + mask, match = a32_opcode("xxxx_0111_1x1x_xxxx__xxxx_xxxx_x101_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 0] + ### SDIV -sdiv_lat_expr = expr_top(let([ - ('left', un('SignExtend32To64', int_reg(src(4)))), - ('right', un('SignExtend32To64', int_reg(src(3)))), - ('either_signed', bin('Or', - bin('SLessThan', ref('left'), literal(0)), - bin('SLessThan', ref('right'), literal(0)))), - ('left_size', un('SizeInBits', un('Abs', ref('left')))), - ('signed_adjust', if_expr(ref('either_signed'), literal(1), literal(0))), - ('right_size', 
un('SizeInBits', - bin('UDiv', un('Abs', ref('right')), - if_expr(ref('either_signed'), literal(4), literal(2))))), - ('left_minus_right', if_expr( - bin('SLessThan', ref('left_size'), ref('right_size')), - literal(0), - bin('Sub', ref('left_size'), ref('right_size')))) - ], - bin('Add', - ref('signed_adjust'), - if_expr(bin('Equal', ref('right'), literal(0)), - literal(0), - bin('UDiv', ref('left_minus_right'), literal(4)))) - )) +sdiv_lat_expr = expr_top( + let( + [ + ("left", un("SignExtend32To64", src_reg(4))), + ("right", un("SignExtend32To64", src_reg(3))), + ( + "either_signed", + bin( + "Or", + bin("SLessThan", ref("left"), literal(0)), + bin("SLessThan", ref("right"), literal(0)), + ), + ), + ("left_size", un("SizeInBits", un("Abs", ref("left")))), + ( + "signed_adjust", + if_expr(ref("either_signed"), literal(1), literal(0)), + ), + ( + "right_size", + un( + "SizeInBits", + bin( + "UDiv", + un("Abs", ref("right")), + if_expr(ref("either_signed"), literal(4), literal(2)), + ), + ), + ), + ( + "left_minus_right", + if_expr( + bin("SLessThan", ref("left_size"), ref("right_size")), + literal(0), + bin("Sub", ref("left_size"), ref("right_size")), + ), + ), + ], + bin( + "Add", + ref("signed_adjust"), + if_expr( + bin("Equal", ref("right"), literal(0)), + literal(0), + bin("UDiv", ref("left_minus_right"), literal(4)), + ), + ), + ) +) + +sdiv_lat_expr64 = expr_top( + let( + [ + ("left", un("SignExtend32To64", src_reg(0))), + ("right", un("SignExtend32To64", src_reg(1))), + ( + "either_signed", + bin( + "Or", + bin("SLessThan", ref("left"), literal(0)), + bin("SLessThan", ref("right"), literal(0)), + ), + ), + ("left_size", un("SizeInBits", un("Abs", ref("left")))), + ( + "signed_adjust", + if_expr(ref("either_signed"), literal(1), literal(0)), + ), + ( + "right_size", + un( + "SizeInBits", + bin( + "UDiv", + un("Abs", ref("right")), + if_expr(ref("either_signed"), literal(4), literal(2)), + ), + ), + ), + ( + "left_minus_right", + if_expr( + bin("SLessThan", 
ref("left_size"), ref("right_size")), + literal(0), + bin("Sub", ref("left_size"), ref("right_size")), + ), + ), + ], + bin( + "Add", + ref("signed_adjust"), + if_expr( + bin("Equal", ref("right"), literal(0)), + literal(0), + bin("UDiv", ref("left_minus_right"), literal(4)), + ), + ), + ) +) -sdiv_lat_expr64 = expr_top(let([ - ('left', un('SignExtend32To64', int_reg(src(0)))), - ('right', un('SignExtend32To64', int_reg(src(1)))), - ('either_signed', bin('Or', - bin('SLessThan', ref('left'), literal(0)), - bin('SLessThan', ref('right'), literal(0)))), - ('left_size', un('SizeInBits', un('Abs', ref('left')))), - ('signed_adjust', if_expr(ref('either_signed'), literal(1), literal(0))), - ('right_size', un('SizeInBits', - bin('UDiv', un('Abs', ref('right')), - if_expr(ref('either_signed'), literal(4), literal(2))))), - ('left_minus_right', if_expr( - bin('SLessThan', ref('left_size'), ref('right_size')), - literal(0), - bin('Sub', ref('left_size'), ref('right_size')))) - ], - bin('Add', - ref('signed_adjust'), - if_expr(bin('Equal', ref('right'), literal(0)), - literal(0), - bin('UDiv', ref('left_minus_right'), literal(4)))) - )) class HPI_SDIV_A1(MinorFUTiming): - description = 'HPI_SDIV_A1' - mask, match = a32_opcode('xxxx_0111_0001_xxxx__xxxx_xxxx_0001_xxxx') + description = "HPI_SDIV_A1" + mask, match = a32_opcode("xxxx_0111_0001_xxxx__xxxx_xxxx_0001_xxxx") extraCommitLat = 0 srcRegsRelativeLats = [] extraCommitLatExpr = sdiv_lat_expr + class HPI_SDIV_A64(MinorFUTiming): - description = 'HPI_SDIV_A64' - mask, match = a64_opcode('x001_1010_110x_xxxx__0000_11xx_xxxx_xxxx') + description = "HPI_SDIV_A64" + mask, match = a64_opcode("x001_1010_110x_xxxx__0000_11xx_xxxx_xxxx") extraCommitLat = 0 srcRegsRelativeLats = [] extraCommitLatExpr = sdiv_lat_expr64 + ### SEL + class HPI_SEL_A1(MinorFUTiming): - description = 'HPI_SEL_A1' - mask, match = a32_opcode('xxxx_0110_1000_xxxx__xxxx_xxxx_1011_xxxx') + description = "HPI_SEL_A1" + mask, match = 
a32_opcode("xxxx_0110_1000_xxxx__xxxx_xxxx_1011_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 2, 2, 0] + class HPI_SEL_A1_Suppress(MinorFUTiming): - description = 'HPI_SEL_A1_Suppress' - mask, match = a32_opcode('xxxx_0110_1000_xxxx__xxxx_xxxx_1011_xxxx') + description = "HPI_SEL_A1_Suppress" + mask, match = a32_opcode("xxxx_0110_1000_xxxx__xxxx_xxxx_1011_xxxx") srcRegsRelativeLats = [] suppress = True + class HPI_SHSAX_SSAX_UHSAX_USAX_A1(MinorFUTiming): - description = 'HPI_SHSAX_SSAX_UHSAX_USAX_A1' - mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_0101_xxxx') + description = "HPI_SHSAX_SSAX_UHSAX_USAX_A1" + mask, match = a32_opcode("xxxx_0110_0xx1_xxxx__xxxx_xxxx_0101_xxxx") # As Default srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0] + class HPI_USUB_ETC_A1(MinorFUTiming): - description = 'HPI_USUB_ETC_A1' - mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_x111_xxxx') + description = "HPI_USUB_ETC_A1" + mask, match = a32_opcode("xxxx_0110_0xx1_xxxx__xxxx_xxxx_x111_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 2, 0] + class HPI_SMLABB_T1(MinorFUTiming): - description = 'HPI_SMLABB_T1' - mask, match = t32_opcode('1111_1011_0001_xxxx__xxxx_xxxx_00xx_xxxx') + description = "HPI_SMLABB_T1" + mask, match = t32_opcode("1111_1011_0001_xxxx__xxxx_xxxx_00xx_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_SMLABB_A1(MinorFUTiming): - description = 'HPI_SMLABB_A1' - mask, match = a32_opcode('xxxx_0001_0000_xxxx__xxxx_xxxx_1xx0_xxxx') + description = "HPI_SMLABB_A1" + mask, match = a32_opcode("xxxx_0001_0000_xxxx__xxxx_xxxx_1xx0_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_SMLAD_T1(MinorFUTiming): - description = 'HPI_SMLAD_T1' - mask, match = t32_opcode('1111_1011_0010_xxxx__xxxx_xxxx_000x_xxxx') + description = "HPI_SMLAD_T1" + mask, match = t32_opcode("1111_1011_0010_xxxx__xxxx_xxxx_000x_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_SMLAD_A1(MinorFUTiming): - description = 'HPI_SMLAD_A1' - mask, match = 
a32_opcode('xxxx_0111_0000_xxxx__xxxx_xxxx_00x1_xxxx') + description = "HPI_SMLAD_A1" + mask, match = a32_opcode("xxxx_0111_0000_xxxx__xxxx_xxxx_00x1_xxxx") # z, z, z, l, r, a srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_SMLAL_T1(MinorFUTiming): - description = 'HPI_SMLAL_T1' - mask, match = t32_opcode('1111_1011_1100_xxxx__xxxx_xxxx_0000_xxxx') + description = "HPI_SMLAL_T1" + mask, match = t32_opcode("1111_1011_1100_xxxx__xxxx_xxxx_0000_xxxx") + + class HPI_SMLAL_A1(MinorFUTiming): - description = 'HPI_SMLAL_A1' - mask, match = a32_opcode('xxxx_0000_111x_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_SMLAL_A1" + mask, match = a32_opcode("xxxx_0000_111x_xxxx__xxxx_xxxx_1001_xxxx") + class HPI_SMLALBB_T1(MinorFUTiming): - description = 'HPI_SMLALBB_T1' - mask, match = t32_opcode('1111_1011_1100_xxxx__xxxx_xxxx_10xx_xxxx') + description = "HPI_SMLALBB_T1" + mask, match = t32_opcode("1111_1011_1100_xxxx__xxxx_xxxx_10xx_xxxx") + + class HPI_SMLALBB_A1(MinorFUTiming): - description = 'HPI_SMLALBB_A1' - mask, match = a32_opcode('xxxx_0001_0100_xxxx__xxxx_xxxx_1xx0_xxxx') + description = "HPI_SMLALBB_A1" + mask, match = a32_opcode("xxxx_0001_0100_xxxx__xxxx_xxxx_1xx0_xxxx") + class HPI_SMLALD_T1(MinorFUTiming): - description = 'HPI_SMLALD_T1' - mask, match = t32_opcode('1111_1011_1100_xxxx__xxxx_xxxx_110x_xxxx') + description = "HPI_SMLALD_T1" + mask, match = t32_opcode("1111_1011_1100_xxxx__xxxx_xxxx_110x_xxxx") + + class HPI_SMLALD_A1(MinorFUTiming): - description = 'HPI_SMLALD_A1' - mask, match = a32_opcode('xxxx_0111_0100_xxxx__xxxx_xxxx_00x1_xxxx') + description = "HPI_SMLALD_A1" + mask, match = a32_opcode("xxxx_0111_0100_xxxx__xxxx_xxxx_00x1_xxxx") + class HPI_SMLAWB_T1(MinorFUTiming): - description = 'HPI_SMLAWB_T1' - mask, match = t32_opcode('1111_1011_0011_xxxx__xxxx_xxxx_000x_xxxx') + description = "HPI_SMLAWB_T1" + mask, match = t32_opcode("1111_1011_0011_xxxx__xxxx_xxxx_000x_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class 
HPI_SMLAWB_A1(MinorFUTiming): - description = 'HPI_SMLAWB_A1' - mask, match = a32_opcode('xxxx_0001_0010_xxxx__xxxx_xxxx_1x00_xxxx') + description = "HPI_SMLAWB_A1" + mask, match = a32_opcode("xxxx_0001_0010_xxxx__xxxx_xxxx_1x00_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_SMLSD_A1(MinorFUTiming): - description = 'HPI_SMLSD_A1' - mask, match = a32_opcode('xxxx_0111_0000_xxxx__xxxx_xxxx_01x1_xxxx') + description = "HPI_SMLSD_A1" + mask, match = a32_opcode("xxxx_0111_0000_xxxx__xxxx_xxxx_01x1_xxxx") + class HPI_SMLSLD_T1(MinorFUTiming): - description = 'HPI_SMLSLD_T1' - mask, match = t32_opcode('1111_1011_1101_xxxx__xxxx_xxxx_110x_xxxx') + description = "HPI_SMLSLD_T1" + mask, match = t32_opcode("1111_1011_1101_xxxx__xxxx_xxxx_110x_xxxx") + + class HPI_SMLSLD_A1(MinorFUTiming): - description = 'HPI_SMLSLD_A1' - mask, match = a32_opcode('xxxx_0111_0100_xxxx__xxxx_xxxx_01x1_xxxx') + description = "HPI_SMLSLD_A1" + mask, match = a32_opcode("xxxx_0111_0100_xxxx__xxxx_xxxx_01x1_xxxx") + class HPI_SMMLA_T1(MinorFUTiming): - description = 'HPI_SMMLA_T1' - mask, match = t32_opcode('1111_1011_0101_xxxx__xxxx_xxxx_000x_xxxx') + description = "HPI_SMMLA_T1" + mask, match = t32_opcode("1111_1011_0101_xxxx__xxxx_xxxx_000x_xxxx") # ^^^^ != 1111 srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0] + class HPI_SMMLA_A1(MinorFUTiming): - description = 'HPI_SMMLA_A1' + description = "HPI_SMMLA_A1" # Note that this must be after the encoding for SMMUL - mask, match = a32_opcode('xxxx_0111_0101_xxxx__xxxx_xxxx_00x1_xxxx') + mask, match = a32_opcode("xxxx_0111_0101_xxxx__xxxx_xxxx_00x1_xxxx") # ^^^^ != 1111 srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0] + class HPI_SMMLS_T1(MinorFUTiming): - description = 'HPI_SMMLS_T1' - mask, match = t32_opcode('1111_1011_0110_xxxx__xxxx_xxxx_000x_xxxx') + description = "HPI_SMMLS_T1" + mask, match = t32_opcode("1111_1011_0110_xxxx__xxxx_xxxx_000x_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0] + class HPI_SMMLS_A1(MinorFUTiming): - 
description = 'HPI_SMMLS_A1' - mask, match = a32_opcode('xxxx_0111_0101_xxxx__xxxx_xxxx_11x1_xxxx') + description = "HPI_SMMLS_A1" + mask, match = a32_opcode("xxxx_0111_0101_xxxx__xxxx_xxxx_11x1_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0] + class HPI_SMMUL_T1(MinorFUTiming): - description = 'HPI_SMMUL_T1' - mask, match = t32_opcode('1111_1011_0101_xxxx__1111_xxxx_000x_xxxx') + description = "HPI_SMMUL_T1" + mask, match = t32_opcode("1111_1011_0101_xxxx__1111_xxxx_000x_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 0] + class HPI_SMMUL_A1(MinorFUTiming): - description = 'HPI_SMMUL_A1' - mask, match = a32_opcode('xxxx_0111_0101_xxxx__1111_xxxx_00x1_xxxx') + description = "HPI_SMMUL_A1" + mask, match = a32_opcode("xxxx_0111_0101_xxxx__1111_xxxx_00x1_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 0] + class HPI_SMUAD_T1(MinorFUTiming): - description = 'HPI_SMUAD_T1' - mask, match = t32_opcode('1111_1011_0010_xxxx__1111_xxxx_000x_xxxx') + description = "HPI_SMUAD_T1" + mask, match = t32_opcode("1111_1011_0010_xxxx__1111_xxxx_000x_xxxx") + + class HPI_SMUAD_A1(MinorFUTiming): - description = 'HPI_SMUAD_A1' - mask, match = a32_opcode('xxxx_0111_0000_xxxx__1111_xxxx_00x1_xxxx') + description = "HPI_SMUAD_A1" + mask, match = a32_opcode("xxxx_0111_0000_xxxx__1111_xxxx_00x1_xxxx") + class HPI_SMULBB_T1(MinorFUTiming): - description = 'HPI_SMULBB_T1' - mask, match = t32_opcode('1111_1011_0001_xxxx__1111_xxxx_00xx_xxxx') + description = "HPI_SMULBB_T1" + mask, match = t32_opcode("1111_1011_0001_xxxx__1111_xxxx_00xx_xxxx") + + class HPI_SMULBB_A1(MinorFUTiming): - description = 'HPI_SMULBB_A1' - mask, match = a32_opcode('xxxx_0001_0110_xxxx__xxxx_xxxx_1xx0_xxxx') + description = "HPI_SMULBB_A1" + mask, match = a32_opcode("xxxx_0001_0110_xxxx__xxxx_xxxx_1xx0_xxxx") + class HPI_SMULL_T1(MinorFUTiming): - description = 'HPI_SMULL_T1' - mask, match = t32_opcode('1111_1011_1000_xxxx__xxxx_xxxx_0000_xxxx') + description = "HPI_SMULL_T1" + mask, match = 
t32_opcode("1111_1011_1000_xxxx__xxxx_xxxx_0000_xxxx") + + class HPI_SMULL_A1(MinorFUTiming): - description = 'HPI_SMULL_A1' - mask, match = a32_opcode('xxxx_0000_110x_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_SMULL_A1" + mask, match = a32_opcode("xxxx_0000_110x_xxxx__xxxx_xxxx_1001_xxxx") + class HPI_SMULWB_T1(MinorFUTiming): - description = 'HPI_SMULWB_T1' - mask, match = t32_opcode('1111_1011_0011_xxxx__1111_xxxx_000x_xxxx') + description = "HPI_SMULWB_T1" + mask, match = t32_opcode("1111_1011_0011_xxxx__1111_xxxx_000x_xxxx") + + class HPI_SMULWB_A1(MinorFUTiming): - description = 'HPI_SMULWB_A1' - mask, match = a32_opcode('xxxx_0001_0010_xxxx__xxxx_xxxx_1x10_xxxx') + description = "HPI_SMULWB_A1" + mask, match = a32_opcode("xxxx_0001_0010_xxxx__xxxx_xxxx_1x10_xxxx") + class HPI_SMUSD_T1(MinorFUTiming): - description = 'HPI_SMUSD_T1' - mask, match = t32_opcode('1111_1011_0100_xxxx__1111_xxxx_000x_xxxx') + description = "HPI_SMUSD_T1" + mask, match = t32_opcode("1111_1011_0100_xxxx__1111_xxxx_000x_xxxx") + + class HPI_SMUSD_A1(MinorFUTiming): - description = 'HPI_SMUSD_A1' - mask, match = a32_opcode('xxxx_0111_0000_xxxx__1111_xxxx_01x1_xxxx') + description = "HPI_SMUSD_A1" + mask, match = a32_opcode("xxxx_0111_0000_xxxx__1111_xxxx_01x1_xxxx") + class HPI_SSAT_USAT_no_shift_A1(MinorFUTiming): - description = 'HPI_SSAT_USAT_no_shift_A1' + description = "HPI_SSAT_USAT_no_shift_A1" # Order *before* shift - mask, match = a32_opcode('xxxx_0110_1x1x_xxxx__xxxx_0000_0001_xxxx') + mask, match = a32_opcode("xxxx_0110_1x1x_xxxx__xxxx_0000_0001_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 0] + class HPI_SSAT_USAT_shift_A1(MinorFUTiming): - description = 'HPI_SSAT_USAT_shift_A1' + description = "HPI_SSAT_USAT_shift_A1" # Order after shift - mask, match = a32_opcode('xxxx_0110_1x1x_xxxx__xxxx_xxxx_xx01_xxxx') + mask, match = a32_opcode("xxxx_0110_1x1x_xxxx__xxxx_xxxx_xx01_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 0] + class HPI_SSAT16_USAT16_A1(MinorFUTiming): - 
description = 'HPI_SSAT16_USAT16_A1' - mask, match = a32_opcode('xxxx_0110_1x10_xxxx__xxxx_xxxx_0011_xxxx') + description = "HPI_SSAT16_USAT16_A1" + mask, match = a32_opcode("xxxx_0110_1x10_xxxx__xxxx_xxxx_0011_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 0] + class HPI_SXTAB_T1(MinorFUTiming): - description = 'HPI_SXTAB_T1' - mask, match = t32_opcode('1111_1010_0100_xxxx__1111_xxxx_1xxx_xxxx') + description = "HPI_SXTAB_T1" + mask, match = t32_opcode("1111_1010_0100_xxxx__1111_xxxx_1xxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] + class HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1(MinorFUTiming): - description = 'HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1' + description = "HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1" # Place AFTER HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1 # e6[9d][^f]0070 are undefined - mask, match = a32_opcode('xxxx_0110_1xxx_xxxx__xxxx_xxxx_0111_xxxx') + mask, match = a32_opcode("xxxx_0110_1xxx_xxxx__xxxx_xxxx_0111_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] + class HPI_SXTAB16_T1(MinorFUTiming): - description = 'HPI_SXTAB16_T1' - mask, match = t32_opcode('1111_1010_0010_xxxx__1111_xxxx_1xxx_xxxx') + description = "HPI_SXTAB16_T1" + mask, match = t32_opcode("1111_1010_0010_xxxx__1111_xxxx_1xxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] + class HPI_SXTAH_T1(MinorFUTiming): - description = 'HPI_SXTAH_T1' - mask, match = t32_opcode('1111_1010_0000_xxxx__1111_xxxx_1xxx_xxxx') + description = "HPI_SXTAH_T1" + mask, match = t32_opcode("1111_1010_0000_xxxx__1111_xxxx_1xxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] + class HPI_SXTB_T1(MinorFUTiming): - description = 'HPI_SXTB_T1' - mask, match = t16_opcode('1011_0010_01xx_xxxx') + description = "HPI_SXTB_T1" + mask, match = t16_opcode("1011_0010_01xx_xxxx") + + class HPI_SXTB_T2(MinorFUTiming): - description = 'HPI_SXTB_T2' - mask, match = t32_opcode('1111_1010_0100_1111__1111_xxxx_1xxx_xxxx') + description = "HPI_SXTB_T2" + mask, match = 
t32_opcode("1111_1010_0100_1111__1111_xxxx_1xxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] + class HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1(MinorFUTiming): - description = 'HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1' + description = "HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1" # e6[9d]f0070 are undefined - mask, match = a32_opcode('xxxx_0110_1xxx_1111__xxxx_xxxx_0111_xxxx') + mask, match = a32_opcode("xxxx_0110_1xxx_1111__xxxx_xxxx_0111_xxxx") srcRegsRelativeLats = [0, 0, 0, 2, 0] + class HPI_SXTB16_T1(MinorFUTiming): - description = 'HPI_SXTB16_T1' - mask, match = t32_opcode('1111_1010_0010_1111__1111_xxxx_1xxx_xxxx') + description = "HPI_SXTB16_T1" + mask, match = t32_opcode("1111_1010_0010_1111__1111_xxxx_1xxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] + class HPI_SXTH_T1(MinorFUTiming): - description = 'HPI_SXTH_T1' - mask, match = t16_opcode('1011_0010_00xx_xxxx') + description = "HPI_SXTH_T1" + mask, match = t16_opcode("1011_0010_00xx_xxxx") + + class HPI_SXTH_T2(MinorFUTiming): - description = 'HPI_SXTH_T2' - mask, match = t32_opcode('1111_1010_0000_1111__1111_xxxx_1xxx_xxxx') + description = "HPI_SXTH_T2" + mask, match = t32_opcode("1111_1010_0000_1111__1111_xxxx_1xxx_xxxx") srcRegsRelativeLats = [0, 0, 0, 1, 2, 0] -class HPI_UDIV_T1(MinorFUTiming): - description = 'HPI_UDIV_T1' - mask, match = t32_opcode('1111_1011_1011_xxxx__xxxx_xxxx_1111_xxxx') -udiv_lat_expr = expr_top(let([ - ('left', int_reg(src(4))), - ('right', int_reg(src(3))), - ('left_size', un('SizeInBits', ref('left'))), - ('right_size', un('SizeInBits', - bin('UDiv', ref('right'), literal(2)))), - ('left_minus_right', if_expr( - bin('SLessThan', ref('left_size'), ref('right_size')), - literal(0), - bin('Sub', ref('left_size'), ref('right_size')))) - ], - if_expr(bin('Equal', ref('right'), literal(0)), - literal(0), - bin('UDiv', ref('left_minus_right'), literal(4))) - )) +class HPI_UDIV_T1(MinorFUTiming): + description = "HPI_UDIV_T1" + mask, match = 
t32_opcode("1111_1011_1011_xxxx__xxxx_xxxx_1111_xxxx") + + +udiv_lat_expr = expr_top( + let( + [ + ("left", src_reg(4)), + ("right", src_reg(3)), + ("left_size", un("SizeInBits", ref("left"))), + ( + "right_size", + un("SizeInBits", bin("UDiv", ref("right"), literal(2))), + ), + ( + "left_minus_right", + if_expr( + bin("SLessThan", ref("left_size"), ref("right_size")), + literal(0), + bin("Sub", ref("left_size"), ref("right_size")), + ), + ), + ], + if_expr( + bin("Equal", ref("right"), literal(0)), + literal(0), + bin("UDiv", ref("left_minus_right"), literal(4)), + ), + ) +) + class HPI_UDIV_A1(MinorFUTiming): - description = 'HPI_UDIV_A1' - mask, match = a32_opcode('xxxx_0111_0011_xxxx__xxxx_xxxx_0001_xxxx') + description = "HPI_UDIV_A1" + mask, match = a32_opcode("xxxx_0111_0011_xxxx__xxxx_xxxx_0001_xxxx") extraCommitLat = 0 srcRegsRelativeLats = [] extraCommitLatExpr = udiv_lat_expr + class HPI_UMAAL_T1(MinorFUTiming): - description = 'HPI_UMAAL_T1' - mask, match = t32_opcode('1111_1011_1110_xxxx__xxxx_xxxx_0110_xxxx') + description = "HPI_UMAAL_T1" + mask, match = t32_opcode("1111_1011_1110_xxxx__xxxx_xxxx_0110_xxxx") # z, z, z, dlo, dhi, l, r extraCommitLat = 1 srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 0, 0] + class HPI_UMAAL_A1(MinorFUTiming): - description = 'HPI_UMAAL_A1' - mask, match = a32_opcode('xxxx_0000_0100_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_UMAAL_A1" + mask, match = a32_opcode("xxxx_0000_0100_xxxx__xxxx_xxxx_1001_xxxx") # z, z, z, dlo, dhi, l, r extraCommitLat = 1 srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 0, 0] + class HPI_UMLAL_T1(MinorFUTiming): - description = 'HPI_UMLAL_T1' - mask, match = t32_opcode('1111_1011_1110_xxxx__xxxx_xxxx_0000_xxxx') + description = "HPI_UMLAL_T1" + mask, match = t32_opcode("1111_1011_1110_xxxx__xxxx_xxxx_0000_xxxx") + class HPI_UMLAL_A1(MinorFUTiming): - description = 'HPI_UMLAL_A1' - mask, match = t32_opcode('xxxx_0000_101x_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_UMLAL_A1" + mask, match = 
t32_opcode("xxxx_0000_101x_xxxx__xxxx_xxxx_1001_xxxx") + class HPI_UMULL_T1(MinorFUTiming): - description = 'HPI_UMULL_T1' - mask, match = t32_opcode('1111_1011_1010_xxxx__xxxx_xxxx_0000_xxxx') + description = "HPI_UMULL_T1" + mask, match = t32_opcode("1111_1011_1010_xxxx__xxxx_xxxx_0000_xxxx") + class HPI_UMULL_A1(MinorFUTiming): - description = 'HPI_UMULL_A1' - mask, match = a32_opcode('xxxx_0000_100x_xxxx__xxxx_xxxx_1001_xxxx') + description = "HPI_UMULL_A1" + mask, match = a32_opcode("xxxx_0000_100x_xxxx__xxxx_xxxx_1001_xxxx") + class HPI_USAD8_USADA8_A1(MinorFUTiming): - description = 'HPI_USAD8_USADA8_A1' - mask, match = a32_opcode('xxxx_0111_1000_xxxx__xxxx_xxxx_0001_xxxx') + description = "HPI_USAD8_USADA8_A1" + mask, match = a32_opcode("xxxx_0111_1000_xxxx__xxxx_xxxx_0001_xxxx") srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0] + class HPI_USAD8_USADA8_A1_Suppress(MinorFUTiming): - description = 'HPI_USAD8_USADA8_A1_Suppress' - mask, match = a32_opcode('xxxx_0111_1000_xxxx__xxxx_xxxx_0001_xxxx') + description = "HPI_USAD8_USADA8_A1_Suppress" + mask, match = a32_opcode("xxxx_0111_1000_xxxx__xxxx_xxxx_0001_xxxx") srcRegsRelativeLats = [] suppress = True + class HPI_VMOV_immediate_A1(MinorFUTiming): - description = 'HPI_VMOV_register_A1' - mask, match = a32_opcode('1111_0010_0x10_xxxx_xxxx_0001_xxx1_xxxx') + description = "HPI_VMOV_register_A1" + mask, match = a32_opcode("1111_0010_0x10_xxxx_xxxx_0001_xxx1_xxxx") # cpsr, z, z, z, hcptr, nsacr, cpacr, fpexc, scr srcRegsRelativeLats = [5, 5, 5, 5, 5, 5, 5, 5, 5, 0] + class HPI_VMRS_A1(MinorFUTiming): - description = 'HPI_VMRS_A1' - mask, match = a32_opcode('xxxx_1110_1111_0001_xxxx_1010_xxx1_xxxx') + description = "HPI_VMRS_A1" + mask, match = a32_opcode("xxxx_1110_1111_0001_xxxx_1010_xxx1_xxxx") # cpsr,z,z,z,hcptr,nsacr,cpacr,scr,r42 srcRegsRelativeLats = [5, 5, 5, 5, 5, 5, 5, 5, 5, 0] + class HPI_VMOV_register_A2(MinorFUTiming): - description = 'HPI_VMOV_register_A2' - mask, match = 
a32_opcode('xxxx_1110_1x11_0000_xxxx_101x_01x0_xxxx') + description = "HPI_VMOV_register_A2" + mask, match = a32_opcode("xxxx_1110_1x11_0000_xxxx_101x_01x0_xxxx") # cpsr, z, r39, z, hcptr, nsacr, cpacr, fpexc, scr, f4, f5, f0, f1 - srcRegsRelativeLats = \ - [5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 0] + srcRegsRelativeLats = [ + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 0, + ] + # VADD.I16 D/VADD.F32 D/VADD.I8 D/VADD.I32 D class HPI_VADD2H_A32(MinorFUTiming): - description = 'Vadd2hALU' - mask, match = a32_opcode('1111_0010_0xxx_xxxx__xxxx_1000_xxx0_xxxx') + description = "Vadd2hALU" + mask, match = a32_opcode("1111_0010_0xxx_xxxx__xxxx_1000_xxx0_xxxx") # cpsr, z, z, z, cpacr, fpexc, l0, r0, l1, r1, l2, r2, l3, r3 (for vadd2h) - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VAQQHN.I16 Q/VAQQHN.I32 Q/VAQQHN.I64 Q class HPI_VADDHN_A32(MinorFUTiming): - description = 'VaddhnALU' - mask, match = a32_opcode('1111_0010_1xxx_xxxx__xxxx_0100_x0x0_xxxx') + description = "VaddhnALU" + mask, match = a32_opcode("1111_0010_1xxx_xxxx__xxxx_0100_x0x0_xxxx") # cpsr, z, z, z, cpacr, fpexc, l0, l1, l2, l3, r0, r1, r2, r3 - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + class HPI_VADDL_A32(MinorFUTiming): - description = 'VaddlALU' - mask, match = a32_opcode('1111_001x_1xxx_xxxx__xxxx_0000_x0x0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "VaddlALU" + mask, match = a32_opcode("1111_001x_1xxx_xxxx__xxxx_0000_x0x0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + class HPI_VADDW_A32(MinorFUTiming): - description = 'HPI_VADDW_A32' - mask, match = a32_opcode('1111_001x_1xxx_xxxx__xxxx_0001_x0x0_xxxx') + description = "HPI_VADDW_A32" + mask, match = 
a32_opcode("1111_001x_1xxx_xxxx__xxxx_0001_x0x0_xxxx") # cpsr, z, z, z, cpacr, fpexc, l0, l1, l2, l3, r0, r1 - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 3, 3, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 3, 3, 0] + # VHADD/VHSUB S8,S16,S32,U8,U16,U32 Q and D class HPI_VHADD_A32(MinorFUTiming): - description = 'HPI_VHADD_A32' - mask, match = a32_opcode('1111_001x_0xxx_xxxx__xxxx_00x0_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VHADD_A32" + mask, match = a32_opcode("1111_001x_0xxx_xxxx__xxxx_00x0_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VPADAL_A32(MinorFUTiming): - description = 'VpadalALU' - mask, match = a32_opcode('1111_0011_1x11_xx00__xxxx_0110_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + description = "VpadalALU" + mask, match = a32_opcode("1111_0011_1x11_xx00__xxxx_0110_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + # VPADDH.I16 class HPI_VPADDH_A32(MinorFUTiming): - description = 'VpaddhALU' - mask, match = a32_opcode('1111_0010_0xxx_xxxx__xxxx_1011_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "VpaddhALU" + mask, match = a32_opcode("1111_0010_0xxx_xxxx__xxxx_1011_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + # VPADDH.F32 class HPI_VPADDS_A32(MinorFUTiming): - description = 'VpaddsALU' - mask, match = a32_opcode('1111_0011_0x0x_xxxx__xxxx_1101_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + description = "VpaddsALU" + mask, match = a32_opcode("1111_0011_0x0x_xxxx__xxxx_1101_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + # VPADDL.S16 class HPI_VPADDL_A32(MinorFUTiming): - description = 'VpaddlALU' - mask, match = a32_opcode('1111_0011_1x11_xx00__xxxx_0010_xxx0_xxxx') - srcRegsRelativeLats = 
[0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "VpaddlALU" + mask, match = a32_opcode("1111_0011_1x11_xx00__xxxx_0010_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + # VRADDHN.I16 class HPI_VRADDHN_A32(MinorFUTiming): - description = 'HPI_VRADDHN_A32' - mask, match = a32_opcode('1111_0011_1xxx_xxxx__xxxx_0100_x0x0_xxxx') + description = "HPI_VRADDHN_A32" + mask, match = a32_opcode("1111_0011_1xxx_xxxx__xxxx_0100_x0x0_xxxx") # cpsr, z, z, z, cpacr, fpexc, l0, l1, l2, l3, r0, r1, r2, r3 - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VRHADD_A32(MinorFUTiming): - description = 'VrhaddALU' - mask, match = a32_opcode('1111_001x_0xxx_xxxx__xxxx_0001_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "VrhaddALU" + mask, match = a32_opcode("1111_001x_0xxx_xxxx__xxxx_0001_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VQADD_A32(MinorFUTiming): - description = 'VqaddALU' - mask, match = a32_opcode('1111_001x_0xxx_xxxx__xxxx_0000_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "VqaddALU" + mask, match = a32_opcode("1111_001x_0xxx_xxxx__xxxx_0000_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + class HPI_VANDQ_A32(MinorFUTiming): - description = 'VandqALU' - mask, match = a32_opcode('1111_0010_0x00_xxxx__xxxx_0001_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 0] + description = "VandqALU" + mask, match = a32_opcode("1111_0010_0x00_xxxx__xxxx_0001_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 0] + # VMUL (integer) class HPI_VMULI_A32(MinorFUTiming): - description = 'VmuliALU' - mask, match = a32_opcode('1111_001x_0xxx_xxxx__xxxx_1001_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 
0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + description = "VmuliALU" + mask, match = a32_opcode("1111_001x_0xxx_xxxx__xxxx_1001_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + # VBIC (reg) class HPI_VBIC_A32(MinorFUTiming): - description = 'VbicALU' - mask, match = a32_opcode('1111_0010_0x01_xxxx__xxxx_0001_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 0] + description = "VbicALU" + mask, match = a32_opcode("1111_0010_0x01_xxxx__xxxx_0001_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 0] + # VBIF VBIT VBSL class HPI_VBIF_ETC_A32(MinorFUTiming): - description = 'VbifALU' - mask, match = a32_opcode('1111_0011_0xxx_xxxx__xxxx_0001_xxx1_xxxx') - srcRegsRelativeLats = \ - [0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0] + description = "VbifALU" + mask, match = a32_opcode("1111_0011_0xxx_xxxx__xxxx_0001_xxx1_xxxx") + srcRegsRelativeLats = [ + 0, + 0, + 0, + 0, + 0, + 0, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 0, + ] + class HPI_VACGE_A32(MinorFUTiming): - description = 'VacgeALU' - mask, match = a32_opcode('1111_0011_0xxx_xxxx__xxxx_1110_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "VacgeALU" + mask, match = a32_opcode("1111_0011_0xxx_xxxx__xxxx_1110_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VCEQ.F32 class HPI_VCEQ_A32(MinorFUTiming): - description = 'VceqALU' - mask, match = a32_opcode('1111_0010_0x0x_xxxx__xxxx_1110_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "VceqALU" + mask, match = a32_opcode("1111_0010_0x0x_xxxx__xxxx_1110_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VCEQ.[IS]... 
register class HPI_VCEQI_A32(MinorFUTiming): - description = 'VceqiALU' - mask, match = a32_opcode('1111_0011_0xxx_xxxx__xxxx_1000_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "VceqiALU" + mask, match = a32_opcode("1111_0011_0xxx_xxxx__xxxx_1000_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VCEQ.[IS]... immediate class HPI_VCEQII_A32(MinorFUTiming): - description = 'HPI_VCEQII_A32' - mask, match = a32_opcode('1111_0011_1x11_xx01__xxxx_0x01_0xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VCEQII_A32" + mask, match = a32_opcode("1111_0011_1x11_xx01__xxxx_0x01_0xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VTST_A32(MinorFUTiming): - description = 'HPI_VTST_A32' - mask, match = a32_opcode('1111_0010_0xxx_xxxx__xxxx_1000_xxx1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "HPI_VTST_A32" + mask, match = a32_opcode("1111_0010_0xxx_xxxx__xxxx_1000_xxx1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0] + class HPI_VCLZ_A32(MinorFUTiming): - description = 'HPI_VCLZ_A32' - mask, match = a32_opcode('1111_0011_1x11_xx00__xxxx_0100_1xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VCLZ_A32" + mask, match = a32_opcode("1111_0011_1x11_xx00__xxxx_0100_1xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VCNT_A32(MinorFUTiming): - description = 'HPI_VCNT_A32' - mask, match = a32_opcode('1111_0011_1x11_xx00__xxxx_0101_0xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VCNT_A32" + mask, match = a32_opcode("1111_0011_1x11_xx00__xxxx_0101_0xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class 
HPI_VEXT_A32(MinorFUTiming): - description = 'HPI_VCNT_A32' - mask, match = a32_opcode('1111_0010_1x11_xxxx__xxxx_xxxx_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VCNT_A32" + mask, match = a32_opcode("1111_0010_1x11_xxxx__xxxx_xxxx_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VMAX VMIN integer class HPI_VMAXI_A32(MinorFUTiming): - description = 'HPI_VMAXI_A32' - mask, match = a32_opcode('1111_001x_0xxx_xxxx__xxxx_0110_xxxx_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VMAXI_A32" + mask, match = a32_opcode("1111_001x_0xxx_xxxx__xxxx_0110_xxxx_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VMAX VMIN float class HPI_VMAXS_A32(MinorFUTiming): - description = 'HPI_VMAXS_A32' - mask, match = a32_opcode('1111_0010_0xxx_xxxx__xxxx_1111_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0] + description = "HPI_VMAXS_A32" + mask, match = a32_opcode("1111_0010_0xxx_xxxx__xxxx_1111_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0] + # VNEG integer class HPI_VNEGI_A32(MinorFUTiming): - description = 'HPI_VNEGI_A32' - mask, match = a32_opcode('1111_0011_1x11_xx01__xxxx_0x11_1xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VNEGI_A32" + mask, match = a32_opcode("1111_0011_1x11_xx01__xxxx_0x11_1xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VNEG float class HPI_VNEGF_A32(MinorFUTiming): - description = 'HPI_VNEGF_A32' - mask, match = a32_opcode('xxxx_1110_1x11_0001__xxxx_101x_01x0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0] + description = "HPI_VNEGF_A32" + mask, match = a32_opcode("xxxx_1110_1x11_0001__xxxx_101x_01x0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 0] + # VREV16 VREV32 VREV64 class HPI_VREVN_A32(MinorFUTiming): - description = 'HPI_VREVN_A32' - mask, match = a32_opcode('1111_0011_1x11_xx00__xxxx_000x_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VREVN_A32" + mask, match = a32_opcode("1111_0011_1x11_xx00__xxxx_000x_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VQNEG_A32(MinorFUTiming): - description = 'HPI_VQNEG_A32' - mask, match = a32_opcode('1111_0011_1x11_xx00__xxxx_0111_1xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "HPI_VQNEG_A32" + mask, match = a32_opcode("1111_0011_1x11_xx00__xxxx_0111_1xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0] + class HPI_VSWP_A32(MinorFUTiming): - description = 'HPI_VSWP_A32' - mask, match = a32_opcode('1111_0011_1x11_xx10__xxxx_0000_0xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VSWP_A32" + mask, match = a32_opcode("1111_0011_1x11_xx10__xxxx_0000_0xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + class HPI_VTRN_A32(MinorFUTiming): - description = 'HPI_VTRN_A32' - mask, match = a32_opcode('1111_0011_1x11_xx10__xxxx_0000_1xx0_xxxx') + description = "HPI_VTRN_A32" + mask, match = a32_opcode("1111_0011_1x11_xx10__xxxx_0000_1xx0_xxxx") # cpsr, z, z, z, cpact, fpexc, o0, d0, o1, d1, o2, d2, o3, d3 - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0] + # VQMOVN VQMOVUN class HPI_VQMOVN_A32(MinorFUTiming): - description = 'HPI_VQMOVN_A32' - mask, match = a32_opcode('1111_0011_1x11_xx10__xxxx_0010_xxx0_xxxx') + description = "HPI_VQMOVN_A32" + mask, match = a32_opcode("1111_0011_1x11_xx10__xxxx_0010_xxx0_xxxx") # cpsr, z, z, z, cpact, fpexc, o[0], o[1], o[2], o[3], fpscr - srcRegsRelativeLats = [0, 0, 0, 0, 0, 
0, 2, 2, 2, 2, 2, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 0] + # VUZP double word class HPI_VUZP_A32(MinorFUTiming): - description = 'HPI_VUZP_A32' - mask, match = a32_opcode('1111_0011_1x11_xx10__xxxx_0001_00x0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + description = "HPI_VUZP_A32" + mask, match = a32_opcode("1111_0011_1x11_xx10__xxxx_0001_00x0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0] + # VDIV.F32 class HPI_VDIV32_A32(MinorFUTiming): - description = 'HPI_VDIV32_A32' - mask, match = a32_opcode('xxxx_1110_1x00_xxxx__xxxx_1010_x0x0_xxxx') + description = "HPI_VDIV32_A32" + mask, match = a32_opcode("xxxx_1110_1x00_xxxx__xxxx_1010_x0x0_xxxx") # cpsr, z, z, z, cpact, fpexc, fpscr_exc, l, r extraCommitLat = 9 - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 20, 4, 4, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 20, 4, 4, 0] + # VDIV.F64 class HPI_VDIV64_A32(MinorFUTiming): - description = 'HPI_VDIV64_A32' - mask, match = a32_opcode('xxxx_1110_1x00_xxxx__xxxx_1011_x0x0_xxxx') + description = "HPI_VDIV64_A32" + mask, match = a32_opcode("xxxx_1110_1x00_xxxx__xxxx_1011_x0x0_xxxx") # cpsr, z, z, z, cpact, fpexc, fpscr_exc, l, r extraCommitLat = 18 - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 20, 4, 4, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 20, 4, 4, 0] + class HPI_VZIP_A32(MinorFUTiming): - description = 'HPI_VZIP_A32' - mask, match = a32_opcode('1111_0011_1x11_xx10__xxxx_0001_1xx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + description = "HPI_VZIP_A32" + mask, match = a32_opcode("1111_0011_1x11_xx10__xxxx_0001_1xx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 0] + # VPMAX integer class HPI_VPMAX_A32(MinorFUTiming): - description = 'HPI_VPMAX_A32' - mask, match = a32_opcode('1111_001x_0xxx_xxxx__xxxx_1010_xxxx_xxxx') + description = "HPI_VPMAX_A32" + mask, match = a32_opcode("1111_001x_0xxx_xxxx__xxxx_1010_xxxx_xxxx") # 
cpsr, z, z, z, cpact, fpexc, l0, r0, l1, r1, fpscr - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 0] + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 0] + # VPMAX float class HPI_VPMAXF_A32(MinorFUTiming): - description = 'HPI_VPMAXF_A32' - mask, match = a32_opcode('1111_0011_0xxx_xxxx__xxxx_1111_xxx0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0] + description = "HPI_VPMAXF_A32" + mask, match = a32_opcode("1111_0011_0xxx_xxxx__xxxx_1111_xxx0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0] + class HPI_VMOVN_A32(MinorFUTiming): - description = 'HPI_VMOVN_A32' - mask, match = a32_opcode('1111_0011_1x11_xx10__xxxx_0010_00x0_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0] + description = "HPI_VMOVN_A32" + mask, match = a32_opcode("1111_0011_1x11_xx10__xxxx_0010_00x0_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0] + class HPI_VMOVL_A32(MinorFUTiming): - description = 'HPI_VMOVL_A32' - mask, match = a32_opcode('1111_001x_1xxx_x000__xxxx_1010_00x1_xxxx') - srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0] + description = "HPI_VMOVL_A32" + mask, match = a32_opcode("1111_001x_1xxx_x000__xxxx_1010_00x1_xxxx") + srcRegsRelativeLats = [0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 0] + # VSQRT.F64 class HPI_VSQRT64_A32(MinorFUTiming): - description = 'HPI_VSQRT64_A32' - mask, match = a32_opcode('xxxx_1110_1x11_0001__xxxx_1011_11x0_xxxx') + description = "HPI_VSQRT64_A32" + mask, match = a32_opcode("xxxx_1110_1x11_0001__xxxx_1011_11x0_xxxx") extraCommitLat = 18 srcRegsRelativeLats = [] + # VSQRT.F32 class HPI_VSQRT32_A32(MinorFUTiming): - description = 'HPI_VSQRT32_A32' - mask, match = a32_opcode('xxxx_1110_1x11_0001__xxxx_1010_11x0_xxxx') + description = "HPI_VSQRT32_A32" + mask, match = a32_opcode("xxxx_1110_1x11_0001__xxxx_1010_11x0_xxxx") extraCommitLat = 9 srcRegsRelativeLats = [] + class HPI_FloatSimdFU(MinorFU): - opClasses = minorMakeOpClassSet([ - 'FloatAdd', 'FloatCmp', 'FloatCvt', 
'FloatMult', 'FloatDiv', - 'FloatSqrt', 'FloatMisc', 'FloatMultAcc', - 'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt', - 'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc', - 'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp', - 'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult', - 'SimdFloatMultAcc', 'SimdFloatSqrt']) + opClasses = minorMakeOpClassSet( + [ + "FloatAdd", + "FloatCmp", + "FloatCvt", + "FloatMult", + "FloatDiv", + "FloatSqrt", + "FloatMisc", + "FloatMultAcc", + "SimdAdd", + "SimdAddAcc", + "SimdAlu", + "SimdCmp", + "SimdCvt", + "SimdMisc", + "SimdMult", + "SimdMultAcc", + "SimdShift", + "SimdShiftAcc", + "SimdSqrt", + "SimdFloatAdd", + "SimdFloatAlu", + "SimdFloatCmp", + "SimdFloatCvt", + "SimdFloatDiv", + "SimdFloatMisc", + "SimdFloatMult", + "SimdFloatMultAcc", + "SimdFloatSqrt", + ] + ) timings = [ # VUZP and VZIP must be before VADDW/L - HPI_VUZP_A32(), HPI_VZIP_A32(), - HPI_VADD2H_A32(), HPI_VADDHN_A32(), - HPI_VADDL_A32(), HPI_VADDW_A32(), - HPI_VHADD_A32(), HPI_VPADAL_A32(), - HPI_VPADDH_A32(), HPI_VPADDS_A32(), - HPI_VPADDL_A32(), HPI_VRADDHN_A32(), - HPI_VRHADD_A32(), HPI_VQADD_A32(), - HPI_VANDQ_A32(), HPI_VBIC_A32(), - HPI_VBIF_ETC_A32(), HPI_VACGE_A32(), - HPI_VCEQ_A32(), HPI_VCEQI_A32(), - HPI_VCEQII_A32(), HPI_VTST_A32(), - HPI_VCLZ_A32(), HPI_VCNT_A32(), - HPI_VEXT_A32(), HPI_VMAXI_A32(), - HPI_VMAXS_A32(), HPI_VNEGI_A32(), - HPI_VNEGF_A32(), HPI_VREVN_A32(), - HPI_VQNEG_A32(), HPI_VSWP_A32(), - HPI_VTRN_A32(), HPI_VPMAX_A32(), - HPI_VPMAXF_A32(), HPI_VMOVN_A32(), + HPI_VUZP_A32(), + HPI_VZIP_A32(), + HPI_VADD2H_A32(), + HPI_VADDHN_A32(), + HPI_VADDL_A32(), + HPI_VADDW_A32(), + HPI_VHADD_A32(), + HPI_VPADAL_A32(), + HPI_VPADDH_A32(), + HPI_VPADDS_A32(), + HPI_VPADDL_A32(), + HPI_VRADDHN_A32(), + HPI_VRHADD_A32(), + HPI_VQADD_A32(), + HPI_VANDQ_A32(), + HPI_VBIC_A32(), + HPI_VBIF_ETC_A32(), + HPI_VACGE_A32(), + HPI_VCEQ_A32(), + HPI_VCEQI_A32(), + HPI_VCEQII_A32(), + HPI_VTST_A32(), + 
HPI_VCLZ_A32(), + HPI_VCNT_A32(), + HPI_VEXT_A32(), + HPI_VMAXI_A32(), + HPI_VMAXS_A32(), + HPI_VNEGI_A32(), + HPI_VNEGF_A32(), + HPI_VREVN_A32(), + HPI_VQNEG_A32(), + HPI_VSWP_A32(), + HPI_VTRN_A32(), + HPI_VPMAX_A32(), + HPI_VPMAXF_A32(), + HPI_VMOVN_A32(), HPI_VMRS_A1(), HPI_VMOV_immediate_A1(), HPI_VMOV_register_A2(), - HPI_VQMOVN_A32(), HPI_VMOVL_A32(), - HPI_VDIV32_A32(), HPI_VDIV64_A32(), - HPI_VSQRT32_A32(), HPI_VSQRT64_A32(), + HPI_VQMOVN_A32(), + HPI_VMOVL_A32(), + HPI_VDIV32_A32(), + HPI_VDIV64_A32(), + HPI_VSQRT32_A32(), + HPI_VSQRT64_A32(), HPI_VMULI_A32(), # Add before here HPI_FMADD_A64(), @@ -1153,14 +1489,17 @@ class HPI_FloatSimdFU(MinorFU): HPI_FMOV_A64(), HPI_ADD_SUB_vector_scalar_A64(), HPI_ADD_SUB_vector_vector_A64(), - HPI_FDIV_scalar_32_A64(), HPI_FDIV_scalar_64_A64(), + HPI_FDIV_scalar_32_A64(), + HPI_FDIV_scalar_64_A64(), HPI_DefaultA64Vfp(), - HPI_DefaultVfp()] + HPI_DefaultVfp(), + ] opLat = 6 + class HPI_IntFU(MinorFU): - opClasses = minorMakeOpClassSet(['IntAlu']) + opClasses = minorMakeOpClassSet(["IntAlu"]) # IMPORTANT! 
Keep the order below, add new entries *at the head* timings = [ HPI_SSAT_USAT_no_shift_A1(), @@ -1179,17 +1518,14 @@ class HPI_IntFU(MinorFU): HPI_SASX_SHASX_UASX_UHASX_A1(), HPI_SHSAX_SSAX_UHSAX_USAX_A1(), HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1(), - # Must be after HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1 HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1(), - HPI_SXTAB_T1(), HPI_SXTAB16_T1(), HPI_SXTAH_T1(), HPI_SXTB_T2(), HPI_SXTB16_T1(), HPI_SXTH_T2(), - HPI_PKH_A1(), HPI_PKH_T1(), HPI_SBFX_UBFX_A1(), @@ -1200,24 +1536,22 @@ class HPI_IntFU(MinorFU): HPI_USAD8_USADA8_A1(), HPI_BFI_A1(), HPI_BFI_T1(), - HPI_CMN_register_A1(), HPI_CMN_immediate_A1(), HPI_CMP_register_A1(), HPI_CMP_immediate_A1(), - HPI_DataProcessingNoShift(), HPI_DataProcessingMovShiftr(), HPI_DataProcessingMayShift(), - HPI_Cxxx_A64(), - HPI_DefaultA64Int(), - HPI_DefaultInt()] + HPI_DefaultInt(), + ] opLat = 3 + class HPI_Int2FU(MinorFU): - opClasses = minorMakeOpClassSet(['IntAlu']) + opClasses = minorMakeOpClassSet(["IntAlu"]) # IMPORTANT! 
Keep the order below, add new entries *at the head* timings = [ HPI_SSAT_USAT_no_shift_A1(), @@ -1236,17 +1570,14 @@ class HPI_Int2FU(MinorFU): HPI_SASX_SHASX_UASX_UHASX_A1(), HPI_SHSAX_SSAX_UHSAX_USAX_A1(), HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1(), - # Must be after HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1 HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1(), - HPI_SXTAB_T1(), HPI_SXTAB16_T1(), HPI_SXTAH_T1(), HPI_SXTB_T2(), HPI_SXTB16_T1(), HPI_SXTH_T2(), - HPI_PKH_A1(), HPI_PKH_T1(), HPI_SBFX_UBFX_A1(), @@ -1257,16 +1588,13 @@ class HPI_Int2FU(MinorFU): HPI_USAD8_USADA8_A1_Suppress(), HPI_BFI_A1(), HPI_BFI_T1(), - - HPI_CMN_register_A1(), # Need to check for shift + HPI_CMN_register_A1(), # Need to check for shift HPI_CMN_immediate_A1(), - HPI_CMP_register_A1(), # Need to check for shift + HPI_CMP_register_A1(), # Need to check for shift HPI_CMP_immediate_A1(), - HPI_DataProcessingNoShift(), HPI_DataProcessingAllowShifti(), # HPI_DataProcessingAllowMovShiftr(), - # Data processing ops that match SuppressShift but are *not* # to be suppressed here HPI_CLZ_A1(), @@ -1275,63 +1603,80 @@ class HPI_Int2FU(MinorFU): # Can you dual issue a branch? 
# HPI_DataProcessingSuppressBranch(), HPI_Cxxx_A64(), - HPI_DefaultA64Int(), - HPI_DefaultInt()] + HPI_DefaultInt(), + ] opLat = 3 + class HPI_IntMulFU(MinorFU): - opClasses = minorMakeOpClassSet(['IntMult']) + opClasses = minorMakeOpClassSet(["IntMult"]) timings = [ - HPI_MLA_A1(), HPI_MLA_T1(), - HPI_MLS_A1(), HPI_MLS_T1(), - HPI_SMLABB_A1(), HPI_SMLABB_T1(), - HPI_SMLAWB_A1(), HPI_SMLAWB_T1(), - HPI_SMLAD_A1(), HPI_SMLAD_T1(), - HPI_SMMUL_A1(), HPI_SMMUL_T1(), + HPI_MLA_A1(), + HPI_MLA_T1(), + HPI_MLS_A1(), + HPI_MLS_T1(), + HPI_SMLABB_A1(), + HPI_SMLABB_T1(), + HPI_SMLAWB_A1(), + HPI_SMLAWB_T1(), + HPI_SMLAD_A1(), + HPI_SMLAD_T1(), + HPI_SMMUL_A1(), + HPI_SMMUL_T1(), # SMMUL_A1 must be before SMMLA_A1 - HPI_SMMLA_A1(), HPI_SMMLA_T1(), - HPI_SMMLS_A1(), HPI_SMMLS_T1(), - HPI_UMAAL_A1(), HPI_UMAAL_T1(), - + HPI_SMMLA_A1(), + HPI_SMMLA_T1(), + HPI_SMMLS_A1(), + HPI_SMMLS_T1(), + HPI_UMAAL_A1(), + HPI_UMAAL_T1(), HPI_MADD_A64(), HPI_DefaultA64Mul(), - HPI_DefaultMul()] + HPI_DefaultMul(), + ] opLat = 3 - cantForwardFromFUIndices = [0, 1, 5] # Int1, Int2, Mem + cantForwardFromFUIndices = [0, 1, 5] # Int1, Int2, Mem + class HPI_IntDivFU(MinorFU): - opClasses = minorMakeOpClassSet(['IntDiv']) - timings = [HPI_SDIV_A1(), HPI_UDIV_A1(), - HPI_SDIV_A64()] + opClasses = minorMakeOpClassSet(["IntDiv"]) + timings = [HPI_SDIV_A1(), HPI_UDIV_A1(), HPI_SDIV_A64()] issueLat = 3 opLat = 3 + class HPI_MemFU(MinorFU): - opClasses = minorMakeOpClassSet(['MemRead', 'MemWrite', 'FloatMemRead', - 'FloatMemWrite']) + opClasses = minorMakeOpClassSet( + ["MemRead", "MemWrite", "FloatMemRead", "FloatMemWrite"] + ) timings = [HPI_DefaultMem(), HPI_DefaultMem64()] opLat = 1 - cantForwardFromFUIndices = [5] # Mem (this FU) + cantForwardFromFUIndices = [5] # Mem (this FU) + class HPI_MiscFU(MinorFU): - opClasses = minorMakeOpClassSet(['IprAccess', 'InstPrefetch']) + opClasses = minorMakeOpClassSet(["IprAccess", "InstPrefetch"]) opLat = 1 + class HPI_FUPool(MinorFUPool): - funcUnits = 
[HPI_IntFU(), # 0 - HPI_Int2FU(), # 1 - HPI_IntMulFU(), # 2 - HPI_IntDivFU(), # 3 - HPI_FloatSimdFU(), # 4 - HPI_MemFU(), # 5 - HPI_MiscFU() # 6 - ] + funcUnits = [ + HPI_IntFU(), # 0 + HPI_Int2FU(), # 1 + HPI_IntMulFU(), # 2 + HPI_IntDivFU(), # 3 + HPI_FloatSimdFU(), # 4 + HPI_MemFU(), # 5 + HPI_MiscFU(), # 6 + ] + class HPI_MMU(ArmMMU): itb = ArmTLB(entry_type="instruction", size=256) dtb = ArmTLB(entry_type="data", size=256) + class HPI_BP(TournamentBP): localPredictorSize = 64 localCtrBits = 2 @@ -1345,28 +1690,29 @@ class HPI_BP(TournamentBP): RASSize = 8 instShiftAmt = 2 + class HPI_ICache(Cache): data_latency = 1 tag_latency = 1 response_latency = 1 mshrs = 2 tgts_per_mshr = 8 - size = '32kB' + size = "32kB" assoc = 2 # No prefetcher, this is handled by the core + class HPI_DCache(Cache): data_latency = 1 tag_latency = 1 response_latency = 1 mshrs = 4 tgts_per_mshr = 8 - size = '32kB' + size = "32kB" assoc = 4 write_buffers = 4 - prefetcher = StridePrefetcher( - queue_size=4, - degree=4) + prefetcher = StridePrefetcher(queue_size=4, degree=4) + class HPI_L2(Cache): data_latency = 13 @@ -1374,11 +1720,12 @@ class HPI_L2(Cache): response_latency = 5 mshrs = 4 tgts_per_mshr = 8 - size = '1024kB' + size = "1024kB" assoc = 16 write_buffers = 16 # prefetcher FIXME + class HPI(ArmMinorCPU): # Inherit the doc string from the module to avoid repeating it # here. 
@@ -1430,9 +1777,13 @@ class HPI(ArmMinorCPU): mmu = HPI_MMU() + __all__ = [ "HPI_BP", - "HPI_ITB", "HPI_DTB", - "HPI_ICache", "HPI_DCache", "HPI_L2", + "HPI_ITB", + "HPI_DTB", + "HPI_ICache", + "HPI_DCache", + "HPI_L2", "HPI", ] diff --git a/configs/common/cores/arm/O3_ARM_v7a.py b/configs/common/cores/arm/O3_ARM_v7a.py index d032a1aa88..77dc4e42a4 100644 --- a/configs/common/cores/arm/O3_ARM_v7a.py +++ b/configs/common/cores/arm/O3_ARM_v7a.py @@ -28,65 +28,82 @@ from m5.objects import * # Simple ALU Instructions have a latency of 1 class O3_ARM_v7a_Simple_Int(FUDesc): - opList = [ OpDesc(opClass='IntAlu', opLat=1) ] + opList = [OpDesc(opClass="IntAlu", opLat=1)] count = 2 + # Complex ALU instructions have a variable latencies class O3_ARM_v7a_Complex_Int(FUDesc): - opList = [ OpDesc(opClass='IntMult', opLat=3, pipelined=True), - OpDesc(opClass='IntDiv', opLat=12, pipelined=False), - OpDesc(opClass='IprAccess', opLat=3, pipelined=True) ] + opList = [ + OpDesc(opClass="IntMult", opLat=3, pipelined=True), + OpDesc(opClass="IntDiv", opLat=12, pipelined=False), + OpDesc(opClass="IprAccess", opLat=3, pipelined=True), + ] count = 1 # Floating point and SIMD instructions class O3_ARM_v7a_FP(FUDesc): - opList = [ OpDesc(opClass='SimdAdd', opLat=4), - OpDesc(opClass='SimdAddAcc', opLat=4), - OpDesc(opClass='SimdAlu', opLat=4), - OpDesc(opClass='SimdCmp', opLat=4), - OpDesc(opClass='SimdCvt', opLat=3), - OpDesc(opClass='SimdMisc', opLat=3), - OpDesc(opClass='SimdMult',opLat=5), - OpDesc(opClass='SimdMultAcc',opLat=5), - OpDesc(opClass='SimdShift',opLat=3), - OpDesc(opClass='SimdShiftAcc', opLat=3), - OpDesc(opClass='SimdSqrt', opLat=9), - OpDesc(opClass='SimdFloatAdd',opLat=5), - OpDesc(opClass='SimdFloatAlu',opLat=5), - OpDesc(opClass='SimdFloatCmp', opLat=3), - OpDesc(opClass='SimdFloatCvt', opLat=3), - OpDesc(opClass='SimdFloatDiv', opLat=3), - OpDesc(opClass='SimdFloatMisc', opLat=3), - OpDesc(opClass='SimdFloatMult', opLat=3), - 
OpDesc(opClass='SimdFloatMultAcc',opLat=5), - OpDesc(opClass='SimdFloatSqrt', opLat=9), - OpDesc(opClass='FloatAdd', opLat=5), - OpDesc(opClass='FloatCmp', opLat=5), - OpDesc(opClass='FloatCvt', opLat=5), - OpDesc(opClass='FloatDiv', opLat=9, pipelined=False), - OpDesc(opClass='FloatSqrt', opLat=33, pipelined=False), - OpDesc(opClass='FloatMult', opLat=4), - OpDesc(opClass='FloatMultAcc', opLat=5), - OpDesc(opClass='FloatMisc', opLat=3) ] + opList = [ + OpDesc(opClass="SimdAdd", opLat=4), + OpDesc(opClass="SimdAddAcc", opLat=4), + OpDesc(opClass="SimdAlu", opLat=4), + OpDesc(opClass="SimdCmp", opLat=4), + OpDesc(opClass="SimdCvt", opLat=3), + OpDesc(opClass="SimdMisc", opLat=3), + OpDesc(opClass="SimdMult", opLat=5), + OpDesc(opClass="SimdMultAcc", opLat=5), + OpDesc(opClass="SimdShift", opLat=3), + OpDesc(opClass="SimdShiftAcc", opLat=3), + OpDesc(opClass="SimdSqrt", opLat=9), + OpDesc(opClass="SimdFloatAdd", opLat=5), + OpDesc(opClass="SimdFloatAlu", opLat=5), + OpDesc(opClass="SimdFloatCmp", opLat=3), + OpDesc(opClass="SimdFloatCvt", opLat=3), + OpDesc(opClass="SimdFloatDiv", opLat=3), + OpDesc(opClass="SimdFloatMisc", opLat=3), + OpDesc(opClass="SimdFloatMult", opLat=3), + OpDesc(opClass="SimdFloatMultAcc", opLat=5), + OpDesc(opClass="SimdFloatSqrt", opLat=9), + OpDesc(opClass="FloatAdd", opLat=5), + OpDesc(opClass="FloatCmp", opLat=5), + OpDesc(opClass="FloatCvt", opLat=5), + OpDesc(opClass="FloatDiv", opLat=9, pipelined=False), + OpDesc(opClass="FloatSqrt", opLat=33, pipelined=False), + OpDesc(opClass="FloatMult", opLat=4), + OpDesc(opClass="FloatMultAcc", opLat=5), + OpDesc(opClass="FloatMisc", opLat=3), + ] count = 2 # Load/Store Units class O3_ARM_v7a_Load(FUDesc): - opList = [ OpDesc(opClass='MemRead',opLat=2), - OpDesc(opClass='FloatMemRead',opLat=2) ] + opList = [ + OpDesc(opClass="MemRead", opLat=2), + OpDesc(opClass="FloatMemRead", opLat=2), + ] count = 1 + class O3_ARM_v7a_Store(FUDesc): - opList = [ OpDesc(opClass='MemWrite',opLat=2), - 
OpDesc(opClass='FloatMemWrite',opLat=2) ] + opList = [ + OpDesc(opClass="MemWrite", opLat=2), + OpDesc(opClass="FloatMemWrite", opLat=2), + ] count = 1 + # Functional Units for this CPU class O3_ARM_v7a_FUP(FUPool): - FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(), - O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()] + FUList = [ + O3_ARM_v7a_Simple_Int(), + O3_ARM_v7a_Complex_Int(), + O3_ARM_v7a_Load(), + O3_ARM_v7a_Store(), + O3_ARM_v7a_FP(), + ] + # Bi-Mode Branch Predictor class O3_ARM_v7a_BP(BiModeBP): @@ -99,6 +116,7 @@ class O3_ARM_v7a_BP(BiModeBP): RASSize = 16 instShiftAmt = 2 + class O3_ARM_v7a_3(ArmO3CPU): LQEntries = 16 SQEntries = 16 @@ -143,6 +161,7 @@ class O3_ARM_v7a_3(ArmO3CPU): switched_out = False branchPred = O3_ARM_v7a_BP() + # Instruction Cache class O3_ARM_v7a_ICache(Cache): tag_latency = 1 @@ -150,12 +169,13 @@ class O3_ARM_v7a_ICache(Cache): response_latency = 1 mshrs = 2 tgts_per_mshr = 8 - size = '32kB' + size = "32kB" assoc = 2 is_read_only = True # Writeback clean lines as well writeback_clean = True + # Data Cache class O3_ARM_v7a_DCache(Cache): tag_latency = 2 @@ -163,12 +183,13 @@ class O3_ARM_v7a_DCache(Cache): response_latency = 2 mshrs = 6 tgts_per_mshr = 8 - size = '32kB' + size = "32kB" assoc = 2 write_buffers = 16 # Consider the L2 a victim cache also for clean lines writeback_clean = True + # L2 Cache class O3_ARM_v7aL2(Cache): tag_latency = 12 @@ -176,12 +197,12 @@ class O3_ARM_v7aL2(Cache): response_latency = 12 mshrs = 16 tgts_per_mshr = 8 - size = '1MB' + size = "1MB" assoc = 16 write_buffers = 8 prefetch_on_access = True - clusivity = 'mostly_excl' + clusivity = "mostly_excl" # Simple stride prefetcher - prefetcher = StridePrefetcher(degree=8, latency = 1) + prefetcher = StridePrefetcher(degree=8, latency=1) tags = BaseSetAssoc() replacement_policy = RandomRP() diff --git a/configs/common/cores/arm/__init__.py b/configs/common/cores/arm/__init__.py index dbc3b3e4da..135b75f802 100644 --- 
a/configs/common/cores/arm/__init__.py +++ b/configs/common/cores/arm/__init__.py @@ -36,9 +36,7 @@ from pkgutil import iter_modules from importlib import import_module -_cpu_modules = [ - name for _, name, ispkg in iter_modules(__path__) if not ispkg -] +_cpu_modules = [name for _, name, ispkg in iter_modules(__path__) if not ispkg] for c in _cpu_modules: try: diff --git a/configs/common/cores/arm/ex5_LITTLE.py b/configs/common/cores/arm/ex5_LITTLE.py index 57f6a6b812..6974837dc5 100644 --- a/configs/common/cores/arm/ex5_LITTLE.py +++ b/configs/common/cores/arm/ex5_LITTLE.py @@ -27,70 +27,89 @@ from m5.objects import * -#----------------------------------------------------------------------- +# ----------------------------------------------------------------------- # ex5 LITTLE core (based on the ARM Cortex-A7) -#----------------------------------------------------------------------- +# ----------------------------------------------------------------------- # Simple ALU Instructions have a latency of 3 class ex5_LITTLE_Simple_Int(MinorDefaultIntFU): - opList = [ OpDesc(opClass='IntAlu', opLat=4) ] + opList = [OpDesc(opClass="IntAlu", opLat=4)] + # Complex ALU instructions have a variable latencies class ex5_LITTLE_Complex_IntMul(MinorDefaultIntMulFU): - opList = [ OpDesc(opClass='IntMult', opLat=7) ] + opList = [OpDesc(opClass="IntMult", opLat=7)] + class ex5_LITTLE_Complex_IntDiv(MinorDefaultIntDivFU): - opList = [ OpDesc(opClass='IntDiv', opLat=9) ] + opList = [OpDesc(opClass="IntDiv", opLat=9)] + # Floating point and SIMD instructions class ex5_LITTLE_FP(MinorDefaultFloatSimdFU): - opList = [ OpDesc(opClass='SimdAdd', opLat=6), - OpDesc(opClass='SimdAddAcc', opLat=4), - OpDesc(opClass='SimdAlu', opLat=4), - OpDesc(opClass='SimdCmp', opLat=1), - OpDesc(opClass='SimdCvt', opLat=3), - OpDesc(opClass='SimdMisc', opLat=3), - OpDesc(opClass='SimdMult',opLat=4), - OpDesc(opClass='SimdMultAcc',opLat=5), - OpDesc(opClass='SimdShift',opLat=3), - 
OpDesc(opClass='SimdShiftAcc', opLat=3), - OpDesc(opClass='SimdSqrt', opLat=9), - OpDesc(opClass='SimdFloatAdd',opLat=8), - OpDesc(opClass='SimdFloatAlu',opLat=6), - OpDesc(opClass='SimdFloatCmp', opLat=6), - OpDesc(opClass='SimdFloatCvt', opLat=6), - OpDesc(opClass='SimdFloatDiv', opLat=20, pipelined=False), - OpDesc(opClass='SimdFloatMisc', opLat=6), - OpDesc(opClass='SimdFloatMult', opLat=15), - OpDesc(opClass='SimdFloatMultAcc',opLat=6), - OpDesc(opClass='SimdFloatSqrt', opLat=17), - OpDesc(opClass='FloatAdd', opLat=8), - OpDesc(opClass='FloatCmp', opLat=6), - OpDesc(opClass='FloatCvt', opLat=6), - OpDesc(opClass='FloatDiv', opLat=15, pipelined=False), - OpDesc(opClass='FloatSqrt', opLat=33), - OpDesc(opClass='FloatMult', opLat=6) ] + opList = [ + OpDesc(opClass="SimdAdd", opLat=6), + OpDesc(opClass="SimdAddAcc", opLat=4), + OpDesc(opClass="SimdAlu", opLat=4), + OpDesc(opClass="SimdCmp", opLat=1), + OpDesc(opClass="SimdCvt", opLat=3), + OpDesc(opClass="SimdMisc", opLat=3), + OpDesc(opClass="SimdMult", opLat=4), + OpDesc(opClass="SimdMultAcc", opLat=5), + OpDesc(opClass="SimdShift", opLat=3), + OpDesc(opClass="SimdShiftAcc", opLat=3), + OpDesc(opClass="SimdSqrt", opLat=9), + OpDesc(opClass="SimdFloatAdd", opLat=8), + OpDesc(opClass="SimdFloatAlu", opLat=6), + OpDesc(opClass="SimdFloatCmp", opLat=6), + OpDesc(opClass="SimdFloatCvt", opLat=6), + OpDesc(opClass="SimdFloatDiv", opLat=20, pipelined=False), + OpDesc(opClass="SimdFloatMisc", opLat=6), + OpDesc(opClass="SimdFloatMult", opLat=15), + OpDesc(opClass="SimdFloatMultAcc", opLat=6), + OpDesc(opClass="SimdFloatSqrt", opLat=17), + OpDesc(opClass="FloatAdd", opLat=8), + OpDesc(opClass="FloatCmp", opLat=6), + OpDesc(opClass="FloatCvt", opLat=6), + OpDesc(opClass="FloatDiv", opLat=15, pipelined=False), + OpDesc(opClass="FloatSqrt", opLat=33), + OpDesc(opClass="FloatMult", opLat=6), + ] + # Load/Store Units class ex5_LITTLE_MemFU(MinorDefaultMemFU): - opList = [ OpDesc(opClass='MemRead',opLat=1), - 
OpDesc(opClass='MemWrite',opLat=1) ] + opList = [ + OpDesc(opClass="MemRead", opLat=1), + OpDesc(opClass="MemWrite", opLat=1), + ] + # Misc Unit class ex5_LITTLE_MiscFU(MinorDefaultMiscFU): - opList = [ OpDesc(opClass='IprAccess',opLat=1), - OpDesc(opClass='InstPrefetch',opLat=1) ] + opList = [ + OpDesc(opClass="IprAccess", opLat=1), + OpDesc(opClass="InstPrefetch", opLat=1), + ] + # Functional Units for this CPU class ex5_LITTLE_FUP(MinorFUPool): - funcUnits = [ex5_LITTLE_Simple_Int(), ex5_LITTLE_Simple_Int(), - ex5_LITTLE_Complex_IntMul(), ex5_LITTLE_Complex_IntDiv(), - ex5_LITTLE_FP(), ex5_LITTLE_MemFU(), - ex5_LITTLE_MiscFU()] + funcUnits = [ + ex5_LITTLE_Simple_Int(), + ex5_LITTLE_Simple_Int(), + ex5_LITTLE_Complex_IntMul(), + ex5_LITTLE_Complex_IntDiv(), + ex5_LITTLE_FP(), + ex5_LITTLE_MemFU(), + ex5_LITTLE_MiscFU(), + ] + class ex5_LITTLE(ArmMinorCPU): executeFuncUnits = ex5_LITTLE_FUP() + class L1Cache(Cache): tag_latency = 2 data_latency = 2 @@ -99,19 +118,22 @@ class L1Cache(Cache): # Consider the L2 a victim cache also for clean lines writeback_clean = True + class L1I(L1Cache): mshrs = 2 - size = '32kB' + size = "32kB" assoc = 2 is_read_only = True tgts_per_mshr = 20 + class L1D(L1Cache): mshrs = 4 - size = '32kB' + size = "32kB" assoc = 4 write_buffers = 4 + # L2 Cache class L2(Cache): tag_latency = 9 @@ -119,12 +141,12 @@ class L2(Cache): response_latency = 9 mshrs = 8 tgts_per_mshr = 12 - size = '512kB' + size = "512kB" assoc = 8 write_buffers = 16 prefetch_on_access = True - clusivity = 'mostly_excl' + clusivity = "mostly_excl" # Simple stride prefetcher - prefetcher = StridePrefetcher(degree=1, latency = 1) + prefetcher = StridePrefetcher(degree=1, latency=1) tags = BaseSetAssoc() replacement_policy = RandomRP() diff --git a/configs/common/cores/arm/ex5_big.py b/configs/common/cores/arm/ex5_big.py index de7a45063a..70af6b8414 100644 --- a/configs/common/cores/arm/ex5_big.py +++ b/configs/common/cores/arm/ex5_big.py @@ -27,66 +27,80 @@ from 
m5.objects import * -#----------------------------------------------------------------------- +# ----------------------------------------------------------------------- # ex5 big core (based on the ARM Cortex-A15) -#----------------------------------------------------------------------- +# ----------------------------------------------------------------------- # Simple ALU Instructions have a latency of 1 class ex5_big_Simple_Int(FUDesc): - opList = [ OpDesc(opClass='IntAlu', opLat=1) ] + opList = [OpDesc(opClass="IntAlu", opLat=1)] count = 2 + # Complex ALU instructions have a variable latencies class ex5_big_Complex_Int(FUDesc): - opList = [ OpDesc(opClass='IntMult', opLat=4, pipelined=True), - OpDesc(opClass='IntDiv', opLat=11, pipelined=False), - OpDesc(opClass='IprAccess', opLat=3, pipelined=True) ] + opList = [ + OpDesc(opClass="IntMult", opLat=4, pipelined=True), + OpDesc(opClass="IntDiv", opLat=11, pipelined=False), + OpDesc(opClass="IprAccess", opLat=3, pipelined=True), + ] count = 1 + # Floating point and SIMD instructions class ex5_big_FP(FUDesc): - opList = [ OpDesc(opClass='SimdAdd', opLat=3), - OpDesc(opClass='SimdAddAcc', opLat=4), - OpDesc(opClass='SimdAlu', opLat=4), - OpDesc(opClass='SimdCmp', opLat=4), - OpDesc(opClass='SimdCvt', opLat=3), - OpDesc(opClass='SimdMisc', opLat=3), - OpDesc(opClass='SimdMult',opLat=6), - OpDesc(opClass='SimdMultAcc',opLat=5), - OpDesc(opClass='SimdShift',opLat=3), - OpDesc(opClass='SimdShiftAcc', opLat=3), - OpDesc(opClass='SimdSqrt', opLat=9), - OpDesc(opClass='SimdFloatAdd',opLat=6), - OpDesc(opClass='SimdFloatAlu',opLat=5), - OpDesc(opClass='SimdFloatCmp', opLat=3), - OpDesc(opClass='SimdFloatCvt', opLat=3), - OpDesc(opClass='SimdFloatDiv', opLat=21), - OpDesc(opClass='SimdFloatMisc', opLat=3), - OpDesc(opClass='SimdFloatMult', opLat=6), - OpDesc(opClass='SimdFloatMultAcc',opLat=1), - OpDesc(opClass='SimdFloatSqrt', opLat=9), - OpDesc(opClass='FloatAdd', opLat=6), - OpDesc(opClass='FloatCmp', opLat=5), - 
OpDesc(opClass='FloatCvt', opLat=5), - OpDesc(opClass='FloatDiv', opLat=12, pipelined=False), - OpDesc(opClass='FloatSqrt', opLat=33, pipelined=False), - OpDesc(opClass='FloatMult', opLat=8) ] + opList = [ + OpDesc(opClass="SimdAdd", opLat=3), + OpDesc(opClass="SimdAddAcc", opLat=4), + OpDesc(opClass="SimdAlu", opLat=4), + OpDesc(opClass="SimdCmp", opLat=4), + OpDesc(opClass="SimdCvt", opLat=3), + OpDesc(opClass="SimdMisc", opLat=3), + OpDesc(opClass="SimdMult", opLat=6), + OpDesc(opClass="SimdMultAcc", opLat=5), + OpDesc(opClass="SimdShift", opLat=3), + OpDesc(opClass="SimdShiftAcc", opLat=3), + OpDesc(opClass="SimdSqrt", opLat=9), + OpDesc(opClass="SimdFloatAdd", opLat=6), + OpDesc(opClass="SimdFloatAlu", opLat=5), + OpDesc(opClass="SimdFloatCmp", opLat=3), + OpDesc(opClass="SimdFloatCvt", opLat=3), + OpDesc(opClass="SimdFloatDiv", opLat=21), + OpDesc(opClass="SimdFloatMisc", opLat=3), + OpDesc(opClass="SimdFloatMult", opLat=6), + OpDesc(opClass="SimdFloatMultAcc", opLat=1), + OpDesc(opClass="SimdFloatSqrt", opLat=9), + OpDesc(opClass="FloatAdd", opLat=6), + OpDesc(opClass="FloatCmp", opLat=5), + OpDesc(opClass="FloatCvt", opLat=5), + OpDesc(opClass="FloatDiv", opLat=12, pipelined=False), + OpDesc(opClass="FloatSqrt", opLat=33, pipelined=False), + OpDesc(opClass="FloatMult", opLat=8), + ] count = 2 # Load/Store Units class ex5_big_Load(FUDesc): - opList = [ OpDesc(opClass='MemRead',opLat=2) ] + opList = [OpDesc(opClass="MemRead", opLat=2)] count = 1 + class ex5_big_Store(FUDesc): - opList = [OpDesc(opClass='MemWrite',opLat=2) ] + opList = [OpDesc(opClass="MemWrite", opLat=2)] count = 1 + # Functional Units for this CPU class ex5_big_FUP(FUPool): - FUList = [ex5_big_Simple_Int(), ex5_big_Complex_Int(), - ex5_big_Load(), ex5_big_Store(), ex5_big_FP()] + FUList = [ + ex5_big_Simple_Int(), + ex5_big_Complex_Int(), + ex5_big_Load(), + ex5_big_Store(), + ex5_big_FP(), + ] + # Bi-Mode Branch Predictor class ex5_big_BP(BiModeBP): @@ -99,6 +113,7 @@ class 
ex5_big_BP(BiModeBP): RASSize = 48 instShiftAmt = 2 + class ex5_big(ArmO3CPU): LQEntries = 16 SQEntries = 16 @@ -142,6 +157,7 @@ class ex5_big(ArmO3CPU): switched_out = False branchPred = ex5_big_BP() + class L1Cache(Cache): tag_latency = 2 data_latency = 2 @@ -150,20 +166,23 @@ class L1Cache(Cache): # Consider the L2 a victim cache also for clean lines writeback_clean = True + # Instruction Cache class L1I(L1Cache): mshrs = 2 - size = '32kB' + size = "32kB" assoc = 2 is_read_only = True + # Data Cache class L1D(L1Cache): mshrs = 6 - size = '32kB' + size = "32kB" assoc = 2 write_buffers = 16 + # L2 Cache class L2(Cache): tag_latency = 15 @@ -171,12 +190,12 @@ class L2(Cache): response_latency = 15 mshrs = 16 tgts_per_mshr = 8 - size = '2MB' + size = "2MB" assoc = 16 write_buffers = 8 prefetch_on_access = True - clusivity = 'mostly_excl' + clusivity = "mostly_excl" # Simple stride prefetcher - prefetcher = StridePrefetcher(degree=8, latency = 1) + prefetcher = StridePrefetcher(degree=8, latency=1) tags = BaseSetAssoc() replacement_policy = RandomRP() diff --git a/configs/common/cpu2000.py b/configs/common/cpu2000.py index 266bba0f70..3b1b390618 100644 --- a/configs/common/cpu2000.py +++ b/configs/common/cpu2000.py @@ -29,7 +29,8 @@ import sys from os.path import basename, exists, join as joinpath, normpath from os.path import isdir, isfile, islink -spec_dist = os.environ.get('M5_CPU2000', '/dist/m5/cpu2000') +spec_dist = os.environ.get("M5_CPU2000", "/dist/m5/cpu2000") + def copyfiles(srcdir, dstdir): from filecmp import cmp as filecmp @@ -45,8 +46,8 @@ def copyfiles(srcdir, dstdir): root = normpath(root) prefix = os.path.commonprefix([root, srcdir]) - root = root[len(prefix):] - if root.startswith('/'): + root = root[len(prefix) :] + if root.startswith("/"): root = root[1:] for entry in dirs: @@ -62,68 +63,70 @@ def copyfiles(srcdir, dstdir): # some of the spec benchmarks expect to be run from one directory up. 
# just create some symlinks that solve the problem - inlink = joinpath(dstdir, 'input') - outlink = joinpath(dstdir, 'output') + inlink = joinpath(dstdir, "input") + outlink = joinpath(dstdir, "output") if not exists(inlink): - os.symlink('.', inlink) + os.symlink(".", inlink) if not exists(outlink): - os.symlink('.', outlink) + os.symlink(".", outlink) + class Benchmark(object): def __init__(self, isa, os, input_set): - if not hasattr(self.__class__, 'name'): + if not hasattr(self.__class__, "name"): self.name = self.__class__.__name__ - if not hasattr(self.__class__, 'binary'): + if not hasattr(self.__class__, "binary"): self.binary = self.name - if not hasattr(self.__class__, 'args'): + if not hasattr(self.__class__, "args"): self.args = [] - if not hasattr(self.__class__, 'output'): - self.output = '%s.out' % self.name + if not hasattr(self.__class__, "output"): + self.output = "%s.out" % self.name - if not hasattr(self.__class__, 'simpoint'): + if not hasattr(self.__class__, "simpoint"): self.simpoint = None try: func = getattr(self.__class__, input_set) except AttributeError: raise AttributeError( - 'The benchmark %s does not have the %s input set' % \ - (self.name, input_set)) + "The benchmark %s does not have the %s input set" + % (self.name, input_set) + ) - executable = joinpath(spec_dist, 'binaries', isa, os, self.binary) + executable = joinpath(spec_dist, "binaries", isa, os, self.binary) if not isfile(executable): - raise AttributeError('%s not found' % executable) + raise AttributeError("%s not found" % executable) self.executable = executable # root of tree for input & output data files - data_dir = joinpath(spec_dist, 'data', self.name) + data_dir = joinpath(spec_dist, "data", self.name) # optional subtree with files shared across input sets - all_dir = joinpath(data_dir, 'all') + all_dir = joinpath(data_dir, "all") # dirs for input & output files for this input set - inputs_dir = joinpath(data_dir, input_set, 'input') - outputs_dir = 
joinpath(data_dir, input_set, 'output') + inputs_dir = joinpath(data_dir, input_set, "input") + outputs_dir = joinpath(data_dir, input_set, "output") # keep around which input set was specified self.input_set = input_set if not isdir(inputs_dir): - raise AttributeError('%s not found' % inputs_dir) + raise AttributeError("%s not found" % inputs_dir) - self.inputs_dir = [ inputs_dir ] + self.inputs_dir = [inputs_dir] if isdir(all_dir): - self.inputs_dir += [ joinpath(all_dir, 'input') ] + self.inputs_dir += [joinpath(all_dir, "input")] if isdir(outputs_dir): self.outputs_dir = outputs_dir - if not hasattr(self.__class__, 'stdin'): - self.stdin = joinpath(inputs_dir, '%s.in' % self.name) + if not hasattr(self.__class__, "stdin"): + self.stdin = joinpath(inputs_dir, "%s.in" % self.name) if not isfile(self.stdin): self.stdin = None - if not hasattr(self.__class__, 'stdout'): - self.stdout = joinpath(outputs_dir, '%s.out' % self.name) + if not hasattr(self.__class__, "stdout"): + self.stdout = joinpath(outputs_dir, "%s.out" % self.name) if not isfile(self.stdout): self.stdout = None @@ -132,14 +135,14 @@ class Benchmark(object): def makeProcessArgs(self, **kwargs): # set up default args for Process object process_args = {} - process_args['cmd'] = [ self.name ] + self.args - process_args['executable'] = self.executable + process_args["cmd"] = [self.name] + self.args + process_args["executable"] = self.executable if self.stdin: - process_args['input'] = self.stdin + process_args["input"] = self.stdin if self.stdout: - process_args['output'] = self.stdout + process_args["output"] = self.stdout if self.simpoint: - process_args['simpoint'] = self.simpoint + process_args["simpoint"] = self.simpoint # explicit keywords override defaults process_args.update(kwargs) @@ -150,12 +153,13 @@ class Benchmark(object): # figure out working directory: use m5's outdir unless # overridden by Process's cwd param - cwd = process_args.get('cwd') + cwd = process_args.get("cwd") if not cwd: 
from m5 import options + cwd = options.outdir - process_args['cwd'] = cwd + process_args["cwd"] = cwd if not isdir(cwd): os.makedirs(cwd) # copy input files to working directory @@ -163,590 +167,872 @@ class Benchmark(object): copyfiles(d, cwd) # generate Process object from m5.objects import Process + return Process(**process_args) def __str__(self): return self.name + class DefaultBenchmark(Benchmark): - def ref(self, isa, os): pass - def test(self, isa, os): pass - def train(self, isa, os): pass - -class MinneDefaultBenchmark(DefaultBenchmark): - def smred(self, isa, os): pass - def mdred(self, isa, os): pass - def lgred(self, isa, os): pass - -class ammp(MinneDefaultBenchmark): - name = 'ammp' - number = 188 - lang = 'C' - simpoint = 108*100E6 - -class applu(MinneDefaultBenchmark): - name = 'applu' - number = 173 - lang = 'F77' - simpoint = 2179*100E6 - -class apsi(MinneDefaultBenchmark): - name = 'apsi' - number = 301 - lang = 'F77' - simpoint = 3408*100E6 - -class art(DefaultBenchmark): - name = 'art' - number = 179 - lang = 'C' + def ref(self, isa, os): + pass def test(self, isa, os): - self.args = [ '-scanfile', 'c756hel.in', - '-trainfile1', 'a10.img', - '-stride', '2', - '-startx', '134', - '-starty', '220', - '-endx', '139', - '-endy', '225', - '-objects', '1' ] - self.output = 'test.out' + pass def train(self, isa, os): - self.args = [ '-scanfile', 'c756hel.in', - '-trainfile1', 'a10.img', - '-stride', '2', - '-startx', '134', - '-starty', '220', - '-endx', '184', - '-endy', '240', - '-objects', '3' ] - self.output = 'train.out' + pass + + +class MinneDefaultBenchmark(DefaultBenchmark): + def smred(self, isa, os): + pass + + def mdred(self, isa, os): + pass def lgred(self, isa, os): - self.args = ['-scanfile', 'c756hel.in', - '-trainfile1', 'a10.img', - '-stride', '5', - '-startx', '134', - '-starty', '220', - '-endx', '184', - '-endy', '240', - '-objects', '1' ] - self.output = 'lgred.out' + pass + + +class ammp(MinneDefaultBenchmark): + name = "ammp" 
+ number = 188 + lang = "C" + simpoint = 108 * 100e6 + + +class applu(MinneDefaultBenchmark): + name = "applu" + number = 173 + lang = "F77" + simpoint = 2179 * 100e6 + + +class apsi(MinneDefaultBenchmark): + name = "apsi" + number = 301 + lang = "F77" + simpoint = 3408 * 100e6 + + +class art(DefaultBenchmark): + name = "art" + number = 179 + lang = "C" + + def test(self, isa, os): + self.args = [ + "-scanfile", + "c756hel.in", + "-trainfile1", + "a10.img", + "-stride", + "2", + "-startx", + "134", + "-starty", + "220", + "-endx", + "139", + "-endy", + "225", + "-objects", + "1", + ] + self.output = "test.out" + + def train(self, isa, os): + self.args = [ + "-scanfile", + "c756hel.in", + "-trainfile1", + "a10.img", + "-stride", + "2", + "-startx", + "134", + "-starty", + "220", + "-endx", + "184", + "-endy", + "240", + "-objects", + "3", + ] + self.output = "train.out" + + def lgred(self, isa, os): + self.args = [ + "-scanfile", + "c756hel.in", + "-trainfile1", + "a10.img", + "-stride", + "5", + "-startx", + "134", + "-starty", + "220", + "-endx", + "184", + "-endy", + "240", + "-objects", + "1", + ] + self.output = "lgred.out" class art110(art): def ref(self, isa, os): - self.args = [ '-scanfile', 'c756hel.in', - '-trainfile1', 'a10.img', - '-trainfile2', 'hc.img', - '-stride', '2', - '-startx', '110', - '-starty', '200', - '-endx', '160', - '-endy', '240', - '-objects', '10' ] - self.output = 'ref.1.out' - self.simpoint = 340*100E6 + self.args = [ + "-scanfile", + "c756hel.in", + "-trainfile1", + "a10.img", + "-trainfile2", + "hc.img", + "-stride", + "2", + "-startx", + "110", + "-starty", + "200", + "-endx", + "160", + "-endy", + "240", + "-objects", + "10", + ] + self.output = "ref.1.out" + self.simpoint = 340 * 100e6 + class art470(art): def ref(self, isa, os): - self.args = [ '-scanfile', 'c756hel.in', - '-trainfile1', 'a10.img', - '-trainfile2', 'hc.img', - '-stride', '2', - '-startx', '470', - '-starty', '140', - '-endx', '520', - '-endy', '180', - 
'-objects', '10' ] - self.output = 'ref.2.out' - self.simpoint = 365*100E6 + self.args = [ + "-scanfile", + "c756hel.in", + "-trainfile1", + "a10.img", + "-trainfile2", + "hc.img", + "-stride", + "2", + "-startx", + "470", + "-starty", + "140", + "-endx", + "520", + "-endy", + "180", + "-objects", + "10", + ] + self.output = "ref.2.out" + self.simpoint = 365 * 100e6 + class equake(DefaultBenchmark): - name = 'equake' + name = "equake" number = 183 - lang = 'C' - simpoint = 812*100E6 + lang = "C" + simpoint = 812 * 100e6 + + def lgred(self, isa, os): + pass - def lgred(self, isa, os): pass class facerec(MinneDefaultBenchmark): - name = 'facerec' + name = "facerec" number = 187 - lang = 'F' - simpoint = 375*100E6 + lang = "F" + simpoint = 375 * 100e6 + class fma3d(MinneDefaultBenchmark): - name = 'fma3d' + name = "fma3d" number = 191 - lang = 'F' - simpoint = 2541*100E6 + lang = "F" + simpoint = 2541 * 100e6 + class galgel(MinneDefaultBenchmark): - name = 'galgel' + name = "galgel" number = 178 - lang = 'F' - simpoint = 2491*100E6 + lang = "F" + simpoint = 2491 * 100e6 + class lucas(MinneDefaultBenchmark): - name = 'lucas' + name = "lucas" number = 189 - lang = 'F' - simpoint = 545*100E6 + lang = "F" + simpoint = 545 * 100e6 + class mesa(Benchmark): - name = 'mesa' + name = "mesa" number = 177 - lang = 'C' + lang = "C" stdin = None def __set_args(self, frames): - self.args = [ '-frames', frames, '-meshfile', '%s.in' % self.name, - '-ppmfile', '%s.ppm' % self.name ] + self.args = [ + "-frames", + frames, + "-meshfile", + "%s.in" % self.name, + "-ppmfile", + "%s.ppm" % self.name, + ] def test(self, isa, os): - self.__set_args('10') + self.__set_args("10") def train(self, isa, os): - self.__set_args('500') + self.__set_args("500") def ref(self, isa, os): - self.__set_args('1000') - self.simpoint = 1135*100E6 + self.__set_args("1000") + self.simpoint = 1135 * 100e6 def lgred(self, isa, os): - self.__set_args('1') + self.__set_args("1") + class 
mgrid(MinneDefaultBenchmark): - name = 'mgrid' + name = "mgrid" number = 172 - lang = 'F77' - simpoint = 3292*100E6 + lang = "F77" + simpoint = 3292 * 100e6 + class sixtrack(DefaultBenchmark): - name = 'sixtrack' + name = "sixtrack" number = 200 - lang = 'F77' - simpoint = 3043*100E6 + lang = "F77" + simpoint = 3043 * 100e6 + + def lgred(self, isa, os): + pass - def lgred(self, isa, os): pass class swim(MinneDefaultBenchmark): - name = 'swim' + name = "swim" number = 171 - lang = 'F77' - simpoint = 2079*100E6 + lang = "F77" + simpoint = 2079 * 100e6 + class wupwise(DefaultBenchmark): - name = 'wupwise' + name = "wupwise" number = 168 - lang = 'F77' - simpoint = 3237*100E6 + lang = "F77" + simpoint = 3237 * 100e6 + + def lgred(self, isa, os): + pass - def lgred(self, isa, os): pass class bzip2(DefaultBenchmark): - name = 'bzip2' + name = "bzip2" number = 256 - lang = 'C' + lang = "C" def test(self, isa, os): - self.args = [ 'input.random' ] + self.args = ["input.random"] def train(self, isa, os): - self.args = [ 'input.compressed' ] + self.args = ["input.compressed"] + class bzip2_source(bzip2): def ref(self, isa, os): - self.simpoint = 977*100E6 - self.args = [ 'input.source', '58' ] + self.simpoint = 977 * 100e6 + self.args = ["input.source", "58"] def lgred(self, isa, os): - self.args = [ 'input.source', '1' ] + self.args = ["input.source", "1"] + class bzip2_graphic(bzip2): def ref(self, isa, os): - self.simpoint = 718*100E6 - self.args = [ 'input.graphic', '58' ] + self.simpoint = 718 * 100e6 + self.args = ["input.graphic", "58"] def lgred(self, isa, os): - self.args = [ 'input.graphic', '1' ] + self.args = ["input.graphic", "1"] + class bzip2_program(bzip2): def ref(self, isa, os): - self.simpoint = 458*100E6 - self.args = [ 'input.program', '58' ] + self.simpoint = 458 * 100e6 + self.args = ["input.program", "58"] def lgred(self, isa, os): - self.args = [ 'input.program', '1' ] + self.args = ["input.program", "1"] + class crafty(MinneDefaultBenchmark): - name 
= 'crafty' + name = "crafty" number = 186 - lang = 'C' - simpoint = 774*100E6 + lang = "C" + simpoint = 774 * 100e6 + class eon(MinneDefaultBenchmark): - name = 'eon' + name = "eon" number = 252 - lang = 'CXX' + lang = "CXX" stdin = None + class eon_kajiya(eon): - args = [ 'chair.control.kajiya', 'chair.camera', 'chair.surfaces', - 'chair.kajiya.ppm', 'ppm', 'pixels_out.kajiya'] - output = 'kajiya_log.out' + args = [ + "chair.control.kajiya", + "chair.camera", + "chair.surfaces", + "chair.kajiya.ppm", + "ppm", + "pixels_out.kajiya", + ] + output = "kajiya_log.out" class eon_cook(eon): - args = [ 'chair.control.cook', 'chair.camera', 'chair.surfaces', - 'chair.cook.ppm', 'ppm', 'pixels_out.cook' ] - output = 'cook_log.out' + args = [ + "chair.control.cook", + "chair.camera", + "chair.surfaces", + "chair.cook.ppm", + "ppm", + "pixels_out.cook", + ] + output = "cook_log.out" + class eon_rushmeier(eon): - args = [ 'chair.control.rushmeier', 'chair.camera', 'chair.surfaces', - 'chair.rushmeier.ppm', 'ppm', 'pixels_out.rushmeier' ] - output = 'rushmeier_log.out' - simpoint = 403*100E6 + args = [ + "chair.control.rushmeier", + "chair.camera", + "chair.surfaces", + "chair.rushmeier.ppm", + "ppm", + "pixels_out.rushmeier", + ] + output = "rushmeier_log.out" + simpoint = 403 * 100e6 + class gap(DefaultBenchmark): - name = 'gap' + name = "gap" number = 254 - lang = 'C' + lang = "C" def __set_args(self, size): - self.args = [ '-l', './', '-q', '-m', size ] + self.args = ["-l", "./", "-q", "-m", size] def test(self, isa, os): - self.__set_args('64M') + self.__set_args("64M") def train(self, isa, os): - self.__set_args('128M') + self.__set_args("128M") def ref(self, isa, os): - self.__set_args('192M') - self.simpoint = 674*100E6 + self.__set_args("192M") + self.simpoint = 674 * 100e6 def lgred(self, isa, os): - self.__set_args('64M') + self.__set_args("64M") def mdred(self, isa, os): - self.__set_args('64M') + self.__set_args("64M") def smred(self, isa, os): - 
self.__set_args('64M') + self.__set_args("64M") + class gcc(DefaultBenchmark): - name = 'gcc' + name = "gcc" number = 176 - lang = 'C' + lang = "C" def test(self, isa, os): - self.args = [ 'cccp.i', '-o', 'cccp.s' ] + self.args = ["cccp.i", "-o", "cccp.s"] def train(self, isa, os): - self.args = [ 'cp-decl.i', '-o', 'cp-decl.s' ] + self.args = ["cp-decl.i", "-o", "cp-decl.s"] def smred(self, isa, os): - self.args = [ 'c-iterate.i', '-o', 'c-iterate.s' ] + self.args = ["c-iterate.i", "-o", "c-iterate.s"] def mdred(self, isa, os): - self.args = [ 'rdlanal.i', '-o', 'rdlanal.s' ] + self.args = ["rdlanal.i", "-o", "rdlanal.s"] def lgred(self, isa, os): - self.args = [ 'cp-decl.i', '-o', 'cp-decl.s' ] + self.args = ["cp-decl.i", "-o", "cp-decl.s"] + class gcc_166(gcc): def ref(self, isa, os): - self.simpoint = 389*100E6 - self.args = [ '166.i', '-o', '166.s' ] + self.simpoint = 389 * 100e6 + self.args = ["166.i", "-o", "166.s"] + class gcc_200(gcc): def ref(self, isa, os): - self.simpoint = 736*100E6 - self.args = [ '200.i', '-o', '200.s' ] + self.simpoint = 736 * 100e6 + self.args = ["200.i", "-o", "200.s"] + class gcc_expr(gcc): def ref(self, isa, os): - self.simpoint = 36*100E6 - self.args = [ 'expr.i', '-o', 'expr.s' ] + self.simpoint = 36 * 100e6 + self.args = ["expr.i", "-o", "expr.s"] + class gcc_integrate(gcc): def ref(self, isa, os): - self.simpoint = 4*100E6 - self.args = [ 'integrate.i', '-o', 'integrate.s' ] + self.simpoint = 4 * 100e6 + self.args = ["integrate.i", "-o", "integrate.s"] + class gcc_scilab(gcc): def ref(self, isa, os): - self.simpoint = 207*100E6 - self.args = [ 'scilab.i', '-o', 'scilab.s' ] + self.simpoint = 207 * 100e6 + self.args = ["scilab.i", "-o", "scilab.s"] + class gzip(DefaultBenchmark): - name = 'gzip' + name = "gzip" number = 164 - lang = 'C' + lang = "C" def test(self, isa, os): - self.args = [ 'input.compressed', '2' ] + self.args = ["input.compressed", "2"] def train(self, isa, os): - self.args = [ 'input.combined', '32' ] + 
self.args = ["input.combined", "32"] + class gzip_source(gzip): def ref(self, isa, os): - self.simpoint = 334*100E6 - self.args = [ 'input.source', '1' ] + self.simpoint = 334 * 100e6 + self.args = ["input.source", "1"] + def smred(self, isa, os): - self.args = [ 'input.source', '1' ] + self.args = ["input.source", "1"] + def mdred(self, isa, os): - self.args = [ 'input.source', '1' ] + self.args = ["input.source", "1"] + def lgred(self, isa, os): - self.args = [ 'input.source', '1' ] + self.args = ["input.source", "1"] + class gzip_log(gzip): def ref(self, isa, os): - self.simpoint = 265*100E6 - self.args = [ 'input.log', '60' ] + self.simpoint = 265 * 100e6 + self.args = ["input.log", "60"] + def smred(self, isa, os): - self.args = [ 'input.log', '1' ] + self.args = ["input.log", "1"] + def mdred(self, isa, os): - self.args = [ 'input.log', '1' ] + self.args = ["input.log", "1"] + def lgred(self, isa, os): - self.args = [ 'input.log', '1' ] + self.args = ["input.log", "1"] + class gzip_graphic(gzip): def ref(self, isa, os): - self.simpoint = 653*100E6 - self.args = [ 'input.graphic', '60' ] + self.simpoint = 653 * 100e6 + self.args = ["input.graphic", "60"] + def smred(self, isa, os): - self.args = [ 'input.graphic', '1' ] + self.args = ["input.graphic", "1"] + def mdred(self, isa, os): - self.args = [ 'input.graphic', '1' ] + self.args = ["input.graphic", "1"] + def lgred(self, isa, os): - self.args = [ 'input.graphic', '1' ] + self.args = ["input.graphic", "1"] + class gzip_random(gzip): def ref(self, isa, os): - self.simpoint = 623*100E6 - self.args = [ 'input.random', '60' ] + self.simpoint = 623 * 100e6 + self.args = ["input.random", "60"] + def smred(self, isa, os): - self.args = [ 'input.random', '1' ] + self.args = ["input.random", "1"] + def mdred(self, isa, os): - self.args = [ 'input.random', '1' ] + self.args = ["input.random", "1"] + def lgred(self, isa, os): - self.args = [ 'input.random', '1' ] + self.args = ["input.random", "1"] + class 
gzip_program(gzip): def ref(self, isa, os): - self.simpoint = 1189*100E6 - self.args = [ 'input.program', '60' ] + self.simpoint = 1189 * 100e6 + self.args = ["input.program", "60"] + def smred(self, isa, os): - self.args = [ 'input.program', '1' ] + self.args = ["input.program", "1"] + def mdred(self, isa, os): - self.args = [ 'input.program', '1' ] + self.args = ["input.program", "1"] + def lgred(self, isa, os): - self.args = [ 'input.program', '1' ] + self.args = ["input.program", "1"] + class mcf(MinneDefaultBenchmark): - name = 'mcf' + name = "mcf" number = 181 - lang = 'C' - args = [ 'mcf.in' ] - simpoint = 553*100E6 + lang = "C" + args = ["mcf.in"] + simpoint = 553 * 100e6 + class parser(MinneDefaultBenchmark): - name = 'parser' + name = "parser" number = 197 - lang = 'C' - args = [ '2.1.dict', '-batch' ] - simpoint = 1146*100E6 + lang = "C" + args = ["2.1.dict", "-batch"] + simpoint = 1146 * 100e6 + class perlbmk(DefaultBenchmark): - name = 'perlbmk' + name = "perlbmk" number = 253 - lang = 'C' + lang = "C" def test(self, isa, os): - self.args = [ '-I.', '-I', 'lib', 'test.pl' ] - self.stdin = 'test.in' + self.args = ["-I.", "-I", "lib", "test.pl"] + self.stdin = "test.in" + class perlbmk_diffmail(perlbmk): def ref(self, isa, os): - self.simpoint = 141*100E6 - self.args = [ '-I', 'lib', 'diffmail.pl', '2', '550', '15', '24', - '23', '100' ] + self.simpoint = 141 * 100e6 + self.args = [ + "-I", + "lib", + "diffmail.pl", + "2", + "550", + "15", + "24", + "23", + "100", + ] def train(self, isa, os): - self.args = [ '-I', 'lib', 'diffmail.pl', '2', '350', '15', '24', - '23', '150' ] + self.args = [ + "-I", + "lib", + "diffmail.pl", + "2", + "350", + "15", + "24", + "23", + "150", + ] + class perlbmk_scrabbl(perlbmk): def train(self, isa, os): - self.args = [ '-I.', '-I', 'lib', 'scrabbl.pl' ] - self.stdin = 'scrabbl.in' + self.args = ["-I.", "-I", "lib", "scrabbl.pl"] + self.stdin = "scrabbl.in" + class perlbmk_makerand(perlbmk): def ref(self, isa, os): - 
self.simpoint = 11*100E6 - self.args = [ '-I', 'lib', 'makerand.pl' ] + self.simpoint = 11 * 100e6 + self.args = ["-I", "lib", "makerand.pl"] def lgred(self, isa, os): - self.args = [ '-I.', '-I', 'lib', 'lgred.makerand.pl' ] + self.args = ["-I.", "-I", "lib", "lgred.makerand.pl"] def mdred(self, isa, os): - self.args = [ '-I.', '-I', 'lib', 'mdred.makerand.pl' ] + self.args = ["-I.", "-I", "lib", "mdred.makerand.pl"] def smred(self, isa, os): - self.args = [ '-I.', '-I', 'lib', 'smred.makerand.pl' ] + self.args = ["-I.", "-I", "lib", "smred.makerand.pl"] + class perlbmk_perfect(perlbmk): def ref(self, isa, os): - self.simpoint = 5*100E6 - self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3', 'm', '4' ] + self.simpoint = 5 * 100e6 + self.args = ["-I", "lib", "perfect.pl", "b", "3", "m", "4"] def train(self, isa, os): - self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3' ] + self.args = ["-I", "lib", "perfect.pl", "b", "3"] + class perlbmk_splitmail1(perlbmk): def ref(self, isa, os): - self.simpoint = 405*100E6 - self.args = [ '-I', 'lib', 'splitmail.pl', '850', '5', '19', - '18', '1500' ] + self.simpoint = 405 * 100e6 + self.args = [ + "-I", + "lib", + "splitmail.pl", + "850", + "5", + "19", + "18", + "1500", + ] + class perlbmk_splitmail2(perlbmk): def ref(self, isa, os): - self.args = [ '-I', 'lib', 'splitmail.pl', '704', '12', '26', - '16', '836' ] + self.args = [ + "-I", + "lib", + "splitmail.pl", + "704", + "12", + "26", + "16", + "836", + ] + class perlbmk_splitmail3(perlbmk): def ref(self, isa, os): - self.args = [ '-I', 'lib', 'splitmail.pl', '535', '13', '25', - '24', '1091' ] + self.args = [ + "-I", + "lib", + "splitmail.pl", + "535", + "13", + "25", + "24", + "1091", + ] + class perlbmk_splitmail4(perlbmk): def ref(self, isa, os): - self.args = [ '-I', 'lib', 'splitmail.pl', '957', '12', '23', - '26', '1014' ] + self.args = [ + "-I", + "lib", + "splitmail.pl", + "957", + "12", + "23", + "26", + "1014", + ] + class twolf(Benchmark): - name = 'twolf' + name = 
"twolf" number = 300 - lang = 'C' + lang = "C" stdin = None def test(self, isa, os): - self.args = [ 'test' ] + self.args = ["test"] def train(self, isa, os): - self.args = [ 'train' ] + self.args = ["train"] def ref(self, isa, os): - self.simpoint = 1066*100E6 - self.args = [ 'ref' ] + self.simpoint = 1066 * 100e6 + self.args = ["ref"] def smred(self, isa, os): - self.args = [ 'smred' ] + self.args = ["smred"] def mdred(self, isa, os): - self.args = [ 'mdred' ] + self.args = ["mdred"] def lgred(self, isa, os): - self.args = [ 'lgred' ] + self.args = ["lgred"] + class vortex(Benchmark): - name = 'vortex' + name = "vortex" number = 255 - lang = 'C' + lang = "C" stdin = None def __init__(self, isa, os, input_set): - if (isa in ('arm', 'thumb', 'aarch64')): - self.endian = 'lendian' - elif (isa == 'sparc' or isa == 'sparc32'): - self.endian = 'bendian' + if isa in ("arm", "thumb", "aarch64"): + self.endian = "lendian" + elif isa == "sparc" or isa == "sparc32": + self.endian = "bendian" else: raise AttributeError("unknown ISA %s" % isa) super(vortex, self).__init__(isa, os, input_set) def test(self, isa, os): - self.args = [ '%s.raw' % self.endian ] - self.output = 'vortex.out' + self.args = ["%s.raw" % self.endian] + self.output = "vortex.out" def train(self, isa, os): - self.args = [ '%s.raw' % self.endian ] - self.output = 'vortex.out' + self.args = ["%s.raw" % self.endian] + self.output = "vortex.out" def smred(self, isa, os): - self.args = [ '%s.raw' % self.endian ] - self.output = 'vortex.out' + self.args = ["%s.raw" % self.endian] + self.output = "vortex.out" def mdred(self, isa, os): - self.args = [ '%s.raw' % self.endian ] - self.output = 'vortex.out' + self.args = ["%s.raw" % self.endian] + self.output = "vortex.out" def lgred(self, isa, os): - self.args = [ '%s.raw' % self.endian ] - self.output = 'vortex.out' + self.args = ["%s.raw" % self.endian] + self.output = "vortex.out" + class vortex1(vortex): def ref(self, isa, os): - self.args = [ '%s1.raw' % 
self.endian ] - self.output = 'vortex1.out' - self.simpoint = 271*100E6 + self.args = ["%s1.raw" % self.endian] + self.output = "vortex1.out" + self.simpoint = 271 * 100e6 class vortex2(vortex): def ref(self, isa, os): - self.simpoint = 1024*100E6 - self.args = [ '%s2.raw' % self.endian ] - self.output = 'vortex2.out' + self.simpoint = 1024 * 100e6 + self.args = ["%s2.raw" % self.endian] + self.output = "vortex2.out" + class vortex3(vortex): def ref(self, isa, os): - self.simpoint = 564*100E6 - self.args = [ '%s3.raw' % self.endian ] - self.output = 'vortex3.out' + self.simpoint = 564 * 100e6 + self.args = ["%s3.raw" % self.endian] + self.output = "vortex3.out" + class vpr(MinneDefaultBenchmark): - name = 'vpr' + name = "vpr" number = 175 - lang = 'C' + lang = "C" + # not sure about vpr minnespec place.in class vpr_place(vpr): - args = [ 'net.in', 'arch.in', 'place.out', 'dum.out', '-nodisp', - '-place_only', '-init_t', '5', '-exit_t', '0.005', - '-alpha_t', '0.9412', '-inner_num', '2' ] - output = 'place_log.out' + args = [ + "net.in", + "arch.in", + "place.out", + "dum.out", + "-nodisp", + "-place_only", + "-init_t", + "5", + "-exit_t", + "0.005", + "-alpha_t", + "0.9412", + "-inner_num", + "2", + ] + output = "place_log.out" + class vpr_route(vpr): - simpoint = 476*100E6 - args = [ 'net.in', 'arch.in', 'place.in', 'route.out', '-nodisp', - '-route_only', '-route_chan_width', '15', - '-pres_fac_mult', '2', '-acc_fac', '1', - '-first_iter_pres_fac', '4', '-initial_pres_fac', '8' ] - output = 'route_log.out' + simpoint = 476 * 100e6 + args = [ + "net.in", + "arch.in", + "place.in", + "route.out", + "-nodisp", + "-route_only", + "-route_chan_width", + "15", + "-pres_fac_mult", + "2", + "-acc_fac", + "1", + "-first_iter_pres_fac", + "4", + "-initial_pres_fac", + "8", + ] + output = "route_log.out" -all = [ ammp, applu, apsi, art, art110, art470, equake, facerec, fma3d, galgel, - lucas, mesa, mgrid, sixtrack, swim, wupwise, bzip2_source, - bzip2_graphic, 
bzip2_program, crafty, eon_kajiya, eon_cook, - eon_rushmeier, gap, gcc_166, gcc_200, gcc_expr, gcc_integrate, - gcc_scilab, gzip_source, gzip_log, gzip_graphic, gzip_random, - gzip_program, mcf, parser, perlbmk_diffmail, perlbmk_makerand, - perlbmk_perfect, perlbmk_splitmail1, perlbmk_splitmail2, - perlbmk_splitmail3, perlbmk_splitmail4, twolf, vortex1, vortex2, - vortex3, vpr_place, vpr_route ] -__all__ = [ x.__name__ for x in all ] +all = [ + ammp, + applu, + apsi, + art, + art110, + art470, + equake, + facerec, + fma3d, + galgel, + lucas, + mesa, + mgrid, + sixtrack, + swim, + wupwise, + bzip2_source, + bzip2_graphic, + bzip2_program, + crafty, + eon_kajiya, + eon_cook, + eon_rushmeier, + gap, + gcc_166, + gcc_200, + gcc_expr, + gcc_integrate, + gcc_scilab, + gzip_source, + gzip_log, + gzip_graphic, + gzip_random, + gzip_program, + mcf, + parser, + perlbmk_diffmail, + perlbmk_makerand, + perlbmk_perfect, + perlbmk_splitmail1, + perlbmk_splitmail2, + perlbmk_splitmail3, + perlbmk_splitmail4, + twolf, + vortex1, + vortex2, + vortex3, + vpr_place, + vpr_route, +] -if __name__ == '__main__': +__all__ = [x.__name__ for x in all] + +if __name__ == "__main__": from pprint import pprint + for bench in all: - for input_set in 'ref', 'test', 'train': - print('class: %s' % bench.__name__) - x = bench('x86', 'linux', input_set) - print('%s: %s' % (x, input_set)) + for input_set in "ref", "test", "train": + print("class: %s" % bench.__name__) + x = bench("x86", "linux", input_set) + print("%s: %s" % (x, input_set)) pprint(x.makeProcessArgs()) print() diff --git a/configs/dist/sw.py b/configs/dist/sw.py index 41edf9e21b..726735773e 100644 --- a/configs/dist/sw.py +++ b/configs/dist/sw.py @@ -35,33 +35,39 @@ from m5.defines import buildEnv from m5.objects import * from m5.util import addToPath, fatal -addToPath('../') +addToPath("../") from common import Simulation from common import Options + def build_switch(args): # instantiate an EtherSwitch switch = EtherSwitch() # 
instantiate distEtherLinks to connect switch ports # to other gem5 instances - switch.portlink = [DistEtherLink(speed = args.ethernet_linkspeed, - delay = args.ethernet_linkdelay, - dist_rank = args.dist_rank, - dist_size = args.dist_size, - server_name = args.dist_server_name, - server_port = args.dist_server_port, - sync_start = args.dist_sync_start, - sync_repeat = args.dist_sync_repeat, - is_switch = True, - num_nodes = args.dist_size) - for i in range(args.dist_size)] + switch.portlink = [ + DistEtherLink( + speed=args.ethernet_linkspeed, + delay=args.ethernet_linkdelay, + dist_rank=args.dist_rank, + dist_size=args.dist_size, + server_name=args.dist_server_name, + server_port=args.dist_server_port, + sync_start=args.dist_sync_start, + sync_repeat=args.dist_sync_repeat, + is_switch=True, + num_nodes=args.dist_size, + ) + for i in range(args.dist_size) + ] for (i, link) in enumerate(switch.portlink): link.int0 = switch.interface[i] return switch + def main(): # Add options parser = argparse.ArgumentParser() @@ -70,8 +76,9 @@ def main(): args = parser.parse_args() system = build_switch(args) - root = Root(full_system = True, system = system) + root = Root(full_system=True, system=system) Simulation.run(args, root, None, None) + if __name__ == "__m5_main__": main() diff --git a/configs/dram/lat_mem_rd.py b/configs/dram/lat_mem_rd.py index d69457d21e..74a94997bb 100644 --- a/configs/dram/lat_mem_rd.py +++ b/configs/dram/lat_mem_rd.py @@ -42,11 +42,11 @@ from m5.objects import * from m5.util import addToPath from m5.stats import periodicStatDump -addToPath('../') +addToPath("../") from common import ObjectList from common import MemConfig -addToPath('../../util') +addToPath("../../util") import protolib # this script is helpful to observe the memory latency for various @@ -61,8 +61,15 @@ try: except: print("Did not find packet proto definitions, attempting to generate") from subprocess import call - error = call(['protoc', '--python_out=configs/dram', - 
'--proto_path=src/proto', 'src/proto/packet.proto']) + + error = call( + [ + "protoc", + "--python_out=configs/dram", + "--proto_path=src/proto", + "src/proto/packet.proto", + ] + ) if not error: print("Generated packet proto definitions") @@ -79,24 +86,34 @@ except: parser = argparse.ArgumentParser() -parser.add_argument("--mem-type", default="DDR3_1600_8x8", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") -parser.add_argument("--mem-size", action="store", type=str, - default="16MB", - help="Specify the memory size") -parser.add_argument("--reuse-trace", action="store_true", - help="Prevent generation of traces and reuse existing") +parser.add_argument( + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", +) +parser.add_argument( + "--mem-size", + action="store", + type=str, + default="16MB", + help="Specify the memory size", +) +parser.add_argument( + "--reuse-trace", + action="store_true", + help="Prevent generation of traces and reuse existing", +) args = parser.parse_args() # start by creating the system itself, using a multi-layer 2.0 GHz # crossbar, delivering 64 bytes / 3 cycles (one header cycle) which # amounts to 42.7 GByte/s per layer and thus per port -system = System(membus = SystemXBar(width = 32)) -system.clk_domain = SrcClockDomain(clock = '2.0GHz', - voltage_domain = - VoltageDomain(voltage = '1V')) +system = System(membus=SystemXBar(width=32)) +system.clk_domain = SrcClockDomain( + clock="2.0GHz", voltage_domain=VoltageDomain(voltage="1V") +) mem_range = AddrRange(args.mem_size) system.mem_ranges = [mem_range] @@ -122,12 +139,12 @@ for ctrl in system.mem_ctrls: if isinstance(ctrl, m5.objects.MemCtrl): # make the DRAM refresh interval sufficiently infinite to avoid # latency spikes - ctrl.tREFI = '100s' + ctrl.tREFI = "100s" # use the same concept as the utilisation sweep, and print the config # so that we can later read it in cfg_file_name = 
os.path.join(m5.options.outdir, "lat_mem_rd.cfg") -cfg_file = open(cfg_file_name, 'w') +cfg_file = open(cfg_file_name, "w") # set an appropriate burst length in bytes burst_size = 64 @@ -137,6 +154,7 @@ system.cache_line_size = burst_size def is_pow2(num): return num != 0 and ((num & (num - 1)) == 0) + # assume we start every range at 0 max_range = int(mem_range.end) @@ -164,7 +182,7 @@ itt = 150 * 1000 # the actual measurement def create_trace(filename, max_addr, burst_size, itt): try: - proto_out = gzip.open(filename, 'wb') + proto_out = gzip.open(filename, "wb") except IOError: print("Failed to open ", filename, " for writing") exit(-1) @@ -184,6 +202,7 @@ def create_trace(filename, max_addr, burst_size, itt): addrs = list(range(0, max_addr, burst_size)) import random + random.shuffle(addrs) tick = 0 @@ -202,6 +221,7 @@ def create_trace(filename, max_addr, burst_size, itt): proto_out.close() + # this will take a while, so keep the user informed print("Generating traces, please wait...") @@ -211,22 +231,23 @@ period = int(itt * (max_range / burst_size)) # now we create the states for each range for r in ranges: - filename = os.path.join(m5.options.outdir, - 'lat_mem_rd%d.trc.gz' % nxt_range) + filename = os.path.join( + m5.options.outdir, "lat_mem_rd%d.trc.gz" % nxt_range + ) if not args.reuse_trace: # create the actual random trace for this range create_trace(filename, r, burst_size, itt) # the warming state - cfg_file.write("STATE %d %d TRACE %s 0\n" % - (nxt_state, period, filename)) + cfg_file.write("STATE %d %d TRACE %s 0\n" % (nxt_state, period, filename)) nxt_state = nxt_state + 1 # the measuring states for i in range(iterations): - cfg_file.write("STATE %d %d TRACE %s 0\n" % - (nxt_state, period, filename)) + cfg_file.write( + "STATE %d %d TRACE %s 0\n" % (nxt_state, period, filename) + ) nxt_state = nxt_state + 1 nxt_range = nxt_range + 1 @@ -242,8 +263,7 @@ cfg_file.write("TRANSITION %d %d 1\n" % (nxt_state - 1, nxt_state - 1)) cfg_file.close() # create 
a traffic generator, and point it to the file we just created -system.tgen = TrafficGen(config_file = cfg_file_name, - progress_check = '10s') +system.tgen = TrafficGen(config_file=cfg_file_name, progress_check="10s") # add a communication monitor system.monitor = CommMonitor() @@ -267,19 +287,20 @@ class L3Cache(Cache): tgts_per_mshr = 12 write_buffers = 16 + # note that everything is in the same clock domain, 2.0 GHz as # specified above -system.l1cache = L1_DCache(size = '64kB') +system.l1cache = L1_DCache(size="64kB") system.monitor.mem_side_port = system.l1cache.cpu_side -system.l2cache = L2Cache(size = '512kB', writeback_clean = True) +system.l2cache = L2Cache(size="512kB", writeback_clean=True) system.l2cache.xbar = L2XBar() system.l1cache.mem_side = system.l2cache.xbar.cpu_side_ports system.l2cache.cpu_side = system.l2cache.xbar.mem_side_ports # make the L3 mostly exclusive, and correspondingly ensure that the L2 # writes back also clean lines to the L3 -system.l3cache = L3Cache(size = '4MB', clusivity = 'mostly_excl') +system.l3cache = L3Cache(size="4MB", clusivity="mostly_excl") system.l3cache.xbar = L2XBar() system.l2cache.mem_side = system.l3cache.xbar.cpu_side_ports system.l3cache.cpu_side = system.l3cache.xbar.mem_side_ports @@ -292,8 +313,8 @@ system.system_port = system.membus.cpu_side_ports periodicStatDump(period) # run Forrest, run! 
-root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() m5.simulate(nxt_state * period) diff --git a/configs/dram/low_power_sweep.py b/configs/dram/low_power_sweep.py index 514700769e..7f8591b692 100644 --- a/configs/dram/low_power_sweep.py +++ b/configs/dram/low_power_sweep.py @@ -40,7 +40,7 @@ from m5.objects import * from m5.util import addToPath from m5.stats import periodicStatDump -addToPath('../') +addToPath("../") from common import ObjectList from common import MemConfig @@ -52,46 +52,70 @@ from common import MemConfig # through an idle state with no requests to enforce self-refresh. parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + formatter_class=argparse.ArgumentDefaultsHelpFormatter +) # Use a single-channel DDR4-2400 in 16x4 configuration by default -parser.add_argument("--mem-type", default="DDR4_2400_16x4", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") +parser.add_argument( + "--mem-type", + default="DDR4_2400_16x4", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", +) -parser.add_argument("--mem-ranks", "-r", type=int, default=1, - help = "Number of ranks to iterate across") +parser.add_argument( + "--mem-ranks", + "-r", + type=int, + default=1, + help="Number of ranks to iterate across", +) -parser.add_argument("--page-policy", "-p", - choices=["close_adaptive", "open_adaptive"], - default="close_adaptive", help="controller page policy") +parser.add_argument( + "--page-policy", + "-p", + choices=["close_adaptive", "open_adaptive"], + default="close_adaptive", + help="controller page policy", +) -parser.add_argument("--itt-list", "-t", default="1 20 100", - help="a list of multipliers for the max value of itt, " \ - "e.g. 
\"1 20 100\"") +parser.add_argument( + "--itt-list", + "-t", + default="1 20 100", + help="a list of multipliers for the max value of itt, " 'e.g. "1 20 100"', +) -parser.add_argument("--rd-perc", type=int, default=100, - help = "Percentage of read commands") +parser.add_argument( + "--rd-perc", type=int, default=100, help="Percentage of read commands" +) -parser.add_argument("--addr-map", - choices=m5.objects.AddrMap.vals, - default="RoRaBaCoCh", help = "DRAM address map policy") +parser.add_argument( + "--addr-map", + choices=m5.objects.AddrMap.vals, + default="RoRaBaCoCh", + help="DRAM address map policy", +) -parser.add_argument("--idle-end", type=int, default=50000000, - help = "time in ps of an idle period at the end ") +parser.add_argument( + "--idle-end", + type=int, + default=50000000, + help="time in ps of an idle period at the end ", +) args = parser.parse_args() # Start with the system itself, using a multi-layer 2.0 GHz # crossbar, delivering 64 bytes / 3 cycles (one header cycle) # which amounts to 42.7 GByte/s per layer and thus per port. -system = System(membus = IOXBar(width = 32)) -system.clk_domain = SrcClockDomain(clock = '2.0GHz', - voltage_domain = - VoltageDomain(voltage = '1V')) +system = System(membus=IOXBar(width=32)) +system.clk_domain = SrcClockDomain( + clock="2.0GHz", voltage_domain=VoltageDomain(voltage="1V") +) # We are fine with 256 MB memory for now. -mem_range = AddrRange('256MB') +mem_range = AddrRange("256MB") # Start address is 0 system.mem_ranges = [mem_range] @@ -130,20 +154,27 @@ period = 250000000 # We specify the states in a config file input to the traffic generator. 
cfg_file_name = "lowp_sweep.cfg" -cfg_file_path = os.path.dirname(__file__) + "/" +cfg_file_name -cfg_file = open(cfg_file_path, 'w') +cfg_file_path = os.path.dirname(__file__) + "/" + cfg_file_name +cfg_file = open(cfg_file_path, "w") # Get the number of banks nbr_banks = int(system.mem_ctrls[0].dram.banks_per_rank.value) # determine the burst size in bytes -burst_size = int((system.mem_ctrls[0].dram.devices_per_rank.value * - system.mem_ctrls[0].dram.device_bus_width.value * - system.mem_ctrls[0].dram.burst_length.value) / 8) +burst_size = int( + ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_bus_width.value + * system.mem_ctrls[0].dram.burst_length.value + ) + / 8 +) # next, get the page size in bytes (the rowbuffer size is already in bytes) -page_size = system.mem_ctrls[0].dram.devices_per_rank.value * \ - system.mem_ctrls[0].dram.device_rowbuffer_size.value +page_size = ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_rowbuffer_size.value +) # Inter-request delay should be such that we can hit as many transitions # to/from low power states as possible to. We provide a min and max itt to the @@ -151,23 +182,25 @@ page_size = system.mem_ctrls[0].dram.devices_per_rank.value * \ # seconds and we need it in ticks (ps). itt_min = system.mem_ctrls[0].dram.tBURST.value * 1000000000000 -#The itt value when set to (tRAS + tRP + tCK) covers the case where +# The itt value when set to (tRAS + tRP + tCK) covers the case where # a read command is delayed beyond the delay from ACT to PRE_PDN entry of the # previous command. For write command followed by precharge, this delay # between a write and power down entry will be tRCD + tCL + tWR + tRP + tCK. # As we use this delay as a unit and create multiples of it as bigger delays # for the sweep, this parameter works for reads, writes and mix of them. 
-pd_entry_time = (system.mem_ctrls[0].dram.tRAS.value + - system.mem_ctrls[0].dram.tRP.value + - system.mem_ctrls[0].dram.tCK.value) * 1000000000000 +pd_entry_time = ( + system.mem_ctrls[0].dram.tRAS.value + + system.mem_ctrls[0].dram.tRP.value + + system.mem_ctrls[0].dram.tCK.value +) * 1000000000000 # We sweep itt max using the multipliers specified by the user. itt_max_str = args.itt_list.strip().split() -itt_max_multiples = [ int(x) for x in itt_max_str ] +itt_max_multiples = [int(x) for x in itt_max_str] if len(itt_max_multiples) == 0: fatal("String for itt-max-list detected empty\n") -itt_max_values = [ pd_entry_time * m for m in itt_max_multiples ] +itt_max_values = [pd_entry_time * m for m in itt_max_multiples] # Generate request addresses in the entire range, assume we start at 0 max_addr = mem_range.end @@ -180,12 +213,14 @@ stride_values = [burst_size, mid_stride, max_stride] # be selective about bank utilization instead of going from 1 to the number of # banks -bank_util_values = [1, int(nbr_banks/2), nbr_banks] +bank_util_values = [1, int(nbr_banks / 2), nbr_banks] # Next we create the config file, but first a comment -cfg_file.write("""# STATE state# period mode=DRAM +cfg_file.write( + """# STATE state# period mode=DRAM # read_percent start_addr end_addr req_size min_itt max_itt data_limit -# stride_size page_size #banks #banks_util addr_map #ranks\n""") +# stride_size page_size #banks #banks_util addr_map #ranks\n""" +) addr_map = m5.objects.AddrMap.map[args.addr_map] @@ -193,12 +228,27 @@ nxt_state = 0 for itt_max in itt_max_values: for bank in bank_util_values: for stride_size in stride_values: - cfg_file.write("STATE %d %d %s %d 0 %d %d " - "%d %d %d %d %d %d %d %d %d\n" % - (nxt_state, period, "DRAM", args.rd_perc, max_addr, - burst_size, itt_min, itt_max, 0, stride_size, - page_size, nbr_banks, bank, addr_map, - args.mem_ranks)) + cfg_file.write( + "STATE %d %d %s %d 0 %d %d " + "%d %d %d %d %d %d %d %d %d\n" + % ( + nxt_state, + period, + 
"DRAM", + args.rd_perc, + max_addr, + burst_size, + itt_min, + itt_max, + 0, + stride_size, + page_size, + nbr_banks, + bank, + addr_map, + args.mem_ranks, + ) + ) nxt_state = nxt_state + 1 # State for idle period @@ -217,7 +267,7 @@ cfg_file.write("TRANSITION %d %d 1\n" % (nxt_state, nxt_state)) cfg_file.close() # create a traffic generator, and point it to the file we just created -system.tgen = TrafficGen(config_file = cfg_file_path) +system.tgen = TrafficGen(config_file=cfg_file_path) # add a communication monitor system.monitor = CommMonitor() @@ -232,8 +282,8 @@ system.system_port = system.membus.cpu_side_ports # every period, dump and reset all stats periodicStatDump(period) -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() @@ -242,8 +292,10 @@ m5.instantiate() m5.simulate(nxt_state * period + idle_period) print("--- Done DRAM low power sweep ---") print("Fixed params - ") -print("\tburst: %d, banks: %d, max stride: %d, itt min: %s ns" % \ - (burst_size, nbr_banks, max_stride, itt_min)) +print( + "\tburst: %d, banks: %d, max stride: %d, itt min: %s ns" + % (burst_size, nbr_banks, max_stride, itt_min) +) print("Swept params - ") print("\titt max multiples input:", itt_max_multiples) print("\titt max values", itt_max_values) diff --git a/configs/dram/sweep.py b/configs/dram/sweep.py index 0205f0dee9..ca7b70d4ed 100644 --- a/configs/dram/sweep.py +++ b/configs/dram/sweep.py @@ -41,7 +41,7 @@ from m5.objects import * from m5.util import addToPath from m5.stats import periodicStatDump -addToPath('../') +addToPath("../") from common import ObjectList from common import MemConfig @@ -54,29 +54,44 @@ from common import MemConfig parser = argparse.ArgumentParser() dram_generators = { - "DRAM" : lambda x: x.createDram, - "DRAM_ROTATE" : lambda x: x.createDramRot, + "DRAM": lambda x: x.createDram, + "DRAM_ROTATE": lambda x: x.createDramRot, 
} # Use a single-channel DDR3-1600 x64 (8x8 topology) by default -parser.add_argument("--mem-type", default="DDR3_1600_8x8", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") +parser.add_argument( + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", +) -parser.add_argument("--mem-ranks", "-r", type=int, default=1, - help = "Number of ranks to iterate across") +parser.add_argument( + "--mem-ranks", + "-r", + type=int, + default=1, + help="Number of ranks to iterate across", +) -parser.add_argument("--rd_perc", type=int, default=100, - help = "Percentage of read commands") +parser.add_argument( + "--rd_perc", type=int, default=100, help="Percentage of read commands" +) -parser.add_argument("--mode", default="DRAM", - choices=list(dram_generators.keys()), - help = "DRAM: Random traffic; \ - DRAM_ROTATE: Traffic rotating across banks and ranks") +parser.add_argument( + "--mode", + default="DRAM", + choices=list(dram_generators.keys()), + help="DRAM: Random traffic; \ + DRAM_ROTATE: Traffic rotating across banks and ranks", +) -parser.add_argument("--addr-map", - choices=ObjectList.dram_addr_map_list.get_names(), - default="RoRaBaCoCh", help = "DRAM address map policy") +parser.add_argument( + "--addr-map", + choices=ObjectList.dram_addr_map_list.get_names(), + default="RoRaBaCoCh", + help="DRAM address map policy", +) args = parser.parse_args() @@ -86,13 +101,13 @@ args = parser.parse_args() # start with the system itself, using a multi-layer 2.0 GHz # crossbar, delivering 64 bytes / 3 cycles (one header cycle) # which amounts to 42.7 GByte/s per layer and thus per port -system = System(membus = IOXBar(width = 32)) -system.clk_domain = SrcClockDomain(clock = '2.0GHz', - voltage_domain = - VoltageDomain(voltage = '1V')) +system = System(membus=IOXBar(width=32)) +system.clk_domain = SrcClockDomain( + clock="2.0GHz", voltage_domain=VoltageDomain(voltage="1V") +) # we are fine with 
256 MB memory for now -mem_range = AddrRange('256MB') +mem_range = AddrRange("256MB") system.mem_ranges = [mem_range] # do not worry about reserving space for the backing store @@ -131,18 +146,31 @@ period = 250000000 nbr_banks = system.mem_ctrls[0].dram.banks_per_rank.value # determine the burst length in bytes -burst_size = int((system.mem_ctrls[0].dram.devices_per_rank.value * - system.mem_ctrls[0].dram.device_bus_width.value * - system.mem_ctrls[0].dram.burst_length.value) / 8) +burst_size = int( + ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_bus_width.value + * system.mem_ctrls[0].dram.burst_length.value + ) + / 8 +) # next, get the page size in bytes -page_size = system.mem_ctrls[0].dram.devices_per_rank.value * \ - system.mem_ctrls[0].dram.device_rowbuffer_size.value +page_size = ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_rowbuffer_size.value +) # match the maximum bandwidth of the memory, the parameter is in seconds # and we need it in ticks (ps) -itt = getattr(system.mem_ctrls[0].dram.tBURST_MIN, 'value', - system.mem_ctrls[0].dram.tBURST.value) * 1000000000000 +itt = ( + getattr( + system.mem_ctrls[0].dram.tBURST_MIN, + "value", + system.mem_ctrls[0].dram.tBURST.value, + ) + * 1000000000000 +) # assume we start at 0 max_addr = mem_range.end @@ -168,27 +196,43 @@ system.system_port = system.membus.cpu_side_ports periodicStatDump(period) # run Forrest, run! 
-root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() + def trace(): addr_map = ObjectList.dram_addr_map_list.get(args.addr_map) generator = dram_generators[args.mode](system.tgen) for stride_size in range(burst_size, max_stride + 1, burst_size): for bank in range(1, nbr_banks + 1): num_seq_pkts = int(math.ceil(float(stride_size) / burst_size)) - yield generator(period, - 0, max_addr, burst_size, int(itt), int(itt), - args.rd_perc, 0, - num_seq_pkts, page_size, nbr_banks, bank, - addr_map, args.mem_ranks) + yield generator( + period, + 0, + max_addr, + burst_size, + int(itt), + int(itt), + args.rd_perc, + 0, + num_seq_pkts, + page_size, + nbr_banks, + bank, + addr_map, + args.mem_ranks, + ) yield system.tgen.createExit(0) + system.tgen.start(trace()) m5.simulate() -print("DRAM sweep with burst: %d, banks: %d, max stride: %d, request \ - generation period: %d" % (burst_size, nbr_banks, max_stride, itt)) +print( + "DRAM sweep with burst: %d, banks: %d, max stride: %d, request \ + generation period: %d" + % (burst_size, nbr_banks, max_stride, itt) +) diff --git a/configs/example/apu_se.py b/configs/example/apu_se.py index b5fb9ff847..acf527bdf7 100644 --- a/configs/example/apu_se.py +++ b/configs/example/apu_se.py @@ -35,8 +35,10 @@ import inspect import m5 from m5.objects import * from m5.util import addToPath +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa -addToPath('../') +addToPath("../") from ruby import Ruby @@ -53,143 +55,307 @@ parser = argparse.ArgumentParser() Options.addCommonOptions(parser) Options.addSEOptions(parser) -parser.add_argument("--cpu-only-mode", action="store_true", default=False, - help="APU mode. 
Used to take care of problems in " - "Ruby.py while running APU protocols") -parser.add_argument("-u", "--num-compute-units", type=int, default=4, - help="number of GPU compute units"), -parser.add_argument("--num-cp", type=int, default=0, - help="Number of GPU Command Processors (CP)") -parser.add_argument("--benchmark-root", - help="Root of benchmark directory tree") +parser.add_argument( + "--cpu-only-mode", + action="store_true", + default=False, + help="APU mode. Used to take care of problems in " + "Ruby.py while running APU protocols", +) +parser.add_argument( + "-u", + "--num-compute-units", + type=int, + default=4, + help="number of GPU compute units", +), +parser.add_argument( + "--num-cp", + type=int, + default=0, + help="Number of GPU Command Processors (CP)", +) +parser.add_argument( + "--benchmark-root", help="Root of benchmark directory tree" +) # not super important now, but to avoid putting the number 4 everywhere, make # it an option/knob -parser.add_argument("--cu-per-sqc", type=int, default=4, help="number of CUs" - "sharing an SQC (icache, and thus icache TLB)") -parser.add_argument('--cu-per-scalar-cache', type=int, default=4, - help='Number of CUs sharing a scalar cache') -parser.add_argument("--simds-per-cu", type=int, default=4, help="SIMD units" - "per CU") -parser.add_argument('--cu-per-sa', type=int, default=4, - help='Number of CUs per shader array. 
This must be a ' - 'multiple of options.cu-per-sqc and options.cu-per-scalar') -parser.add_argument('--sa-per-complex', type=int, default=1, - help='Number of shader arrays per complex') -parser.add_argument('--num-gpu-complexes', type=int, default=1, - help='Number of GPU complexes') -parser.add_argument("--wf-size", type=int, default=64, - help="Wavefront size(in workitems)") -parser.add_argument("--sp-bypass-path-length", type=int, default=4, - help="Number of stages of bypass path in vector ALU for " - "Single Precision ops") -parser.add_argument("--dp-bypass-path-length", type=int, default=4, - help="Number of stages of bypass path in vector ALU for " - "Double Precision ops") +parser.add_argument( + "--cu-per-sqc", + type=int, + default=4, + help="number of CUs" "sharing an SQC (icache, and thus icache TLB)", +) +parser.add_argument( + "--cu-per-scalar-cache", + type=int, + default=4, + help="Number of CUs sharing a scalar cache", +) +parser.add_argument( + "--simds-per-cu", type=int, default=4, help="SIMD units" "per CU" +) +parser.add_argument( + "--cu-per-sa", + type=int, + default=4, + help="Number of CUs per shader array. 
This must be a " + "multiple of options.cu-per-sqc and options.cu-per-scalar", +) +parser.add_argument( + "--sa-per-complex", + type=int, + default=1, + help="Number of shader arrays per complex", +) +parser.add_argument( + "--num-gpu-complexes", type=int, default=1, help="Number of GPU complexes" +) +parser.add_argument( + "--wf-size", type=int, default=64, help="Wavefront size(in workitems)" +) +parser.add_argument( + "--sp-bypass-path-length", + type=int, + default=4, + help="Number of stages of bypass path in vector ALU for " + "Single Precision ops", +) +parser.add_argument( + "--dp-bypass-path-length", + type=int, + default=4, + help="Number of stages of bypass path in vector ALU for " + "Double Precision ops", +) # issue period per SIMD unit: number of cycles before issuing another vector parser.add_argument( - "--issue-period", type=int, default=4, - help="Number of cycles per vector instruction issue period") -parser.add_argument("--glbmem-wr-bus-width", type=int, default=32, - help="VGPR to Coalescer (Global Memory) data bus width " - "in bytes") -parser.add_argument("--glbmem-rd-bus-width", type=int, default=32, - help="Coalescer to VGPR (Global Memory) data bus width in " - "bytes") -# Currently we only support 1 local memory pipe -parser.add_argument("--shr-mem-pipes-per-cu", type=int, default=1, - help="Number of Shared Memory pipelines per CU") -# Currently we only support 1 global memory pipe -parser.add_argument("--glb-mem-pipes-per-cu", type=int, default=1, - help="Number of Global Memory pipelines per CU") -parser.add_argument("--wfs-per-simd", type=int, default=10, help="Number of " - "WF slots per SIMD") - -parser.add_argument("--registerManagerPolicy", type=str, default="static", - help="Register manager policy") -parser.add_argument("--vreg-file-size", type=int, default=2048, - help="number of physical vector registers per SIMD") -parser.add_argument("--vreg-min-alloc", type=int, default=4, - help="Minimum number of registers that can be 
allocated " - "from the VRF. The total number of registers will be " - "aligned to this value.") - -parser.add_argument("--sreg-file-size", type=int, default=2048, - help="number of physical vector registers per SIMD") -parser.add_argument("--sreg-min-alloc", type=int, default=4, - help="Minimum number of registers that can be allocated " - "from the SRF. The total number of registers will be " - "aligned to this value.") - -parser.add_argument("--bw-scalor", type=int, default=0, - help="bandwidth scalor for scalability analysis") -parser.add_argument("--CPUClock", type=str, default="2GHz", - help="CPU clock") -parser.add_argument("--gpu-clock", type=str, default="1GHz", - help="GPU clock") -parser.add_argument("--cpu-voltage", action="store", type=str, - default='1.0V', - help="""CPU voltage domain""") -parser.add_argument("--gpu-voltage", action="store", type=str, - default='1.0V', - help="""CPU voltage domain""") -parser.add_argument("--CUExecPolicy", type=str, default="OLDEST-FIRST", - help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)") -parser.add_argument("--SegFaultDebug", action="store_true", - help="checks for GPU seg fault before TLB access") -parser.add_argument("--FunctionalTLB", action="store_true", - help="Assumes TLB has no latency") -parser.add_argument("--LocalMemBarrier", action="store_true", - help="Barrier does not wait for writethroughs to complete") + "--issue-period", + type=int, + default=4, + help="Number of cycles per vector instruction issue period", +) parser.add_argument( - "--countPages", action="store_true", - help="Count Page Accesses and output in per-CU output files") -parser.add_argument("--TLB-prefetch", type=int, help="prefetch depth for" - "TLBs") -parser.add_argument("--pf-type", type=str, help="type of prefetch: " - "PF_CU, PF_WF, PF_PHASE, PF_STRIDE") -parser.add_argument("--pf-stride", type=int, help="set prefetch stride") -parser.add_argument("--numLdsBanks", type=int, default=32, - help="number of physical banks per LDS 
module") -parser.add_argument("--ldsBankConflictPenalty", type=int, default=1, - help="number of cycles per LDS bank conflict") -parser.add_argument("--lds-size", type=int, default=65536, - help="Size of the LDS in bytes") -parser.add_argument('--fast-forward-pseudo-op', action='store_true', - help='fast forward using kvm until the m5_switchcpu' - ' pseudo-op is encountered, then switch cpus. subsequent' - ' m5_switchcpu pseudo-ops will toggle back and forth') -parser.add_argument("--num-hw-queues", type=int, default=10, - help="number of hw queues in packet processor") -parser.add_argument("--reg-alloc-policy", type=str, default="dynamic", - help="register allocation policy (simple/dynamic)") + "--glbmem-wr-bus-width", + type=int, + default=32, + help="VGPR to Coalescer (Global Memory) data bus width " "in bytes", +) +parser.add_argument( + "--glbmem-rd-bus-width", + type=int, + default=32, + help="Coalescer to VGPR (Global Memory) data bus width in " "bytes", +) +# Currently we only support 1 local memory pipe +parser.add_argument( + "--shr-mem-pipes-per-cu", + type=int, + default=1, + help="Number of Shared Memory pipelines per CU", +) +# Currently we only support 1 global memory pipe +parser.add_argument( + "--glb-mem-pipes-per-cu", + type=int, + default=1, + help="Number of Global Memory pipelines per CU", +) +parser.add_argument( + "--wfs-per-simd", + type=int, + default=10, + help="Number of " "WF slots per SIMD", +) -parser.add_argument("--dgpu", action="store_true", default=False, - help="Configure the system as a dGPU instead of an APU. " - "The dGPU config has its own local memory pool and is not " - "coherent with the host through hardware. 
Data is " - "transfered from host to device memory using runtime calls " - "that copy data over a PCIe-like IO bus.") +parser.add_argument( + "--registerManagerPolicy", + type=str, + default="static", + help="Register manager policy", +) +parser.add_argument( + "--vreg-file-size", + type=int, + default=2048, + help="number of physical vector registers per SIMD", +) +parser.add_argument( + "--vreg-min-alloc", + type=int, + default=4, + help="Minimum number of registers that can be allocated " + "from the VRF. The total number of registers will be " + "aligned to this value.", +) + +parser.add_argument( + "--sreg-file-size", + type=int, + default=2048, + help="number of physical vector registers per SIMD", +) +parser.add_argument( + "--sreg-min-alloc", + type=int, + default=4, + help="Minimum number of registers that can be allocated " + "from the SRF. The total number of registers will be " + "aligned to this value.", +) + +parser.add_argument( + "--bw-scalor", + type=int, + default=0, + help="bandwidth scalor for scalability analysis", +) +parser.add_argument("--CPUClock", type=str, default="2GHz", help="CPU clock") +parser.add_argument("--gpu-clock", type=str, default="1GHz", help="GPU clock") +parser.add_argument( + "--cpu-voltage", + action="store", + type=str, + default="1.0V", + help="""CPU voltage domain""", +) +parser.add_argument( + "--gpu-voltage", + action="store", + type=str, + default="1.0V", + help="""CPU voltage domain""", +) +parser.add_argument( + "--CUExecPolicy", + type=str, + default="OLDEST-FIRST", + help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)", +) +parser.add_argument( + "--SegFaultDebug", + action="store_true", + help="checks for GPU seg fault before TLB access", +) +parser.add_argument( + "--FunctionalTLB", action="store_true", help="Assumes TLB has no latency" +) +parser.add_argument( + "--LocalMemBarrier", + action="store_true", + help="Barrier does not wait for writethroughs to complete", +) +parser.add_argument( + "--countPages", + 
action="store_true", + help="Count Page Accesses and output in per-CU output files", +) +parser.add_argument( + "--max-cu-tokens", + type=int, + default=4, + help="Number of coalescer tokens per CU", +) +parser.add_argument( + "--vrf_lm_bus_latency", + type=int, + default=1, + help="Latency while accessing shared memory", +) +parser.add_argument( + "--mem-req-latency", + type=int, + default=50, + help="Latency for requests from the cu to ruby.", +) +parser.add_argument( + "--mem-resp-latency", + type=int, + default=50, + help="Latency for responses from ruby to the cu.", +) +parser.add_argument( + "--TLB-prefetch", type=int, help="prefetch depth for" "TLBs" +) +parser.add_argument( + "--pf-type", + type=str, + help="type of prefetch: " "PF_CU, PF_WF, PF_PHASE, PF_STRIDE", +) +parser.add_argument("--pf-stride", type=int, help="set prefetch stride") +parser.add_argument( + "--numLdsBanks", + type=int, + default=32, + help="number of physical banks per LDS module", +) +parser.add_argument( + "--ldsBankConflictPenalty", + type=int, + default=1, + help="number of cycles per LDS bank conflict", +) +parser.add_argument( + "--lds-size", type=int, default=65536, help="Size of the LDS in bytes" +) +parser.add_argument( + "--fast-forward-pseudo-op", + action="store_true", + help="fast forward using kvm until the m5_switchcpu" + " pseudo-op is encountered, then switch cpus. subsequent" + " m5_switchcpu pseudo-ops will toggle back and forth", +) +parser.add_argument( + "--num-hw-queues", + type=int, + default=10, + help="number of hw queues in packet processor", +) +parser.add_argument( + "--reg-alloc-policy", + type=str, + default="dynamic", + help="register allocation policy (simple/dynamic)", +) + +parser.add_argument( + "--dgpu", + action="store_true", + default=False, + help="Configure the system as a dGPU instead of an APU. " + "The dGPU config has its own local memory pool and is not " + "coherent with the host through hardware. 
Data is " + "transfered from host to device memory using runtime calls " + "that copy data over a PCIe-like IO bus.", +) # Mtype option -#-- 1 1 1 C_RW_S (Cached-ReadWrite-Shared) -#-- 1 1 0 C_RW_US (Cached-ReadWrite-Unshared) -#-- 1 0 1 C_RO_S (Cached-ReadOnly-Shared) -#-- 1 0 0 C_RO_US (Cached-ReadOnly-Unshared) -#-- 0 1 x UC_L2 (Uncached_GL2) -#-- 0 0 x UC_All (Uncached_All_Load) +# -- 1 1 1 C_RW_S (Cached-ReadWrite-Shared) +# -- 1 1 0 C_RW_US (Cached-ReadWrite-Unshared) +# -- 1 0 1 C_RO_S (Cached-ReadOnly-Shared) +# -- 1 0 0 C_RO_US (Cached-ReadOnly-Unshared) +# -- 0 1 x UC_L2 (Uncached_GL2) +# -- 0 0 x UC_All (Uncached_All_Load) # default value: 5/C_RO_S (only allow caching in GL2 for read. Shared) -parser.add_argument("--m-type", type=int, default=5, - help="Default Mtype for GPU memory accesses. This is the " - "value used for all memory accesses on an APU and is the " - "default mode for dGPU unless explicitly overwritten by " - "the driver on a per-page basis. Valid values are " - "between 0-7") +parser.add_argument( + "--m-type", + type=int, + default=5, + help="Default Mtype for GPU memory accesses. This is the " + "value used for all memory accesses on an APU and is the " + "default mode for dGPU unless explicitly overwritten by " + "the driver on a per-page basis. 
Valid values are " + "between 0-7", +) -parser.add_argument("--gfx-version", type=str, default='gfx801', - choices=GfxVersion.vals, - help="Gfx version for gpu" - "Note: gfx902 is not fully supported by ROCm") +parser.add_argument( + "--gfx-version", + type=str, + default="gfx801", + choices=GfxVersion.vals, + help="Gfx version for gpu" "Note: gfx902 is not fully supported by ROCm", +) Ruby.define_options(parser) @@ -206,21 +372,20 @@ if args.benchmark_root: benchmark_path = [args.benchmark_root] else: # Set default benchmark search path to current dir - benchmark_path = ['.'] + benchmark_path = ["."] ########################## Sanity Check ######################## # Currently the gpu model requires ruby -if buildEnv['PROTOCOL'] == 'None': +if buildEnv["PROTOCOL"] == "None": fatal("GPU model requires ruby") # Currently the gpu model requires only timing or detailed CPU -if not (args.cpu_type == "TimingSimpleCPU" or - args.cpu_type == "DerivO3CPU"): +if not (args.cpu_type == "TimingSimpleCPU" or args.cpu_type == "DerivO3CPU"): fatal("GPU model requires TimingSimpleCPU or DerivO3CPU") # This file can support multiple compute units -assert(args.num_compute_units >= 1) +assert args.num_compute_units >= 1 # Currently, the sqc (I-Cache of GPU) is shared by # multiple compute units(CUs). 
The protocol works just fine @@ -229,20 +394,28 @@ assert(args.num_compute_units >= 1) # sharing sqc is the common usage) n_cu = args.num_compute_units num_sqc = int(math.ceil(float(n_cu) / args.cu_per_sqc)) -args.num_sqc = num_sqc # pass this to Ruby +args.num_sqc = num_sqc # pass this to Ruby num_scalar_cache = int(math.ceil(float(n_cu) / args.cu_per_scalar_cache)) args.num_scalar_cache = num_scalar_cache -print('Num SQC = ', num_sqc, 'Num scalar caches = ', num_scalar_cache, - 'Num CU = ', n_cu) +print( + "Num SQC = ", + num_sqc, + "Num scalar caches = ", + num_scalar_cache, + "Num CU = ", + n_cu, +) ########################## Creating the GPU system ######################## # shader is the GPU -shader = Shader(n_wf = args.wfs_per_simd, - clk_domain = SrcClockDomain( - clock = args.gpu_clock, - voltage_domain = VoltageDomain( - voltage = args.gpu_voltage))) +shader = Shader( + n_wf=args.wfs_per_simd, + clk_domain=SrcClockDomain( + clock=args.gpu_clock, + voltage_domain=VoltageDomain(voltage=args.gpu_voltage), + ), +) # VIPER GPU protocol implements release consistency at GPU side. So, # we make their writes visible to the global memory and should read @@ -252,7 +425,7 @@ shader = Shader(n_wf = args.wfs_per_simd, # means pipeline initiates a acquire/release operation at kernel launch/end. # VIPER protocol is write-through based, and thus only impl_kern_launch_acq # needs to set. 
-if (buildEnv['PROTOCOL'] == 'GPU_VIPER'): +if buildEnv["PROTOCOL"] == "GPU_VIPER": shader.impl_kern_launch_acq = True shader.impl_kern_end_rel = False else: @@ -267,33 +440,36 @@ if args.TLB_config == "perLane": # List of compute units; one GPU can have multiple compute units compute_units = [] for i in range(n_cu): - compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane, - num_SIMDs = args.simds_per_cu, - wf_size = args.wf_size, - spbypass_pipe_length = \ - args.sp_bypass_path_length, - dpbypass_pipe_length = \ - args.dp_bypass_path_length, - issue_period = args.issue_period, - coalescer_to_vrf_bus_width = \ - args.glbmem_rd_bus_width, - vrf_to_coalescer_bus_width = \ - args.glbmem_wr_bus_width, - num_global_mem_pipes = \ - args.glb_mem_pipes_per_cu, - num_shared_mem_pipes = \ - args.shr_mem_pipes_per_cu, - n_wf = args.wfs_per_simd, - execPolicy = args.CUExecPolicy, - debugSegFault = args.SegFaultDebug, - functionalTLB = args.FunctionalTLB, - localMemBarrier = args.LocalMemBarrier, - countPages = args.countPages, - localDataStore = \ - LdsState(banks = args.numLdsBanks, - bankConflictPenalty = \ - args.ldsBankConflictPenalty, - size = args.lds_size))) + compute_units.append( + ComputeUnit( + cu_id=i, + perLaneTLB=per_lane, + num_SIMDs=args.simds_per_cu, + wf_size=args.wf_size, + spbypass_pipe_length=args.sp_bypass_path_length, + dpbypass_pipe_length=args.dp_bypass_path_length, + issue_period=args.issue_period, + coalescer_to_vrf_bus_width=args.glbmem_rd_bus_width, + vrf_to_coalescer_bus_width=args.glbmem_wr_bus_width, + num_global_mem_pipes=args.glb_mem_pipes_per_cu, + num_shared_mem_pipes=args.shr_mem_pipes_per_cu, + n_wf=args.wfs_per_simd, + execPolicy=args.CUExecPolicy, + debugSegFault=args.SegFaultDebug, + functionalTLB=args.FunctionalTLB, + localMemBarrier=args.LocalMemBarrier, + countPages=args.countPages, + max_cu_tokens=args.max_cu_tokens, + vrf_lm_bus_latency=args.vrf_lm_bus_latency, + mem_req_latency=args.mem_req_latency, + 
mem_resp_latency=args.mem_resp_latency, + localDataStore=LdsState( + banks=args.numLdsBanks, + bankConflictPenalty=args.ldsBankConflictPenalty, + size=args.lds_size, + ), + ) + ) wavefronts = [] vrfs = [] vrf_pool_mgrs = [] @@ -301,48 +477,65 @@ for i in range(n_cu): srf_pool_mgrs = [] for j in range(args.simds_per_cu): for k in range(shader.n_wf): - wavefronts.append(Wavefront(simdId = j, wf_slot_id = k, - wf_size = args.wf_size)) + wavefronts.append( + Wavefront(simdId=j, wf_slot_id=k, wf_size=args.wf_size) + ) if args.reg_alloc_policy == "simple": - vrf_pool_mgrs.append(SimplePoolManager(pool_size = \ - args.vreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) - srf_pool_mgrs.append(SimplePoolManager(pool_size = \ - args.sreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) + vrf_pool_mgrs.append( + SimplePoolManager( + pool_size=args.vreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) + srf_pool_mgrs.append( + SimplePoolManager( + pool_size=args.sreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) elif args.reg_alloc_policy == "dynamic": - vrf_pool_mgrs.append(DynPoolManager(pool_size = \ - args.vreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) - srf_pool_mgrs.append(DynPoolManager(pool_size = \ - args.sreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) + vrf_pool_mgrs.append( + DynPoolManager( + pool_size=args.vreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) + srf_pool_mgrs.append( + DynPoolManager( + pool_size=args.sreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) - vrfs.append(VectorRegisterFile(simd_id=j, wf_size=args.wf_size, - num_regs=args.vreg_file_size)) - srfs.append(ScalarRegisterFile(simd_id=j, wf_size=args.wf_size, - num_regs=args.sreg_file_size)) + vrfs.append( + VectorRegisterFile( + simd_id=j, wf_size=args.wf_size, num_regs=args.vreg_file_size + ) + ) + srfs.append( + ScalarRegisterFile( + simd_id=j, wf_size=args.wf_size, num_regs=args.sreg_file_size + ) + ) compute_units[-1].wavefronts = wavefronts 
compute_units[-1].vector_register_file = vrfs compute_units[-1].scalar_register_file = srfs - compute_units[-1].register_manager = \ - RegisterManager(policy=args.registerManagerPolicy, - vrf_pool_managers=vrf_pool_mgrs, - srf_pool_managers=srf_pool_mgrs) + compute_units[-1].register_manager = RegisterManager( + policy=args.registerManagerPolicy, + vrf_pool_managers=vrf_pool_mgrs, + srf_pool_managers=srf_pool_mgrs, + ) if args.TLB_prefetch: compute_units[-1].prefetch_depth = args.TLB_prefetch compute_units[-1].prefetch_prev_type = args.pf_type # attach the LDS and the CU to the bus (actually a Bridge) compute_units[-1].ldsPort = compute_units[-1].ldsBus.cpu_side_port - compute_units[-1].ldsBus.mem_side_port = \ - compute_units[-1].localDataStore.cuPort + compute_units[-1].ldsBus.mem_side_port = compute_units[ + -1 + ].localDataStore.cuPort # Attach compute units to GPU shader.CUs = compute_units @@ -361,20 +554,22 @@ cpu_list = [] CpuClass, mem_mode = Simulation.getCPUClass(args.cpu_type) if CpuClass == AtomicSimpleCPU: fatal("AtomicSimpleCPU is not supported") -if mem_mode != 'timing': +if mem_mode != "timing": fatal("Only the timing memory mode is supported") shader.timing = True if args.fast_forward and args.fast_forward_pseudo_op: - fatal("Cannot fast-forward based both on the number of instructions and" - " on pseudo-ops") + fatal( + "Cannot fast-forward based both on the number of instructions and" + " on pseudo-ops" + ) fast_forward = args.fast_forward or args.fast_forward_pseudo_op if fast_forward: FutureCpuClass, future_mem_mode = CpuClass, mem_mode CpuClass = X86KvmCPU - mem_mode = 'atomic_noncaching' + mem_mode = "atomic_noncaching" # Leave shader.timing untouched, because its value only matters at the # start of the simulation and because we require switching cpus # *before* the first kernel launch. @@ -383,11 +578,13 @@ if fast_forward: # Initial CPUs to be used during fast-forwarding. 
for i in range(args.num_cpus): - cpu = CpuClass(cpu_id = i, - clk_domain = SrcClockDomain( - clock = args.CPUClock, - voltage_domain = VoltageDomain( - voltage = args.cpu_voltage))) + cpu = CpuClass( + cpu_id=i, + clk_domain=SrcClockDomain( + clock=args.CPUClock, + voltage_domain=VoltageDomain(voltage=args.cpu_voltage), + ), + ) cpu_list.append(cpu) if args.fast_forward: @@ -400,20 +597,24 @@ else: # CPs to be used throughout the simulation. for i in range(args.num_cp): - cp = MainCpuClass(cpu_id = args.num_cpus + i, - clk_domain = SrcClockDomain( - clock = args.CPUClock, - voltage_domain = VoltageDomain( - voltage = args.cpu_voltage))) + cp = MainCpuClass( + cpu_id=args.num_cpus + i, + clk_domain=SrcClockDomain( + clock=args.CPUClock, + voltage_domain=VoltageDomain(voltage=args.cpu_voltage), + ), + ) cp_list.append(cp) # Main CPUs (to be used after fast-forwarding if fast-forwarding is specified). for i in range(args.num_cpus): - cpu = MainCpuClass(cpu_id = i, - clk_domain = SrcClockDomain( - clock = args.CPUClock, - voltage_domain = VoltageDomain( - voltage = args.cpu_voltage))) + cpu = MainCpuClass( + cpu_id=i, + clk_domain=SrcClockDomain( + clock=args.CPUClock, + voltage_domain=VoltageDomain(voltage=args.cpu_voltage), + ), + ) if fast_forward: cpu.switched_out = True future_cpu_list.append(cpu) @@ -434,21 +635,25 @@ if args.dgpu: # HSA kernel mode driver # dGPUPoolID is 0 because we only have one memory pool -gpu_driver = GPUComputeDriver(filename = "kfd", isdGPU = args.dgpu, - gfxVersion = args.gfx_version, - dGPUPoolID = 0, m_type = args.m_type) +gpu_driver = GPUComputeDriver( + filename="kfd", + isdGPU=args.dgpu, + gfxVersion=args.gfx_version, + dGPUPoolID=0, + m_type=args.m_type, +) renderDriNum = 128 -render_driver = GPURenderDriver(filename = f'dri/renderD{renderDriNum}') +render_driver = GPURenderDriver(filename=f"dri/renderD{renderDriNum}") # Creating the GPU kernel launching components: that is the HSA # packet processor (HSAPP), GPU command processor 
(CP), and the # dispatcher. -gpu_hsapp = HSAPacketProcessor(pioAddr=hsapp_gpu_map_paddr, - numHWQueues=args.num_hw_queues) +gpu_hsapp = HSAPacketProcessor( + pioAddr=hsapp_gpu_map_paddr, numHWQueues=args.num_hw_queues +) dispatcher = GPUDispatcher() -gpu_cmd_proc = GPUCommandProcessor(hsapp=gpu_hsapp, - dispatcher=dispatcher) +gpu_cmd_proc = GPUCommandProcessor(hsapp=gpu_hsapp, dispatcher=dispatcher) gpu_driver.device = gpu_cmd_proc shader.dispatcher = dispatcher shader.gpu_cmd_proc = gpu_cmd_proc @@ -465,9 +670,11 @@ def find_path(base_list, rel_path, test): return full_path fatal("%s not found in %s" % (rel_path, base_list)) + def find_file(base_list, rel_path): return find_path(base_list, rel_path, os.path.isfile) + executable = find_path(benchmark_path, args.cmd, os.path.exists) # It's common for a benchmark to be in a directory with the same # name as the executable, so we handle that automatically @@ -476,35 +683,43 @@ if os.path.isdir(executable): executable = find_file(benchmark_path, args.cmd) if args.env: - with open(args.env, 'r') as f: + with open(args.env, "r") as f: env = [line.rstrip() for line in f] else: - env = ['LD_LIBRARY_PATH=%s' % ':'.join([ - os.getenv('ROCM_PATH','/opt/rocm')+'/lib', - os.getenv('HCC_HOME','/opt/rocm/hcc')+'/lib', - os.getenv('HSA_PATH','/opt/rocm/hsa')+'/lib', - os.getenv('HIP_PATH','/opt/rocm/hip')+'/lib', - os.getenv('ROCM_PATH','/opt/rocm')+'/libhsakmt/lib', - os.getenv('ROCM_PATH','/opt/rocm')+'/miopen/lib', - os.getenv('ROCM_PATH','/opt/rocm')+'/miopengemm/lib', - os.getenv('ROCM_PATH','/opt/rocm')+'/hipblas/lib', - os.getenv('ROCM_PATH','/opt/rocm')+'/rocblas/lib', - "/usr/lib/x86_64-linux-gnu" - ]), - 'HOME=%s' % os.getenv('HOME','/'), - # Disable the VM fault handler signal creation for dGPUs also - # forces the use of DefaultSignals instead of driver-controlled - # InteruptSignals throughout the runtime. DefaultSignals poll - # on memory in the runtime, while InteruptSignals call into the - # driver. 
- "HSA_ENABLE_INTERRUPT=1", - # We don't have an SDMA hardware model, so need to fallback to - # vector copy kernels for dGPU memcopies to/from host and device. - "HSA_ENABLE_SDMA=0"] + env = [ + "LD_LIBRARY_PATH=%s" + % ":".join( + [ + os.getenv("ROCM_PATH", "/opt/rocm") + "/lib", + os.getenv("HCC_HOME", "/opt/rocm/hcc") + "/lib", + os.getenv("HSA_PATH", "/opt/rocm/hsa") + "/lib", + os.getenv("HIP_PATH", "/opt/rocm/hip") + "/lib", + os.getenv("ROCM_PATH", "/opt/rocm") + "/libhsakmt/lib", + os.getenv("ROCM_PATH", "/opt/rocm") + "/miopen/lib", + os.getenv("ROCM_PATH", "/opt/rocm") + "/miopengemm/lib", + os.getenv("ROCM_PATH", "/opt/rocm") + "/hipblas/lib", + os.getenv("ROCM_PATH", "/opt/rocm") + "/rocblas/lib", + "/usr/lib/x86_64-linux-gnu", + ] + ), + "HOME=%s" % os.getenv("HOME", "/"), + # Disable the VM fault handler signal creation for dGPUs also + # forces the use of DefaultSignals instead of driver-controlled + # InteruptSignals throughout the runtime. DefaultSignals poll + # on memory in the runtime, while InteruptSignals call into the + # driver. + "HSA_ENABLE_INTERRUPT=1", + # We don't have an SDMA hardware model, so need to fallback to + # vector copy kernels for dGPU memcopies to/from host and device. 
+ "HSA_ENABLE_SDMA=0", + ] -process = Process(executable = executable, cmd = [args.cmd] - + args.options.split(), - drivers = [gpu_driver, render_driver], env = env) +process = Process( + executable=executable, + cmd=[args.cmd] + args.options.split(), + drivers=[gpu_driver, render_driver], + env=env, +) for cpu in cpu_list: cpu.createThreads() @@ -521,30 +736,39 @@ if fast_forward: ########################## Create the overall system ######################## # List of CPUs that must be switched when moving between KVM and simulation if fast_forward: - switch_cpu_list = \ - [(cpu_list[i], future_cpu_list[i]) for i in range(args.num_cpus)] + switch_cpu_list = [ + (cpu_list[i], future_cpu_list[i]) for i in range(args.num_cpus) + ] + +# Other CPU strings cause bad addresses in ROCm. Revert back to M5 Simulator. +for (i, cpu) in enumerate(cpu_list): + for j in range(len(cpu)): + cpu.isa[j].vendor_string = "M5 Simulator" # Full list of processing cores in the system. cpu_list = cpu_list + [shader] + cp_list # creating the overall system # notice the cpu list is explicitly added as a parameter to System -system = System(cpu = cpu_list, - mem_ranges = [AddrRange(args.mem_size)], - cache_line_size = args.cacheline_size, - mem_mode = mem_mode, - workload = SEWorkload.init_compatible(executable)) +system = System( + cpu=cpu_list, + mem_ranges=[AddrRange(args.mem_size)], + cache_line_size=args.cacheline_size, + mem_mode=mem_mode, + workload=SEWorkload.init_compatible(executable), +) if fast_forward: system.future_cpu = future_cpu_list -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) if fast_forward: - have_kvm_support = 'BaseKvmCPU' in globals() - if have_kvm_support and buildEnv['TARGET_ISA'] == 
"x86": + have_kvm_support = "BaseKvmCPU" in globals() + if have_kvm_support and get_runtime_isa() == ISA.X86: system.vm = KvmVM() - system.m5ops_base = 0xffff0000 + system.m5ops_base = 0xFFFF0000 for i in range(len(host_cpu.workload)): host_cpu.workload[i].useArchPT = True host_cpu.workload[i].kvmInSE = True @@ -555,17 +779,19 @@ if fast_forward: GPUTLBConfig.config_tlb_hierarchy(args, system, shader_idx) # create Ruby system -system.piobus = IOXBar(width=32, response_latency=0, - frontend_latency=0, forward_latency=0) +system.piobus = IOXBar( + width=32, response_latency=0, frontend_latency=0, forward_latency=0 +) dma_list = [gpu_hsapp, gpu_cmd_proc] Ruby.create_system(args, None, system, None, dma_list, None) -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) gpu_cmd_proc.pio = system.piobus.mem_side_ports gpu_hsapp.pio = system.piobus.mem_side_ports for i, dma_device in enumerate(dma_list): - exec('system.dma_cntrl%d.clk_domain = system.ruby.clk_domain' % i) + exec("system.dma_cntrl%d.clk_domain = system.ruby.clk_domain" % i) # attach the CPU ports to Ruby for i in range(args.num_cpus): @@ -579,15 +805,18 @@ for i in range(args.num_cpus): system.cpu[i].dcache_port = ruby_port.in_ports ruby_port.mem_request_port = system.piobus.cpu_side_ports - if buildEnv['TARGET_ISA'] == "x86": + if get_runtime_isa() == ISA.X86: system.cpu[i].interrupts[0].pio = system.piobus.mem_side_ports - system.cpu[i].interrupts[0].int_requestor = \ - system.piobus.cpu_side_ports - system.cpu[i].interrupts[0].int_responder = \ - system.piobus.mem_side_ports + system.cpu[i].interrupts[ + 0 + ].int_requestor = system.piobus.cpu_side_ports + system.cpu[i].interrupts[ + 0 + ].int_responder = system.piobus.mem_side_ports if fast_forward: system.cpu[i].mmu.connectWalkerPorts( - ruby_port.in_ports, ruby_port.in_ports) + 
ruby_port.in_ports, ruby_port.in_ports + ) # attach CU ports to Ruby # Because of the peculiarities of the CP core, you may have 1 CPU but 2 @@ -596,9 +825,12 @@ for i in range(args.num_cpus): # the index as below, but note that this assumes there is one sequencer # per compute unit and one sequencer per SQC for the math to work out # correctly. -gpu_port_idx = len(system.ruby._cpu_ports) \ - - args.num_compute_units - args.num_sqc \ - - args.num_scalar_cache +gpu_port_idx = ( + len(system.ruby._cpu_ports) + - args.num_compute_units + - args.num_sqc + - args.num_scalar_cache +) gpu_port_idx = gpu_port_idx - args.num_cp * 2 # Connect token ports. For this we need to search through the list of all @@ -607,8 +839,9 @@ gpu_port_idx = gpu_port_idx - args.num_cp * 2 token_port_idx = 0 for i in range(len(system.ruby._cpu_ports)): if isinstance(system.ruby._cpu_ports[i], VIPERCoalescer): - system.cpu[shader_idx].CUs[token_port_idx].gmTokenPort = \ - system.ruby._cpu_ports[i].gmTokenPort + system.cpu[shader_idx].CUs[ + token_port_idx + ].gmTokenPort = system.ruby._cpu_ports[i].gmTokenPort token_port_idx += 1 wavefront_size = args.wf_size @@ -616,38 +849,45 @@ for i in range(n_cu): # The pipeline issues wavefront_size number of uncoalesced requests # in one GPU issue cycle. Hence wavefront_size mem ports. 
for j in range(wavefront_size): - system.cpu[shader_idx].CUs[i].memory_port[j] = \ - system.ruby._cpu_ports[gpu_port_idx].in_ports[j] + system.cpu[shader_idx].CUs[i].memory_port[j] = system.ruby._cpu_ports[ + gpu_port_idx + ].in_ports[j] gpu_port_idx += 1 for i in range(n_cu): if i > 0 and not i % args.cu_per_sqc: print("incrementing idx on ", i) gpu_port_idx += 1 - system.cpu[shader_idx].CUs[i].sqc_port = \ - system.ruby._cpu_ports[gpu_port_idx].in_ports + system.cpu[shader_idx].CUs[i].sqc_port = system.ruby._cpu_ports[ + gpu_port_idx + ].in_ports gpu_port_idx = gpu_port_idx + 1 for i in range(n_cu): if i > 0 and not i % args.cu_per_scalar_cache: print("incrementing idx on ", i) gpu_port_idx += 1 - system.cpu[shader_idx].CUs[i].scalar_port = \ - system.ruby._cpu_ports[gpu_port_idx].in_ports + system.cpu[shader_idx].CUs[i].scalar_port = system.ruby._cpu_ports[ + gpu_port_idx + ].in_ports gpu_port_idx = gpu_port_idx + 1 # attach CP ports to Ruby for i in range(args.num_cp): system.cpu[cp_idx].createInterruptController() - system.cpu[cp_idx].dcache_port = \ - system.ruby._cpu_ports[gpu_port_idx + i * 2].in_ports - system.cpu[cp_idx].icache_port = \ - system.ruby._cpu_ports[gpu_port_idx + i * 2 + 1].in_ports + system.cpu[cp_idx].dcache_port = system.ruby._cpu_ports[ + gpu_port_idx + i * 2 + ].in_ports + system.cpu[cp_idx].icache_port = system.ruby._cpu_ports[ + gpu_port_idx + i * 2 + 1 + ].in_ports system.cpu[cp_idx].interrupts[0].pio = system.piobus.mem_side_ports - system.cpu[cp_idx].interrupts[0].int_requestor = \ - system.piobus.cpu_side_ports - system.cpu[cp_idx].interrupts[0].int_responder = \ - system.piobus.mem_side_ports + system.cpu[cp_idx].interrupts[ + 0 + ].int_requestor = system.piobus.cpu_side_ports + system.cpu[cp_idx].interrupts[ + 0 + ].int_responder = system.piobus.mem_side_ports cp_idx = cp_idx + 1 ################# Connect the CPU and GPU via GPU Dispatcher ################## @@ -665,15 +905,17 @@ else: ########################## Start simulation 
######################## -redirect_paths = [RedirectPath(app_path = "/proc", - host_paths = - ["%s/fs/proc" % m5.options.outdir]), - RedirectPath(app_path = "/sys", - host_paths = - ["%s/fs/sys" % m5.options.outdir]), - RedirectPath(app_path = "/tmp", - host_paths = - ["%s/fs/tmp" % m5.options.outdir])] +redirect_paths = [ + RedirectPath( + app_path="/proc", host_paths=["%s/fs/proc" % m5.options.outdir] + ), + RedirectPath( + app_path="/sys", host_paths=["%s/fs/sys" % m5.options.outdir] + ), + RedirectPath( + app_path="/tmp", host_paths=["%s/fs/tmp" % m5.options.outdir] + ), +] system.redirect_paths = redirect_paths @@ -682,18 +924,22 @@ root = Root(system=system, full_system=False) # Create the /sys/devices filesystem for the simulator so that the HSA Runtime # knows what type of GPU hardware we are simulating if args.dgpu: - assert (args.gfx_version in ['gfx803', 'gfx900']),\ - "Incorrect gfx version for dGPU" - if args.gfx_version == 'gfx803': + assert args.gfx_version in [ + "gfx803", + "gfx900", + ], "Incorrect gfx version for dGPU" + if args.gfx_version == "gfx803": hsaTopology.createFijiTopology(args) - elif args.gfx_version == 'gfx900': + elif args.gfx_version == "gfx900": hsaTopology.createVegaTopology(args) else: - assert (args.gfx_version in ['gfx801', 'gfx902']),\ - "Incorrect gfx version for APU" + assert args.gfx_version in [ + "gfx801", + "gfx902", + ], "Incorrect gfx version for APU" hsaTopology.createCarrizoTopology(args) -m5.ticks.setGlobalFrequency('1THz') +m5.ticks.setGlobalFrequency("1THz") if args.abs_max_tick: maxtick = args.abs_max_tick else: @@ -703,8 +949,7 @@ else: Simulation.setWorkCountOptions(system, args) # Checkpointing is not supported by APU model -if (args.checkpoint_dir != None or - args.checkpoint_restore != None): +if args.checkpoint_dir != None or args.checkpoint_restore != None: fatal("Checkpointing not supported by apu model") checkpoint_dir = None @@ -739,6 +984,6 @@ elif args.fast_forward_pseudo_op: exit_event = 
m5.simulate(maxtick - m5.curTick()) print("Ticks:", m5.curTick()) -print('Exiting because ', exit_event.getCause()) +print("Exiting because ", exit_event.getCause()) sys.exit(exit_event.getCode()) diff --git a/configs/example/arm/baremetal.py b/configs/example/arm/baremetal.py index 44e3fd1799..9eeba37ff7 100644 --- a/configs/example/arm/baremetal.py +++ b/configs/example/arm/baremetal.py @@ -46,7 +46,7 @@ from m5.objects import * from m5.options import * import argparse -m5.util.addToPath('../..') +m5.util.addToPath("../..") from common import SysPaths from common import MemConfig @@ -60,25 +60,21 @@ import workloads # l1_icache_class, l1_dcache_class, walk_cache_class, l2_Cache_class). Any of # the cache class may be 'None' if the particular cache is not present. cpu_types = { - - "atomic" : ( AtomicSimpleCPU, None, None, None), - "minor" : (MinorCPU, - devices.L1I, devices.L1D, - devices.L2), - "hpi" : ( HPI.HPI, - HPI.HPI_ICache, HPI.HPI_DCache, - HPI.HPI_L2) + "atomic": (AtomicSimpleCPU, None, None, None), + "minor": (MinorCPU, devices.L1I, devices.L1D, devices.L2), + "hpi": (HPI.HPI, HPI.HPI_ICache, HPI.HPI_DCache, HPI.HPI_L2), } + def create_cow_image(name): """Helper function to create a Copy-on-Write disk image""" image = CowDiskImage() image.child.image_file = name - return image; + return image def create(args): - ''' Create and configure the system object. 
''' + """Create and configure the system object.""" if args.readfile and not os.path.isfile(args.readfile): print("Error: Bootscript %s does not exist" % args.readfile) @@ -93,11 +89,13 @@ def create(args): platform = ObjectList.platform_list.get(args.machine_type) - system = devices.SimpleSystem(want_caches, - args.mem_size, - platform=platform(), - mem_mode=mem_mode, - readfile=args.readfile) + system = devices.SimpleSystem( + want_caches, + args.mem_size, + platform=platform(), + mem_mode=mem_mode, + readfile=args.readfile, + ) MemConfig.config_mem(args, system) @@ -107,7 +105,7 @@ def create(args): stdout=args.semi_stdout, stderr=args.semi_stderr, files_root_dir=args.semi_path, - cmd_line = " ".join([ object_file ] + args.args) + cmd_line=" ".join([object_file] + args.args), ) if args.disk_image: @@ -116,17 +114,17 @@ def create(args): # functionality to avoid writing changes to the stored copy of # the disk image. system.realview.vio[0].vio = VirtIOBlock( - image=create_cow_image(args.disk_image)) + image=create_cow_image(args.disk_image) + ) # Wire up the system's memory system system.connect() # Add CPU clusters to the system system.cpu_cluster = [ - devices.CpuCluster(system, - args.num_cores, - args.cpu_freq, "1.0V", - *cpu_types[args.cpu]), + devices.CpuCluster( + system, args.num_cores, args.cpu_freq, "1.0V", *cpu_types[args.cpu] + ) ] # Create a cache hierarchy for the cluster. 
We are assuming that @@ -143,11 +141,11 @@ def create(args): system.highest_el_is_64 = True workload_class = workloads.workload_list.get(args.workload) - system.workload = workload_class( - object_file, system) + system.workload = workload_class(object_file, system) return system + def run(args): cptdir = m5.options.outdir if args.checkpoint: @@ -171,67 +169,118 @@ def run(args): def main(): parser = argparse.ArgumentParser(epilog=__doc__) - parser.add_argument("--kernel", type=str, - default=None, - help="Binary to run") - parser.add_argument("--workload", type=str, - default="ArmBaremetal", - choices=workloads.workload_list.get_names(), - help="Workload type") - parser.add_argument("--disk-image", type=str, - default=None, - help="Disk to instantiate") - parser.add_argument("--readfile", type=str, default="", - help = "File to return with the m5 readfile command") - parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()), - default="atomic", - help="CPU model to use") + parser.add_argument( + "--kernel", type=str, default=None, help="Binary to run" + ) + parser.add_argument( + "--workload", + type=str, + default="ArmBaremetal", + choices=workloads.workload_list.get_names(), + help="Workload type", + ) + parser.add_argument( + "--disk-image", type=str, default=None, help="Disk to instantiate" + ) + parser.add_argument( + "--readfile", + type=str, + default="", + help="File to return with the m5 readfile command", + ) + parser.add_argument( + "--cpu", + type=str, + choices=list(cpu_types.keys()), + default="atomic", + help="CPU model to use", + ) parser.add_argument("--cpu-freq", type=str, default="4GHz") - parser.add_argument("--num-cores", type=int, default=1, - help="Number of CPU cores") - parser.add_argument("--machine-type", type=str, - choices=ObjectList.platform_list.get_names(), - default="VExpress_GEM5_V2", - help="Hardware platform class") - parser.add_argument("--mem-type", default="DDR3_1600_8x8", - 
choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") - parser.add_argument("--mem-channels", type=int, default=1, - help = "number of memory channels") - parser.add_argument("--mem-ranks", type=int, default=None, - help = "number of memory ranks per channel") - parser.add_argument("--mem-size", action="store", type=str, - default="2GB", - help="Specify the physical memory size") + parser.add_argument( + "--num-cores", type=int, default=1, help="Number of CPU cores" + ) + parser.add_argument( + "--machine-type", + type=str, + choices=ObjectList.platform_list.get_names(), + default="VExpress_GEM5_V2", + help="Hardware platform class", + ) + parser.add_argument( + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", + ) + parser.add_argument( + "--mem-channels", type=int, default=1, help="number of memory channels" + ) + parser.add_argument( + "--mem-ranks", + type=int, + default=None, + help="number of memory ranks per channel", + ) + parser.add_argument( + "--mem-size", + action="store", + type=str, + default="2GB", + help="Specify the physical memory size", + ) parser.add_argument("--checkpoint", action="store_true") parser.add_argument("--restore", type=str, default=None) - parser.add_argument("--dtb-gen", action="store_true", - help="Doesn't run simulation, it generates a DTB only") - parser.add_argument("--semi-enable", action="store_true", - help="Enable semihosting support") - parser.add_argument("--semi-stdin", type=str, default="stdin", - help="Standard input for semihosting " \ - "(default: gem5's stdin)") - parser.add_argument("--semi-stdout", type=str, default="stdout", - help="Standard output for semihosting " \ - "(default: gem5's stdout)") - parser.add_argument("--semi-stderr", type=str, default="stderr", - help="Standard error for semihosting " \ - "(default: gem5's stderr)") - parser.add_argument('--semi-path', type=str, - default="", - help=('Search path for files 
to be loaded through ' - 'Arm Semihosting')) - parser.add_argument("args", default=[], nargs="*", - help="Semihosting arguments to pass to benchmark") - parser.add_argument("-P", "--param", action="append", default=[], + parser.add_argument( + "--dtb-gen", + action="store_true", + help="Doesn't run simulation, it generates a DTB only", + ) + parser.add_argument( + "--semi-enable", action="store_true", help="Enable semihosting support" + ) + parser.add_argument( + "--semi-stdin", + type=str, + default="stdin", + help="Standard input for semihosting " "(default: gem5's stdin)", + ) + parser.add_argument( + "--semi-stdout", + type=str, + default="stdout", + help="Standard output for semihosting " "(default: gem5's stdout)", + ) + parser.add_argument( + "--semi-stderr", + type=str, + default="stderr", + help="Standard error for semihosting " "(default: gem5's stderr)", + ) + parser.add_argument( + "--semi-path", + type=str, + default="", + help=("Search path for files to be loaded through " "Arm Semihosting"), + ) + parser.add_argument( + "args", + default=[], + nargs="*", + help="Semihosting arguments to pass to benchmark", + ) + parser.add_argument( + "-P", + "--param", + action="append", + default=[], help="Set a SimObject parameter relative to the root node. " - "An extended Python multi range slicing syntax can be used " - "for arrays. For example: " - "'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' " - "sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 " - "Direct parameters of the root object are not accessible, " - "only parameters of its children.") + "An extended Python multi range slicing syntax can be used " + "for arrays. 
For example: " + "'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' " + "sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 " + "Direct parameters of the root object are not accessible, " + "only parameters of its children.", + ) args = parser.parse_args() @@ -247,9 +296,10 @@ def main(): if args.dtb_gen: # No run, autogenerate DTB and exit - root.system.generateDtb(os.path.join(m5.options.outdir, 'system.dtb')) + root.system.generateDtb(os.path.join(m5.options.outdir, "system.dtb")) else: run(args) + if __name__ == "__m5_main__": main() diff --git a/configs/example/arm/devices.py b/configs/example/arm/devices.py index a488ab3755..c6560d74dd 100644 --- a/configs/example/arm/devices.py +++ b/configs/example/arm/devices.py @@ -37,20 +37,22 @@ import m5 from m5.objects import * -m5.util.addToPath('../../') + +m5.util.addToPath("../../") from common.Caches import * from common import ObjectList have_kvm = "ArmV8KvmCPU" in ObjectList.cpu_list.get_names() have_fastmodel = "FastModelCortexA76" in ObjectList.cpu_list.get_names() + class L1I(L1_ICache): tag_latency = 1 data_latency = 1 response_latency = 1 mshrs = 4 tgts_per_mshr = 8 - size = '48kB' + size = "48kB" assoc = 3 @@ -60,7 +62,7 @@ class L1D(L1_DCache): response_latency = 1 mshrs = 16 tgts_per_mshr = 16 - size = '32kB' + size = "32kB" assoc = 2 write_buffers = 16 @@ -71,21 +73,21 @@ class L2(L2Cache): response_latency = 5 mshrs = 32 tgts_per_mshr = 8 - size = '1MB' + size = "1MB" assoc = 16 write_buffers = 8 - clusivity='mostly_excl' + clusivity = "mostly_excl" class L3(Cache): - size = '16MB' + size = "16MB" assoc = 16 tag_latency = 20 data_latency = 20 response_latency = 20 mshrs = 20 tgts_per_mshr = 12 - clusivity='mostly_excl' + clusivity = "mostly_excl" class MemBus(SystemXBar): @@ -94,8 +96,17 @@ class MemBus(SystemXBar): class CpuCluster(SubSystem): - def __init__(self, system, num_cpus, cpu_clock, cpu_voltage, - cpu_type, l1i_type, l1d_type, l2_type): + def __init__( + self, + system, + num_cpus, + 
cpu_clock, + cpu_voltage, + cpu_type, + l1i_type, + l1d_type, + l2_type, + ): super(CpuCluster, self).__init__() self._cpu_type = cpu_type self._l1i_type = l1i_type @@ -105,12 +116,16 @@ class CpuCluster(SubSystem): assert num_cpus > 0 self.voltage_domain = VoltageDomain(voltage=cpu_voltage) - self.clk_domain = SrcClockDomain(clock=cpu_clock, - voltage_domain=self.voltage_domain) + self.clk_domain = SrcClockDomain( + clock=cpu_clock, voltage_domain=self.voltage_domain + ) - self.cpus = [ self._cpu_type(cpu_id=system.numCpus() + idx, - clk_domain=self.clk_domain) - for idx in range(num_cpus) ] + self.cpus = [ + self._cpu_type( + cpu_id=system.numCpus() + idx, clk_domain=self.clk_domain + ) + for idx in range(num_cpus) + ] for cpu in self.cpus: cpu.createThreads() @@ -157,11 +172,14 @@ class CpuCluster(SubSystem): int_cls = ArmPPI if pint < 32 else ArmSPI for isa in cpu.isa: isa.pmu = ArmPMU(interrupt=int_cls(num=pint)) - isa.pmu.addArchEvents(cpu=cpu, - itb=cpu.mmu.itb, dtb=cpu.mmu.dtb, - icache=getattr(cpu, 'icache', None), - dcache=getattr(cpu, 'dcache', None), - l2cache=getattr(self, 'l2', None)) + isa.pmu.addArchEvents( + cpu=cpu, + itb=cpu.mmu.itb, + dtb=cpu.mmu.dtb, + icache=getattr(cpu, "icache", None), + dcache=getattr(cpu, "dcache", None), + l2cache=getattr(self, "l2", None), + ) for ev in events: isa.pmu.addEvent(ev) @@ -175,42 +193,55 @@ class CpuCluster(SubSystem): class AtomicCluster(CpuCluster): def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): - cpu_config = [ ObjectList.cpu_list.get("AtomicSimpleCPU"), None, - None, None ] - super(AtomicCluster, self).__init__(system, num_cpus, cpu_clock, - cpu_voltage, *cpu_config) + cpu_config = [ + ObjectList.cpu_list.get("AtomicSimpleCPU"), + None, + None, + None, + ] + super(AtomicCluster, self).__init__( + system, num_cpus, cpu_clock, cpu_voltage, *cpu_config + ) + def addL1(self): pass + class KvmCluster(CpuCluster): def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): - 
cpu_config = [ ObjectList.cpu_list.get("ArmV8KvmCPU"), None, None, - None ] - super(KvmCluster, self).__init__(system, num_cpus, cpu_clock, - cpu_voltage, *cpu_config) + cpu_config = [ObjectList.cpu_list.get("ArmV8KvmCPU"), None, None, None] + super(KvmCluster, self).__init__( + system, num_cpus, cpu_clock, cpu_voltage, *cpu_config + ) + def addL1(self): pass + class FastmodelCluster(SubSystem): - def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): + def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): super(FastmodelCluster, self).__init__() # Setup GIC gic = system.realview.gic - gic.sc_gic.cpu_affinities = ','.join( - [ '0.0.%d.0' % i for i in range(num_cpus) ]) + gic.sc_gic.cpu_affinities = ",".join( + ["0.0.%d.0" % i for i in range(num_cpus)] + ) # Parse the base address of redistributor. redist_base = gic.get_redist_bases()[0] redist_frame_size = 0x40000 if gic.sc_gic.has_gicv4_1 else 0x20000 - gic.sc_gic.reg_base_per_redistributor = ','.join([ - '0.0.%d.0=%#x' % (i, redist_base + redist_frame_size * i) - for i in range(num_cpus) - ]) + gic.sc_gic.reg_base_per_redistributor = ",".join( + [ + "0.0.%d.0=%#x" % (i, redist_base + redist_frame_size * i) + for i in range(num_cpus) + ] + ) gic_a2t = AmbaToTlmBridge64(amba=gic.amba_m) - gic_t2g = TlmToGem5Bridge64(tlm=gic_a2t.tlm, - gem5=system.iobus.cpu_side_ports) + gic_t2g = TlmToGem5Bridge64( + tlm=gic_a2t.tlm, gem5=system.iobus.cpu_side_ports + ) gic_g2t = Gem5ToTlmBridge64(gem5=system.membus.mem_side_ports) gic_g2t.addr_ranges = gic.get_addr_ranges() gic_t2a = AmbaFromTlmBridge64(tlm=gic_g2t.tlm) @@ -223,28 +254,36 @@ class FastmodelCluster(SubSystem): system.gic_hub.gic_t2a = gic_t2a self.voltage_domain = VoltageDomain(voltage=cpu_voltage) - self.clk_domain = SrcClockDomain(clock=cpu_clock, - voltage_domain=self.voltage_domain) + self.clk_domain = SrcClockDomain( + clock=cpu_clock, voltage_domain=self.voltage_domain + ) # Setup CPU assert num_cpus <= 4 - CpuClasses = 
[FastModelCortexA76x1, FastModelCortexA76x2, - FastModelCortexA76x3, FastModelCortexA76x4] + CpuClasses = [ + FastModelCortexA76x1, + FastModelCortexA76x2, + FastModelCortexA76x3, + FastModelCortexA76x4, + ] CpuClass = CpuClasses[num_cpus - 1] - cpu = CpuClass(GICDISABLE=False) + cpu = CpuClass( + GICDISABLE=False, BROADCASTATOMIC=False, BROADCASTOUTER=False + ) for core in cpu.cores: core.semihosting_enable = False core.RVBARADDR = 0x10 core.redistributor = gic.redistributor core.createThreads() core.createInterruptController() - self.cpus = [ cpu ] + self.cpus = [cpu] + self.cpu_hub = SubSystem() a2t = AmbaToTlmBridge64(amba=cpu.amba) t2g = TlmToGem5Bridge64(tlm=a2t.tlm, gem5=system.membus.cpu_side_ports) - system.gic_hub.a2t = a2t - system.gic_hub.t2g = t2g + self.cpu_hub.a2t = a2t + self.cpu_hub.t2g = t2g system.addCpuCluster(self, num_cpus) @@ -252,7 +291,7 @@ class FastmodelCluster(SubSystem): return False def memoryMode(self): - return 'atomic_noncaching' + return "atomic_noncaching" def addL1(self): pass @@ -263,6 +302,7 @@ class FastmodelCluster(SubSystem): def connectMemSide(self, bus): pass + class BaseSimpleSystem(ArmSystem): cache_line_size = 64 @@ -271,15 +311,15 @@ class BaseSimpleSystem(ArmSystem): self.voltage_domain = VoltageDomain(voltage="1.0V") self.clk_domain = SrcClockDomain( - clock="1GHz", - voltage_domain=Parent.voltage_domain) + clock="1GHz", voltage_domain=Parent.voltage_domain + ) if platform is None: self.realview = VExpress_GEM5_V1() else: self.realview = platform - if hasattr(self.realview.gic, 'cpu_addr'): + if hasattr(self.realview.gic, "cpu_addr"): self.gic_cpu_addr = self.realview.gic.cpu_addr self.terminal = Terminal() @@ -305,7 +345,8 @@ class BaseSimpleSystem(ArmSystem): size_in_range = min(mem_size, mem_range.size()) mem_ranges.append( - AddrRange(start=mem_range.start, size=size_in_range)) + AddrRange(start=mem_range.start, size=size_in_range) + ) mem_size -= size_in_range if mem_size == 0: @@ -340,8 +381,9 @@ class 
BaseSimpleSystem(ArmSystem): for cluster in self._clusters: cluster.addL2(cluster.clk_domain) if last_cache_level > 2: - max_clock_cluster = max(self._clusters, - key=lambda c: c.clk_domain.clock[0]) + max_clock_cluster = max( + self._clusters, key=lambda c: c.clk_domain.clock[0] + ) self.l3 = L3(clk_domain=max_clock_cluster.clk_domain) self.toL3Bus = L2XBar(width=64) self.toL3Bus.mem_side_ports = self.l3.cpu_side @@ -352,23 +394,24 @@ class BaseSimpleSystem(ArmSystem): for cluster in self._clusters: cluster.connectMemSide(cluster_mem_bus) + class SimpleSystem(BaseSimpleSystem): """ Meant to be used with the classic memory model """ + def __init__(self, caches, mem_size, platform=None, **kwargs): super(SimpleSystem, self).__init__(mem_size, platform, **kwargs) self.membus = MemBus() # CPUs->PIO - self.iobridge = Bridge(delay='50ns') + self.iobridge = Bridge(delay="50ns") self._caches = caches if self._caches: self.iocache = IOCache(addr_ranges=self.mem_ranges) else: - self.dmabridge = Bridge(delay='50ns', - ranges=self.mem_ranges) + self.dmabridge = Bridge(delay="50ns", ranges=self.mem_ranges) def connect(self): self.iobridge.mem_side_port = self.iobus.cpu_side_ports @@ -381,7 +424,7 @@ class SimpleSystem(BaseSimpleSystem): self.dmabridge.mem_side_port = self.membus.cpu_side_ports self.dmabridge.cpu_side_port = self.iobus.mem_side_ports - if hasattr(self.realview.gic, 'cpu_addr'): + if hasattr(self.realview.gic, "cpu_addr"): self.gic_cpu_addr = self.realview.gic.cpu_addr self.realview.attachOnChipIO(self.membus, self.iobridge) self.realview.attachIO(self.iobus) @@ -390,18 +433,21 @@ class SimpleSystem(BaseSimpleSystem): def attach_pci(self, dev): self.realview.attachPciDevice(dev, self.iobus) + class ArmRubySystem(BaseSimpleSystem): """ Meant to be used with ruby """ + def __init__(self, mem_size, platform=None, **kwargs): super(ArmRubySystem, self).__init__(mem_size, platform, **kwargs) self._dma_ports = [] self._mem_ports = [] def connect(self): - 
self.realview.attachOnChipIO(self.iobus, - dma_ports=self._dma_ports, mem_ports=self._mem_ports) + self.realview.attachOnChipIO( + self.iobus, dma_ports=self._dma_ports, mem_ports=self._mem_ports + ) self.realview.attachIO(self.iobus, dma_ports=self._dma_ports) @@ -410,5 +456,6 @@ class ArmRubySystem(BaseSimpleSystem): self.ruby._cpu_ports[i].connectCpuPorts(cpu) def attach_pci(self, dev): - self.realview.attachPciDevice(dev, self.iobus, - dma_ports=self._dma_ports) + self.realview.attachPciDevice( + dev, self.iobus, dma_ports=self._dma_ports + ) diff --git a/configs/example/arm/dist_bigLITTLE.py b/configs/example/arm/dist_bigLITTLE.py index 6d35e53779..a3f3ede4eb 100644 --- a/configs/example/arm/dist_bigLITTLE.py +++ b/configs/example/arm/dist_bigLITTLE.py @@ -43,55 +43,96 @@ import m5 from m5.objects import * import fs_bigLITTLE as bL + m5.util.addToPath("../../dist") import sw def addOptions(parser): - # Options for distributed simulation (i.e. dist-gem5) - parser.add_argument("--dist", action="store_true", help="Distributed gem5"\ - " simulation.") - parser.add_argument("--is-switch", action="store_true", - help="Select the network switch simulator process for"\ - " a distributed gem5 run.") - parser.add_argument("--dist-rank", default=0, action="store", type=int, - help="Rank of this system within the dist gem5 run.") - parser.add_argument("--dist-size", default=0, action="store", type=int, - help="Number of gem5 processes within the dist gem5"\ - " run.") - parser.add_argument("--dist-server-name", - default="127.0.0.1", - action="store", type=str, - help="Name of the message server host\nDEFAULT:"\ - " localhost") - parser.add_argument("--dist-server-port", - default=2200, - action="store", type=int, - help="Message server listen port\nDEFAULT: 2200") - parser.add_argument("--dist-sync-repeat", - default="0us", - action="store", type=str, - help="Repeat interval for synchronisation barriers"\ - " among dist-gem5 processes\nDEFAULT:"\ - " 
--ethernet-linkdelay") - parser.add_argument("--dist-sync-start", - default="1000000000000t", - action="store", type=str, - help="Time to schedule the first dist synchronisation"\ - " barrier\nDEFAULT:1000000000000t") - parser.add_argument("--ethernet-linkspeed", default="10Gbps", - action="store", type=str, - help="Link speed in bps\nDEFAULT: 10Gbps") - parser.add_argument("--ethernet-linkdelay", default="10us", - action="store", type=str, - help="Link delay in seconds\nDEFAULT: 10us") - parser.add_argument("--etherdump", action="store", type=str, default="", - help="Specify the filename to dump a pcap capture of"\ - " the ethernet traffic") + # Options for distributed simulation (i.e. dist-gem5) + parser.add_argument( + "--dist", action="store_true", help="Distributed gem5" " simulation." + ) + parser.add_argument( + "--is-switch", + action="store_true", + help="Select the network switch simulator process for" + " a distributed gem5 run.", + ) + parser.add_argument( + "--dist-rank", + default=0, + action="store", + type=int, + help="Rank of this system within the dist gem5 run.", + ) + parser.add_argument( + "--dist-size", + default=0, + action="store", + type=int, + help="Number of gem5 processes within the dist gem5" " run.", + ) + parser.add_argument( + "--dist-server-name", + default="127.0.0.1", + action="store", + type=str, + help="Name of the message server host\nDEFAULT:" " localhost", + ) + parser.add_argument( + "--dist-server-port", + default=2200, + action="store", + type=int, + help="Message server listen port\nDEFAULT: 2200", + ) + parser.add_argument( + "--dist-sync-repeat", + default="0us", + action="store", + type=str, + help="Repeat interval for synchronisation barriers" + " among dist-gem5 processes\nDEFAULT:" + " --ethernet-linkdelay", + ) + parser.add_argument( + "--dist-sync-start", + default="1000000000000t", + action="store", + type=str, + help="Time to schedule the first dist synchronisation" + " barrier\nDEFAULT:1000000000000t", + ) + 
parser.add_argument( + "--ethernet-linkspeed", + default="10Gbps", + action="store", + type=str, + help="Link speed in bps\nDEFAULT: 10Gbps", + ) + parser.add_argument( + "--ethernet-linkdelay", + default="10us", + action="store", + type=str, + help="Link delay in seconds\nDEFAULT: 10us", + ) + parser.add_argument( + "--etherdump", + action="store", + type=str, + default="", + help="Specify the filename to dump a pcap capture of" + " the ethernet traffic", + ) # Used by util/dist/gem5-dist.sh - parser.add_argument("--checkpoint-dir", type=str, - default=m5.options.outdir, - help="Directory to save/read checkpoints") + parser.add_argument( + "--checkpoint-dir", + type=str, + default=m5.options.outdir, + help="Directory to save/read checkpoints", + ) def addEthernet(system, options): @@ -101,14 +142,16 @@ def addEthernet(system, options): system.ethernet = dev # create distributed ethernet link - system.etherlink = DistEtherLink(speed = options.ethernet_linkspeed, - delay = options.ethernet_linkdelay, - dist_rank = options.dist_rank, - dist_size = options.dist_size, - server_name = options.dist_server_name, - server_port = options.dist_server_port, - sync_start = options.dist_sync_start, - sync_repeat = options.dist_sync_repeat) + system.etherlink = DistEtherLink( + speed=options.ethernet_linkspeed, + delay=options.ethernet_linkdelay, + dist_rank=options.dist_rank, + dist_size=options.dist_size, + server_name=options.dist_server_name, + server_port=options.dist_server_port, + sync_start=options.dist_sync_start, + sync_repeat=options.dist_sync_repeat, + ) system.etherlink.int0 = Parent.system.ethernet.interface if options.etherdump: system.etherdump = EtherDump(file=options.etherdump) @@ -117,15 +160,15 @@ def addEthernet(system, options): def main(): parser = argparse.ArgumentParser( - description="Generic ARM big.LITTLE configuration with "\ - "dist-gem5 support") + description="Generic ARM big.LITTLE configuration with " + "dist-gem5 support" + ) 
bL.addOptions(parser) addOptions(parser) options = parser.parse_args() if options.is_switch: - root = Root(full_system = True, - system = sw.build_switch(options)) + root = Root(full_system=True, system=sw.build_switch(options)) else: root = bL.build(options) addEthernet(root.system, options) diff --git a/configs/example/arm/fs_bigLITTLE.py b/configs/example/arm/fs_bigLITTLE.py index 3f8b0cfb19..c188de663a 100644 --- a/configs/example/arm/fs_bigLITTLE.py +++ b/configs/example/arm/fs_bigLITTLE.py @@ -55,84 +55,121 @@ import devices from devices import AtomicCluster, KvmCluster, FastmodelCluster -default_disk = 'aarch64-ubuntu-trusty-headless.img' +default_disk = "aarch64-ubuntu-trusty-headless.img" + +default_mem_size = "2GB" -default_mem_size= "2GB" def _to_ticks(value): """Helper function to convert a latency from string format to Ticks""" return m5.ticks.fromSeconds(m5.util.convert.anyToLatency(value)) + def _using_pdes(root): """Determine if the simulator is using multiple parallel event queues""" for obj in root.descendants(): - if not m5.proxy.isproxy(obj.eventq_index) and \ - obj.eventq_index != root.eventq_index: + if ( + not m5.proxy.isproxy(obj.eventq_index) + and obj.eventq_index != root.eventq_index + ): return True return False class BigCluster(devices.CpuCluster): - def __init__(self, system, num_cpus, cpu_clock, - cpu_voltage="1.0V"): - cpu_config = [ ObjectList.cpu_list.get("O3_ARM_v7a_3"), - devices.L1I, devices.L1D, devices.L2 ] - super(BigCluster, self).__init__(system, num_cpus, cpu_clock, - cpu_voltage, *cpu_config) + def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): + cpu_config = [ + ObjectList.cpu_list.get("O3_ARM_v7a_3"), + devices.L1I, + devices.L1D, + devices.L2, + ] + super(BigCluster, self).__init__( + system, num_cpus, cpu_clock, cpu_voltage, *cpu_config + ) + class LittleCluster(devices.CpuCluster): - def __init__(self, system, num_cpus, cpu_clock, - cpu_voltage="1.0V"): - cpu_config = [ 
ObjectList.cpu_list.get("MinorCPU"), devices.L1I, - devices.L1D, devices.L2 ] - super(LittleCluster, self).__init__(system, num_cpus, cpu_clock, - cpu_voltage, *cpu_config) + def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): + cpu_config = [ + ObjectList.cpu_list.get("MinorCPU"), + devices.L1I, + devices.L1D, + devices.L2, + ] + super(LittleCluster, self).__init__( + system, num_cpus, cpu_clock, cpu_voltage, *cpu_config + ) + class Ex5BigCluster(devices.CpuCluster): - def __init__(self, system, num_cpus, cpu_clock, - cpu_voltage="1.0V"): - cpu_config = [ ObjectList.cpu_list.get("ex5_big"), ex5_big.L1I, - ex5_big.L1D, ex5_big.L2 ] - super(Ex5BigCluster, self).__init__(system, num_cpus, cpu_clock, - cpu_voltage, *cpu_config) + def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): + cpu_config = [ + ObjectList.cpu_list.get("ex5_big"), + ex5_big.L1I, + ex5_big.L1D, + ex5_big.L2, + ] + super(Ex5BigCluster, self).__init__( + system, num_cpus, cpu_clock, cpu_voltage, *cpu_config + ) + class Ex5LittleCluster(devices.CpuCluster): - def __init__(self, system, num_cpus, cpu_clock, - cpu_voltage="1.0V"): - cpu_config = [ ObjectList.cpu_list.get("ex5_LITTLE"), - ex5_LITTLE.L1I, ex5_LITTLE.L1D, - ex5_LITTLE.L2 ] - super(Ex5LittleCluster, self).__init__(system, num_cpus, cpu_clock, - cpu_voltage, *cpu_config) + def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"): + cpu_config = [ + ObjectList.cpu_list.get("ex5_LITTLE"), + ex5_LITTLE.L1I, + ex5_LITTLE.L1D, + ex5_LITTLE.L2, + ] + super(Ex5LittleCluster, self).__init__( + system, num_cpus, cpu_clock, cpu_voltage, *cpu_config + ) -def createSystem(caches, kernel, bootscript, machine_type="VExpress_GEM5", - disks=[], mem_size=default_mem_size, bootloader=None): + +def createSystem( + caches, + kernel, + bootscript, + machine_type="VExpress_GEM5", + disks=[], + mem_size=default_mem_size, + bootloader=None, +): platform = ObjectList.platform_list.get(machine_type) 
m5.util.inform("Simulated platform: %s", platform.__name__) - sys = devices.SimpleSystem(caches, mem_size, platform(), - workload=ArmFsLinux( - object_file=SysPaths.binary(kernel)), - readfile=bootscript) + sys = devices.SimpleSystem( + caches, + mem_size, + platform(), + workload=ArmFsLinux(object_file=SysPaths.binary(kernel)), + readfile=bootscript, + ) - sys.mem_ctrls = [ SimpleMemory(range=r, port=sys.membus.mem_side_ports) - for r in sys.mem_ranges ] + sys.mem_ctrls = [ + SimpleMemory(range=r, port=sys.membus.mem_side_ports) + for r in sys.mem_ranges + ] sys.connect() # Attach disk images if disks: + def cow_disk(image_file): image = CowDiskImage() image.child.image_file = SysPaths.disk(image_file) return image - sys.disk_images = [ cow_disk(f) for f in disks ] - sys.pci_vio_block = [ PciVirtIO(vio=VirtIOBlock(image=img)) - for img in sys.disk_images ] + sys.disk_images = [cow_disk(f) for f in disks] + sys.pci_vio_block = [ + PciVirtIO(vio=VirtIOBlock(image=img)) for img in sys.disk_images + ] for dev in sys.pci_vio_block: sys.attach_pci(dev) @@ -140,10 +177,11 @@ def createSystem(caches, kernel, bootscript, machine_type="VExpress_GEM5", return sys + cpu_types = { - "atomic" : (AtomicCluster, AtomicCluster), - "timing" : (BigCluster, LittleCluster), - "exynos" : (Ex5BigCluster, Ex5LittleCluster), + "atomic": (AtomicCluster, AtomicCluster), + "timing": (BigCluster, LittleCluster), + "exynos": (Ex5BigCluster, Ex5LittleCluster), } # Only add the KVM CPU if it has been compiled into gem5 @@ -154,66 +192,143 @@ if devices.have_kvm: if devices.have_fastmodel: cpu_types["fastmodel"] = (FastmodelCluster, FastmodelCluster) + def addOptions(parser): - parser.add_argument("--restore-from", type=str, default=None, - help="Restore from checkpoint") - parser.add_argument("--dtb", type=str, default=None, - help="DTB file to load") - parser.add_argument("--kernel", type=str, required=True, - help="Linux kernel") - parser.add_argument("--root", type=str, default="/dev/vda1", - 
help="Specify the kernel CLI root= argument") - parser.add_argument("--machine-type", type=str, - choices=ObjectList.platform_list.get_names(), - default="VExpress_GEM5", - help="Hardware platform class") - parser.add_argument("--disk", action="append", type=str, default=[], - help="Disks to instantiate") - parser.add_argument("--bootscript", type=str, default="", - help="Linux bootscript") - parser.add_argument("--cpu-type", type=str, choices=list(cpu_types.keys()), - default="timing", - help="CPU simulation mode. Default: %(default)s") - parser.add_argument("--kernel-init", type=str, default="/sbin/init", - help="Override init") - parser.add_argument("--big-cpus", type=int, default=1, - help="Number of big CPUs to instantiate") - parser.add_argument("--little-cpus", type=int, default=1, - help="Number of little CPUs to instantiate") - parser.add_argument("--caches", action="store_true", default=False, - help="Instantiate caches") - parser.add_argument("--last-cache-level", type=int, default=2, - help="Last level of caches (e.g. 3 for L3)") - parser.add_argument("--big-cpu-clock", type=str, default="2GHz", - help="Big CPU clock frequency") - parser.add_argument("--little-cpu-clock", type=str, default="1GHz", - help="Little CPU clock frequency") - parser.add_argument("--sim-quantum", type=str, default="1ms", - help="Simulation quantum for parallel simulation. 
" \ - "Default: %(default)s") - parser.add_argument("--mem-size", type=str, default=default_mem_size, - help="System memory size") - parser.add_argument("--kernel-cmd", type=str, default=None, - help="Custom Linux kernel command") - parser.add_argument("--bootloader", action="append", - help="executable file that runs before the --kernel") - parser.add_argument("--kvm-userspace-gic", action="store_true", - default=False, - help="Use the gem5 GIC in a KVM simulation") - parser.add_argument("-P", "--param", action="append", default=[], + parser.add_argument( + "--restore-from", + type=str, + default=None, + help="Restore from checkpoint", + ) + parser.add_argument( + "--dtb", type=str, default=None, help="DTB file to load" + ) + parser.add_argument( + "--kernel", type=str, required=True, help="Linux kernel" + ) + parser.add_argument( + "--root", + type=str, + default="/dev/vda1", + help="Specify the kernel CLI root= argument", + ) + parser.add_argument( + "--machine-type", + type=str, + choices=ObjectList.platform_list.get_names(), + default="VExpress_GEM5", + help="Hardware platform class", + ) + parser.add_argument( + "--disk", + action="append", + type=str, + default=[], + help="Disks to instantiate", + ) + parser.add_argument( + "--bootscript", type=str, default="", help="Linux bootscript" + ) + parser.add_argument( + "--cpu-type", + type=str, + choices=list(cpu_types.keys()), + default="timing", + help="CPU simulation mode. 
Default: %(default)s", + ) + parser.add_argument( + "--kernel-init", type=str, default="/sbin/init", help="Override init" + ) + parser.add_argument( + "--big-cpus", + type=int, + default=1, + help="Number of big CPUs to instantiate", + ) + parser.add_argument( + "--little-cpus", + type=int, + default=1, + help="Number of little CPUs to instantiate", + ) + parser.add_argument( + "--caches", + action="store_true", + default=False, + help="Instantiate caches", + ) + parser.add_argument( + "--last-cache-level", + type=int, + default=2, + help="Last level of caches (e.g. 3 for L3)", + ) + parser.add_argument( + "--big-cpu-clock", + type=str, + default="2GHz", + help="Big CPU clock frequency", + ) + parser.add_argument( + "--little-cpu-clock", + type=str, + default="1GHz", + help="Little CPU clock frequency", + ) + parser.add_argument( + "--sim-quantum", + type=str, + default="1ms", + help="Simulation quantum for parallel simulation. " + "Default: %(default)s", + ) + parser.add_argument( + "--mem-size", + type=str, + default=default_mem_size, + help="System memory size", + ) + parser.add_argument( + "--kernel-cmd", + type=str, + default=None, + help="Custom Linux kernel command", + ) + parser.add_argument( + "--bootloader", + action="append", + help="executable file that runs before the --kernel", + ) + parser.add_argument( + "--kvm-userspace-gic", + action="store_true", + default=False, + help="Use the gem5 GIC in a KVM simulation", + ) + parser.add_argument( + "-P", + "--param", + action="append", + default=[], help="Set a SimObject parameter relative to the root node. " - "An extended Python multi range slicing syntax can be used " - "for arrays. 
For example: " - "'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' " - "sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 " - "Direct parameters of the root object are not accessible, " - "only parameters of its children.") - parser.add_argument("--vio-9p", action="store_true", - help=Options.vio_9p_help) - parser.add_argument("--dtb-gen", action="store_true", - help="Doesn't run simulation, it generates a DTB only") + "An extended Python multi range slicing syntax can be used " + "for arrays. For example: " + "'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' " + "sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 " + "Direct parameters of the root object are not accessible, " + "only parameters of its children.", + ) + parser.add_argument( + "--vio-9p", action="store_true", help=Options.vio_9p_help + ) + parser.add_argument( + "--dtb-gen", + action="store_true", + help="Doesn't run simulation, it generates a DTB only", + ) return parser + def build(options): m5.ticks.fixGlobalFrequency() @@ -234,13 +349,15 @@ def build(options): root = Root(full_system=True) disks = [default_disk] if len(options.disk) == 0 else options.disk - system = createSystem(options.caches, - options.kernel, - options.bootscript, - options.machine_type, - disks=disks, - mem_size=options.mem_size, - bootloader=options.bootloader) + system = createSystem( + options.caches, + options.kernel, + options.bootscript, + options.machine_type, + disks=disks, + mem_size=options.mem_size, + bootloader=options.bootloader, + ) root.system = system if options.kernel_cmd: @@ -256,24 +373,28 @@ def build(options): all_cpus = [] # big cluster if options.big_cpus > 0: - system.bigCluster = big_model(system, options.big_cpus, - options.big_cpu_clock) + system.bigCluster = big_model( + system, options.big_cpus, options.big_cpu_clock + ) system.mem_mode = system.bigCluster.memoryMode() all_cpus += system.bigCluster.cpus # little cluster if options.little_cpus > 0: - system.littleCluster = 
little_model(system, options.little_cpus, - options.little_cpu_clock) + system.littleCluster = little_model( + system, options.little_cpus, options.little_cpu_clock + ) system.mem_mode = system.littleCluster.memoryMode() all_cpus += system.littleCluster.cpus # Figure out the memory mode - if options.big_cpus > 0 and options.little_cpus > 0 and \ - system.bigCluster.memoryMode() != system.littleCluster.memoryMode(): + if ( + options.big_cpus > 0 + and options.little_cpus > 0 + and system.bigCluster.memoryMode() != system.littleCluster.memoryMode() + ): m5.util.panic("Memory mode missmatch among CPU clusters") - # create caches system.addCaches(options.caches, options.last_cache_level) if not options.caches: @@ -290,26 +411,31 @@ def build(options): if options.dtb is not None: system.workload.dtb_filename = SysPaths.binary(options.dtb) else: - system.workload.dtb_filename = \ - os.path.join(m5.options.outdir, 'system.dtb') + system.workload.dtb_filename = os.path.join( + m5.options.outdir, "system.dtb" + ) system.generateDtb(system.workload.dtb_filename) if devices.have_fastmodel and issubclass(big_model, FastmodelCluster): from m5 import arm_fast_model as fm, systemc as sc + # setup FastModels for simulation fm.setup_simulation("cortexa76") # setup SystemC root.systemc_kernel = m5.objects.SystemC_Kernel() m5.tlm.tlm_global_quantum_instance().set( - sc.sc_time(10000.0 / 100000000.0, sc.sc_time.SC_SEC)) + sc.sc_time(10000.0 / 100000000.0, sc.sc_time.SC_SEC) + ) if options.vio_9p: FSConfig.attach_9p(system.realview, system.iobus) return root + def _build_kvm(options, system, cpus): system.kvm_vm = KvmVM() + system.release = ArmDefaultRelease.for_kvm() if options.kvm_userspace_gic: # We will use the simulated GIC. 
@@ -335,14 +461,15 @@ def _build_kvm(options, system, cpus): cpu.eventq_index = first_cpu_eq + idx - def instantiate(options, checkpoint_dir=None): # Setup the simulation quantum if we are running in PDES-mode # (e.g., when using KVM) root = Root.getInstance() if root and _using_pdes(root): - m5.util.inform("Running in PDES mode with a %s simulation quantum.", - options.sim_quantum) + m5.util.inform( + "Running in PDES mode with a %s simulation quantum.", + options.sim_quantum, + ) root.sim_quantum = _to_ticks(options.sim_quantum) # Get and load from the chkpt or simpoint checkpoint @@ -381,16 +508,17 @@ def generateDtb(root): def main(): parser = argparse.ArgumentParser( - description="Generic ARM big.LITTLE configuration") + description="Generic ARM big.LITTLE configuration" + ) addOptions(parser) options = parser.parse_args() root = build(options) root.apply_config(options.param) instantiate(options) if options.dtb_gen: - generateDtb(root) + generateDtb(root) else: - run() + run() if __name__ == "__m5_main__": diff --git a/configs/example/arm/fs_power.py b/configs/example/arm/fs_power.py index 7ae9cf83b9..95d2182508 100644 --- a/configs/example/arm/fs_power.py +++ b/configs/example/arm/fs_power.py @@ -50,25 +50,29 @@ class CpuPowerOn(MathExprPowerModel): super(CpuPowerOn, self).__init__(**kwargs) # 2A per IPC, 3pA per cache miss # and then convert to Watt - self.dyn = "voltage * (2 * {}.ipc + 3 * 0.000000001 * " \ - "{}.dcache.overallMisses / simSeconds)".format(cpu_path, - cpu_path) + self.dyn = ( + "voltage * (2 * {}.ipc + 3 * 0.000000001 * " + "{}.dcache.overallMisses / simSeconds)".format(cpu_path, cpu_path) + ) self.st = "4 * temp" + class CpuPowerOff(MathExprPowerModel): dyn = "0" st = "0" + class CpuPowerModel(PowerModel): def __init__(self, cpu_path, **kwargs): super(CpuPowerModel, self).__init__(**kwargs) self.pm = [ - CpuPowerOn(cpu_path), # ON - CpuPowerOff(), # CLK_GATED - CpuPowerOff(), # SRAM_RETENTION - CpuPowerOff(), # OFF + CpuPowerOn(cpu_path), 
# ON + CpuPowerOff(), # CLK_GATED + CpuPowerOff(), # SRAM_RETENTION + CpuPowerOff(), # OFF ] + class L2PowerOn(MathExprPowerModel): def __init__(self, l2_path, **kwargs): super(L2PowerOn, self).__init__(**kwargs) @@ -78,26 +82,29 @@ class L2PowerOn(MathExprPowerModel): self.dyn = "{}.overallAccesses * 0.000018000".format(l2_path) self.st = "(voltage * 3)/10" + class L2PowerOff(MathExprPowerModel): dyn = "0" st = "0" + class L2PowerModel(PowerModel): def __init__(self, l2_path, **kwargs): super(L2PowerModel, self).__init__(**kwargs) # Choose a power model for every power state self.pm = [ - L2PowerOn(l2_path), # ON - L2PowerOff(), # CLK_GATED - L2PowerOff(), # SRAM_RETENTION - L2PowerOff(), # OFF + L2PowerOn(l2_path), # ON + L2PowerOff(), # CLK_GATED + L2PowerOff(), # SRAM_RETENTION + L2PowerOff(), # OFF ] def main(): parser = argparse.ArgumentParser( - description="Generic ARM big.LITTLE configuration with "\ - "example power models") + description="Generic ARM big.LITTLE configuration with " + "example power models" + ) bL.addOptions(parser) options = parser.parse_args() @@ -125,13 +132,15 @@ def main(): bL.instantiate(options) print("*" * 70) - print("WARNING: The power numbers generated by this script are " + print( + "WARNING: The power numbers generated by this script are " "examples. They are not representative of any particular " - "implementation or process.") + "implementation or process." 
+ ) print("*" * 70) # Dumping stats periodically - m5.stats.periodicStatDump(m5.ticks.fromSeconds(0.1E-3)) + m5.stats.periodicStatDump(m5.ticks.fromSeconds(0.1e-3)) bL.run() diff --git a/configs/example/arm/ruby_fs.py b/configs/example/arm/ruby_fs.py index ecca22b048..d58184522c 100644 --- a/configs/example/arm/ruby_fs.py +++ b/configs/example/arm/ruby_fs.py @@ -40,7 +40,7 @@ from m5.objects import * from m5.options import * import argparse -m5.util.addToPath('../..') +m5.util.addToPath("../..") from common import MemConfig from common import ObjectList @@ -52,19 +52,20 @@ from ruby import Ruby import devices -default_kernel = 'vmlinux.arm64' -default_disk = 'linaro-minimal-aarch64.img' -default_root_device = '/dev/vda1' +default_kernel = "vmlinux.arm64" +default_disk = "linaro-minimal-aarch64.img" +default_root_device = "/dev/vda1" # Pre-defined CPU configurations. cpu_types = { - "noncaching" : NonCachingSimpleCPU, - "minor" : MinorCPU, - "hpi" : HPI.HPI, - "o3" : O3_ARM_v7a.O3_ARM_v7a_3, + "noncaching": NonCachingSimpleCPU, + "minor": MinorCPU, + "hpi": HPI.HPI, + "o3": O3_ARM_v7a.O3_ARM_v7a_3, } + def create_cow_image(name): """Helper function to create a Copy-on-Write disk image""" image = CowDiskImage() @@ -72,23 +73,31 @@ def create_cow_image(name): return image + def config_ruby(system, args): cpus = [] for cluster in system.cpu_cluster: for cpu in cluster.cpus: cpus.append(cpu) - Ruby.create_system(args, True, system, system.iobus, - system._dma_ports, system.realview.bootmem, - cpus) + Ruby.create_system( + args, + True, + system, + system.iobus, + system._dma_ports, + system.realview.bootmem, + cpus, + ) # Create a seperate clock domain for Ruby system.ruby.clk_domain = SrcClockDomain( - clock = args.ruby_clock, - voltage_domain = system.voltage_domain) + clock=args.ruby_clock, voltage_domain=system.voltage_domain + ) + def create(args): - ''' Create and configure the system object. 
''' + """Create and configure the system object.""" if args.script and not os.path.isfile(args.script): print("Error: Bootscript %s does not exist" % args.script) @@ -97,19 +106,25 @@ def create(args): cpu_class = cpu_types[args.cpu] mem_mode = cpu_class.memory_mode() - system = devices.ArmRubySystem(args.mem_size, - mem_mode=mem_mode, - workload=ArmFsLinux( - object_file= - SysPaths.binary(args.kernel)), - readfile=args.script) + system = devices.ArmRubySystem( + args.mem_size, + mem_mode=mem_mode, + workload=ArmFsLinux(object_file=SysPaths.binary(args.kernel)), + readfile=args.script, + ) # Add CPU clusters to the system system.cpu_cluster = [ - devices.CpuCluster(system, - args.num_cpus, - args.cpu_freq, "1.0V", - cpu_class, None, None, None), + devices.CpuCluster( + system, + args.num_cpus, + args.cpu_freq, + "1.0V", + cpu_class, + None, + None, + None, + ) ] # Add the PCI devices we need for this system. The base system @@ -120,7 +135,7 @@ def create(args): # disk. Attach the disk image using gem5's Copy-on-Write # functionality to avoid writing changes to the stored copy of # the disk image. - PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))), + PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))) ] # Attach the PCI devices to the system. 
The helper method in the @@ -141,8 +156,9 @@ def create(args): system.workload.dtb_filename = args.dtb else: # No DTB specified: autogenerate DTB - system.workload.dtb_filename = \ - os.path.join(m5.options.outdir, 'system.dtb') + system.workload.dtb_filename = os.path.join( + m5.options.outdir, "system.dtb" + ) system.generateDtb(system.workload.dtb_filename) # Linux boot command flags @@ -189,41 +205,73 @@ def run(args): def main(): parser = argparse.ArgumentParser() - parser.add_argument("--dtb", type=str, default=None, - help="DTB file to load") - parser.add_argument("--kernel", type=str, default=default_kernel, - help="Linux kernel") - parser.add_argument("--disk-image", type=str, - default=default_disk, - help="Disk to instantiate") - parser.add_argument("--root-device", type=str, - default=default_root_device, - help="OS device name for root partition (default: {})" - .format(default_root_device)) - parser.add_argument("--script", type=str, default="", - help = "Linux bootscript") - parser.add_argument("--cpu", choices=list(cpu_types.keys()), - default="minor", - help="CPU model to use") + parser.add_argument( + "--dtb", type=str, default=None, help="DTB file to load" + ) + parser.add_argument( + "--kernel", type=str, default=default_kernel, help="Linux kernel" + ) + parser.add_argument( + "--disk-image", + type=str, + default=default_disk, + help="Disk to instantiate", + ) + parser.add_argument( + "--root-device", + type=str, + default=default_root_device, + help="OS device name for root partition (default: {})".format( + default_root_device + ), + ) + parser.add_argument( + "--script", type=str, default="", help="Linux bootscript" + ) + parser.add_argument( + "--cpu", + choices=list(cpu_types.keys()), + default="minor", + help="CPU model to use", + ) parser.add_argument("--cpu-freq", type=str, default="4GHz") parser.add_argument("-n", "--num-cpus", type=int, default=1) parser.add_argument("--checkpoint", action="store_true") 
parser.add_argument("--restore", type=str, default=None) - parser.add_argument("--mem-type", default="DDR3_1600_8x8", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") - parser.add_argument("--mem-channels", type=int, default=1, - help = "number of memory channels") - parser.add_argument("--mem-ranks", type=int, default=None, - help = "number of memory ranks per channel") parser.add_argument( - "--mem-size", action="store", type=str, default="2GiB", - help="Specify the physical memory size (single memory)") - parser.add_argument("--enable-dram-powerdown", action="store_true", - help="Enable low-power states in DRAMInterface") - parser.add_argument("--mem-channels-intlv", type=int, default=0, - help="Memory channels interleave") + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", + ) + parser.add_argument( + "--mem-channels", type=int, default=1, help="number of memory channels" + ) + parser.add_argument( + "--mem-ranks", + type=int, + default=None, + help="number of memory ranks per channel", + ) + parser.add_argument( + "--mem-size", + action="store", + type=str, + default="2GiB", + help="Specify the physical memory size (single memory)", + ) + parser.add_argument( + "--enable-dram-powerdown", + action="store_true", + help="Enable low-power states in DRAMInterface", + ) + parser.add_argument( + "--mem-channels-intlv", + type=int, + default=0, + help="Memory channels interleave", + ) parser.add_argument("--num-dirs", type=int, default=1) parser.add_argument("--num-l2caches", type=int, default=1) diff --git a/configs/example/arm/starter_fs.py b/configs/example/arm/starter_fs.py index 140f102a83..3a9a8762d6 100644 --- a/configs/example/arm/starter_fs.py +++ b/configs/example/arm/starter_fs.py @@ -45,7 +45,7 @@ from m5.objects import * from m5.options import * import argparse -m5.util.addToPath('../..') +m5.util.addToPath("../..") from common import SysPaths from common import 
ObjectList @@ -55,37 +55,37 @@ from common.cores.arm import O3_ARM_v7a, HPI import devices -default_kernel = 'vmlinux.arm64' -default_disk = 'linaro-minimal-aarch64.img' -default_root_device = '/dev/vda1' +default_kernel = "vmlinux.arm64" +default_disk = "linaro-minimal-aarch64.img" +default_root_device = "/dev/vda1" # Pre-defined CPU configurations. Each tuple must be ordered as : (cpu_class, # l1_icache_class, l1_dcache_class, l2_Cache_class). Any of # the cache class may be 'None' if the particular cache is not present. cpu_types = { - "atomic" : (AtomicSimpleCPU, None, None, None), - "minor" : (MinorCPU, - devices.L1I, devices.L1D, - devices.L2), - "hpi" : (HPI.HPI, - HPI.HPI_ICache, HPI.HPI_DCache, - HPI.HPI_L2), - "o3" : (O3_ARM_v7a.O3_ARM_v7a_3, - O3_ARM_v7a.O3_ARM_v7a_ICache, O3_ARM_v7a.O3_ARM_v7a_DCache, - O3_ARM_v7a.O3_ARM_v7aL2), + "atomic": (AtomicSimpleCPU, None, None, None), + "minor": (MinorCPU, devices.L1I, devices.L1D, devices.L2), + "hpi": (HPI.HPI, HPI.HPI_ICache, HPI.HPI_DCache, HPI.HPI_L2), + "o3": ( + O3_ARM_v7a.O3_ARM_v7a_3, + O3_ARM_v7a.O3_ARM_v7a_ICache, + O3_ARM_v7a.O3_ARM_v7a_DCache, + O3_ARM_v7a.O3_ARM_v7aL2, + ), } + def create_cow_image(name): """Helper function to create a Copy-on-Write disk image""" image = CowDiskImage() image.child.image_file = SysPaths.disk(name) - return image; + return image def create(args): - ''' Create and configure the system object. 
''' + """Create and configure the system object.""" if args.script and not os.path.isfile(args.script): print("Error: Bootscript %s does not exist" % args.script) @@ -96,13 +96,13 @@ def create(args): # Only simulate caches when using a timing CPU (e.g., the HPI model) want_caches = True if mem_mode == "timing" else False - system = devices.SimpleSystem(want_caches, - args.mem_size, - mem_mode=mem_mode, - workload=ArmFsLinux( - object_file= - SysPaths.binary(args.kernel)), - readfile=args.script) + system = devices.SimpleSystem( + want_caches, + args.mem_size, + mem_mode=mem_mode, + workload=ArmFsLinux(object_file=SysPaths.binary(args.kernel)), + readfile=args.script, + ) MemConfig.config_mem(args, system) @@ -114,7 +114,7 @@ def create(args): # disk. Attach the disk image using gem5's Copy-on-Write # functionality to avoid writing changes to the stored copy of # the disk image. - PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))), + PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))) ] # Attach the PCI devices to the system. The helper method in the @@ -128,10 +128,9 @@ def create(args): # Add CPU clusters to the system system.cpu_cluster = [ - devices.CpuCluster(system, - args.num_cores, - args.cpu_freq, "1.0V", - *cpu_types[args.cpu]), + devices.CpuCluster( + system, args.num_cores, args.cpu_freq, "1.0V", *cpu_types[args.cpu] + ) ] # Create a cache hierarchy for the cluster. 
We are assuming that @@ -146,8 +145,9 @@ def create(args): system.workload.dtb_filename = args.dtb else: # No DTB specified: autogenerate DTB - system.workload.dtb_filename = \ - os.path.join(m5.options.outdir, 'system.dtb') + system.workload.dtb_filename = os.path.join( + m5.options.outdir, "system.dtb" + ) system.generateDtb(system.workload.dtb_filename) if args.initrd: @@ -197,41 +197,71 @@ def run(args): def main(): parser = argparse.ArgumentParser(epilog=__doc__) - parser.add_argument("--dtb", type=str, default=None, - help="DTB file to load") - parser.add_argument("--kernel", type=str, default=default_kernel, - help="Linux kernel") - parser.add_argument("--initrd", type=str, default=None, - help="initrd/initramfs file to load") - parser.add_argument("--disk-image", type=str, - default=default_disk, - help="Disk to instantiate") - parser.add_argument("--root-device", type=str, - default=default_root_device, - help="OS device name for root partition (default: {})" - .format(default_root_device)) - parser.add_argument("--script", type=str, default="", - help = "Linux bootscript") - parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()), - default="atomic", - help="CPU model to use") + parser.add_argument( + "--dtb", type=str, default=None, help="DTB file to load" + ) + parser.add_argument( + "--kernel", type=str, default=default_kernel, help="Linux kernel" + ) + parser.add_argument( + "--initrd", + type=str, + default=None, + help="initrd/initramfs file to load", + ) + parser.add_argument( + "--disk-image", + type=str, + default=default_disk, + help="Disk to instantiate", + ) + parser.add_argument( + "--root-device", + type=str, + default=default_root_device, + help="OS device name for root partition (default: {})".format( + default_root_device + ), + ) + parser.add_argument( + "--script", type=str, default="", help="Linux bootscript" + ) + parser.add_argument( + "--cpu", + type=str, + choices=list(cpu_types.keys()), + default="atomic", + 
help="CPU model to use", + ) parser.add_argument("--cpu-freq", type=str, default="4GHz") - parser.add_argument("--num-cores", type=int, default=1, - help="Number of CPU cores") - parser.add_argument("--mem-type", default="DDR3_1600_8x8", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") - parser.add_argument("--mem-channels", type=int, default=1, - help = "number of memory channels") - parser.add_argument("--mem-ranks", type=int, default=None, - help = "number of memory ranks per channel") - parser.add_argument("--mem-size", action="store", type=str, - default="2GB", - help="Specify the physical memory size") + parser.add_argument( + "--num-cores", type=int, default=1, help="Number of CPU cores" + ) + parser.add_argument( + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", + ) + parser.add_argument( + "--mem-channels", type=int, default=1, help="number of memory channels" + ) + parser.add_argument( + "--mem-ranks", + type=int, + default=None, + help="number of memory ranks per channel", + ) + parser.add_argument( + "--mem-size", + action="store", + type=str, + default="2GB", + help="Specify the physical memory size", + ) parser.add_argument("--checkpoint", action="store_true") parser.add_argument("--restore", type=str, default=None) - args = parser.parse_args() root = Root(full_system=True) diff --git a/configs/example/arm/starter_se.py b/configs/example/arm/starter_se.py index d80f7498d6..08c3d74fbd 100644 --- a/configs/example/arm/starter_se.py +++ b/configs/example/arm/starter_se.py @@ -45,7 +45,7 @@ from m5.objects import * import argparse import shlex -m5.util.addToPath('../..') +m5.util.addToPath("../..") from common import ObjectList from common import MemConfig @@ -54,25 +54,20 @@ from common.cores.arm import HPI import devices - # Pre-defined CPU configurations. 
Each tuple must be ordered as : (cpu_class, # l1_icache_class, l1_dcache_class, walk_cache_class, l2_Cache_class). Any of # the cache class may be 'None' if the particular cache is not present. cpu_types = { - "atomic" : ( AtomicSimpleCPU, None, None, None), - "minor" : (MinorCPU, - devices.L1I, devices.L1D, - devices.L2), - "hpi" : ( HPI.HPI, - HPI.HPI_ICache, HPI.HPI_DCache, - HPI.HPI_L2) + "atomic": (AtomicSimpleCPU, None, None, None), + "minor": (MinorCPU, devices.L1I, devices.L1D, devices.L2), + "hpi": (HPI.HPI, HPI.HPI_ICache, HPI.HPI_DCache, HPI.HPI_L2), } class SimpleSeSystem(System): - ''' + """ Example system class for syscall emulation mode - ''' + """ # Use a fixed cache line size of 64 bytes cache_line_size = 64 @@ -87,8 +82,9 @@ class SimpleSeSystem(System): # Create a voltage and clock domain for system components self.voltage_domain = VoltageDomain(voltage="3.3V") - self.clk_domain = SrcClockDomain(clock="1GHz", - voltage_domain=self.voltage_domain) + self.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=self.voltage_domain + ) # Create the off-chip memory bus. self.membus = SystemXBar() @@ -97,13 +93,11 @@ class SimpleSeSystem(System): # and to perform debug accesses. self.system_port = self.membus.cpu_side_ports - # Add CPUs to the system. A cluster of CPUs typically have # private L1 caches and a shared L2 cache. - self.cpu_cluster = devices.CpuCluster(self, - args.num_cores, - args.cpu_freq, "1.2V", - *cpu_types[args.cpu]) + self.cpu_cluster = devices.CpuCluster( + self, args.num_cores, args.cpu_freq, "1.2V", *cpu_types[args.cpu] + ) # Create a cache hierarchy (unless we are simulating a # functional CPU in atomic memory mode) for the CPU cluster @@ -129,6 +123,7 @@ class SimpleSeSystem(System): def numCpus(self): return self._num_cpus + def get_processes(cmd): """Interprets commands to run and returns a list of processes""" @@ -147,14 +142,14 @@ def get_processes(cmd): def create(args): - ''' Create and configure the system object. 
''' + """Create and configure the system object.""" system = SimpleSeSystem(args) # Tell components about the expected physical memory ranges. This # is, for example, used by the MemConfig helper to determine where # to map DRAMs in the physical address space. - system.mem_ranges = [ AddrRange(start=0, size=args.mem_size) ] + system.mem_ranges = [AddrRange(start=0, size=args.mem_size)] # Configure the off-chip memory system. MemConfig.config_mem(args, system) @@ -163,8 +158,10 @@ def create(args): # that we can pass to gem5. processes = get_processes(args.commands_to_run) if len(processes) != args.num_cores: - print("Error: Cannot map %d command(s) onto %d CPU(s)" % - (len(processes), args.num_cores)) + print( + "Error: Cannot map %d command(s) onto %d CPU(s)" + % (len(processes), args.num_cores) + ) sys.exit(1) system.workload = SEWorkload.init_compatible(processes[0].executable) @@ -179,24 +176,45 @@ def create(args): def main(): parser = argparse.ArgumentParser(epilog=__doc__) - parser.add_argument("commands_to_run", metavar="command(s)", nargs='*', - help="Command(s) to run") - parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()), - default="atomic", - help="CPU model to use") + parser.add_argument( + "commands_to_run", + metavar="command(s)", + nargs="*", + help="Command(s) to run", + ) + parser.add_argument( + "--cpu", + type=str, + choices=list(cpu_types.keys()), + default="atomic", + help="CPU model to use", + ) parser.add_argument("--cpu-freq", type=str, default="4GHz") - parser.add_argument("--num-cores", type=int, default=1, - help="Number of CPU cores") - parser.add_argument("--mem-type", default="DDR3_1600_8x8", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") - parser.add_argument("--mem-channels", type=int, default=2, - help = "number of memory channels") - parser.add_argument("--mem-ranks", type=int, default=None, - help = "number of memory ranks per channel") - parser.add_argument("--mem-size", 
action="store", type=str, - default="2GB", - help="Specify the physical memory size") + parser.add_argument( + "--num-cores", type=int, default=1, help="Number of CPU cores" + ) + parser.add_argument( + "--mem-type", + default="DDR3_1600_8x8", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", + ) + parser.add_argument( + "--mem-channels", type=int, default=2, help="number of memory channels" + ) + parser.add_argument( + "--mem-ranks", + type=int, + default=None, + help="number of memory ranks per channel", + ) + parser.add_argument( + "--mem-size", + action="store", + type=str, + default="2GB", + help="Specify the physical memory size", + ) args = parser.parse_args() diff --git a/configs/example/arm/workloads.py b/configs/example/arm/workloads.py index 1fb9d00204..5c70dabfc2 100644 --- a/configs/example/arm/workloads.py +++ b/configs/example/arm/workloads.py @@ -42,8 +42,10 @@ from m5.options import * from common.ObjectList import ObjectList from common.SysPaths import binary, disk + class ArmBaremetal(ArmFsWorkload): - """ Baremetal workload """ + """Baremetal workload""" + dtb_addr = 0 def __init__(self, obj, system, **kwargs): @@ -51,6 +53,7 @@ class ArmBaremetal(ArmFsWorkload): self.object_file = obj + class ArmTrustedFirmware(ArmFsWorkload): """ Arm Trusted Firmware (TFA) workload. 
@@ -69,20 +72,22 @@ class ArmTrustedFirmware(ArmFsWorkload): https://github.com/ARM-software/arm-trusted-firmware """ + dtb_addr = 0 def __init__(self, obj, system, **kwargs): super(ArmTrustedFirmware, self).__init__(**kwargs) - self.extras = [ binary('bl1.bin'), binary('fip.bin'), ] + self.extras = [binary("bl1.bin"), binary("fip.bin")] self.extras_addrs = [ system.realview.bootmem.range.start, - system.realview.flash0.range.start + system.realview.flash0.range.start, ] # Arm Trusted Firmware will provide a PSCI implementation system._have_psci = True + class _WorkloadList(ObjectList): def _add_objects(self): """Add all sub-classes of the base class in the object hierarchy.""" @@ -90,4 +95,5 @@ class _WorkloadList(ObjectList): for name, cls in inspect.getmembers(modname, self._is_obj_class): self._sub_classes[name] = cls -workload_list = _WorkloadList(getattr(m5.objects, 'ArmFsWorkload', None)) + +workload_list = _WorkloadList(getattr(m5.objects, "ArmFsWorkload", None)) diff --git a/configs/example/etrace_replay.py b/configs/example/etrace_replay.py index 0a45f936b1..ddbf01acf5 100644 --- a/configs/example/etrace_replay.py +++ b/configs/example/etrace_replay.py @@ -39,7 +39,7 @@ import argparse from m5.util import addToPath, fatal -addToPath('../') +addToPath("../") from common import Options from common import Simulation @@ -50,9 +50,11 @@ from common.Caches import * parser = argparse.ArgumentParser() Options.addCommonOptions(parser) -if '--ruby' in sys.argv: - print("This script does not support Ruby configuration, mainly" - " because Trace CPU has been tested only with classic memory system") +if "--ruby" in sys.argv: + print( + "This script does not support Ruby configuration, mainly" + " because Trace CPU has been tested only with classic memory system" + ) sys.exit(1) args = parser.parse_args() @@ -60,8 +62,10 @@ args = parser.parse_args() numThreads = 1 if args.cpu_type != "TraceCPU": - fatal("This is a script for elastic trace replay simulation, use "\ - 
"--cpu-type=TraceCPU\n"); + fatal( + "This is a script for elastic trace replay simulation, use " + "--cpu-type=TraceCPU\n" + ) if args.num_cpus > 1: fatal("This script does not support multi-processor trace replay.\n") @@ -71,27 +75,30 @@ if args.num_cpus > 1: (CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(args) CPUClass.numThreads = numThreads -system = System(cpu = CPUClass(cpu_id=0), - mem_mode = test_mem_mode, - mem_ranges = [AddrRange(args.mem_size)], - cache_line_size = args.cacheline_size) +system = System( + cpu=CPUClass(cpu_id=0), + mem_mode=test_mem_mode, + mem_ranges=[AddrRange(args.mem_size)], + cache_line_size=args.cacheline_size, +) # Create a top-level voltage domain -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) # Create a source clock for the system. This is used as the clock period for # xbar and memory -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # Create a CPU voltage domain system.cpu_voltage_domain = VoltageDomain() # Create a separate clock domain for the CPUs. In case of Trace CPUs this clock # is actually used only by the caches connected to the CPU. -system.cpu_clk_domain = SrcClockDomain(clock = args.cpu_clock, - voltage_domain = - system.cpu_voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock=args.cpu_clock, voltage_domain=system.cpu_voltage_domain +) # All cpus belong to a common cpu_clk_domain, therefore running at a common # frequency. 
@@ -104,8 +111,8 @@ for cpu in system.cpu: cpu.createThreads() # Assign input trace files to the Trace CPU -system.cpu.instTraceFile=args.inst_trace_file -system.cpu.dataTraceFile=args.data_trace_file +system.cpu.instTraceFile = args.inst_trace_file +system.cpu.dataTraceFile = args.data_trace_file # Configure the classic memory system args MemClass = Simulation.setMemClass(args) @@ -114,5 +121,5 @@ system.system_port = system.membus.cpu_side_ports CacheConfig.config_cache(args, system) MemConfig.config_mem(args, system) -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) Simulation.run(args, root, system, FutureClass) diff --git a/configs/example/fs.py b/configs/example/fs.py index 6a64a849d5..0e31cfccac 100644 --- a/configs/example/fs.py +++ b/configs/example/fs.py @@ -47,8 +47,10 @@ from m5.defines import buildEnv from m5.objects import * from m5.util import addToPath, fatal, warn from m5.util.fdthelper import * +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa -addToPath('../') +addToPath("../") from ruby import Ruby @@ -63,10 +65,13 @@ from common import ObjectList from common.Caches import * from common import Options + def cmd_line_template(): if args.command_line and args.command_line_file: - print("Error: --command-line and --command-line-file are " - "mutually exclusive") + print( + "Error: --command-line and --command-line-file are " + "mutually exclusive" + ) sys.exit(1) if args.command_line: return args.command_line @@ -74,19 +79,23 @@ def cmd_line_template(): return open(args.command_line_file).read().strip() return None + def build_test_system(np): cmdline = cmd_line_template() - if buildEnv['TARGET_ISA'] == "mips": + isa = get_runtime_isa() + if isa == ISA.MIPS: test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline) - elif buildEnv['TARGET_ISA'] == "sparc": + elif isa == ISA.SPARC: test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline) - elif 
buildEnv['TARGET_ISA'] == "riscv": - test_sys = makeBareMetalRiscvSystem(test_mem_mode, bm[0], - cmdline=cmdline) - elif buildEnv['TARGET_ISA'] == "x86": - test_sys = makeLinuxX86System(test_mem_mode, np, bm[0], args.ruby, - cmdline=cmdline) - elif buildEnv['TARGET_ISA'] == "arm": + elif isa == ISA.RISCV: + test_sys = makeBareMetalRiscvSystem( + test_mem_mode, bm[0], cmdline=cmdline + ) + elif isa == ISA.X86: + test_sys = makeLinuxX86System( + test_mem_mode, np, bm[0], args.ruby, cmdline=cmdline + ) + elif isa == ISA.ARM: test_sys = makeArmSystem( test_mem_mode, args.machine_type, @@ -103,27 +112,28 @@ def build_test_system(np): if args.enable_context_switch_stats_dump: test_sys.enable_context_switch_stats_dump = True else: - fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA']) + fatal("Incapable of building %s full system!", isa.name) # Set the cache line size for the entire system test_sys.cache_line_size = args.cacheline_size # Create a top-level voltage domain - test_sys.voltage_domain = VoltageDomain(voltage = args.sys_voltage) + test_sys.voltage_domain = VoltageDomain(voltage=args.sys_voltage) # Create a source clock for the system and set the clock period - test_sys.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = test_sys.voltage_domain) + test_sys.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=test_sys.voltage_domain + ) # Create a CPU voltage domain test_sys.cpu_voltage_domain = VoltageDomain() # Create a source clock for the CPUs and set the clock period - test_sys.cpu_clk_domain = SrcClockDomain(clock = args.cpu_clock, - voltage_domain = - test_sys.cpu_voltage_domain) + test_sys.cpu_clk_domain = SrcClockDomain( + clock=args.cpu_clock, voltage_domain=test_sys.cpu_voltage_domain + ) - if buildEnv['TARGET_ISA'] == 'riscv': + if buildEnv["USE_RISCV_ISA"]: test_sys.workload.bootloader = args.kernel elif args.kernel is not None: test_sys.workload.object_file = binary(args.kernel) @@ -134,17 +144,21 
@@ def build_test_system(np): test_sys.init_param = args.init_param # For now, assign all the CPUs to the same clock domain - test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i) - for i in range(np)] + test_sys.cpu = [ + TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i) + for i in range(np) + ] if args.ruby: - bootmem = getattr(test_sys, '_bootmem', None) - Ruby.create_system(args, True, test_sys, test_sys.iobus, - test_sys._dma_ports, bootmem) + bootmem = getattr(test_sys, "_bootmem", None) + Ruby.create_system( + args, True, test_sys, test_sys.iobus, test_sys._dma_ports, bootmem + ) # Create a seperate clock domain for Ruby - test_sys.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = test_sys.voltage_domain) + test_sys.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=test_sys.voltage_domain + ) # Connect the ruby io port to the PIO bus, # assuming that there is just one such port. @@ -163,11 +177,13 @@ def build_test_system(np): else: if args.caches or args.l2cache: # By default the IOCache runs at the system clock - test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges) + test_sys.iocache = IOCache(addr_ranges=test_sys.mem_ranges) test_sys.iocache.cpu_side = test_sys.iobus.mem_side_ports test_sys.iocache.mem_side = test_sys.membus.cpu_side_ports elif not args.external_memory_system: - test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges) + test_sys.iobridge = Bridge( + delay="50ns", ranges=test_sys.mem_ranges + ) test_sys.iobridge.cpu_side_port = test_sys.iobus.mem_side_ports test_sys.iobridge.mem_side_port = test_sys.membus.cpu_side_ports @@ -176,7 +192,9 @@ def build_test_system(np): if not ObjectList.is_noncaching_cpu(TestCPUClass): fatal("SimPoint generation should be done with atomic cpu") if np > 1: - fatal("SimPoint generation not supported with more than one CPUs") + fatal( + "SimPoint generation not supported with more than one CPUs" + ) for i in 
range(np): if args.simpoint_profile: @@ -189,9 +207,11 @@ def build_test_system(np): test_sys.cpu[i].branchPred = bpClass() if args.indirect_bp_type: IndirectBPClass = ObjectList.indirect_bp_list.get( - args.indirect_bp_type) - test_sys.cpu[i].branchPred.indirectBranchPred = \ - IndirectBPClass() + args.indirect_bp_type + ) + test_sys.cpu[ + i + ].branchPred.indirectBranchPred = IndirectBPClass() test_sys.cpu[i].createThreads() # If elastic tracing is enabled when not restoring from checkpoint and @@ -201,20 +221,24 @@ def build_test_system(np): # If restoring from checkpoint or fast forwarding, the code that does this for # FutureCPUClass is in the Simulation module. If the check passes then the # elastic trace probe is attached to the switch CPUs. - if args.elastic_trace_en and args.checkpoint_restore == None and \ - not args.fast_forward: + if ( + args.elastic_trace_en + and args.checkpoint_restore == None + and not args.fast_forward + ): CpuConfig.config_etrace(TestCPUClass, test_sys.cpu, args) CacheConfig.config_cache(args, test_sys) MemConfig.config_mem(args, test_sys) - if ObjectList.is_kvm_cpu(TestCPUClass) or \ - ObjectList.is_kvm_cpu(FutureClass): + if ObjectList.is_kvm_cpu(TestCPUClass) or ObjectList.is_kvm_cpu( + FutureClass + ): # Assign KVM CPUs to their own event queues / threads. This # has to be done after creating caches and other child objects # since these mustn't inherit the CPU event queue. - for i,cpu in enumerate(test_sys.cpu): + for i, cpu in enumerate(test_sys.cpu): # Child objects usually inherit the parent's event # queue. Override that and use the same event queue for # all devices. @@ -225,42 +249,52 @@ def build_test_system(np): return test_sys + def build_drive_system(np): # driver system CPU is always simple, so is the memory # Note this is an assignment of a class, not an instance. 
DriveCPUClass = AtomicSimpleCPU - drive_mem_mode = 'atomic' + drive_mem_mode = "atomic" DriveMemClass = SimpleMemory cmdline = cmd_line_template() - if buildEnv['TARGET_ISA'] == 'mips': + if buildEnv["USE_MIPS_ISA"]: drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1], cmdline=cmdline) - elif buildEnv['TARGET_ISA'] == 'sparc': + elif buildEnv["USE_SPARC_ISA"]: drive_sys = makeSparcSystem(drive_mem_mode, bm[1], cmdline=cmdline) - elif buildEnv['TARGET_ISA'] == 'x86': - drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1], - cmdline=cmdline) - elif buildEnv['TARGET_ISA'] == 'arm': - drive_sys = makeArmSystem(drive_mem_mode, args.machine_type, np, - bm[1], args.dtb_filename, cmdline=cmdline) + elif buildEnv["USE_X86_ISA"]: + drive_sys = makeLinuxX86System( + drive_mem_mode, np, bm[1], cmdline=cmdline + ) + elif buildEnv["USE_ARM_ISA"]: + drive_sys = makeArmSystem( + drive_mem_mode, + args.machine_type, + np, + bm[1], + args.dtb_filename, + cmdline=cmdline, + ) # Create a top-level voltage domain - drive_sys.voltage_domain = VoltageDomain(voltage = args.sys_voltage) + drive_sys.voltage_domain = VoltageDomain(voltage=args.sys_voltage) # Create a source clock for the system and set the clock period - drive_sys.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = drive_sys.voltage_domain) + drive_sys.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=drive_sys.voltage_domain + ) # Create a CPU voltage domain drive_sys.cpu_voltage_domain = VoltageDomain() # Create a source clock for the CPUs and set the clock period - drive_sys.cpu_clk_domain = SrcClockDomain(clock = args.cpu_clock, - voltage_domain = - drive_sys.cpu_voltage_domain) + drive_sys.cpu_clk_domain = SrcClockDomain( + clock=args.cpu_clock, voltage_domain=drive_sys.cpu_voltage_domain + ) - drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain, - cpu_id=0) + drive_sys.cpu = DriveCPUClass( + clk_domain=drive_sys.cpu_clk_domain, cpu_id=0 + ) 
drive_sys.cpu.createThreads() drive_sys.cpu.createInterruptController() drive_sys.cpu.connectBus(drive_sys.membus) @@ -270,15 +304,15 @@ def build_drive_system(np): if ObjectList.is_kvm_cpu(DriveCPUClass): drive_sys.kvm_vm = KvmVM() - drive_sys.iobridge = Bridge(delay='50ns', - ranges = drive_sys.mem_ranges) + drive_sys.iobridge = Bridge(delay="50ns", ranges=drive_sys.mem_ranges) drive_sys.iobridge.cpu_side_port = drive_sys.iobus.mem_side_ports drive_sys.iobridge.mem_side_port = drive_sys.membus.cpu_side_ports # Create the appropriate memory controllers and connect them to the # memory bus - drive_sys.mem_ctrls = [DriveMemClass(range = r) - for r in drive_sys.mem_ranges] + drive_sys.mem_ctrls = [ + DriveMemClass(range=r) for r in drive_sys.mem_ranges + ] for i in range(len(drive_sys.mem_ctrls)): drive_sys.mem_ctrls[i].port = drive_sys.membus.mem_side_ports @@ -286,13 +320,14 @@ def build_drive_system(np): return drive_sys + # Add args parser = argparse.ArgumentParser() Options.addCommonOptions(parser) Options.addFSOptions(parser) # Add the ruby specific and protocol specific args -if '--ruby' in sys.argv: +if "--ruby" in sys.argv: Ruby.define_options(parser) args = parser.parse_args() @@ -312,13 +347,29 @@ if args.benchmark: sys.exit(1) else: if args.dual: - bm = [SysConfig(disks=args.disk_image, rootdev=args.root_device, - mem=args.mem_size, os_type=args.os_type), - SysConfig(disks=args.disk_image, rootdev=args.root_device, - mem=args.mem_size, os_type=args.os_type)] + bm = [ + SysConfig( + disks=args.disk_image, + rootdev=args.root_device, + mem=args.mem_size, + os_type=args.os_type, + ), + SysConfig( + disks=args.disk_image, + rootdev=args.root_device, + mem=args.mem_size, + os_type=args.os_type, + ), + ] else: - bm = [SysConfig(disks=args.disk_image, rootdev=args.root_device, - mem=args.mem_size, os_type=args.os_type)] + bm = [ + SysConfig( + disks=args.disk_image, + rootdev=args.root_device, + mem=args.mem_size, + os_type=args.os_type, + ) + ] np = 
args.num_cpus @@ -329,28 +380,29 @@ if len(bm) == 2: root = makeDualRoot(True, test_sys, drive_sys, args.etherdump) elif len(bm) == 1 and args.dist: # This system is part of a dist-gem5 simulation - root = makeDistRoot(test_sys, - args.dist_rank, - args.dist_size, - args.dist_server_name, - args.dist_server_port, - args.dist_sync_repeat, - args.dist_sync_start, - args.ethernet_linkspeed, - args.ethernet_linkdelay, - args.etherdump); + root = makeDistRoot( + test_sys, + args.dist_rank, + args.dist_size, + args.dist_server_name, + args.dist_server_port, + args.dist_sync_repeat, + args.dist_sync_start, + args.ethernet_linkspeed, + args.ethernet_linkdelay, + args.etherdump, + ) elif len(bm) == 1: root = Root(full_system=True, system=test_sys) else: print("Error I don't know how to create more than 2 systems.") sys.exit(1) -if ObjectList.is_kvm_cpu(TestCPUClass) or \ - ObjectList.is_kvm_cpu(FutureClass): +if ObjectList.is_kvm_cpu(TestCPUClass) or ObjectList.is_kvm_cpu(FutureClass): # Required for running kvm on multiple host cores. # Uses gem5's parallel event queue feature # Note: The simulator is quite picky about this number! 
- root.sim_quantum = int(1e9) # 1 ms + root.sim_quantum = int(1e9) # 1 ms if args.timesync: root.time_sync_enable = True @@ -358,22 +410,26 @@ if args.timesync: if args.frame_capture: VncServer.frame_capture = True -if buildEnv['TARGET_ISA'] == "arm" and not args.bare_metal \ - and not args.dtb_filename: - if args.machine_type not in ["VExpress_GEM5", - "VExpress_GEM5_V1", - "VExpress_GEM5_V2", - "VExpress_GEM5_Foundation"]: - warn("Can only correctly generate a dtb for VExpress_GEM5_* " \ - "platforms, unless custom hardware models have been equipped "\ - "with generation functionality.") +if buildEnv["USE_ARM_ISA"] and not args.bare_metal and not args.dtb_filename: + if args.machine_type not in [ + "VExpress_GEM5", + "VExpress_GEM5_V1", + "VExpress_GEM5_V2", + "VExpress_GEM5_Foundation", + ]: + warn( + "Can only correctly generate a dtb for VExpress_GEM5_* " + "platforms, unless custom hardware models have been equipped " + "with generation functionality." + ) # Generate a Device Tree - for sysname in ('system', 'testsys', 'drivesys'): + for sysname in ("system", "testsys", "drivesys"): if hasattr(root, sysname): sys = getattr(root, sysname) - sys.workload.dtb_filename = \ - os.path.join(m5.options.outdir, '%s.dtb' % sysname) + sys.workload.dtb_filename = os.path.join( + m5.options.outdir, "%s.dtb" % sysname + ) sys.generateDtb(sys.workload.dtb_filename) if args.wait_gdb: diff --git a/configs/example/garnet_synth_traffic.py b/configs/example/garnet_synth_traffic.py index c1cd8d0efe..1da82e11b8 100644 --- a/configs/example/garnet_synth_traffic.py +++ b/configs/example/garnet_synth_traffic.py @@ -32,7 +32,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -addToPath('../') +addToPath("../") from common import Options from ruby import Ruby @@ -45,41 +45,77 @@ m5_root = os.path.dirname(config_root) parser = argparse.ArgumentParser() Options.addNoISAOptions(parser) -parser.add_argument("--synthetic", default="uniform_random", - 
choices=['uniform_random', 'tornado', 'bit_complement', \ - 'bit_reverse', 'bit_rotation', 'neighbor', \ - 'shuffle', 'transpose']) +parser.add_argument( + "--synthetic", + default="uniform_random", + choices=[ + "uniform_random", + "tornado", + "bit_complement", + "bit_reverse", + "bit_rotation", + "neighbor", + "shuffle", + "transpose", + ], +) -parser.add_argument("-i", "--injectionrate", type=float, default=0.1, - metavar="I", - help="Injection rate in packets per cycle per node. \ +parser.add_argument( + "-i", + "--injectionrate", + type=float, + default=0.1, + metavar="I", + help="Injection rate in packets per cycle per node. \ Takes decimal value between 0 to 1 (eg. 0.225). \ - Number of digits after 0 depends upon --precision.") + Number of digits after 0 depends upon --precision.", +) -parser.add_argument("--precision", type=int, default=3, - help="Number of digits of precision after decimal point\ - for injection rate") +parser.add_argument( + "--precision", + type=int, + default=3, + help="Number of digits of precision after decimal point\ + for injection rate", +) -parser.add_argument("--sim-cycles", type=int, default=1000, - help="Number of simulation cycles") +parser.add_argument( + "--sim-cycles", type=int, default=1000, help="Number of simulation cycles" +) -parser.add_argument("--num-packets-max", type=int, default=-1, - help="Stop injecting after --num-packets-max.\ - Set to -1 to disable.") +parser.add_argument( + "--num-packets-max", + type=int, + default=-1, + help="Stop injecting after --num-packets-max.\ + Set to -1 to disable.", +) -parser.add_argument("--single-sender-id", type=int, default=-1, - help="Only inject from this sender.\ - Set to -1 to disable.") +parser.add_argument( + "--single-sender-id", + type=int, + default=-1, + help="Only inject from this sender.\ + Set to -1 to disable.", +) -parser.add_argument("--single-dest-id", type=int, default=-1, - help="Only send to this destination.\ - Set to -1 to disable.") 
+parser.add_argument( + "--single-dest-id", + type=int, + default=-1, + help="Only send to this destination.\ + Set to -1 to disable.", +) -parser.add_argument("--inj-vnet", type=int, default=-1, - choices=[-1,0,1,2], - help="Only inject in this vnet (0, 1 or 2).\ +parser.add_argument( + "--inj-vnet", + type=int, + default=-1, + choices=[-1, 0, 1, 2], + help="Only inject in this vnet (0, 1 or 2).\ 0 and 1 are 1-flit, 2 is 5-flit.\ - Set to -1 to inject randomly in all vnets.") + Set to -1 to inject randomly in all vnets.", +) # # Add the ruby specific and protocol specific options @@ -88,51 +124,56 @@ Ruby.define_options(parser) args = parser.parse_args() -cpus = [ GarnetSyntheticTraffic( - num_packets_max=args.num_packets_max, - single_sender=args.single_sender_id, - single_dest=args.single_dest_id, - sim_cycles=args.sim_cycles, - traffic_type=args.synthetic, - inj_rate=args.injectionrate, - inj_vnet=args.inj_vnet, - precision=args.precision, - num_dest=args.num_dirs) \ - for i in range(args.num_cpus) ] +cpus = [ + GarnetSyntheticTraffic( + num_packets_max=args.num_packets_max, + single_sender=args.single_sender_id, + single_dest=args.single_dest_id, + sim_cycles=args.sim_cycles, + traffic_type=args.synthetic, + inj_rate=args.injectionrate, + inj_vnet=args.inj_vnet, + precision=args.precision, + num_dest=args.num_dirs, + ) + for i in range(args.num_cpus) +] # create the desired simulated system -system = System(cpu = cpus, mem_ranges = [AddrRange(args.mem_size)]) +system = System(cpu=cpus, mem_ranges=[AddrRange(args.mem_size)]) # Create a top-level voltage domain and clock domain -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) Ruby.create_system(args, False, system) # Create a 
seperate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) i = 0 for ruby_port in system.ruby._cpu_ports: - # - # Tie the cpu test ports to the ruby cpu port - # - cpus[i].test = ruby_port.in_ports - i += 1 + # + # Tie the cpu test ports to the ruby cpu port + # + cpus[i].test = ruby_port.in_ports + i += 1 # ----------------------- # run simulation # ----------------------- -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" # Not much point in this being higher than the L1 latency -m5.ticks.setGlobalFrequency('1ps') +m5.ticks.setGlobalFrequency("1ps") # instantiate configuration m5.instantiate() @@ -140,4 +181,4 @@ m5.instantiate() # simulate until program terminates exit_event = m5.simulate(args.abs_max_tick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/example/gem5_library/arm-hello.py b/configs/example/gem5_library/arm-hello.py index d94fb33b5f..d66eee5790 100644 --- a/configs/example/gem5_library/arm-hello.py +++ b/configs/example/gem5_library/arm-hello.py @@ -93,7 +93,6 @@ simulator.run() print( "Exiting @ tick {} because {}.".format( - simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) ) diff --git a/configs/example/gem5_library/arm-ubuntu-boot-exit.py b/configs/example/gem5_library/arm-ubuntu-run.py similarity index 60% rename from configs/example/gem5_library/arm-ubuntu-boot-exit.py rename to configs/example/gem5_library/arm-ubuntu-run.py index 163f45a1d3..7f976f06db 100644 --- a/configs/example/gem5_library/arm-ubuntu-boot-exit.py +++ 
b/configs/example/gem5_library/arm-ubuntu-run.py @@ -25,18 +25,17 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ -This script shows an example of booting an ARM based full system Ubuntu -disk image using the gem5's standard library. This simulation boots the disk -image using 2 TIMING CPU cores. The simulation ends when the startup is -completed successfully (i.e. when an `m5_exit instruction is reached on -successful boot). +This script further shows an example of booting an ARM based full system Ubuntu +disk image. This simulation boots the disk image using 2 TIMING CPU cores. The +simulation ends when the startup is completed successfully (i.e. when an +`m5_exit instruction is reached on successful boot). Usage ----- ``` scons build/ARM/gem5.opt -j -./build/ARM/gem5.opt configs/example/gem5_library/arm-ubuntu-boot-exit.py +./build/ARM/gem5.opt configs/example/gem5_library/arm-ubuntu-run.py ``` """ @@ -44,59 +43,44 @@ scons build/ARM/gem5.opt -j from gem5.isas import ISA from m5.objects import ArmDefaultRelease from gem5.utils.requires import requires -from gem5.resources.resource import Resource +from gem5.resources.workload import Workload from gem5.simulate.simulator import Simulator from m5.objects import VExpress_GEM5_Foundation +from gem5.coherence_protocol import CoherenceProtocol from gem5.components.boards.arm_board import ArmBoard from gem5.components.memory import DualChannelDDR4_2400 from gem5.components.processors.cpu_types import CPUTypes from gem5.components.processors.simple_processor import SimpleProcessor -# This runs a check to ensure the gem5 binary is compiled for ARM. -requires( - isa_required=ISA.ARM, -) +# This runs a check to ensure the gem5 binary is compiled for ARM and the +# protocol is CHI. -# With ARM, we use simple caches. 
+requires(isa_required=ISA.ARM) -from gem5.components.cachehierarchies.classic\ - .private_l1_private_l2_cache_hierarchy import ( +from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( PrivateL1PrivateL2CacheHierarchy, ) - # Here we setup the parameters of the l1 and l2 caches. - cache_hierarchy = PrivateL1PrivateL2CacheHierarchy( - l1d_size="16kB", - l1i_size="16kB", - l2_size="256kB", + l1d_size="16kB", l1i_size="16kB", l2_size="256kB" ) # Memory: Dual Channel DDR4 2400 DRAM device. -memory = DualChannelDDR4_2400(size = "2GB") +memory = DualChannelDDR4_2400(size="2GB") # Here we setup the processor. We use a simple TIMING processor. The config # script was also tested with ATOMIC processor. -processor = SimpleProcessor( - cpu_type=CPUTypes.TIMING, - num_cores=2, -) +processor = SimpleProcessor(cpu_type=CPUTypes.TIMING, num_cores=2, isa=ISA.ARM) # The ArmBoard requires a `release` to be specified. This adds all the # extensions or features to the system. We are setting this to Armv8 -# (ArmDefaultRelease) in this example config script. However, the ArmBoard -# currently does not support SECURITY extension. - +# (ArmDefaultRelease) in this example config script. release = ArmDefaultRelease() -# Removing the SECURITY extension. - -release.extensions.remove(release.extensions[2]) - # The platform sets up the memory ranges of all the on-chip and off-chip # devices present on the ARM system. @@ -105,44 +89,22 @@ platform = VExpress_GEM5_Foundation() # Here we setup the board. The ArmBoard allows for Full-System ARM simulations. board = ArmBoard( - clk_freq = "3GHz", - processor = processor, - memory = memory, - cache_hierarchy = cache_hierarchy, - release = release, - platform = platform + clk_freq="3GHz", + processor=processor, + memory=memory, + cache_hierarchy=cache_hierarchy, + release=release, + platform=platform, ) -# Here we set the Full System workload. +# Here we set a full system workload. 
The "arm64-ubuntu-20.04-boot" boots +# Ubuntu 20.04. -# The `set_kernel_disk_workload` function on the ArmBoard accepts an ARM -# kernel, a disk image, and, path to the bootloader. - -board.set_kernel_disk_workload( - - # The ARM kernel will be automatically downloaded to the `~/.cache/gem5` - # directory if not already present. The arm-ubuntu-boot-exit was tested - # with `vmlinux.arm64` - - kernel = Resource("arm64-linux-kernel-5.4.49"), - - # The ARM ubuntu image will be automatically downloaded to the - # `~/.cache/gem5` directory if not already present. - - disk_image = Resource("arm64-ubuntu-18.04-img"), - - # We need to specify the path for the bootloader file. - - bootloader = Resource("arm64-bootloader-foundation"), - - # For the arm64-ubuntu-18.04.img, we need to specify the readfile content - - readfile_contents = "m5 exit" -) +board.set_workload(Workload("arm64-ubuntu-20.04-boot")) # We define the system with the aforementioned system defined. -simulator = Simulator(board = board) +simulator = Simulator(board=board) # Once the system successfully boots, it encounters an # `m5_exit instruction encountered`. We stop the simulation then. When the diff --git a/configs/example/gem5_library/checkpoints/riscv-hello-restore-checkpoint.py b/configs/example/gem5_library/checkpoints/riscv-hello-restore-checkpoint.py index 4b0626790a..e112b76ddb 100644 --- a/configs/example/gem5_library/checkpoints/riscv-hello-restore-checkpoint.py +++ b/configs/example/gem5_library/checkpoints/riscv-hello-restore-checkpoint.py @@ -67,8 +67,9 @@ cache_hierarchy = NoCache() memory = SingleChannelDDR3_1600(size="32MB") # We use a simple Timing processor with one core. -processor = SimpleProcessor(cpu_type=CPUTypes.TIMING, isa=ISA.RISCV, - num_cores=1) +processor = SimpleProcessor( + cpu_type=CPUTypes.TIMING, isa=ISA.RISCV, num_cores=1 +) # The gem5 library simble board which can be used to run simple SE-mode # simulations. 
@@ -83,30 +84,23 @@ board = SimpleBoard( # program compiled to the RISCV ISA. The `Resource` class will automatically # download the binary from the gem5 Resources cloud bucket if it's not already # present. -board.set_se_binary_workload( - # the workload should be the same as the save-checkpoint script - Resource("riscv-hello") -) - -# Getting the pre-taken checkpoint from gem5-resources. This checkpoint +# We get the pre-taken checkpoint from gem5-resources. This checkpoint # was taken from running this gem5 configuration script, # configs/example/gem5_library/checkpoints/riscv-hello-save-checkpoint.py -checkpoint_resource = Resource("riscv-hello-example-checkpoint") +board.set_se_binary_workload( + # the workload should be the same as the save-checkpoint script + Resource("riscv-hello"), + checkpoint=Resource("riscv-hello-example-checkpoint-v22-1"), +) -# Now we restore the checkpoint by passing the path to the checkpoint to -# the Simulator object. The checkpoint_path could be a string containing -# the path to the checkpoint folder. However, here, we use gem5 resources -# to automatically download the checkpoint folder, and use .get_local_path() -# to obtain the path to that folder. 
-checkpoint_path = checkpoint_resource.get_local_path() -print("Restore a checkpoint at", checkpoint_path) -simulator = Simulator(board=board, full_system=False, - checkpoint_path=checkpoint_path) +simulator = Simulator( + board=board, + full_system=False, +) simulator.run() print( "Exiting @ tick {} because {}.".format( - simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) ) diff --git a/configs/example/gem5_library/checkpoints/riscv-hello-save-checkpoint.py b/configs/example/gem5_library/checkpoints/riscv-hello-save-checkpoint.py index fd81d45322..159c4b76e1 100644 --- a/configs/example/gem5_library/checkpoints/riscv-hello-save-checkpoint.py +++ b/configs/example/gem5_library/checkpoints/riscv-hello-save-checkpoint.py @@ -43,6 +43,7 @@ scons build/RISCV/gem5.opt ``` """ +import argparse from gem5.isas import ISA from gem5.utils.requires import requires from gem5.resources.resource import Resource @@ -53,6 +54,18 @@ from gem5.components.cachehierarchies.classic.no_cache import NoCache from gem5.components.processors.simple_processor import SimpleProcessor from gem5.simulate.simulator import Simulator +parser = argparse.ArgumentParser() + +parser.add_argument( + "--checkpoint-path", + type=str, + required=False, + default="riscv-hello-checkpoint/", + help="The directory to store the checkpoint.", +) + +args = parser.parse_args() + # This check ensures the gem5 binary is compiled to the RISCV ISA target. # If not, an exception will be thrown. requires(isa_required=ISA.RISCV) @@ -64,8 +77,9 @@ cache_hierarchy = NoCache() memory = SingleChannelDDR3_1600(size="32MB") # We use a simple Timing processor with one core. -processor = SimpleProcessor(cpu_type=CPUTypes.TIMING, isa=ISA.RISCV, - num_cores=1) +processor = SimpleProcessor( + cpu_type=CPUTypes.TIMING, isa=ISA.RISCV, num_cores=1 +) # The gem5 library simble board which can be used to run simple SE-mode # simulations. 
@@ -93,16 +107,14 @@ board.set_se_binary_workload( # Lastly we run the simulation. max_ticks = 10**6 simulator = Simulator(board=board, full_system=False) -simulator.run(max_ticks = max_ticks) +simulator.run(max_ticks=max_ticks) print( "Exiting @ tick {} because {}.".format( - simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) ) -checkpoint_path = "riscv-hello-checkpoint/" -print("Taking a checkpoint at", checkpoint_path) -simulator.save_checkpoint(checkpoint_path) +print("Taking a checkpoint at", args.checkpoint_path) +simulator.save_checkpoint(args.checkpoint_path) print("Done taking a checkpoint") diff --git a/configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py b/configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py new file mode 100644 index 0000000000..d2d1af730f --- /dev/null +++ b/configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+This configuration script shows an example of how to take checkpoints for
+SimPoints using the gem5 stdlib. Simpoints are set via a Workload and the
+gem5 SimPoint module will calculate where to take checkpoints based on the
+SimPoints, SimPoints interval length, and the warmup instruction length.
+
+This script builds a simple board with the gem5 stdlib with no cache and a
+simple memory structure to take checkpoints. Some of the components, such as
+cache hierarchy, can be changed when restoring checkpoints.
+
+Usage
+-----
+
+```
+scons build/X86/gem5.opt
+./build/X86/gem5.opt \
+    configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py
+
+./build/X86/gem5.opt \
+    configs/example/gem5_library/checkpoints/simpoints-se-restore.py
+```
+"""
+
+import argparse
+
+from gem5.simulate.exit_event import ExitEvent
+from gem5.simulate.simulator import Simulator
+from gem5.utils.requires import requires
+from gem5.components.boards.simple_board import SimpleBoard
+from gem5.components.memory.single_channel import SingleChannelDDR3_1600
+from gem5.components.processors.simple_processor import SimpleProcessor
+from gem5.components.processors.cpu_types import CPUTypes
+from gem5.isas import ISA
+from gem5.resources.workload import Workload
+from pathlib import Path
+from gem5.components.cachehierarchies.classic.no_cache import NoCache
+from gem5.simulate.exit_event_generators import (
+    save_checkpoint_generator,
+)
+
+requires(isa_required=ISA.X86)
+
+parser = argparse.ArgumentParser(
+    description="An example simpoint workload file path"
+)
+
+# The lone argument is a file path to a directory to store the checkpoints.
+
+parser.add_argument(
+    "--checkpoint-path",
+    type=str,
+    required=False,
+    default="se_checkpoint_folder/",
+    help="The directory to store the checkpoint.",
+)
+
+args = parser.parse_args()
+
+# When taking a checkpoint, the cache state is not saved, so the cache
+# hierarchy can be changed completely when restoring from a checkpoint.
+# By using NoCache() to take checkpoints, it can slightly improve the
+# performance when running in atomic mode, and it will not put any restrictions
+# on what people can do with the checkpoints.
+cache_hierarchy = NoCache()
+
+# Using simple memory to take checkpoints might slightly improve the
+# performance in atomic mode. The memory structure can be changed when
+# restoring from a checkpoint, but the size of the memory must be maintained.
+memory = SingleChannelDDR3_1600(size="2GB") + +processor = SimpleProcessor( + cpu_type=CPUTypes.ATOMIC, + isa=ISA.X86, + # SimPoints only works with one core + num_cores=1, +) + +board = SimpleBoard( + clk_freq="3GHz", + processor=processor, + memory=memory, + cache_hierarchy=cache_hierarchy, +) + +board.set_workload(Workload("x86-print-this-15000-with-simpoints")) + +dir = Path(args.checkpoint_path) +dir.mkdir(exist_ok=True) + +simulator = Simulator( + board=board, + on_exit_event={ + # using the SimPoints event generator in the standard library to take + # checkpoints + ExitEvent.SIMPOINT_BEGIN: save_checkpoint_generator(dir) + }, +) + +simulator.run() diff --git a/configs/example/gem5_library/checkpoints/simpoints-se-restore.py b/configs/example/gem5_library/checkpoints/simpoints-se-restore.py new file mode 100644 index 0000000000..f8f48d0ec1 --- /dev/null +++ b/configs/example/gem5_library/checkpoints/simpoints-se-restore.py @@ -0,0 +1,136 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This configuration script shows an example of how to restore a checkpoint that +was taken for SimPoints in the +configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py. +The SimPoints, SimPoints interval length, and the warmup instruction length +are passed into the SimPoint module, so the SimPoint object will store and +calculate the warmup instruction length for each SimPoints based on the +available instructions before reaching the start of the SimPoint. With the +Simulator module, exit event will be generated to stop when the warmup session +ends and the SimPoints interval ends. + +This script builds a more complex board than the board used for taking +checkpoint. 
+
+Usage
+-----
+
+```
+scons build/X86/gem5.opt
+./build/X86/gem5.opt \
+    configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py
+
+./build/X86/gem5.opt \
+    configs/example/gem5_library/checkpoints/simpoints-se-restore.py
+```
+
+"""
+
+from gem5.simulate.exit_event import ExitEvent
+from gem5.simulate.simulator import Simulator
+from gem5.utils.requires import requires
+from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import (
+    PrivateL1PrivateL2CacheHierarchy,
+)
+from gem5.components.boards.simple_board import SimpleBoard
+from gem5.components.memory import DualChannelDDR4_2400
+from gem5.components.processors.simple_processor import SimpleProcessor
+from gem5.components.processors.cpu_types import CPUTypes
+from gem5.isas import ISA
+from gem5.resources.resource import Resource
+from gem5.resources.workload import Workload
+
+from pathlib import Path
+from m5.stats import reset, dump
+
+requires(isa_required=ISA.X86)
+
+# The cache hierarchy can be different from the cache hierarchy used in taking
+# the checkpoints
+cache_hierarchy = PrivateL1PrivateL2CacheHierarchy(
+    l1d_size="32kB",
+    l1i_size="32kB",
+    l2_size="256kB",
+)
+
+# The memory structure can be different from the memory structure used in
+# taking the checkpoints, but the size of the memory must be maintained
+memory = DualChannelDDR4_2400(size="2GB")
+
+processor = SimpleProcessor(
+    cpu_type=CPUTypes.TIMING,
+    isa=ISA.X86,
+    num_cores=1,
+)
+
+board = SimpleBoard(
+    clk_freq="3GHz",
+    processor=processor,
+    memory=memory,
+    cache_hierarchy=cache_hierarchy,
+)
+
+# Here we obtain the workload from gem5 resources, the checkpoint in this
+# workload was generated from
+# `configs/example/gem5_library/checkpoints/simpoints-se-checkpoint.py`.
+board.set_workload( + Workload("x86-print-this-15000-with-simpoints-and-checkpoint") +) + + +def max_inst(): + warmed_up = False + while True: + if warmed_up: + print("end of SimPoint interval") + yield True + else: + print("end of warmup, starting to simulate SimPoint") + warmed_up = True + # Schedule a MAX_INSTS exit event during the simulation + simulator.schedule_max_insts( + board.get_simpoint().get_simpoint_interval() + ) + dump() + reset() + yield False + + +simulator = Simulator( + board=board, + on_exit_event={ExitEvent.MAX_INSTS: max_inst()}, +) + +# Schedule a MAX_INSTS exit event before the simulation begins the +# schedule_max_insts function only schedule event when the instruction length +# is greater than 0. +# In here, it schedules an exit event for the first SimPoint's warmup +# instructions +simulator.schedule_max_insts(board.get_simpoint().get_warmup_list()[0]) +simulator.run() diff --git a/configs/example/gem5_library/memory_traffic.py b/configs/example/gem5_library/memory_traffic.py new file mode 100644 index 0000000000..d6772d1e77 --- /dev/null +++ b/configs/example/gem5_library/memory_traffic.py @@ -0,0 +1,114 @@ +# Copyright (c) 2021 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This script is used for running a traffic generator connected to a memory +device. It supports linear and random accesses with a configurable amount +of write traffic. + +By default, this scripts runs with one channel (two pseudo channels) of HBM2 +and this channel is driven with 32GiB/s of traffic for 1ms. +""" + +import argparse + +from m5.objects import MemorySize +from gem5.components.boards.test_board import TestBoard + +from gem5.components.processors.linear_generator import LinearGenerator +from gem5.components.processors.random_generator import RandomGenerator + +from gem5.components.memory.hbm import HighBandwidthMemory +from gem5.components.memory.dram_interfaces.hbm import HBM_2000_4H_1x64 + +from gem5.simulate.simulator import Simulator + + +def generator_factory( + generator_class: str, rd_perc: int, mem_size: MemorySize +): + rd_perc = int(rd_perc) + if rd_perc > 100 or rd_perc < 0: + raise ValueError( + "Read percentage has to be an integer number between 0 and 100." 
+ ) + if generator_class == "LinearGenerator": + return LinearGenerator( + duration="1ms", rate="32GiB/s", max_addr=mem_size, rd_perc=rd_perc + ) + elif generator_class == "RandomGenerator": + return RandomGenerator( + duration="1ms", rate="32GiB/s", max_addr=mem_size, rd_perc=rd_perc + ) + else: + raise ValueError(f"Unknown generator class {generator_class}") + + +parser = argparse.ArgumentParser( + description="A traffic generator that can be used to test a gem5 " + "memory component." +) + +parser.add_argument( + "generator_class", + type=str, + help="The class of generator to use.", + choices=[ + "LinearGenerator", + "RandomGenerator", + ], +) + +parser.add_argument( + "read_percentage", + type=int, + help="Percentage of read requests in the generated traffic.", +) + +args = parser.parse_args() + +# Single pair of HBM2 pseudo channels. This can be replaced with any +# single ported memory device +memory = HighBandwidthMemory(HBM_2000_4H_1x64, 1, 128) + +generator = generator_factory( + args.generator_class, args.read_percentage, memory.get_size() +) + +# We use the Test Board. This is a special board to run traffic generation +# tasks. Can replace the cache_hierarchy with any hierarchy to simulate the +# cache as well as the memory +board = TestBoard( + clk_freq="1GHz", # Ignored for these generators + generator=generator, # We pass the traffic generator as the processor. 
+ memory=memory, + # With no cache hierarchy the test board will directly connect the + # generator to the memory + cache_hierarchy=None, +) + +simulator = Simulator(board=board) +simulator.run() diff --git a/configs/example/gem5_library/riscv-fs.py b/configs/example/gem5_library/riscv-fs.py index dffb3d464e..e3e2bc75e1 100644 --- a/configs/example/gem5_library/riscv-fs.py +++ b/configs/example/gem5_library/riscv-fs.py @@ -42,10 +42,9 @@ Characteristics from gem5.components.boards.riscv_board import RiscvBoard from gem5.components.memory import SingleChannelDDR3_1600 from gem5.components.processors.simple_processor import SimpleProcessor -from gem5.components.cachehierarchies.classic.\ - private_l1_private_l2_cache_hierarchy import ( - PrivateL1PrivateL2CacheHierarchy, - ) +from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( + PrivateL1PrivateL2CacheHierarchy, +) from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA from gem5.utils.requires import requires @@ -80,8 +79,8 @@ board = RiscvBoard( # Set the Full System workload. board.set_kernel_disk_workload( - kernel=Resource("riscv-bootloader-vmlinux-5.10"), - disk_image=Resource("riscv-disk-img"), + kernel=Resource("riscv-bootloader-vmlinux-5.10"), + disk_image=Resource("riscv-disk-img"), ) simulator = Simulator(board=board) @@ -90,4 +89,4 @@ print("Beginning simulation!") # using m5term (`./util/term`): `./m5term localhost `. Note the `` # value is obtained from the gem5 terminal stdout. Look out for # "system.platform.terminal: Listening for connections on port ". 
-simulator.run() \ No newline at end of file +simulator.run() diff --git a/configs/example/gem5_library/riscv-ubuntu-run.py b/configs/example/gem5_library/riscv-ubuntu-run.py index f3e6d1322f..87b98cc5ba 100644 --- a/configs/example/gem5_library/riscv-ubuntu-run.py +++ b/configs/example/gem5_library/riscv-ubuntu-run.py @@ -46,42 +46,33 @@ from m5.objects import Root from gem5.utils.requires import requires from gem5.components.boards.riscv_board import RiscvBoard from gem5.components.memory import DualChannelDDR4_2400 -from gem5.components.processors.simple_processor import ( - SimpleProcessor, -) +from gem5.components.processors.simple_processor import SimpleProcessor from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA -from gem5.coherence_protocol import CoherenceProtocol -from gem5.resources.resource import Resource from gem5.simulate.simulator import Simulator +from gem5.resources.workload import Workload # This runs a check to ensure the gem5 binary is compiled for RISCV. -requires( - isa_required=ISA.RISCV, -) +requires(isa_required=ISA.RISCV) # With RISCV, we use simple caches. -from gem5.components.cachehierarchies.classic\ - .private_l1_private_l2_cache_hierarchy import ( +from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( PrivateL1PrivateL2CacheHierarchy, ) + # Here we setup the parameters of the l1 and l2 caches. cache_hierarchy = PrivateL1PrivateL2CacheHierarchy( - l1d_size="16kB", - l1i_size="16kB", - l2_size="256kB", + l1d_size="16kB", l1i_size="16kB", l2_size="256kB" ) # Memory: Dual Channel DDR4 2400 DRAM device. -memory = DualChannelDDR4_2400(size = "3GB") +memory = DualChannelDDR4_2400(size="3GB") # Here we setup the processor. We use a simple processor. processor = SimpleProcessor( - cpu_type=CPUTypes.TIMING, - isa=ISA.RISCV, - num_cores=2, + cpu_type=CPUTypes.TIMING, isa=ISA.RISCV, num_cores=2 ) # Here we setup the board. 
The RiscvBoard allows for Full-System RISCV
@@ -93,27 +84,11 @@ board = RiscvBoard(
     cache_hierarchy=cache_hierarchy,
 )
 
-# Here we set the Full System workload.
-
-# The `set_kernel_disk_workload` function for the RiscvBoard accepts a
-# RISCV bootloader and a disk image. Once the system successfully boots, it
-# encounters an `m5_exit instruction encountered`. We stop the simulation then.
-# When the simulation has ended you may inspect `m5out/system.pc.com_1.device`
-# to see the stdout.
-
-board.set_kernel_disk_workload(
-    # The RISCV bootloader will be automatically downloaded to the
-    # `~/.cache/gem5` directory if not already present.
-    # The riscv-ubuntu boot-test was tested with riscv-bootloader-5.10
-    kernel=Resource(
-        "riscv-bootloader-vmlinux-5.10",
-    ),
-    # The RISCV ubuntu image will be automatically downloaded to the
-    # `~/.cache/gem5` directory if not already present.
-    disk_image=Resource(
-        "riscv-ubuntu-20.04-img",
-    ),
-)
+# Here we set a full system workload: "riscv-ubuntu-20.04-boot" which boots
+# Ubuntu 20.04. Once the system successfully boots it encounters an `m5_exit`
+# instruction which stops the simulation. When the simulation has ended you may
+# inspect `m5out/system.pc.com_1.device` to see the stdout.
+board.set_workload(Workload("riscv-ubuntu-20.04-boot"))
 
 simulator = Simulator(board=board)
 simulator.run()
diff --git a/configs/example/gem5_library/riscvmatched-fs.py b/configs/example/gem5_library/riscvmatched-fs.py
new file mode 100644
index 0000000000..3e84b8c1ea
--- /dev/null
+++ b/configs/example/gem5_library/riscvmatched-fs.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2022 The Regents of the University of California
+# All rights reserved.
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This gem5 configuration script runs the RISCVMatchedBoard in FS mode with a +an Ubuntu 20.04 image and calls m5 exit after the simulation has booted the OS. 
+ +Usage +--- + +``` +scons build/RISCV/gem5.opt + +./build/RISCV/gem5.opt configs/example/gem5_library/riscvmatched-fs.py +``` +""" + +from gem5.prebuilt.riscvmatched.riscvmatched_board import RISCVMatchedBoard +from gem5.utils.requires import requires +from gem5.isas import ISA +from gem5.simulate.simulator import Simulator +from gem5.resources.workload import Workload + +import argparse + +requires(isa_required=ISA.RISCV) + +parser = argparse.ArgumentParser( +    description="A script which uses the RISCVMatchedBoard in FS mode." +) + +parser.add_argument( +    "-i", +    "--to-init", +    action="store_true", +    help="Exit the simulation after the Linux Kernel boot.", +) + +args = parser.parse_args() + +# instantiate the riscv matched board with default parameters +board = RISCVMatchedBoard( +    clk_freq="1.2GHz", +    l2_size="2MB", +    is_fs=True, +) + +# Here we set a full system workload: "riscv-ubuntu-20.04-boot" which boots +# Ubuntu 20.04. Once the system successfully boots it encounters an `m5_exit` +# instruction which stops the simulation. When the simulation has ended you may +# inspect `m5out/system.pc.com_1.device` to see the stdout. +# +# In the case where the `-i` flag is passed, we add the kernel argument +# `init=/root/exit.sh`. This means the simulation will exit after the Linux +# Kernel has booted. +workload = Workload("riscv-ubuntu-20.04-boot") +kernel_args = board.get_default_kernel_args() +if args.to_init: +    kernel_args.append("init=/root/exit.sh") +workload.set_parameter("kernel_args", kernel_args) +board.set_workload(workload) + +simulator = Simulator(board=board) +simulator.run() diff --git a/configs/example/gem5_library/riscvmatched-hello.py b/configs/example/gem5_library/riscvmatched-hello.py new file mode 100644 index 0000000000..d8ae8e5f9c --- /dev/null +++ b/configs/example/gem5_library/riscvmatched-hello.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved.
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+""" +This gem5 configuration script runs a "hello world" binary on the +RISCVMatched prebuilt board found in src/python/gem5/prebuilt/riscvmatched/ + +Usage +----- + +``` +scons build/RISCV/gem5.opt +./build/RISCV/gem5.opt \ +    configs/example/gem5_library/riscvmatched-hello.py +``` +""" + +from gem5.resources.resource import Resource +from gem5.simulate.simulator import Simulator +from python.gem5.prebuilt.riscvmatched.riscvmatched_board import ( +    RISCVMatchedBoard, +) +from gem5.isas import ISA +from gem5.utils.requires import requires + +requires(isa_required=ISA.RISCV) + +# instantiate the riscv matched board with default parameters +board = RISCVMatchedBoard() + +# set the hello world riscv binary as the board workload +board.set_se_binary_workload(Resource("riscv-hello")) + +# run the simulation with the RISCV Matched board +simulator = Simulator(board=board, full_system=False) +simulator.run() + +print( +    "Exiting @ tick {} because {}.".format( +        simulator.get_current_tick(), +        simulator.get_last_exit_event_cause(), +    ) +) diff --git a/configs/example/gem5_library/x86-gapbs-benchmarks.py b/configs/example/gem5_library/x86-gapbs-benchmarks.py index 50f56d5924..6ab37479f9 100644 --- a/configs/example/gem5_library/x86-gapbs-benchmarks.py +++ b/configs/example/gem5_library/x86-gapbs-benchmarks.py @@ -64,8 +64,8 @@ from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA from gem5.coherence_protocol import CoherenceProtocol from gem5.resources.resource import Resource - -from m5.stats.gem5stats import get_simstat +from gem5.simulate.simulator import Simulator +from gem5.simulate.exit_event import ExitEvent requires( isa_required=ISA.X86, @@ -79,8 +79,25 @@ benchmark_choices = ["cc", "bc", "tc", "pr", "bfs"] synthetic_choices = ["0", "1"] -size_choices = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", - "13", "14", "15", "16", "USA-road-d.NY.gr"] +size_choices = [ +    "1", +    "2", +    "3", +    "4", +    "5", +    "6", +    "7", +    "8", +    
"9", + "10", + "11", + "12", + "13", + "14", + "15", + "16", + "USA-road-d.NY.gr", +] parser = argparse.ArgumentParser( description="An example configuration script to run the gapbs benchmarks." @@ -118,8 +135,7 @@ args = parser.parse_args() # Setting up all the fixed system parameters here # Caches: MESI Two Level Cache Hierarchy -from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import( +from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -173,8 +189,10 @@ board = X86Board( if args.synthetic == "1": if args.size == "USA-road-d.NY.gr": - print("fatal: cannot use a real graph with --synthetic 1", - file=sys.stderr) + print( + "fatal: cannot use a real graph with --synthetic 1", + file=sys.stderr, + ) exit(-1) command = "./{} -g {}\n".format(args.benchmark, args.size) @@ -185,24 +203,37 @@ board.set_kernel_disk_workload( # The x86 linux kernel will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. # gapbs benchamarks was tested with kernel version 4.19.83 - kernel=Resource( - "x86-linux-kernel-4.19.83", - ), + kernel=Resource("x86-linux-kernel-4.19.83"), # The x86-gapbs image will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. - disk_image=Resource( - "x86-gapbs", - ), + disk_image=Resource("x86-gapbs"), readfile_contents=command, ) -root = Root(full_system=True, system=board) -# sim_quantum must be set when KVM cores are used. +def handle_workbegin(): + print("Done booting Linux") + print("Resetting stats at the start of ROI!") + m5.stats.reset() + global start_tick + start_tick = m5.curTick() + processor.switch() + yield False # E.g., continue the simulation. -root.sim_quantum = int(1e9) -m5.instantiate() +def handle_workend(): + print("Dump stats at the end of the ROI!") + m5.stats.dump() + yield True # Stop the simulation. We're done. 
+ + +simulator = Simulator( + board=board, + on_exit_event={ + ExitEvent.WORKBEGIN: handle_workbegin(), + ExitEvent.WORKEND: handle_workend(), + }, +) # We maintain the wall clock time. @@ -217,75 +248,8 @@ print("Using KVM cpu") # the first ROI annotation in details. The X86Board currently does not support # `work items started count reached`. -exit_event = m5.simulate() - -# The first exit_event ends with a `workbegin` cause. This means that the -# system started successfully and the execution on the program started. The -# ROI begin is encountered. - -if exit_event.getCause() == "workbegin": - - print("Done booting Linux") - print("Resetting stats at the start of ROI!") - - m5.stats.reset() - start_tick = m5.curTick() - - # We have completed up to this step using KVM cpu. Now we switch to timing - # cpu for detailed simulation. - - processor.switch() -else: - print("Unexpected termination of simulation before ROI was reached!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# The next exit_event is to simulate the ROI. It should be exited with a cause -# marked by `workend`. This implies that the first annotation is successfully -# completed. - -exit_event = m5.simulate() - -# Reached the end of first ROI. -# We dump the stats here. - -# We exepect that ROI ends with `workend`. Otherwise the simulation ended -# unexpectedly. -if exit_event.getCause() == "workend": - print("Dump stats at the end of the ROI!") - - m5.stats.dump() - end_tick = m5.curTick() -else: - print("Unexpected termination of simulation while ROI was being executed!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# We get simInsts using get_simstat and output it in the final print statement. - -gem5stats = get_simstat(root) - -# We get the number of committed instructions from the timing cores. We then -# sum and print them at the end. 
- -roi_insts = float(\ - gem5stats.to_json()\ - ["system"]["processor"]["cores2"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]) + float(\ - gem5stats.to_json()\ - ["system"]["processor"]["cores3"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]\ -) +simulator.run() +end_tick = m5.curTick() # Since we simulated the ROI in details, therefore, simulation is over at this # point. @@ -299,8 +263,9 @@ print() print("Performance statistics:") print("Simulated time in ROI: %.2fs" % ((end_tick - start_tick) / 1e12)) -print("Instructions executed in ROI: %d" % ((roi_insts))) -print("Ran a total of", m5.curTick() / 1e12, "simulated seconds") +print( + "Ran a total of", simulator.get_current_tick() / 1e12, "simulated seconds" +) print( "Total wallclock time: %.2fs, %.2f min" % (time.time() - globalStart, (time.time() - globalStart) / 60) diff --git a/configs/example/gem5_library/x86-npb-benchmarks.py b/configs/example/gem5_library/x86-npb-benchmarks.py index 83cc7009a5..ff363e449c 100644 --- a/configs/example/gem5_library/x86-npb-benchmarks.py +++ b/configs/example/gem5_library/x86-npb-benchmarks.py @@ -54,19 +54,21 @@ from m5.objects import Root from gem5.utils.requires import requires from gem5.components.boards.x86_board import X86Board from gem5.components.memory import DualChannelDDR4_2400 -from gem5.components.processors.simple_switchable_processor import( +from gem5.components.processors.simple_switchable_processor import ( SimpleSwitchableProcessor, ) from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA from gem5.coherence_protocol import CoherenceProtocol from gem5.resources.resource import Resource +from gem5.simulate.simulator import Simulator +from gem5.simulate.simulator import ExitEvent from m5.stats.gem5stats import get_simstat from m5.util import warn requires( - isa_required = ISA.X86, + isa_required=ISA.X86, coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL, kvm_required=True, ) @@ -93,25 +95,25 
@@ parser = argparse.ArgumentParser( parser.add_argument( "--benchmark", - type = str, + type=str, required=True, - help = "Input the benchmark program to execute.", - choices = benchmark_choices, + help="Input the benchmark program to execute.", + choices=benchmark_choices, ) parser.add_argument( "--size", - type = str, + type=str, required=True, - help = "Input the class of the program to simulate.", - choices = size_choices, + help="Input the class of the program to simulate.", + choices=size_choices, ) parser.add_argument( "--ticks", - type = int, - help = "Optionally put the maximum number of ticks to execute during the "\ - "ROI. It accepts an integer value." + type=int, + help="Optionally put the maximum number of ticks to execute during the " + "ROI. It accepts an integer value.", ) args = parser.parse_args() @@ -121,28 +123,31 @@ args = parser.parse_args() # We warn the user here. if args.benchmark == "mg" and args.size == "C": - warn("mg.C uses 3.3 GB of memory. Currently we are simulating 3 GB\ - of main memory in the system.") + warn( + "mg.C uses 3.3 GB of memory. Currently we are simulating 3 GB\ + of main memory in the system." + ) # The simulation will fail in the case of `ft` with class C. We warn the user # here. elif args.benchmark == "ft" and args.size == "C": - warn("There is not enough memory for ft.C. Currently we are\ - simulating 3 GB of main memory in the system.") + warn( + "There is not enough memory for ft.C. Currently we are\ + simulating 3 GB of main memory in the system." + ) # Checking for the maximum number of instructions, if provided by the user. 
# Setting up all the fixed system parameters here # Caches: MESI Two Level Cache Hierarchy -from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import( +from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) cache_hierarchy = MESITwoLevelCacheHierarchy( - l1d_size = "32kB", - l1d_assoc = 8, + l1d_size="32kB", + l1d_assoc=8, l1i_size="32kB", l1i_assoc=8, l2_size="256kB", @@ -152,7 +157,7 @@ cache_hierarchy = MESITwoLevelCacheHierarchy( # Memory: Dual Channel DDR4 2400 DRAM device. # The X86 board only supports 3 GB of main memory. -memory = DualChannelDDR4_2400(size = "3GB") +memory = DualChannelDDR4_2400(size="3GB") # Here we setup the processor. This is a special switchable processor in which # a starting core type and a switch core type must be specified. Once a @@ -189,35 +194,63 @@ board = X86Board( # Also, we sleep the system for some time so that the output is printed # properly. -command="/home/gem5/NPB3.3-OMP/bin/{}.{}.x;".format(args.benchmark,args.size)\ - + "sleep 5;" \ +command = ( + "/home/gem5/NPB3.3-OMP/bin/{}.{}.x;".format(args.benchmark, args.size) + + "sleep 5;" + "m5 exit;" +) board.set_kernel_disk_workload( # The x86 linux kernel will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. # npb benchamarks was tested with kernel version 4.19.83 - kernel=Resource( - "x86-linux-kernel-4.19.83", - ), + kernel=Resource("x86-linux-kernel-4.19.83"), # The x86-npb image will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. - disk_image=Resource( - "x86-npb", - ), + disk_image=Resource("x86-npb"), readfile_contents=command, ) -# We need this for long running processes. -m5.disableAllListeners() +# The first exit_event ends with a `workbegin` cause. This means that the +# system started successfully and the execution on the program started. 
+def handle_workbegin(): +    print("Done booting Linux") +    print("Resetting stats at the start of ROI!") -root = Root(full_system = True, system = board) +    m5.stats.reset() -# sim_quantum must be set when KVM cores are used. +    # We have completed up to this step using KVM cpu. Now we switch to timing +    # cpu for detailed simulation. -root.sim_quantum = int(1e9) +    # Next, we need to check if the user passed a value for --ticks. If yes, +    # then we limit our execution to this number of ticks during the ROI. +    # Otherwise, we simulate until the ROI ends. +    processor.switch() +    if args.ticks: +        # schedule an exit event for this amount of ticks in the future. +        # The simulation will then continue. +        m5.scheduleTickExitFromCurrent(args.ticks) +    yield False -m5.instantiate() + +# The next exit_event is to simulate the ROI. It should be exited with a cause +# marked by `workend`. + +# We expect that ROI ends with `workend` or `simulate() limit reached`. +def handle_workend(): +    print("Dump stats at the end of the ROI!") + +    m5.stats.dump() +    yield True + + +simulator = Simulator( +    board=board, +    on_exit_event={ +        ExitEvent.WORKBEGIN: handle_workbegin(), +        ExitEvent.WORKEND: handle_workend(), +    }, +) # We maintain the wall clock time. @@ -227,96 +260,12 @@ print("Running the simulation") print("Using KVM cpu") # We start the simulation. - -exit_event = m5.simulate() - -# The first exit_event ends with a `workbegin` cause. This means that the -# system started successfully and the execution on the program started. - -if exit_event.getCause() == "workbegin": - -    print("Done booting Linux") -    print("Resetting stats at the start of ROI!") - -    m5.stats.reset() -    start_tick = m5.curTick() - -    # We have completed up to this step using KVM cpu. Now we switch to timing -    # cpu for detailed simulation. - -    processor.switch() -else: -    # `workbegin` call was never encountered. 
- - print("Unexpected termination of simulation before ROI was reached!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# The next exit_event is to simulate the ROI. It should be exited with a cause -# marked by `workend`. - -# Next, we need to check if the user passed a value for --ticks. If yes, -# then we limit out execution to this number of ticks during the ROI. -# Otherwise, we simulate until the ROI ends. -if args.ticks: - exit_event = m5.simulate(args.ticks) -else: - exit_event = m5.simulate() - - -# Reached the end of ROI. -# We dump the stats here. - -# We exepect that ROI ends with `workend` or `simulate() limit reached`. -# Otherwise the simulation ended unexpectedly. -if exit_event.getCause() == "workend": - print("Dump stats at the end of the ROI!") - - m5.stats.dump() - end_tick = m5.curTick() -elif exit_event.getCause() == "simulate() limit reached" and \ - args.ticks is not None: - print("Dump stats at the end of {} ticks in the ROI".format(args.ticks)) - - m5.stats.dump() - end_tick = m5.curTick() -else: - print("Unexpected termination of simulation while ROI was being executed!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) +simulator.run() # We need to note that the benchmark is not executed completely till this # point, but, the ROI has. We collect the essential statistics here before # resuming the simulation again. -# We get simInsts using get_simstat and output it in the final -# print statement. - -gem5stats = get_simstat(root) - -# We get the number of committed instructions from the timing -# cores. We then sum and print them at the end. 
- -roi_insts = float(\ - gem5stats.to_json()\ - ["system"]["processor"]["cores2"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"] -) + float(\ - gem5stats.to_json()\ - ["system"]["processor"]["cores3"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]\ -) - # Simulation is over at this point. We acknowledge that all the simulation # events were successful. print("All simulation events were successful.") @@ -326,8 +275,17 @@ print("Done with the simulation") print() print("Performance statistics:") -print("Simulated time in ROI: %.2fs" % ((end_tick-start_tick)/1e12)) -print("Instructions executed in ROI: %d" % ((roi_insts))) -print("Ran a total of", m5.curTick()/1e12, "simulated seconds") -print("Total wallclock time: %.2fs, %.2f min" % \ - (time.time()-globalStart, (time.time()-globalStart)/60)) +# manually calculate ROI time if ticks arg is used in case the +# entire ROI wasn't simulated +if args.ticks: + print(f"Simulated time in ROI (to tick): {args.ticks/ 1e12}s") +else: + print(f"Simulated time in ROI: {simulator.get_roi_ticks()[0] / 1e12}s") + +print( + f"Ran a total of {simulator.get_current_tick() / 1e12} simulated seconds" +) +print( + "Total wallclock time: %.2fs, %.2f min" + % (time.time() - globalStart, (time.time() - globalStart) / 60) +) diff --git a/configs/example/gem5_library/x86-parsec-benchmarks.py b/configs/example/gem5_library/x86-parsec-benchmarks.py index 0d2e66586a..190c0a0980 100644 --- a/configs/example/gem5_library/x86-parsec-benchmarks.py +++ b/configs/example/gem5_library/x86-parsec-benchmarks.py @@ -53,33 +53,45 @@ from m5.objects import Root from gem5.utils.requires import requires from gem5.components.boards.x86_board import X86Board from gem5.components.memory import DualChannelDDR4_2400 -from gem5.components.processors.simple_switchable_processor import( +from gem5.components.processors.simple_switchable_processor import ( SimpleSwitchableProcessor, ) from gem5.components.processors.cpu_types import CPUTypes from 
gem5.isas import ISA from gem5.coherence_protocol import CoherenceProtocol from gem5.resources.resource import Resource - -from m5.stats.gem5stats import get_simstat +from gem5.simulate.simulator import Simulator +from gem5.simulate.exit_event import ExitEvent # We check for the required gem5 build. requires( - isa_required = ISA.X86, + isa_required=ISA.X86, coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL, kvm_required=True, ) # Following are the list of benchmark programs for parsec. -benchmark_choices = ["blackscholes", "bodytrack", "canneal", "dedup", - "facesim", "ferret", "fluidanimate", "freqmine", - "raytrace", "streamcluster", "swaptions", "vips", "x264"] +benchmark_choices = [ + "blackscholes", + "bodytrack", + "canneal", + "dedup", + "facesim", + "ferret", + "fluidanimate", + "freqmine", + "raytrace", + "streamcluster", + "swaptions", + "vips", + "x264", +] # Following are the input size. -size_choices=["simsmall", "simmedium", "simlarge"] +size_choices = ["simsmall", "simmedium", "simlarge"] parser = argparse.ArgumentParser( description="An example configuration script to run the npb benchmarks." 
@@ -89,32 +101,31 @@ parser = argparse.ArgumentParser( parser.add_argument( "--benchmark", - type = str, + type=str, required=True, - help = "Input the benchmark program to execute.", - choices = benchmark_choices, + help="Input the benchmark program to execute.", + choices=benchmark_choices, ) parser.add_argument( "--size", - type = str, + type=str, required=True, - help = "Simulation size the benchmark program.", - choices = size_choices, + help="Simulation size the benchmark program.", + choices=size_choices, ) args = parser.parse_args() # Setting up all the fixed system parameters here # Caches: MESI Two Level Cache Hierarchy -from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import( +from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) cache_hierarchy = MESITwoLevelCacheHierarchy( - l1d_size = "32kB", - l1d_assoc = 8, + l1d_size="32kB", + l1d_assoc=8, l1i_size="32kB", l1i_assoc=8, l2_size="256kB", @@ -125,7 +136,7 @@ cache_hierarchy = MESITwoLevelCacheHierarchy( # Memory: Dual Channel DDR4 2400 DRAM device. # The X86 board only supports 3 GB of main memory. -memory = DualChannelDDR4_2400(size = "3GB") +memory = DualChannelDDR4_2400(size="3GB") # Here we setup the processor. This is a special switchable processor in which # a starting core type and a switch core type must be specified. Once a @@ -163,38 +174,49 @@ board = X86Board( # properly. 
-command = "cd /home/gem5/parsec-benchmark;".format(args.benchmark) \ - + "source env.sh;" \ +command = ( + "cd /home/gem5/parsec-benchmark;".format(args.benchmark) + + "source env.sh;" + "parsecmgmt -a run -p {} -c gcc-hooks -i {} \ - -n {};".format(args.benchmark, args.size, "2") \ - + "sleep 5;" \ - + "m5 exit;" \ - + -n {};".format( + args.benchmark, args.size, "2" + ) + + "sleep 5;" + + "m5 exit;" +) board.set_kernel_disk_workload( # The x86 linux kernel will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. # PARSEC benchamarks were tested with kernel version 4.19.83 - kernel=Resource( - "x86-linux-kernel-4.19.83", - ), + kernel=Resource("x86-linux-kernel-4.19.83"), # The x86-parsec image will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. - disk_image=Resource( - "x86-parsec", - ), + disk_image=Resource("x86-parsec"), readfile_contents=command, ) -# We need this for long running processes. -m5.disableAllListeners() +# functions to handle different exit events during the simuation +def handle_workbegin(): + print("Done booting Linux") + print("Resetting stats at the start of ROI!") + m5.stats.reset() + processor.switch() + yield False -root = Root(full_system = True, system = board) -# sim_quantum must be set if KVM cores are used. +def handle_workend(): + print("Dump stats at the end of the ROI!") + m5.stats.dump() + yield True -root.sim_quantum = int(1e9) -m5.instantiate() +simulator = Simulator( + board=board, + on_exit_event={ + ExitEvent.WORKBEGIN: handle_workbegin(), + ExitEvent.WORKEND: handle_workend(), + }, +) # We maintain the wall clock time. @@ -203,84 +225,11 @@ globalStart = time.time() print("Running the simulation") print("Using KVM cpu") -start_tick = m5.curTick() -end_tick = m5.curTick() m5.stats.reset() # We start the simulation +simulator.run() -exit_event = m5.simulate() - -# The first exit_event ends with a `workbegin` cause. 
This means that the -# system booted successfully and the execution on the program started. - -if exit_event.getCause() == "workbegin": - - print("Done booting Linux") - print("Resetting stats at the start of ROI!") - - m5.stats.reset() - start_tick = m5.curTick() - - # We have completed up to this step using KVM cpu. Now we switch to timing - # cpu for detailed simulation. - - processor.switch() -else: - # `workbegin` call was never encountered. - - print("Unexpected termination of simulation before ROI was reached!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# The next exit_event is to simulate the ROI. It should be exited with a cause -# marked by `workend`. - -exit_event = m5.simulate() - -# Reached the end of ROI. -# We dump the stats here. - -# We exepect that ROI ends with `workend`. Otherwise the simulation ended -# unexpectedly. -if exit_event.getCause() == "workend": - print("Dump stats at the end of the ROI!") - - m5.stats.dump() - end_tick = m5.curTick() -else: - print("Unexpected termination of simulation while ROI was being executed!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# ROI has ended here, and we get `simInsts` using get_simstat and print it in -# the final print statement. - -gem5stats = get_simstat(root) - -# We get the number of committed instructions from the timing -# cores. We then sum and print them at the end. -roi_insts = float(\ - gem5stats.to_json()\ - ["system"]["processor"]["cores2"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]) + float(\ - gem5stats.to_json()\ - ["system"]["processor"]["cores3"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]\ -) - -# Simulation is over at this point. We acknowledge that all the simulation -# events were successful. print("All simulation events were successful.") # We print the final simulation statistics. 
@@ -289,8 +238,11 @@ print("Done with the simulation") print() print("Performance statistics:") -print("Simulated time in ROI: %.2fs" % ((end_tick-start_tick)/1e12)) -print("Instructions executed in ROI: %d" % ((roi_insts))) -print("Ran a total of", m5.curTick()/1e12, "simulated seconds") -print("Total wallclock time: %.2fs, %.2f min" % \ - (time.time()-globalStart, (time.time()-globalStart)/60)) +print("Simulated time in ROI: " + ((str(simulator.get_roi_ticks()[0])))) +print( + "Ran a total of", simulator.get_current_tick() / 1e12, "simulated seconds" +) +print( + "Total wallclock time: %.2fs, %.2f min" + % (time.time() - globalStart, (time.time() - globalStart) / 60) +) diff --git a/configs/example/gem5_library/x86-spec-cpu2006-benchmarks.py b/configs/example/gem5_library/x86-spec-cpu2006-benchmarks.py index 2e52eef0a9..8f39f49e2e 100644 --- a/configs/example/gem5_library/x86-spec-cpu2006-benchmarks.py +++ b/configs/example/gem5_library/x86-spec-cpu2006-benchmarks.py @@ -59,13 +59,15 @@ from m5.objects import Root from gem5.utils.requires import requires from gem5.components.boards.x86_board import X86Board from gem5.components.memory import DualChannelDDR4_2400 -from gem5.components.processors.simple_switchable_processor import( +from gem5.components.processors.simple_switchable_processor import ( SimpleSwitchableProcessor, ) from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA from gem5.coherence_protocol import CoherenceProtocol from gem5.resources.resource import Resource, CustomDiskImageResource +from gem5.simulate.simulator import Simulator +from gem5.simulate.exit_event import ExitEvent from m5.stats.gem5stats import get_simstat from m5.util import warn @@ -84,14 +86,38 @@ requires( # have build errors, and, therefore cannot be executed. 
More information is # available at: https://www.gem5.org/documentation/benchmark_status/gem5-20 -benchmark_choices = ['400.perlbench', '401.bzip2', '403.gcc', '410.bwaves', - '416.gamess', '429.mcf', '433.milc', '435.gromacs', - '436.cactusADM', '437.leslie3d', '444.namd', '445.gobmk', - '447.dealII', '450.soplex', '453.povray', '454.calculix', - '456.hmmer', '458.sjeng', '459.GemsFDTD', - '462.libquantum', '464.h264ref', '465.tonto', '470.lbm', - '471.omnetpp', '473.astar', '481.wrf', '482.sphinx3', - '483.xalancbmk', '998.specrand', '999.specrand'] +benchmark_choices = [ + "400.perlbench", + "401.bzip2", + "403.gcc", + "410.bwaves", + "416.gamess", + "429.mcf", + "433.milc", + "435.gromacs", + "436.cactusADM", + "437.leslie3d", + "444.namd", + "445.gobmk", + "447.dealII", + "450.soplex", + "453.povray", + "454.calculix", + "456.hmmer", + "458.sjeng", + "459.GemsFDTD", + "462.libquantum", + "464.h264ref", + "465.tonto", + "470.lbm", + "471.omnetpp", + "473.astar", + "481.wrf", + "482.sphinx3", + "483.xalancbmk", + "998.specrand", + "999.specrand", +] # Following are the input size. @@ -109,34 +135,34 @@ parser = argparse.ArgumentParser( parser.add_argument( "--image", - type = str, - required = True, - help = "Input the full path to the built spec-2006 disk-image." + type=str, + required=True, + help="Input the full path to the built spec-2006 disk-image.", ) parser.add_argument( "--partition", - type = str, - required = False, + type=str, + required=False, default=None, - help = "Input the root partition of the SPEC disk-image. If the disk is \ - not partitioned, then pass \"\"." + help='Input the root partition of the SPEC disk-image. 
If the disk is \ + not partitioned, then pass "".', ) parser.add_argument( "--benchmark", - type = str, + type=str, required=True, - help = "Input the benchmark program to execute.", + help="Input the benchmark program to execute.", choices=benchmark_choices, ) parser.add_argument( "--size", - type = str, + type=str, required=True, - help = "Sumulation size the benchmark program.", - choices = size_choices, + help="Sumulation size the benchmark program.", + choices=size_choices, ) args = parser.parse_args() @@ -151,21 +177,20 @@ if not os.path.exists(args.image): warn("Disk image not found!") print("Instructions on building the disk image can be found at: ") print( - "https://gem5art.readthedocs.io/en/latest/tutorials/spec-tutorial.html" + "https://gem5art.readthedocs.io/en/latest/tutorials/spec-tutorial.html" ) fatal("The disk-image is not found at {}".format(args.image)) # Setting up all the fixed system parameters here # Caches: MESI Two Level Cache Hierarchy -from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import( +from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) cache_hierarchy = MESITwoLevelCacheHierarchy( - l1d_size = "32kB", - l1d_assoc = 8, + l1d_size="32kB", + l1d_assoc=8, l1i_size="32kB", l1i_assoc=8, l2_size="256kB", @@ -175,7 +200,7 @@ cache_hierarchy = MESITwoLevelCacheHierarchy( # Memory: Dual Channel DDR4 2400 DRAM device. # The X86 board only supports 3 GB of main memory. -memory = DualChannelDDR4_2400(size = "3GB") +memory = DualChannelDDR4_2400(size="3GB") # Here we setup the processor. This is a special switchable processor in which # a starting core type and a switch core type must be specified. Once a @@ -205,8 +230,8 @@ board = X86Board( # m5.options.outdir and the output from the disk-image folder is copied to # this folder. 
-output_dir = "speclogs_" + ''.join(x.strip() for x in time.asctime().split()) -output_dir = output_dir.replace(":","") +output_dir = "speclogs_" + "".join(x.strip() for x in time.asctime().split()) +output_dir = output_dir.replace(":", "") # We create this folder if it is absent. try: @@ -234,27 +259,31 @@ board.set_kernel_disk_workload( # `~/.cache/gem5` directory if not already present. # SPEC CPU2006 benchamarks were tested with kernel version 4.19.83 and # 5.4.49 - kernel=Resource( - "x86-linux-kernel-4.19.83", - ), + kernel=Resource("x86-linux-kernel-4.19.83"), # The location of the x86 SPEC CPU 2017 image disk_image=CustomDiskImageResource( - args.image, - disk_root_partition=args.partition, + args.image, disk_root_partition=args.partition ), readfile_contents=command, ) -# We need this for long running processes. -m5.disableAllListeners() -root = Root(full_system = True, system = board) +def handle_exit(): + print("Done bootling Linux") + print("Resetting stats at the start of ROI!") + m5.stats.reset() + yield False # E.g., continue the simulation. + print("Dump stats at the end of the ROI!") + m5.stats.dump() + yield True # Stop the simulation. We're done. -# sim_quantum must be set when KVM cores are used. -root.sim_quantum = int(1e9) - -m5.instantiate() +simulator = Simulator( + board=board, + on_exit_event={ + ExitEvent.EXIT: handle_exit(), + }, +) # We maintain the wall clock time. @@ -263,94 +292,10 @@ globalStart = time.time() print("Running the simulation") print("Using KVM cpu") -start_tick = m5.curTick() -end_tick = m5.curTick() m5.stats.reset() -exit_event = m5.simulate() - -if exit_event.getCause() == "m5_exit instruction encountered": - # We have completed booting the OS using KVM cpu - # Reached the start of ROI - - print("Done booting Linux") - print("Resetting stats at the start of ROI!") - - m5.stats.reset() - start_tick = m5.curTick() - - # We switch to timing cpu for detailed simulation. 
- - processor.switch() -else: - # `m5_exit instruction encountered` was never reached - - print("Unexpected termination of simulation before ROI was reached!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# Simulate the ROI -exit_event = m5.simulate() - -# Reached the end of ROI -gem5stats = get_simstat(root) - -# We get the number of committed instructions from the timing -# cores. We then sum and print them at the end. - -roi_insts = float(\ - json.loads(gem5stats.dumps())\ - ["system"]["processor"]["cores2"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]) + float(\ - json.loads(gem5stats.dumps())\ - ["system"]["processor"]["cores3"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]\ -) - -if exit_event.getCause() == "m5_exit instruction encountered": - print("Dump stats at the end of the ROI!") - m5.stats.dump() - end_tick = m5.curTick() - m5.stats.reset() - -else: - # `m5_exit instruction encountered` was never reached - - print("Unexpected termination of simulation while ROI was being executed!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# We need to copy back the contents of the `speclogs' directory to -# m5.options.outdir - -exit_event = m5.simulate() - -if exit_event.getCause() == "m5_exit instruction encountered": - print("Output logs copied!") - -else: - print("Unexpected termination of simulation while copying speclogs!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -m5.stats.dump() -end_tick = m5.curTick() -m5.stats.reset() +# We start the simulation +simulator.run() # Simulation is over at this point. We acknowledge that all the simulation # events were successful. 
@@ -359,8 +304,11 @@ print("All simulation events were successful.") print("Performance statistics:") -print("Simulated time: %.2fs" % ((end_tick-start_tick)/1e12)) -print("Instructions executed: %d" % ((roi_insts))) -print("Ran a total of", m5.curTick()/1e12, "simulated seconds") -print("Total wallclock time: %.2fs, %.2f min" % \ - (time.time()-globalStart, (time.time()-globalStart)/60)) +print("Simulated time: " + ((str(simulator.get_roi_ticks()[0])))) +print( + "Ran a total of", simulator.get_current_tick() / 1e12, "simulated seconds" +) +print( + "Total wallclock time: %.2fs, %.2f min" + % (time.time() - globalStart, (time.time() - globalStart) / 60) +) diff --git a/configs/example/gem5_library/x86-spec-cpu2017-benchmarks.py b/configs/example/gem5_library/x86-spec-cpu2017-benchmarks.py index 4e77dd0727..c4af7f5dd9 100644 --- a/configs/example/gem5_library/x86-spec-cpu2017-benchmarks.py +++ b/configs/example/gem5_library/x86-spec-cpu2017-benchmarks.py @@ -57,13 +57,15 @@ from m5.objects import Root from gem5.utils.requires import requires from gem5.components.boards.x86_board import X86Board from gem5.components.memory import DualChannelDDR4_2400 -from gem5.components.processors.simple_switchable_processor import( +from gem5.components.processors.simple_switchable_processor import ( SimpleSwitchableProcessor, ) from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA from gem5.coherence_protocol import CoherenceProtocol from gem5.resources.resource import Resource, CustomDiskImageResource +from gem5.simulate.simulator import Simulator +from gem5.simulate.exit_event import ExitEvent from m5.stats.gem5stats import get_simstat from m5.util import warn @@ -81,22 +83,54 @@ requires( # More information is available at: # https://www.gem5.org/documentation/benchmark_status/gem5-20 -benchmark_choices =["500.perlbench_r", "502.gcc_r", "503.bwaves_r", - "505.mcf_r", "507.cactusBSSN_r", "508.namd_r", - "510.parest_r", "511.povray_r", "519.lbm_r", 
- "520.omnetpp_r", "521.wrf_r", "523.xalancbmk_r", - "525.x264_r", "527.cam4_r", "531.deepsjeng_r", - "538.imagick_r", "541.leela_r", "544.nab_r", - "548.exchange2_r", "549.fotonik3d_r", "554.roms_r", - "557.xz_r", "600.perlbench_s", "602.gcc_s", - "603.bwaves_s", "605.mcf_s", "607.cactusBSSN_s", - "608.namd_s", "610.parest_s", "611.povray_s", - "619.lbm_s", "620.omnetpp_s", "621.wrf_s", - "623.xalancbmk_s", "625.x264_s", "627.cam4_s", - "631.deepsjeng_s", "638.imagick_s", "641.leela_s", - "644.nab_s", "648.exchange2_s", "649.fotonik3d_s", - "654.roms_s", "996.specrand_fs", "997.specrand_fr", - "998.specrand_is", "999.specrand_ir" +benchmark_choices = [ + "500.perlbench_r", + "502.gcc_r", + "503.bwaves_r", + "505.mcf_r", + "507.cactusBSSN_r", + "508.namd_r", + "510.parest_r", + "511.povray_r", + "519.lbm_r", + "520.omnetpp_r", + "521.wrf_r", + "523.xalancbmk_r", + "525.x264_r", + "527.cam4_r", + "531.deepsjeng_r", + "538.imagick_r", + "541.leela_r", + "544.nab_r", + "548.exchange2_r", + "549.fotonik3d_r", + "554.roms_r", + "557.xz_r", + "600.perlbench_s", + "602.gcc_s", + "603.bwaves_s", + "605.mcf_s", + "607.cactusBSSN_s", + "608.namd_s", + "610.parest_s", + "611.povray_s", + "619.lbm_s", + "620.omnetpp_s", + "621.wrf_s", + "623.xalancbmk_s", + "625.x264_s", + "627.cam4_s", + "631.deepsjeng_s", + "638.imagick_s", + "641.leela_s", + "644.nab_s", + "648.exchange2_s", + "649.fotonik3d_s", + "654.roms_s", + "996.specrand_fs", + "997.specrand_fr", + "998.specrand_is", + "999.specrand_ir", ] # Following are the input size. @@ -115,34 +149,34 @@ parser = argparse.ArgumentParser( parser.add_argument( "--image", - type = str, - required = True, - help = "Input the full path to the built spec-2017 disk-image." + type=str, + required=True, + help="Input the full path to the built spec-2017 disk-image.", ) parser.add_argument( "--partition", - type = str, - required = False, + type=str, + required=False, default=None, - help = "Input the root partition of the SPEC disk-image. 
If the disk is \ - not partitioned, then pass \"\"." + help='Input the root partition of the SPEC disk-image. If the disk is \ + not partitioned, then pass "".', ) parser.add_argument( "--benchmark", - type = str, - required = True, - help = "Input the benchmark program to execute.", + type=str, + required=True, + help="Input the benchmark program to execute.", choices=benchmark_choices, ) parser.add_argument( "--size", - type = str, - required = True, - help = "Sumulation size the benchmark program.", - choices = size_choices, + type=str, + required=True, + help="Sumulation size the benchmark program.", + choices=size_choices, ) args = parser.parse_args() @@ -157,21 +191,20 @@ if not os.path.exists(args.image): warn("Disk image not found!") print("Instructions on building the disk image can be found at: ") print( - "https://gem5art.readthedocs.io/en/latest/tutorials/spec-tutorial.html" + "https://gem5art.readthedocs.io/en/latest/tutorials/spec-tutorial.html" ) fatal("The disk-image is not found at {}".format(args.image)) # Setting up all the fixed system parameters here # Caches: MESI Two Level Cache Hierarchy -from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import( +from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) cache_hierarchy = MESITwoLevelCacheHierarchy( - l1d_size = "32kB", - l1d_assoc = 8, + l1d_size="32kB", + l1d_assoc=8, l1i_size="32kB", l1i_assoc=8, l2_size="256kB", @@ -181,7 +214,7 @@ cache_hierarchy = MESITwoLevelCacheHierarchy( # Memory: Dual Channel DDR4 2400 DRAM device. # The X86 board only supports 3 GB of main memory. -memory = DualChannelDDR4_2400(size = "3GB") +memory = DualChannelDDR4_2400(size="3GB") # Here we setup the processor. This is a special switchable processor in which # a starting core type and a switch core type must be specified. 
Once a @@ -211,8 +244,8 @@ board = X86Board( # m5.options.outdir and the output from the disk-image folder is copied to # this folder. -output_dir = "speclogs_" + ''.join(x.strip() for x in time.asctime().split()) -output_dir = output_dir.replace(":","") +output_dir = "speclogs_" + "".join(x.strip() for x in time.asctime().split()) +output_dir = output_dir.replace(":", "") # We create this folder if it is absent. try: @@ -242,27 +275,31 @@ board.set_kernel_disk_workload( # The x86 linux kernel will be automatically downloaded to the # `~/.cache/gem5` directory if not already present. # SPEC CPU2017 benchamarks were tested with kernel version 4.19.83 - kernel=Resource( - "x86-linux-kernel-4.19.83", - ), + kernel=Resource("x86-linux-kernel-4.19.83"), # The location of the x86 SPEC CPU 2017 image disk_image=CustomDiskImageResource( - args.image, - disk_root_partition=args.partition, + args.image, disk_root_partition=args.partition ), readfile_contents=command, ) -# We need this for long running processes. -m5.disableAllListeners() -root = Root(full_system = True, system = board) +def handle_exit(): + print("Done bootling Linux") + print("Resetting stats at the start of ROI!") + m5.stats.reset() + yield False # E.g., continue the simulation. + print("Dump stats at the end of the ROI!") + m5.stats.dump() + yield True # Stop the simulation. We're done. -# sim_quantum must be set when KVM cores are used. -root.sim_quantum = int(1e9) - -m5.instantiate() +simulator = Simulator( + board=board, + on_exit_event={ + ExitEvent.EXIT: handle_exit(), + }, +) # We maintain the wall clock time. 
@@ -271,94 +308,22 @@ globalStart = time.time() print("Running the simulation") print("Using KVM cpu") -start_tick = m5.curTick() -end_tick = m5.curTick() m5.stats.reset() -exit_event = m5.simulate() +# We start the simulation +simulator.run() -if exit_event.getCause() == "m5_exit instruction encountered": - # We have completed booting the OS using KVM cpu - # Reached the start of ROI - - print("Done booting Linux") - print("Resetting stats at the start of ROI!") - - m5.stats.reset() - start_tick = m5.curTick() - - # We switch to timing cpu for detailed simulation. - - processor.switch() -else: - print("Unexpected termination of simulation before ROI was reached!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# Simulate the ROI -exit_event = m5.simulate() - -# Reached the end of ROI -gem5stats = get_simstat(root) - -# We get the number of committed instructions from the timing -# cores. We then sum and print them at the end. 
- -roi_insts = float(\ - json.loads(gem5stats.dumps())\ - ["system"]["processor"]["cores2"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"] -) + float(\ - json.loads(gem5stats.dumps())\ - ["system"]["processor"]["cores3"]["core"]["exec_context.thread_0"]\ - ["numInsts"]["value"]\ -) - -if exit_event.getCause() == "m5_exit instruction encountered": - print("Dump stats at the end of the ROI!") - m5.stats.dump() - end_tick = m5.curTick() - m5.stats.reset() - -else: - print("Unexpected termination of simulation while ROI was being executed!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) - -# We need to copy back the contents of the `speclogs' directory to -# m5.options.outdir - -exit_event = m5.simulate() - -if exit_event.getCause() == "m5_exit instruction encountered": - print("Output logs copied!") - -else: - print("Unexpected termination of simulation while copying speclogs!") - print( - "Exiting @ tick {} because {}.".format( - m5.curTick(), - exit_event.getCause() - ) - ) - exit(-1) +# We print the final simulation statistics. 
print("Done with the simulation") print() print("Performance statistics:") -print("Simulated time in ROI: %.2fs" % ((end_tick-start_tick)/1e12)) -print("Instructions executed in ROI: %d" % ((roi_insts))) -print("Ran a total of", m5.curTick()/1e12, "simulated seconds") -print("Total wallclock time: %.2fs, %.2f min" % \ - (time.time()-globalStart, (time.time()-globalStart)/60)) +print("Simulated time in ROI: " + ((str(simulator.get_roi_ticks()[0])))) +print( + "Ran a total of", simulator.get_current_tick() / 1e12, "simulated seconds" +) +print( + "Total wallclock time: %.2fs, %.2f min" + % (time.time() - globalStart, (time.time() - globalStart) / 60) +) diff --git a/configs/example/gem5_library/x86-ubuntu-run-with-kvm.py b/configs/example/gem5_library/x86-ubuntu-run-with-kvm.py index a47eb2b060..f55ec60f21 100644 --- a/configs/example/gem5_library/x86-ubuntu-run-with-kvm.py +++ b/configs/example/gem5_library/x86-ubuntu-run-with-kvm.py @@ -49,9 +49,9 @@ from gem5.components.processors.simple_switchable_processor import ( from gem5.components.processors.cpu_types import CPUTypes from gem5.isas import ISA from gem5.coherence_protocol import CoherenceProtocol -from gem5.resources.resource import Resource from gem5.simulate.simulator import Simulator from gem5.simulate.exit_event import ExitEvent +from gem5.resources.workload import Workload # This runs a check to ensure the gem5 binary is compiled to X86 and to the # MESI Two Level coherence protocol. @@ -61,8 +61,7 @@ requires( kvm_required=True, ) -from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import ( +from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -111,21 +110,17 @@ board = X86Board( # then, again, call `m5 exit` to terminate the simulation. After simulation # has ended you may inspect `m5out/system.pc.com_1.device` to see the echo # output. 
-command = "m5 exit;" \ - + "echo 'This is running on Timing CPU cores.';" \ - + "sleep 1;" \ - + "m5 exit;" - -board.set_kernel_disk_workload( - # The x86 linux kernel will be automatically downloaded to the if not - # already present. - kernel=Resource("x86-linux-kernel-5.4.49"), - # The x86 ubuntu image will be automatically downloaded to the if not - # already present. - disk_image=Resource("x86-ubuntu-18.04-img"), - readfile_contents=command, +command = ( + "m5 exit;" + + "echo 'This is running on Timing CPU cores.';" + + "sleep 1;" + + "m5 exit;" ) +workload = Workload("x86-ubuntu-18.04-boot") +workload.set_parameter("readfile_contents", command) +board.set_workload(workload) + simulator = Simulator( board=board, on_exit_event={ @@ -133,7 +128,7 @@ simulator = Simulator( # exit event. Instead of exiting the simulator, we just want to # switch the processor. The 2nd m5 exit after will revert to using # default behavior where the simulator run will exit. - ExitEvent.EXIT : (func() for func in [processor.switch]), + ExitEvent.EXIT: (func() for func in [processor.switch]) }, ) simulator.run() diff --git a/configs/example/gem5_library/x86-ubuntu-run.py b/configs/example/gem5_library/x86-ubuntu-run.py index 2aee8c73df..50b52e6e3c 100644 --- a/configs/example/gem5_library/x86-ubuntu-run.py +++ b/configs/example/gem5_library/x86-ubuntu-run.py @@ -45,7 +45,7 @@ scons build/X86/gem5.opt """ from gem5.prebuilt.demo.x86_demo_board import X86DemoBoard -from gem5.resources.resource import Resource +from gem5.resources.workload import Workload from gem5.simulate.simulator import Simulator @@ -53,13 +53,10 @@ from gem5.simulate.simulator import Simulator # simulation. board = X86DemoBoard() -# We then set the workload. Here we use the 5.4.49 Linux kernel with an X86 -# Ubuntu OS. If these cannot be found locally they will be automatically -# downloaded. 
-board.set_kernel_disk_workload( - kernel=Resource("x86-linux-kernel-5.4.49"), - disk_image=Resource("x86-ubuntu-18.04-img"), -) +# We then set the workload. Here we use the "x86-ubuntu-18.04-boot" workload. +# This boots Ubuntu 18.04 with Linux 5.4.49. If the required resources are not +# found locally, they will be downloaded. +board.set_workload(Workload("x86-ubuntu-18.04-boot")) simulator = Simulator(board=board) simulator.run() diff --git a/configs/example/gpufs/DisjointNetwork.py b/configs/example/gpufs/DisjointNetwork.py index e1838bb209..1d7f708967 100644 --- a/configs/example/gpufs/DisjointNetwork.py +++ b/configs/example/gpufs/DisjointNetwork.py @@ -34,8 +34,8 @@ from importlib import * from network import Network -class DisjointSimple(SimpleNetwork): +class DisjointSimple(SimpleNetwork): def __init__(self, ruby_system): super(DisjointSimple, self).__init__() @@ -51,8 +51,7 @@ class DisjointSimple(SimpleNetwork): topo_module = import_module("topologies.%s" % opts.cpu_topology) topo_class = getattr(topo_module, opts.cpu_topology) _topo = topo_class(controllers) - _topo.makeTopology(opts, self, SimpleIntLink, - SimpleExtLink, Switch) + _topo.makeTopology(opts, self, SimpleIntLink, SimpleExtLink, Switch) self.initSimple(opts, self.int_links, self.ext_links) @@ -62,12 +61,10 @@ class DisjointSimple(SimpleNetwork): topo_module = import_module("topologies.%s" % opts.gpu_topology) topo_class = getattr(topo_module, opts.gpu_topology) _topo = topo_class(controllers) - _topo.makeTopology(opts, self, SimpleIntLink, - SimpleExtLink, Switch) + _topo.makeTopology(opts, self, SimpleIntLink, SimpleExtLink, Switch) self.initSimple(opts, self.int_links, self.ext_links) - def initSimple(self, opts, int_links, ext_links): # Attach links to network @@ -76,8 +73,8 @@ class DisjointSimple(SimpleNetwork): self.setup_buffers() -class DisjointGarnet(GarnetNetwork): +class DisjointGarnet(GarnetNetwork): def __init__(self, ruby_system): super(DisjointGarnet, self).__init__() @@ 
-90,8 +87,9 @@ class DisjointGarnet(GarnetNetwork): topo_module = import_module("topologies.%s" % opts.cpu_topology) topo_class = getattr(topo_module, opts.cpu_topology) _topo = topo_class(controllers) - _topo.makeTopology(opts, self, GarnetIntLink, - GarnetExtLink, GarnetRouter) + _topo.makeTopology( + opts, self, GarnetIntLink, GarnetExtLink, GarnetRouter + ) Network.init_network(opts, self, GarnetNetworkInterface) @@ -101,7 +99,8 @@ class DisjointGarnet(GarnetNetwork): topo_module = import_module("topologies.%s" % opts.gpu_topology) topo_class = getattr(topo_module, opts.gpu_topology) _topo = topo_class(controllers) - _topo.makeTopology(opts, self, GarnetIntLink, - GarnetExtLink, GarnetRouter) + _topo.makeTopology( + opts, self, GarnetIntLink, GarnetExtLink, GarnetRouter + ) Network.init_network(opts, self, GarnetNetworkInterface) diff --git a/configs/example/gpufs/Disjoint_VIPER.py b/configs/example/gpufs/Disjoint_VIPER.py index 8ddaeac97e..14b47d8cf0 100644 --- a/configs/example/gpufs/Disjoint_VIPER.py +++ b/configs/example/gpufs/Disjoint_VIPER.py @@ -36,7 +36,7 @@ from ruby.GPU_VIPER import * from ruby import Ruby -class DummySystem(): +class DummySystem: def __init__(self, mem_ranges): self.mem_ctrls = [] @@ -45,7 +45,7 @@ class DummySystem(): class Disjoint_VIPER(RubySystem): def __init__(self): - if buildEnv['PROTOCOL'] != "GPU_VIPER": + if buildEnv["PROTOCOL"] != "GPU_VIPER": fatal("This ruby config only supports the GPU_VIPER protocol") super(Disjoint_VIPER, self).__init__() @@ -60,29 +60,33 @@ class Disjoint_VIPER(RubySystem): self.network_cpu = DisjointSimple(self) self.network_gpu = DisjointSimple(self) - # Construct CPU controllers - cpu_dir_nodes = \ - construct_dirs(options, system, self, self.network_cpu) - (cp_sequencers, cp_cntrl_nodes) = \ - construct_corepairs(options, system, self, self.network_cpu) + cpu_dir_nodes = construct_dirs(options, system, self, self.network_cpu) + (cp_sequencers, cp_cntrl_nodes) = construct_corepairs( + options, 
system, self, self.network_cpu + ) # Construct GPU controllers - (tcp_sequencers, tcp_cntrl_nodes) = \ - construct_tcps(options, system, self, self.network_gpu) - (sqc_sequencers, sqc_cntrl_nodes) = \ - construct_sqcs(options, system, self, self.network_gpu) - (scalar_sequencers, scalar_cntrl_nodes) = \ - construct_scalars(options, system, self, self.network_gpu) - tcc_cntrl_nodes = \ - construct_tccs(options, system, self, self.network_gpu) + (tcp_sequencers, tcp_cntrl_nodes) = construct_tcps( + options, system, self, self.network_gpu + ) + (sqc_sequencers, sqc_cntrl_nodes) = construct_sqcs( + options, system, self, self.network_gpu + ) + (scalar_sequencers, scalar_cntrl_nodes) = construct_scalars( + options, system, self, self.network_gpu + ) + tcc_cntrl_nodes = construct_tccs( + options, system, self, self.network_gpu + ) # Construct CPU memories Ruby.setup_memory_controllers(system, self, cpu_dir_nodes, options) # Construct GPU memories - (gpu_dir_nodes, gpu_mem_ctrls) = \ - construct_gpudirs(options, system, self, self.network_gpu) + (gpu_dir_nodes, gpu_mem_ctrls) = construct_gpudirs( + options, system, self, self.network_gpu + ) # Configure the directories based on which network they are in for cpu_dir_node in cpu_dir_nodes: @@ -115,11 +119,12 @@ class Disjoint_VIPER(RubySystem): dma_cntrls = [] for i, dma_device in enumerate(dma_devices): dma_seq = DMASequencer(version=i, ruby_system=self) - dma_cntrl = DMA_Controller(version=i, dma_sequencer=dma_seq, - ruby_system=self) + dma_cntrl = DMA_Controller( + version=i, dma_sequencer=dma_seq, ruby_system=self + ) # Handle inconsistently named ports on various DMA devices: - if not hasattr(dma_device, 'type'): + if not hasattr(dma_device, "type"): # IDE doesn't have a .type but seems like everything else does. 
dma_seq.in_ports = dma_device elif dma_device.type in gpu_dma_types: @@ -127,13 +132,15 @@ class Disjoint_VIPER(RubySystem): else: dma_seq.in_ports = dma_device.dma - if hasattr(dma_device, 'type') and \ - dma_device.type in gpu_dma_types: + if ( + hasattr(dma_device, "type") + and dma_device.type in gpu_dma_types + ): dma_cntrl.requestToDir = MessageBuffer(buffer_size=0) dma_cntrl.requestToDir.out_port = self.network_gpu.in_port dma_cntrl.responseFromDir = MessageBuffer(buffer_size=0) dma_cntrl.responseFromDir.in_port = self.network_gpu.out_port - dma_cntrl.mandatoryQueue = MessageBuffer(buffer_size = 0) + dma_cntrl.mandatoryQueue = MessageBuffer(buffer_size=0) gpu_dma_ctrls.append(dma_cntrl) else: @@ -141,7 +148,7 @@ class Disjoint_VIPER(RubySystem): dma_cntrl.requestToDir.out_port = self.network_cpu.in_port dma_cntrl.responseFromDir = MessageBuffer(buffer_size=0) dma_cntrl.responseFromDir.in_port = self.network_cpu.out_port - dma_cntrl.mandatoryQueue = MessageBuffer(buffer_size = 0) + dma_cntrl.mandatoryQueue = MessageBuffer(buffer_size=0) cpu_dma_ctrls.append(dma_cntrl) @@ -149,32 +156,32 @@ class Disjoint_VIPER(RubySystem): system.dma_cntrls = dma_cntrls - # Collect CPU and GPU controllers into seperate lists cpu_cntrls = cpu_dir_nodes + cp_cntrl_nodes + cpu_dma_ctrls - gpu_cntrls = tcp_cntrl_nodes + sqc_cntrl_nodes + \ - scalar_cntrl_nodes + tcc_cntrl_nodes + gpu_dma_ctrls + \ - gpu_dir_nodes - + gpu_cntrls = ( + tcp_cntrl_nodes + + sqc_cntrl_nodes + + scalar_cntrl_nodes + + tcc_cntrl_nodes + + gpu_dma_ctrls + + gpu_dir_nodes + ) # Setup number of vnets self.number_of_virtual_networks = 11 self.network_cpu.number_of_virtual_networks = 11 self.network_gpu.number_of_virtual_networks = 11 - # Set up the disjoint topology self.network_cpu.connectCPU(options, cpu_cntrls) self.network_gpu.connectGPU(options, gpu_cntrls) - # Create port proxy for connecting system port. System port is used # for loading from outside guest, e.g., binaries like vmlinux. 
- system.sys_port_proxy = RubyPortProxy(ruby_system = self) + system.sys_port_proxy = RubyPortProxy(ruby_system=self) system.sys_port_proxy.pio_request_port = piobus.cpu_side_ports system.system_port = system.sys_port_proxy.in_ports - # Only CPU sequencers connect to PIO bus. This acts as the "default" # destination for unknown address ranges. PCIe requests fall under # this category. @@ -188,9 +195,9 @@ class Disjoint_VIPER(RubySystem): if i < options.num_cpus: cp_sequencers[i].pio_response_port = piobus.mem_side_ports - # Setup ruby port. Both CPU and GPU are actually connected here. - all_sequencers = cp_sequencers + tcp_sequencers + \ - sqc_sequencers + scalar_sequencers + all_sequencers = ( + cp_sequencers + tcp_sequencers + sqc_sequencers + scalar_sequencers + ) self._cpu_ports = all_sequencers self.num_of_sequencers = len(all_sequencers) diff --git a/configs/example/gpufs/amd/AmdGPUOptions.py b/configs/example/gpufs/amd/AmdGPUOptions.py index 51e010870d..531249ee84 100644 --- a/configs/example/gpufs/amd/AmdGPUOptions.py +++ b/configs/example/gpufs/amd/AmdGPUOptions.py @@ -27,99 +27,223 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
+ def addAmdGPUOptions(parser): - parser.add_argument("-u", "--num-compute-units", type=int, default=4, - help="number of GPU compute units"), - parser.add_argument("--num-cp", type=int, default=0, - help="Number of GPU Command Processors (CP)") + parser.add_argument( + "-u", + "--num-compute-units", + type=int, + default=4, + help="number of GPU compute units", + ), + parser.add_argument( + "--num-cp", + type=int, + default=0, + help="Number of GPU Command Processors (CP)", + ) # not super important now, but to avoid putting the number 4 everywhere, # make it an option/knob - parser.add_argument("--cu-per-sqc", type=int, default=4, - help="number of CUs sharing an SQC" - " (icache, and thus icache TLB)") - parser.add_argument("--cu-per-scalar-cache", type=int, default=4, - help="Number of CUs sharing a scalar cache") - parser.add_argument("--simds-per-cu", type=int, default=4, - help="SIMD units per CU") - parser.add_argument("--cu-per-sa", type=int, default=4, - help="Number of CUs per shader array. 
This must be a" - " multiple of options.cu-per-sqc and " - " options.cu-per-scalar") - parser.add_argument("--sa-per-complex", type=int, default=1, - help="Number of shader arrays per complex") - parser.add_argument("--num-gpu-complexes", type=int, default=1, - help="Number of GPU complexes") - parser.add_argument("--wf-size", type=int, default=64, - help="Wavefront size(in workitems)") - parser.add_argument("--sp-bypass-path-length", type=int, default=4, - help="Number of stages of bypass path in vector ALU " - "for Single Precision ops") - parser.add_argument("--dp-bypass-path-length", type=int, default=4, - help="Number of stages of bypass path in vector ALU " - "for Double Precision ops") - #issue period per SIMD unit: number of cycles before issuing another vector - parser.add_argument("--issue-period", type=int, default=4, - help="Number of cycles per vector instruction issue" - " period") - parser.add_argument("--glbmem-wr-bus-width", type=int, default=32, - help="VGPR to Coalescer (Global Memory) data bus width" - " in bytes") - parser.add_argument("--glbmem-rd-bus-width", type=int, default=32, - help="Coalescer to VGPR (Global Memory) data bus width" - " in bytes") + parser.add_argument( + "--cu-per-sqc", + type=int, + default=4, + help="number of CUs sharing an SQC" " (icache, and thus icache TLB)", + ) + parser.add_argument( + "--cu-per-scalar-cache", + type=int, + default=4, + help="Number of CUs sharing a scalar cache", + ) + parser.add_argument( + "--simds-per-cu", type=int, default=4, help="SIMD units per CU" + ) + parser.add_argument( + "--cu-per-sa", + type=int, + default=4, + help="Number of CUs per shader array. 
This must be a" + " multiple of options.cu-per-sqc and " + " options.cu-per-scalar", + ) + parser.add_argument( + "--sa-per-complex", + type=int, + default=1, + help="Number of shader arrays per complex", + ) + parser.add_argument( + "--num-gpu-complexes", + type=int, + default=1, + help="Number of GPU complexes", + ) + parser.add_argument( + "--wf-size", type=int, default=64, help="Wavefront size(in workitems)" + ) + parser.add_argument( + "--sp-bypass-path-length", + type=int, + default=4, + help="Number of stages of bypass path in vector ALU " + "for Single Precision ops", + ) + parser.add_argument( + "--dp-bypass-path-length", + type=int, + default=4, + help="Number of stages of bypass path in vector ALU " + "for Double Precision ops", + ) + # issue period per SIMD unit: number of cycles before issuing another vector + parser.add_argument( + "--issue-period", + type=int, + default=4, + help="Number of cycles per vector instruction issue" " period", + ) + parser.add_argument( + "--glbmem-wr-bus-width", + type=int, + default=32, + help="VGPR to Coalescer (Global Memory) data bus width" " in bytes", + ) + parser.add_argument( + "--glbmem-rd-bus-width", + type=int, + default=32, + help="Coalescer to VGPR (Global Memory) data bus width" " in bytes", + ) # Currently we only support 1 local memory pipe - parser.add_argument("--shr-mem-pipes-per-cu", type=int, default=1, - help="Number of Shared Memory pipelines per CU") + parser.add_argument( + "--shr-mem-pipes-per-cu", + type=int, + default=1, + help="Number of Shared Memory pipelines per CU", + ) # Currently we only support 1 global memory pipe - parser.add_argument("--glb-mem-pipes-per-cu", type=int, default=1, - help="Number of Global Memory pipelines per CU") - parser.add_argument("--wfs-per-simd", type=int, default=10, - help="Number of WF slots per SIMD") + parser.add_argument( + "--glb-mem-pipes-per-cu", + type=int, + default=1, + help="Number of Global Memory pipelines per CU", + ) + parser.add_argument( + 
"--wfs-per-simd", + type=int, + default=10, + help="Number of WF slots per SIMD", + ) - parser.add_argument("--registerManagerPolicy", type=str, - default="static", help="Register manager policy") - parser.add_argument("--vreg-file-size", type=int, default=2048, - help="number of physical vector registers per SIMD") - parser.add_argument("--vreg-min-alloc", type=int, default=4, - help="vector register reservation unit") + parser.add_argument( + "--registerManagerPolicy", + type=str, + default="static", + help="Register manager policy", + ) + parser.add_argument( + "--vreg-file-size", + type=int, + default=2048, + help="number of physical vector registers per SIMD", + ) + parser.add_argument( + "--vreg-min-alloc", + type=int, + default=4, + help="vector register reservation unit", + ) - parser.add_argument("--sreg-file-size", type=int, default=2048, - help="number of physical scalar registers per SIMD") - parser.add_argument("--sreg-min-alloc", type=int, default=4, - help="scalar register reservation unit") + parser.add_argument( + "--sreg-file-size", + type=int, + default=2048, + help="number of physical scalar registers per SIMD", + ) + parser.add_argument( + "--sreg-min-alloc", + type=int, + default=4, + help="scalar register reservation unit", + ) - parser.add_argument("--bw-scalor", type=int, default=0, - help="bandwidth scalor for scalability analysis") - parser.add_argument("--CPUClock", type=str, default="2GHz", - help="CPU clock") - parser.add_argument("--gpu-clock", type=str, default="1GHz", - help="GPU clock") - parser.add_argument("--cpu-voltage", action="store", type=str, - default='1.0V', help="CPU voltage domain") - parser.add_argument("--gpu-voltage", action="store", type=str, - default='1.0V', help="GPU voltage domain") - parser.add_argument("--CUExecPolicy", type=str, default="OLDEST-FIRST", - help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)") - parser.add_argument("--LocalMemBarrier",action="store_true", - help="Barrier does not wait for 
writethroughs to " - " complete") - parser.add_argument("--countPages", action="store_true", - help="Count Page Accesses and output in " - " per-CU output files") - parser.add_argument("--TLB-prefetch", type=int, help="prefetch depth for" - "TLBs") - parser.add_argument("--pf-type", type=str, help="type of prefetch: " - "PF_CU, PF_WF, PF_PHASE, PF_STRIDE") + parser.add_argument( + "--bw-scalor", + type=int, + default=0, + help="bandwidth scalor for scalability analysis", + ) + parser.add_argument( + "--CPUClock", type=str, default="2GHz", help="CPU clock" + ) + parser.add_argument( + "--gpu-clock", type=str, default="1GHz", help="GPU clock" + ) + parser.add_argument( + "--cpu-voltage", + action="store", + type=str, + default="1.0V", + help="CPU voltage domain", + ) + parser.add_argument( + "--gpu-voltage", + action="store", + type=str, + default="1.0V", + help="GPU voltage domain", + ) + parser.add_argument( + "--CUExecPolicy", + type=str, + default="OLDEST-FIRST", + help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)", + ) + parser.add_argument( + "--LocalMemBarrier", + action="store_true", + help="Barrier does not wait for writethroughs to " " complete", + ) + parser.add_argument( + "--countPages", + action="store_true", + help="Count Page Accesses and output in " " per-CU output files", + ) + parser.add_argument( + "--TLB-prefetch", type=int, help="prefetch depth for" "TLBs" + ) + parser.add_argument( + "--pf-type", + type=str, + help="type of prefetch: " "PF_CU, PF_WF, PF_PHASE, PF_STRIDE", + ) parser.add_argument("--pf-stride", type=int, help="set prefetch stride") - parser.add_argument("--numLdsBanks", type=int, default=32, - help="number of physical banks per LDS module") - parser.add_argument("--ldsBankConflictPenalty", type=int, default=1, - help="number of cycles per LDS bank conflict") - parser.add_argument("--lds-size", type=int, default=65536, - help="Size of the LDS in bytes") - parser.add_argument("--num-hw-queues", type=int, default=10, - help="number 
of hw queues in packet processor") - parser.add_argument("--reg-alloc-policy", type=str, default="simple", - help="register allocation policy (simple/dynamic)") - + parser.add_argument( + "--numLdsBanks", + type=int, + default=32, + help="number of physical banks per LDS module", + ) + parser.add_argument( + "--ldsBankConflictPenalty", + type=int, + default=1, + help="number of cycles per LDS bank conflict", + ) + parser.add_argument( + "--lds-size", type=int, default=65536, help="Size of the LDS in bytes" + ) + parser.add_argument( + "--num-hw-queues", + type=int, + default=10, + help="number of hw queues in packet processor", + ) + parser.add_argument( + "--reg-alloc-policy", + type=str, + default="simple", + help="register allocation policy (simple/dynamic)", + ) diff --git a/configs/example/gpufs/hip_cookbook.py b/configs/example/gpufs/hip_cookbook.py index cd0e2845a0..1c22be52da 100644 --- a/configs/example/gpufs/hip_cookbook.py +++ b/configs/example/gpufs/hip_cookbook.py @@ -39,9 +39,9 @@ from common import Options from common import GPUTLBOptions from ruby import Ruby -cookbook_runscript = '''\ +cookbook_runscript = """\ export LD_LIBRARY_PATH=/opt/rocm/lib:$LD_LIBRARY_PATH -export HSA_ENABLE_SDMA=0 +export HSA_ENABLE_INTERRUPT=0 dmesg -n3 dd if=/root/roms/vega10.rom of=/dev/mem bs=1k seek=768 count=128 if [ ! 
-f /lib/modules/`uname -r`/updates/dkms/amdgpu.ko ]; then @@ -54,27 +54,36 @@ cd /opt/rocm/hip/samples/2_Cookbook/{}/ make clean make /sbin/m5 exit -''' +""" + def addCookbookOptions(parser): - parser.add_argument("-a", "--app", default=None, - choices=['0_MatrixTranspose', - '1_hipEvent', - '3_shared_memory', - '4_shfl', - '5_2dshfl', - '6_dynamic_shared', - '7_streams', - '8_peer2peer', - '9_unroll', - '10_inline_asm', - '11_texture_driver', - '13_occupancy', - '14_gpu_arch', - '15_static_library'], - help="GPU application to run") - parser.add_argument("-o", "--opts", default="", - help="GPU application arguments") + parser.add_argument( + "-a", + "--app", + default=None, + choices=[ + "0_MatrixTranspose", + "1_hipEvent", + "3_shared_memory", + "4_shfl", + "5_2dshfl", + "6_dynamic_shared", + "7_streams", + "8_peer2peer", + "9_unroll", + "10_inline_asm", + "11_texture_driver", + "13_occupancy", + "14_gpu_arch", + "15_static_library", + ], + help="GPU application to run", + ) + parser.add_argument( + "-o", "--opts", default="", help="GPU application arguments" + ) + if __name__ == "__m5_main__": parser = argparse.ArgumentParser() @@ -99,12 +108,13 @@ if __name__ == "__m5_main__": print("No disk path given. Use %s --disk-image " % sys.argv[0]) sys.exit(1) elif args.gpu_mmio_trace is None: - print("No MMIO trace path. Use %s --gpu-mmio-trace " - % sys.argv[0]) + print( + "No MMIO trace path. 
Use %s --gpu-mmio-trace " % sys.argv[0] + ) sys.exit(1) _, tempRunscript = tempfile.mkstemp() - with open(tempRunscript, 'w') as b64file: + with open(tempRunscript, "w") as b64file: runscriptStr = cookbook_runscript.format(args.app, args.app) b64file.write(runscriptStr) @@ -113,12 +123,12 @@ if __name__ == "__m5_main__": # Defaults for Vega10 args.ruby = True - args.cpu_type = 'X86KvmCPU' + args.cpu_type = "X86KvmCPU" args.num_cpus = 1 - args.mem_size = '3GB' + args.mem_size = "3GB" args.dgpu = True - args.dgpu_mem_size = '16GB' - args.dgpu_start = '0GB' + args.dgpu_mem_size = "16GB" + args.dgpu_start = "0GB" args.checkpoint_restore = 0 args.disjoint = True args.timing_gpu = True diff --git a/configs/example/gpufs/hip_rodinia.py b/configs/example/gpufs/hip_rodinia.py index 3d7cef477f..a6c7c504c1 100644 --- a/configs/example/gpufs/hip_rodinia.py +++ b/configs/example/gpufs/hip_rodinia.py @@ -40,9 +40,9 @@ from common import Options from common import GPUTLBOptions from ruby import Ruby -rodinia_runscript = '''\ +rodinia_runscript = """\ export LD_LIBRARY_PATH=/opt/rocm/lib:$LD_LIBRARY_PATH -export HSA_ENABLE_SDMA=0 +export HSA_ENABLE_INTERRUPT=0 dmesg -n3 dd if=/root/roms/vega10.rom of=/dev/mem bs=1k seek=768 count=128 if [ ! 
-f /lib/modules/`uname -r`/updates/dkms/amdgpu.ko ]; then @@ -56,33 +56,42 @@ make clean make make test /sbin/m5 exit -''' +""" + def addRodiniaOptions(parser): - parser.add_argument("-a", "--app", default=None, - choices=['b+tree', - 'backprop', - 'bfs', - 'cfd', - 'dwt2d', - 'gaussian', - 'heartwall', - 'hotspot', - 'hybridsort', - 'kmeans', - 'lavaMD', - 'leukocyte', - 'lud', - 'myocyte', - 'nn', - 'nw', - 'particlefilter', - 'pathfinder', - 'srad', - 'streamcluster'], - help="GPU application to run") - parser.add_argument("-o", "--opts", default="", - help="GPU application arguments") + parser.add_argument( + "-a", + "--app", + default=None, + choices=[ + "b+tree", + "backprop", + "bfs", + "cfd", + "dwt2d", + "gaussian", + "heartwall", + "hotspot", + "hybridsort", + "kmeans", + "lavaMD", + "leukocyte", + "lud", + "myocyte", + "nn", + "nw", + "particlefilter", + "pathfinder", + "srad", + "streamcluster", + ], + help="GPU application to run", + ) + parser.add_argument( + "-o", "--opts", default="", help="GPU application arguments" + ) + if __name__ == "__m5_main__": parser = argparse.ArgumentParser() @@ -107,12 +116,13 @@ if __name__ == "__m5_main__": print("No disk path given. Use %s --disk-image " % sys.argv[0]) sys.exit(1) elif args.gpu_mmio_trace is None: - print("No MMIO trace path. Use %s --gpu-mmio-trace " - % sys.argv[0]) + print( + "No MMIO trace path. 
Use %s --gpu-mmio-trace " % sys.argv[0] + ) sys.exit(1) _, tempRunscript = tempfile.mkstemp() - with open(tempRunscript, 'w') as b64file: + with open(tempRunscript, "w") as b64file: runscriptStr = rodinia_runscript.format(args.app, args.app) b64file.write(runscriptStr) @@ -121,12 +131,12 @@ if __name__ == "__m5_main__": # Defaults for Vega10 args.ruby = True - args.cpu_type = 'X86KvmCPU' + args.cpu_type = "X86KvmCPU" args.num_cpus = 1 - args.mem_size = '3GB' + args.mem_size = "3GB" args.dgpu = True - args.dgpu_mem_size = '16GB' - args.dgpu_start = '0GB' + args.dgpu_mem_size = "16GB" + args.dgpu_start = "0GB" args.checkpoint_restore = 0 args.disjoint = True args.timing_gpu = True diff --git a/configs/example/gpufs/hip_samples.py b/configs/example/gpufs/hip_samples.py index b3a66a7717..0d9263e128 100644 --- a/configs/example/gpufs/hip_samples.py +++ b/configs/example/gpufs/hip_samples.py @@ -39,9 +39,9 @@ from common import Options from common import GPUTLBOptions from ruby import Ruby -samples_runscript = '''\ +samples_runscript = """\ export LD_LIBRARY_PATH=/opt/rocm/lib:$LD_LIBRARY_PATH -export HSA_ENABLE_SDMA=0 +export HSA_ENABLE_INTERRUPT=0 dmesg -n3 dd if=/root/roms/vega10.rom of=/dev/mem bs=1k seek=768 count=128 if [ ! 
-f /lib/modules/`uname -r`/updates/dkms/amdgpu.ko ]; then @@ -54,25 +54,34 @@ cd /home/gem5/HIP-Examples/HIP-Examples-Applications/{}/ make clean make /sbin/m5 exit -''' +""" + def addSamplesOptions(parser): - parser.add_argument("-a", "--app", default=None, - choices=['BinomialOption', - 'BitonicSort', - 'FastWalshTransform', - 'FloydWarshall', - 'HelloWorld', - 'Histogram', - 'MatrixMultiplication', - 'PrefixSum', - 'RecursiveGaussian', - 'SimpleConvolution', - 'dct', - 'dwtHaar1D'], - help="GPU application to run") - parser.add_argument("-o", "--opts", default="", - help="GPU application arguments") + parser.add_argument( + "-a", + "--app", + default=None, + choices=[ + "BinomialOption", + "BitonicSort", + "FastWalshTransform", + "FloydWarshall", + "HelloWorld", + "Histogram", + "MatrixMultiplication", + "PrefixSum", + "RecursiveGaussian", + "SimpleConvolution", + "dct", + "dwtHaar1D", + ], + help="GPU application to run", + ) + parser.add_argument( + "-o", "--opts", default="", help="GPU application arguments" + ) + if __name__ == "__m5_main__": parser = argparse.ArgumentParser() @@ -97,12 +106,13 @@ if __name__ == "__m5_main__": print("No disk path given. Use %s --disk-image " % sys.argv[0]) sys.exit(1) elif args.gpu_mmio_trace is None: - print("No MMIO trace path. Use %s --gpu-mmio-trace " - % sys.argv[0]) + print( + "No MMIO trace path. 
Use %s --gpu-mmio-trace " % sys.argv[0] + ) sys.exit(1) _, tempRunscript = tempfile.mkstemp() - with open(tempRunscript, 'w') as b64file: + with open(tempRunscript, "w") as b64file: runscriptStr = samples_runscript.format(args.app, args.app) b64file.write(runscriptStr) @@ -111,12 +121,12 @@ if __name__ == "__m5_main__": # Defaults for Vega10 args.ruby = True - args.cpu_type = 'X86KvmCPU' + args.cpu_type = "X86KvmCPU" args.num_cpus = 1 - args.mem_size = '3GB' + args.mem_size = "3GB" args.dgpu = True - args.dgpu_mem_size = '16GB' - args.dgpu_start = '0GB' + args.dgpu_mem_size = "16GB" + args.dgpu_start = "0GB" args.checkpoint_restore = 0 args.disjoint = True args.timing_gpu = True diff --git a/configs/example/gpufs/runfs.py b/configs/example/gpufs/runfs.py index b198552b71..781ce8e27c 100644 --- a/configs/example/gpufs/runfs.py +++ b/configs/example/gpufs/runfs.py @@ -37,7 +37,7 @@ from m5.objects import * from m5.util import addToPath # gem5 options and objects -addToPath('../../') +addToPath("../../") from ruby import Ruby from common import Simulation from common import ObjectList @@ -51,58 +51,111 @@ from system.system import makeGpuFSSystem def addRunFSOptions(parser): - parser.add_argument("--script", default=None, - help="Script to execute in the simulated system") - parser.add_argument("--host-parallel", default=False, - action="store_true", - help="Run multiple host threads in KVM mode") - parser.add_argument("--restore-dir", type=str, default=None, - help="Directory to restore checkpoints from") - parser.add_argument("--disk-image", default="", - help="The boot disk image to mount (/dev/sda)") - parser.add_argument("--second-disk", default=None, - help="The second disk image to mount (/dev/sdb)") + parser.add_argument( + "--script", + default=None, + help="Script to execute in the simulated system", + ) + parser.add_argument( + "--host-parallel", + default=False, + action="store_true", + help="Run multiple host threads in KVM mode", + ) + 
parser.add_argument( + "--restore-dir", + type=str, + default=None, + help="Directory to restore checkpoints from", + ) + parser.add_argument( + "--disk-image", + default="", + help="The boot disk image to mount (/dev/sda)", + ) + parser.add_argument( + "--second-disk", + default=None, + help="The second disk image to mount (/dev/sdb)", + ) parser.add_argument("--kernel", default=None, help="Linux kernel to boot") parser.add_argument("--gpu-rom", default=None, help="GPU BIOS to load") - parser.add_argument("--gpu-mmio-trace", default=None, - help="GPU MMIO trace to load") - parser.add_argument("--checkpoint-before-mmios", default=False, - action="store_true", - help="Take a checkpoint before driver sends MMIOs. " - "This is used to switch out of KVM mode and into " - "timing mode required to read the VGA ROM on boot.") - parser.add_argument("--cpu-topology", type=str, default="Crossbar", - help="Network topology to use for CPU side. " - "Check configs/topologies for complete set") - parser.add_argument("--gpu-topology", type=str, default="Crossbar", - help="Network topology to use for GPU side. " - "Check configs/topologies for complete set") - parser.add_argument("--dgpu-mem-size", action="store", type=str, - default="16GB", help="Specify the dGPU physical memory" - " size") - parser.add_argument("--dgpu-num-dirs", type=int, default=1, help="Set " - "the number of dGPU directories (memory controllers") - parser.add_argument("--dgpu-mem-type", default="HBM_1000_4H_1x128", - choices=ObjectList.mem_list.get_names(), - help="type of memory to use") + parser.add_argument( + "--gpu-mmio-trace", default=None, help="GPU MMIO trace to load" + ) + parser.add_argument( + "--checkpoint-before-mmios", + default=False, + action="store_true", + help="Take a checkpoint before driver sends MMIOs. 
" + "This is used to switch out of KVM mode and into " + "timing mode required to read the VGA ROM on boot.", + ) + parser.add_argument( + "--cpu-topology", + type=str, + default="Crossbar", + help="Network topology to use for CPU side. " + "Check configs/topologies for complete set", + ) + parser.add_argument( + "--gpu-topology", + type=str, + default="Crossbar", + help="Network topology to use for GPU side. " + "Check configs/topologies for complete set", + ) + parser.add_argument( + "--dgpu-mem-size", + action="store", + type=str, + default="16GB", + help="Specify the dGPU physical memory" " size", + ) + parser.add_argument( + "--dgpu-num-dirs", + type=int, + default=1, + help="Set " "the number of dGPU directories (memory controllers", + ) + parser.add_argument( + "--dgpu-mem-type", + default="HBM_1000_4H_1x128", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", + ) + def runGpuFSSystem(args): - ''' + """ This function can be called by higher level scripts designed to simulate specific devices. As a result the scripts typically hard code some args that should not be changed by the user. - ''' + """ + + # GPUFS is primarily designed to use the X86 KVM CPU. This model needs to + # use multiple event queues when more than one CPU is simulated. Force it + # on if that is the case. + args.host_parallel = True if args.num_cpus > 1 else False # These are used by the protocols. They should not be set by the user. 
n_cu = args.num_compute_units args.num_sqc = int(math.ceil(float(n_cu) / args.cu_per_sqc)) - args.num_scalar_cache = \ - int(math.ceil(float(n_cu) / args.cu_per_scalar_cache)) + args.num_scalar_cache = int( + math.ceil(float(n_cu) / args.cu_per_scalar_cache) + ) system = makeGpuFSSystem(args) - root = Root(full_system = True, system = system, - time_sync_enable = True, time_sync_period = '1000us') + root = Root( + full_system=True, + system=system, + time_sync_enable=True, + time_sync_period="1000us", + ) + + if args.host_parallel: + root.sim_quantum = int(1e8) if args.script is not None: system.readfile = args.script @@ -112,7 +165,6 @@ def runGpuFSSystem(args): else: m5.instantiate(args.restore_dir) - print("Running the simulation") sim_ticks = args.abs_max_tick @@ -120,20 +172,24 @@ def runGpuFSSystem(args): # Keep executing while there is something to do while True: - if exit_event.getCause() == "m5_exit instruction encountered" or \ - exit_event.getCause() == "user interrupt received" or \ - exit_event.getCause() == "simulate() limit reached": + if ( + exit_event.getCause() == "m5_exit instruction encountered" + or exit_event.getCause() == "user interrupt received" + or exit_event.getCause() == "simulate() limit reached" + ): break elif "checkpoint" in exit_event.getCause(): - assert(args.checkpoint_dir is not None) + assert args.checkpoint_dir is not None m5.checkpoint(args.checkpoint_dir) break else: - print('Unknown exit event: %s. Continuing...' - % exit_event.getCause()) + print( + "Unknown exit event: %s. Continuing..." 
% exit_event.getCause() + ) - print('Exiting @ tick %i because %s' % - (m5.curTick(), exit_event.getCause())) + print( + "Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause()) + ) if __name__ == "__m5_main__": diff --git a/configs/example/gpufs/system/amdgpu.py b/configs/example/gpufs/system/amdgpu.py index 26e4809e25..1fd3e2f304 100644 --- a/configs/example/gpufs/system/amdgpu.py +++ b/configs/example/gpufs/system/amdgpu.py @@ -30,10 +30,11 @@ import m5 from m5.objects import * + def createGPU(system, args): - shader = Shader(n_wf = args.wfs_per_simd, - timing = True, - clk_domain = system.clk_domain) + shader = Shader( + n_wf=args.wfs_per_simd, timing=True, clk_domain=system.clk_domain + ) # VIPER GPU protocol implements release consistency at GPU side. So, # we make their writes visible to the global memory and should read @@ -55,31 +56,29 @@ def createGPU(system, args): compute_units = [] for i in range(args.num_compute_units): compute_units.append( - ComputeUnit(cu_id = i, perLaneTLB = per_lane, - num_SIMDs = args.simds_per_cu, - wf_size = args.wf_size, - spbypass_pipe_length = \ - args.sp_bypass_path_length, - dpbypass_pipe_length = \ - args.dp_bypass_path_length, - issue_period = args.issue_period, - coalescer_to_vrf_bus_width = \ - args.glbmem_rd_bus_width, - vrf_to_coalescer_bus_width = \ - args.glbmem_wr_bus_width, - num_global_mem_pipes = \ - args.glb_mem_pipes_per_cu, - num_shared_mem_pipes = \ - args.shr_mem_pipes_per_cu, - n_wf = args.wfs_per_simd, - execPolicy = args.CUExecPolicy, - localMemBarrier = args.LocalMemBarrier, - countPages = args.countPages, - localDataStore = \ - LdsState(banks = args.numLdsBanks, - bankConflictPenalty = \ - args.ldsBankConflictPenalty, - size = args.lds_size))) + ComputeUnit( + cu_id=i, + perLaneTLB=per_lane, + num_SIMDs=args.simds_per_cu, + wf_size=args.wf_size, + spbypass_pipe_length=args.sp_bypass_path_length, + dpbypass_pipe_length=args.dp_bypass_path_length, + issue_period=args.issue_period, + 
coalescer_to_vrf_bus_width=args.glbmem_rd_bus_width, + vrf_to_coalescer_bus_width=args.glbmem_wr_bus_width, + num_global_mem_pipes=args.glb_mem_pipes_per_cu, + num_shared_mem_pipes=args.shr_mem_pipes_per_cu, + n_wf=args.wfs_per_simd, + execPolicy=args.CUExecPolicy, + localMemBarrier=args.LocalMemBarrier, + countPages=args.countPages, + localDataStore=LdsState( + banks=args.numLdsBanks, + bankConflictPenalty=args.ldsBankConflictPenalty, + size=args.lds_size, + ), + ) + ) wavefronts = [] vrfs = [] @@ -88,49 +87,70 @@ def createGPU(system, args): srf_pool_mgrs = [] for j in range(args.simds_per_cu): for k in range(shader.n_wf): - wavefronts.append(Wavefront(simdId = j, wf_slot_id = k, - wf_size = args.wf_size)) + wavefronts.append( + Wavefront(simdId=j, wf_slot_id=k, wf_size=args.wf_size) + ) if args.reg_alloc_policy == "simple": - vrf_pool_mgrs.append(SimplePoolManager(pool_size = \ - args.vreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) - srf_pool_mgrs.append(SimplePoolManager(pool_size = \ - args.sreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) + vrf_pool_mgrs.append( + SimplePoolManager( + pool_size=args.vreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) + srf_pool_mgrs.append( + SimplePoolManager( + pool_size=args.sreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) elif args.reg_alloc_policy == "dynamic": - vrf_pool_mgrs.append(DynPoolManager(pool_size = \ - args.vreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) - srf_pool_mgrs.append(DynPoolManager(pool_size = \ - args.sreg_file_size, - min_alloc = \ - args.vreg_min_alloc)) + vrf_pool_mgrs.append( + DynPoolManager( + pool_size=args.vreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) + srf_pool_mgrs.append( + DynPoolManager( + pool_size=args.sreg_file_size, + min_alloc=args.vreg_min_alloc, + ) + ) - vrfs.append(VectorRegisterFile(simd_id=j, wf_size=args.wf_size, - num_regs=args.vreg_file_size)) + vrfs.append( + VectorRegisterFile( + simd_id=j, + wf_size=args.wf_size, + 
num_regs=args.vreg_file_size, + ) + ) - srfs.append(ScalarRegisterFile(simd_id=j, wf_size=args.wf_size, - num_regs=args.sreg_file_size)) + srfs.append( + ScalarRegisterFile( + simd_id=j, + wf_size=args.wf_size, + num_regs=args.sreg_file_size, + ) + ) compute_units[-1].wavefronts = wavefronts compute_units[-1].vector_register_file = vrfs compute_units[-1].scalar_register_file = srfs - compute_units[-1].register_manager = \ - RegisterManager(policy=args.registerManagerPolicy, - vrf_pool_managers=vrf_pool_mgrs, - srf_pool_managers=srf_pool_mgrs) + compute_units[-1].register_manager = RegisterManager( + policy=args.registerManagerPolicy, + vrf_pool_managers=vrf_pool_mgrs, + srf_pool_managers=srf_pool_mgrs, + ) if args.TLB_prefetch: compute_units[-1].prefetch_depth = args.TLB_prefetch compute_units[-1].prefetch_prev_type = args.pf_type # Attach the LDS and the CU to the bus (actually a Bridge) compute_units[-1].ldsPort = compute_units[-1].ldsBus.cpu_side_port - compute_units[-1].ldsBus.mem_side_port = \ - compute_units[-1].localDataStore.cuPort + compute_units[-1].ldsBus.mem_side_port = compute_units[ + -1 + ].localDataStore.cuPort # Attach compute units to GPU shader.CUs = compute_units @@ -141,10 +161,12 @@ def createGPU(system, args): return shader + def connectGPU(system, args): system.pc.south_bridge.gpu = AMDGPUDevice(pci_func=0, pci_dev=8, pci_bus=0) system.pc.south_bridge.gpu.trace_file = args.gpu_mmio_trace system.pc.south_bridge.gpu.rom_binary = args.gpu_rom - system.pc.south_bridge.gpu.checkpoint_before_mmios = \ + system.pc.south_bridge.gpu.checkpoint_before_mmios = ( args.checkpoint_before_mmios + ) diff --git a/configs/example/gpufs/system/system.py b/configs/example/gpufs/system/system.py index 972a4f98de..a1b59ef20b 100644 --- a/configs/example/gpufs/system/system.py +++ b/configs/example/gpufs/system/system.py @@ -39,20 +39,25 @@ from ruby import Ruby from example.gpufs.Disjoint_VIPER import * + def makeGpuFSSystem(args): # Boot options are standard 
gem5 options plus: # - Framebuffer device emulation 0 to reduce driver code paths. # - Blacklist amdgpu as it cannot (currently) load in KVM CPU. # - Blacklist psmouse as amdgpu driver adds proprietary commands that # cause gem5 to panic. - boot_options = ['earlyprintk=ttyS0', 'console=ttyS0,9600', - 'lpj=7999923', 'root=/dev/sda1', - 'drm_kms_helper.fbdev_emulation=0', - 'modprobe.blacklist=amdgpu', - 'modprobe.blacklist=psmouse'] - cmdline = ' '.join(boot_options) + boot_options = [ + "earlyprintk=ttyS0", + "console=ttyS0,9600", + "lpj=7999923", + "root=/dev/sda1", + "drm_kms_helper.fbdev_emulation=0", + "modprobe.blacklist=amdgpu", + "modprobe.blacklist=psmouse", + ] + cmdline = " ".join(boot_options) - if MemorySize(args.mem_size) < MemorySize('2GB'): + if MemorySize(args.mem_size) < MemorySize("2GB"): panic("Need at least 2GB of system memory to load amdgpu module") # Use the common FSConfig to setup a Linux X86 System @@ -61,30 +66,34 @@ def makeGpuFSSystem(args): if args.second_disk is not None: disks.extend([args.second_disk]) bm = SysConfig(disks=disks, mem=args.mem_size) - system = makeLinuxX86System(test_mem_mode, args.num_cpus, bm, True, - cmdline=cmdline) + system = makeLinuxX86System( + test_mem_mode, args.num_cpus, bm, True, cmdline=cmdline + ) system.workload.object_file = binary(args.kernel) # Set the cache line size for the entire system. system.cache_line_size = args.cacheline_size # Create a top-level voltage and clock domain. - system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) - system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) + system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) + system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain + ) # Create a CPU voltage and clock domain. 
system.cpu_voltage_domain = VoltageDomain() - system.cpu_clk_domain = SrcClockDomain(clock = args.cpu_clock, - voltage_domain = - system.cpu_voltage_domain) + system.cpu_clk_domain = SrcClockDomain( + clock=args.cpu_clock, voltage_domain=system.cpu_voltage_domain + ) # Setup VGA ROM region - system.shadow_rom_ranges = [AddrRange(0xc0000, size = Addr('128kB'))] + system.shadow_rom_ranges = [AddrRange(0xC0000, size=Addr("128kB"))] # Create specified number of CPUs. GPUFS really only needs one. - system.cpu = [X86KvmCPU(clk_domain=system.cpu_clk_domain, cpu_id=i) - for i in range(args.num_cpus)] + system.cpu = [ + X86KvmCPU(clk_domain=system.cpu_clk_domain, cpu_id=i) + for i in range(args.num_cpus) + ] system.kvm_vm = KvmVM() # Create AMDGPU and attach to southbridge @@ -96,16 +105,18 @@ def makeGpuFSSystem(args): system.cpu.append(shader) # This arbitrary address is something in the X86 I/O hole - hsapp_gpu_map_paddr = 0xe00000000 + hsapp_gpu_map_paddr = 0xE00000000 hsapp_pt_walker = VegaPagetableWalker() - gpu_hsapp = HSAPacketProcessor(pioAddr=hsapp_gpu_map_paddr, - numHWQueues=args.num_hw_queues, - walker=hsapp_pt_walker) + gpu_hsapp = HSAPacketProcessor( + pioAddr=hsapp_gpu_map_paddr, + numHWQueues=args.num_hw_queues, + walker=hsapp_pt_walker, + ) dispatcher = GPUDispatcher() cp_pt_walker = VegaPagetableWalker() - gpu_cmd_proc = GPUCommandProcessor(hsapp=gpu_hsapp, - dispatcher=dispatcher, - walker=cp_pt_walker) + gpu_cmd_proc = GPUCommandProcessor( + hsapp=gpu_hsapp, dispatcher=dispatcher, walker=cp_pt_walker + ) shader.dispatcher = dispatcher shader.gpu_cmd_proc = gpu_cmd_proc @@ -163,16 +174,18 @@ def makeGpuFSSystem(args): # Full system needs special TLBs for SQC, Scalar, and vector data ports args.full_system = True - GPUTLBConfig.config_tlb_hierarchy(args, system, shader_idx, - system.pc.south_bridge.gpu, True) + GPUTLBConfig.config_tlb_hierarchy( + args, system, shader_idx, system.pc.south_bridge.gpu, True + ) # Create Ruby system using disjoint VIPER 
topology system.ruby = Disjoint_VIPER() system.ruby.create(args, system, system.iobus, system._dma_ports) # Create a seperate clock domain for Ruby - system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) + system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain + ) for (i, cpu) in enumerate(system.cpu): # Break once we reach the shader "CPU" @@ -191,9 +204,21 @@ def makeGpuFSSystem(args): for j in range(len(system.cpu[i].isa)): system.cpu[i].isa[j].vendor_string = "AuthenticAMD" - gpu_port_idx = len(system.ruby._cpu_ports) \ - - args.num_compute_units - args.num_sqc \ - - args.num_scalar_cache + if args.host_parallel: + # To get the KVM CPUs to run on different host CPUs, specify a + # different event queue for each CPU. The last CPU is a GPU + # shader and should be skipped. + for i, cpu in enumerate(system.cpu[:-1]): + for obj in cpu.descendants(): + obj.eventq_index = 0 + cpu.eventq_index = i + 1 + + gpu_port_idx = ( + len(system.ruby._cpu_ports) + - args.num_compute_units + - args.num_sqc + - args.num_scalar_cache + ) gpu_port_idx = gpu_port_idx - args.num_cp * 2 # Connect token ports. For this we need to search through the list of all @@ -202,8 +227,9 @@ def makeGpuFSSystem(args): token_port_idx = 0 for i in range(len(system.ruby._cpu_ports)): if isinstance(system.ruby._cpu_ports[i], VIPERCoalescer): - system.cpu[shader_idx].CUs[token_port_idx].gmTokenPort = \ - system.ruby._cpu_ports[i].gmTokenPort + system.cpu[shader_idx].CUs[ + token_port_idx + ].gmTokenPort = system.ruby._cpu_ports[i].gmTokenPort token_port_idx += 1 wavefront_size = args.wf_size @@ -211,22 +237,25 @@ def makeGpuFSSystem(args): # The pipeline issues wavefront_size number of uncoalesced requests # in one GPU issue cycle. Hence wavefront_size mem ports. 
for j in range(wavefront_size): - system.cpu[shader_idx].CUs[i].memory_port[j] = \ - system.ruby._cpu_ports[gpu_port_idx].in_ports[j] + system.cpu[shader_idx].CUs[i].memory_port[ + j + ] = system.ruby._cpu_ports[gpu_port_idx].in_ports[j] gpu_port_idx += 1 for i in range(args.num_compute_units): if i > 0 and not i % args.cu_per_sqc: gpu_port_idx += 1 - system.cpu[shader_idx].CUs[i].sqc_port = \ - system.ruby._cpu_ports[gpu_port_idx].in_ports + system.cpu[shader_idx].CUs[i].sqc_port = system.ruby._cpu_ports[ + gpu_port_idx + ].in_ports gpu_port_idx = gpu_port_idx + 1 for i in range(args.num_compute_units): if i > 0 and not i % args.cu_per_scalar_cache: gpu_port_idx += 1 - system.cpu[shader_idx].CUs[i].scalar_port = \ - system.ruby._cpu_ports[gpu_port_idx].in_ports + system.cpu[shader_idx].CUs[i].scalar_port = system.ruby._cpu_ports[ + gpu_port_idx + ].in_ports gpu_port_idx = gpu_port_idx + 1 return system diff --git a/configs/example/gpufs/vega10_kvm.py b/configs/example/gpufs/vega10_kvm.py index baee077a0f..48e2d69516 100644 --- a/configs/example/gpufs/vega10_kvm.py +++ b/configs/example/gpufs/vega10_kvm.py @@ -41,9 +41,9 @@ from common import GPUTLBOptions from ruby import Ruby -demo_runscript = '''\ +demo_runscript = """\ export LD_LIBRARY_PATH=/opt/rocm/lib:$LD_LIBRARY_PATH -export HSA_ENABLE_SDMA=0 +export HSA_ENABLE_INTERRUPT=0 dmesg -n3 dd if=/root/roms/vega10.rom of=/dev/mem bs=1k seek=768 count=128 if [ ! 
-f /lib/modules/`uname -r`/updates/dkms/amdgpu.ko ]; then @@ -56,13 +56,17 @@ echo "{}" | base64 -d > myapp chmod +x myapp ./myapp {} /sbin/m5 exit -''' +""" + def addDemoOptions(parser): - parser.add_argument("-a", "--app", default=None, - help="GPU application to run") - parser.add_argument("-o", "--opts", default="", - help="GPU application arguments") + parser.add_argument( + "-a", "--app", default=None, help="GPU application to run" + ) + parser.add_argument( + "-o", "--opts", default="", help="GPU application arguments" + ) + if __name__ == "__m5_main__": parser = argparse.ArgumentParser() @@ -87,20 +91,22 @@ if __name__ == "__m5_main__": print("No disk path given. Use %s --disk-image " % sys.argv[0]) sys.exit(1) elif args.gpu_mmio_trace is None: - print("No MMIO trace path. Use %s --gpu-mmio-trace " - % sys.argv[0]) + print( + "No MMIO trace path. Use %s --gpu-mmio-trace " % sys.argv[0] + ) sys.exit(1) elif not os.path.isfile(args.app): print("Could not find applcation", args.app) sys.exit(1) - with open(os.path.abspath(args.app), 'rb') as binfile: + with open(os.path.abspath(args.app), "rb") as binfile: encodedBin = base64.b64encode(binfile.read()).decode() _, tempRunscript = tempfile.mkstemp() - with open(tempRunscript, 'w') as b64file: - runscriptStr = demo_runscript.format(args.app, args.opts, encodedBin, - args.opts) + with open(tempRunscript, "w") as b64file: + runscriptStr = demo_runscript.format( + args.app, args.opts, encodedBin, args.opts + ) b64file.write(runscriptStr) if args.second_disk == None: @@ -108,12 +114,12 @@ if __name__ == "__m5_main__": # Defaults for Vega10 args.ruby = True - args.cpu_type = 'X86KvmCPU' + args.cpu_type = "X86KvmCPU" args.num_cpus = 1 - args.mem_size = '3GB' + args.mem_size = "3GB" args.dgpu = True - args.dgpu_mem_size = '16GB' - args.dgpu_start = '0GB' + args.dgpu_mem_size = "16GB" + args.dgpu_start = "0GB" args.checkpoint_restore = 0 args.disjoint = True args.timing_gpu = True diff --git 
a/configs/example/hmc_hello.py b/configs/example/hmc_hello.py index 11d52c037b..bb1711b977 100644 --- a/configs/example/hmc_hello.py +++ b/configs/example/hmc_hello.py @@ -36,7 +36,10 @@ import argparse import m5 from m5.objects import * from m5.util import * -addToPath('../') +from gem5.runtime import get_runtime_isa + +addToPath("../") + from common import MemConfig from common import HMC @@ -48,10 +51,10 @@ options = parser.parse_args() # create the system we are going to simulate system = System() # use timing mode for the interaction between requestor-responder ports -system.mem_mode = 'timing' +system.mem_mode = "timing" # set the clock frequency of the system -clk = '1GHz' -vd = VoltageDomain(voltage='1V') +clk = "1GHz" +vd = VoltageDomain(voltage="1V") system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd) # create a simple CPU system.cpu = TimingSimpleCPU() @@ -66,9 +69,9 @@ system.cpu.createInterruptController() # functional-only port to allow the system to read and write memory. system.system_port = system.membus.cpu_side_ports # get ISA for the binary to run. 
-isa = str(m5.defines.buildEnv['TARGET_ISA']).lower() +isa = get_runtime_isa() # run 'hello' and use the compiled ISA to find the binary -binary = 'tests/test-progs/hello/bin/' + isa + '/linux/hello' +binary = "tests/test-progs/hello/bin/" + isa.name.lower() + "/linux/hello" # create a process for a simple "Hello World" application process = Process() # cmd is a list which begins with the executable (like argv) diff --git a/configs/example/hmctest.py b/configs/example/hmctest.py index 429ea4a955..eca3c28465 100644 --- a/configs/example/hmctest.py +++ b/configs/example/hmctest.py @@ -1,4 +1,3 @@ - import sys import argparse import subprocess @@ -8,34 +7,55 @@ import m5 from m5.objects import * from m5.util import * -addToPath('../') +addToPath("../") from common import MemConfig from common import HMC def add_options(parser): - parser.add_argument("--external-memory-system", default=0, action="store", - type=int, help="External memory system") + parser.add_argument( + "--external-memory-system", + default=0, + action="store", + type=int, + help="External memory system", + ) # TLM related options, currently optional in configs/common/MemConfig.py - parser.add_argument("--tlm-memory", action="store_true", help="use\ + parser.add_argument( + "--tlm-memory", + action="store_true", + help="use\ external port for SystemC TLM co-simulation. 
Default:\ - no") + no", + ) # Elastic traces related options, currently optional in # configs/common/MemConfig.py - parser.add_argument("--elastic-trace-en", action="store_true", - help="enable capture of data dependency and\ + parser.add_argument( + "--elastic-trace-en", + action="store_true", + help="enable capture of data dependency and\ instruction fetch traces using elastic trace\ - probe.\nDefault: no") + probe.\nDefault: no", + ) # Options related to traffic generation - parser.add_argument("--num-tgen", default=4, action="store", type=int, - choices=[4], help="number of traffic generators.\ - Right now this script supports only 4.\nDefault: 4") - parser.add_argument("--tgen-cfg-file", - default="./configs/example/hmc_tgen.cfg", - type=str, help="Traffic generator(s) configuration\ + parser.add_argument( + "--num-tgen", + default=4, + action="store", + type=int, + choices=[4], + help="number of traffic generators.\ + Right now this script supports only 4.\nDefault: 4", + ) + parser.add_argument( + "--tgen-cfg-file", + default="./configs/example/hmc_tgen.cfg", + type=str, + help="Traffic generator(s) configuration\ file. 
Note: this script uses the same configuration\ - file for all traffic generators") + file for all traffic generators", + ) # considering 4GB HMC device with following parameters @@ -49,14 +69,16 @@ def build_system(options): # create the system we are going to simulate system = System() # use timing mode for the interaction between requestor-responder ports - system.mem_mode = 'timing' + system.mem_mode = "timing" # set the clock frequency of the system - clk = '100GHz' - vd = VoltageDomain(voltage='1V') + clk = "100GHz" + vd = VoltageDomain(voltage="1V") system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd) # add traffic generators to the system - system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in - range(options.num_tgen)] + system.tgen = [ + TrafficGen(config_file=options.tgen_cfg_file) + for i in range(options.num_tgen) + ] # Config memory system with given HMC arch MemConfig.config_mem(options, system) # Connect the traffic generatiors @@ -66,7 +88,7 @@ def build_system(options): # connect the system port even if it is not used in this example system.system_port = system.membus.cpu_side_ports if options.arch == "mixed": - for i in range(int(options.num_tgen/2)): + for i in range(int(options.num_tgen / 2)): system.tgen[i].port = system.membus.cpu_side_ports hh = system.hmc_host if options.enable_global_monitor: @@ -92,8 +114,10 @@ def build_system(options): def main(): - parser = argparse.ArgumentParser(description="Simple system using HMC as\ - main memory") + parser = argparse.ArgumentParser( + description="Simple system using HMC as\ + main memory" + ) HMC.add_options(parser) add_options(parser) options = parser.parse_args() @@ -104,9 +128,10 @@ def main(): print("Beginning simulation!") event = m5.simulate(10000000000) m5.stats.dump() - print('Exiting @ tick %i because %s (exit code is %i)' % (m5.curTick(), - event.getCause(), - event.getCode())) + print( + "Exiting @ tick %i because %s (exit code is %i)" + % (m5.curTick(), 
event.getCause(), event.getCode()) + ) print("Done") diff --git a/configs/example/hsaTopology.py b/configs/example/hsaTopology.py index b11a8df0e4..691e8c2a58 100644 --- a/configs/example/hsaTopology.py +++ b/configs/example/hsaTopology.py @@ -36,17 +36,20 @@ from os.path import isdir from shutil import rmtree, copyfile from m5.util.convert import toFrequency, toMemorySize + def file_append(path, contents): - with open(joinpath(*path), 'a') as f: + with open(joinpath(*path), "a") as f: f.write(str(contents)) f.flush() fsync(f.fileno()) + def remake_dir(path): if isdir(path): rmtree(path) makedirs(path) + # This fakes out a dGPU setup so the runtime operates correctly. The spoofed # system has a single dGPU and a single socket CPU. Note that more complex # topologies (multi-GPU, multi-socket CPUs) need to have a different setup @@ -57,104 +60,111 @@ def remake_dir(path): # future. We might need to scrub through this and extract the appropriate # fields from the simulator in the future. def createVegaTopology(options): - topology_dir = joinpath(m5.options.outdir, \ - 'fs/sys/devices/virtual/kfd/kfd/topology') + topology_dir = joinpath( + m5.options.outdir, "fs/sys/devices/virtual/kfd/kfd/topology" + ) remake_dir(topology_dir) - amdgpu_dir = joinpath(m5.options.outdir, \ - 'fs/sys/module/amdgpu/parameters') + amdgpu_dir = joinpath(m5.options.outdir, "fs/sys/module/amdgpu/parameters") remake_dir(amdgpu_dir) - pci_ids_dir = joinpath(m5.options.outdir, \ - 'fs/usr/share/hwdata/') + pci_ids_dir = joinpath(m5.options.outdir, "fs/usr/share/hwdata/") remake_dir(pci_ids_dir) # Vega reported VM size in GB. Used to reserve an allocation from CPU # to implement SVM (i.e. 
GPUVM64 pointers and X86 pointers agree) - file_append((amdgpu_dir, 'vm_size'), 256) + file_append((amdgpu_dir, "vm_size"), 256) # Ripped from real Vega platform to appease KMT version checks - file_append((topology_dir, 'generation_id'), 2) + file_append((topology_dir, "generation_id"), 2) # Set up system properties. Regiter as ast-rocm server - sys_prop = 'platform_oem 35498446626881\n' + \ - 'platform_id 71791775140929\n' + \ - 'platform_rev 2\n' - file_append((topology_dir, 'system_properties'), sys_prop) + sys_prop = ( + "platform_oem 35498446626881\n" + + "platform_id 71791775140929\n" + + "platform_rev 2\n" + ) + file_append((topology_dir, "system_properties"), sys_prop) # Populate the topology tree # Our dGPU system is two nodes. Node 0 is a CPU and Node 1 is a dGPU - node_dir = joinpath(topology_dir, 'nodes/0') + node_dir = joinpath(topology_dir, "nodes/0") remake_dir(node_dir) # Register as a CPU - file_append((node_dir, 'gpu_id'), 0) - file_append((node_dir, 'name'), '') + file_append((node_dir, "gpu_id"), 0) + file_append((node_dir, "name"), "") # CPU links. Only thing that matters is we tell the runtime that GPU is # connected through PCIe to CPU socket 0. 
io_links = 1 - io_dir = joinpath(node_dir, 'io_links/0') + io_dir = joinpath(node_dir, "io_links/0") remake_dir(io_dir) - io_prop = 'type 2\n' + \ - 'version_major 0\n' + \ - 'version_minor 0\n' + \ - 'node_from 0\n' + \ - 'node_to 1\n' + \ - 'weight 20\n' + \ - 'min_latency 0\n' + \ - 'max_latency 0\n' + \ - 'min_bandwidth 0\n' + \ - 'max_bandwidth 0\n' + \ - 'recommended_transfer_size 0\n' + \ - 'flags 13\n' - file_append((io_dir, 'properties'), io_prop) + io_prop = ( + "type 2\n" + + "version_major 0\n" + + "version_minor 0\n" + + "node_from 0\n" + + "node_to 1\n" + + "weight 20\n" + + "min_latency 0\n" + + "max_latency 0\n" + + "min_bandwidth 0\n" + + "max_bandwidth 0\n" + + "recommended_transfer_size 0\n" + + "flags 13\n" + ) + file_append((io_dir, "properties"), io_prop) # Populate CPU node properties - node_prop = 'cpu_cores_count %s\n' % options.num_cpus + \ - 'simd_count 0\n' + \ - 'mem_banks_count 1\n' + \ - 'caches_count 0\n' + \ - 'io_links_count %s\n' % io_links + \ - 'cpu_core_id_base 0\n' + \ - 'simd_id_base 0\n' + \ - 'max_waves_per_simd 0\n' + \ - 'lds_size_in_kb 0\n' + \ - 'gds_size_in_kb 0\n' + \ - 'wave_front_size 64\n' + \ - 'array_count 0\n' + \ - 'simd_arrays_per_engine 0\n' + \ - 'cu_per_simd_array 0\n' + \ - 'simd_per_cu 0\n' + \ - 'max_slots_scratch_cu 0\n' + \ - 'vendor_id 0\n' + \ - 'device_id 0\n' + \ - 'location_id 0\n' + \ - 'drm_render_minor 0\n' + \ - 'max_engine_clk_ccompute 3400\n' + node_prop = ( + "cpu_cores_count %s\n" % options.num_cpus + + "simd_count 0\n" + + "mem_banks_count 1\n" + + "caches_count 0\n" + + "io_links_count %s\n" % io_links + + "cpu_core_id_base 0\n" + + "simd_id_base 0\n" + + "max_waves_per_simd 0\n" + + "lds_size_in_kb 0\n" + + "gds_size_in_kb 0\n" + + "wave_front_size 64\n" + + "array_count 0\n" + + "simd_arrays_per_engine 0\n" + + "cu_per_simd_array 0\n" + + "simd_per_cu 0\n" + + "max_slots_scratch_cu 0\n" + + "vendor_id 0\n" + + "device_id 0\n" + + "location_id 0\n" + + "drm_render_minor 0\n" + + 
"max_engine_clk_ccompute 3400\n" + ) - file_append((node_dir, 'properties'), node_prop) + file_append((node_dir, "properties"), node_prop) # CPU memory reporting - mem_dir = joinpath(node_dir, 'mem_banks/0') + mem_dir = joinpath(node_dir, "mem_banks/0") remake_dir(mem_dir) # Heap type value taken from real system, heap type values: # https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/roc-4.0.x/include/hsakmttypes.h#L317 - mem_prop = 'heap_type 0\n' + \ - 'size_in_bytes 33704329216\n' + \ - 'flags 0\n' + \ - 'width 72\n' + \ - 'mem_clk_max 2400\n' + mem_prop = ( + "heap_type 0\n" + + "size_in_bytes 33704329216\n" + + "flags 0\n" + + "width 72\n" + + "mem_clk_max 2400\n" + ) - file_append((mem_dir, 'properties'), mem_prop) + file_append((mem_dir, "properties"), mem_prop) # Build the GPU node - node_dir = joinpath(topology_dir, 'nodes/1') + node_dir = joinpath(topology_dir, "nodes/1") remake_dir(node_dir) # Register as a Vega - file_append((node_dir, 'gpu_id'), 22124) - file_append((node_dir, 'name'), 'Vega\n') + file_append((node_dir, "gpu_id"), 22124) + file_append((node_dir, "name"), "Vega\n") # Should be the same as the render driver filename (dri/renderD) drm_num = 128 @@ -166,70 +176,77 @@ def createVegaTopology(options): # GPU links. Only thing that matters is we tell the runtime that GPU is # connected through PCIe to CPU socket 0. 
io_links = 1 - io_dir = joinpath(node_dir, 'io_links/0') + io_dir = joinpath(node_dir, "io_links/0") remake_dir(io_dir) - io_prop = 'type 2\n' + \ - 'version_major 0\n' + \ - 'version_minor 0\n' + \ - 'node_from 1\n' + \ - 'node_to 0\n' + \ - 'weight 20\n' + \ - 'min_latency 0\n' + \ - 'max_latency 0\n' + \ - 'min_bandwidth 0\n' + \ - 'max_bandwidth 0\n' + \ - 'recommended_transfer_size 0\n' + \ - 'flags 1\n' - file_append((io_dir, 'properties'), io_prop) + io_prop = ( + "type 2\n" + + "version_major 0\n" + + "version_minor 0\n" + + "node_from 1\n" + + "node_to 0\n" + + "weight 20\n" + + "min_latency 0\n" + + "max_latency 0\n" + + "min_bandwidth 0\n" + + "max_bandwidth 0\n" + + "recommended_transfer_size 0\n" + + "flags 1\n" + ) + file_append((io_dir, "properties"), io_prop) # Populate GPU node properties cu_scratch = options.simds_per_cu * options.wfs_per_simd - node_prop = 'cpu_cores_count 0\n' + \ - 'simd_count 256\n' + \ - 'mem_banks_count 1\n' + \ - 'caches_count %s\n' % caches + \ - 'io_links_count %s\n' % io_links + \ - 'cpu_core_id_base 0\n' + \ - 'simd_id_base 2147487744\n' + \ - 'max_waves_per_simd 10\n' + \ - 'lds_size_in_kb 64\n' + \ - 'gds_size_in_kb 0\n' + \ - 'wave_front_size 64\n' + \ - 'array_count 4\n' + \ - 'simd_arrays_per_engine 1\n' + \ - 'cu_per_simd_array 16\n' + \ - 'simd_per_cu 4\n' + \ - 'max_slots_scratch_cu %s\n' % cu_scratch + \ - 'vendor_id 4098\n' + \ - 'device_id 26720\n' + \ - 'location_id 1024\n' + \ - 'drm_render_minor %s\n' % drm_num + \ - 'hive_id 0\n' + \ - 'num_sdma_engines 2\n' + \ - 'num_sdma_xgmi_engines 0\n' + \ - 'max_engine_clk_fcompute 1500\n' + \ - 'local_mem_size 17163091968\n' + \ - 'fw_version 421\n' + \ - 'capability 238208\n' + \ - 'debug_prop 32768\n' + \ - 'sdma_fw_version 430\n' + \ - 'max_engine_clk_ccompute 3400\n' + node_prop = ( + "cpu_cores_count 0\n" + + "simd_count 256\n" + + "mem_banks_count 1\n" + + "caches_count %s\n" % caches + + "io_links_count %s\n" % io_links + + "cpu_core_id_base 0\n" + + 
"simd_id_base 2147487744\n" + + "max_waves_per_simd 10\n" + + "lds_size_in_kb 64\n" + + "gds_size_in_kb 0\n" + + "wave_front_size 64\n" + + "array_count 4\n" + + "simd_arrays_per_engine 1\n" + + "cu_per_simd_array 16\n" + + "simd_per_cu 4\n" + + "max_slots_scratch_cu %s\n" % cu_scratch + + "vendor_id 4098\n" + + "device_id 26720\n" + + "location_id 1024\n" + + "drm_render_minor %s\n" % drm_num + + "hive_id 0\n" + + "num_sdma_engines 2\n" + + "num_sdma_xgmi_engines 0\n" + + "max_engine_clk_fcompute 1500\n" + + "local_mem_size 17163091968\n" + + "fw_version 421\n" + + "capability 238208\n" + + "debug_prop 32768\n" + + "sdma_fw_version 430\n" + + "max_engine_clk_ccompute 3400\n" + ) - file_append((node_dir, 'properties'), node_prop) + file_append((node_dir, "properties"), node_prop) # Fiji HBM reporting # TODO: Extract size, clk, and width from sim paramters - mem_dir = joinpath(node_dir, 'mem_banks/0') + mem_dir = joinpath(node_dir, "mem_banks/0") remake_dir(mem_dir) # Heap type value taken from real system, heap type values: # https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/roc-4.0.x/include/hsakmttypes.h#L317 - mem_prop = 'heap_type 1\n' + \ - 'size_in_bytes 17163091968\n' + \ - 'flags 0\n' + \ - 'width 2048\n' + \ - 'mem_clk_max 945\n' + mem_prop = ( + "heap_type 1\n" + + "size_in_bytes 17163091968\n" + + "flags 0\n" + + "width 2048\n" + + "mem_clk_max 945\n" + ) + + file_append((mem_dir, "properties"), mem_prop) - file_append((mem_dir, 'properties'), mem_prop) # This fakes out a dGPU setup so the runtime correctly operations. The spoofed # system has a single dGPU and a single socket CPU. Note that more complex @@ -241,100 +258,108 @@ def createVegaTopology(options): # future. We might need to scrub through this and extract the appropriate # fields from the simulator in the future. 
def createFijiTopology(options): - topology_dir = joinpath(m5.options.outdir, \ - 'fs/sys/devices/virtual/kfd/kfd/topology') + topology_dir = joinpath( + m5.options.outdir, "fs/sys/devices/virtual/kfd/kfd/topology" + ) remake_dir(topology_dir) - amdgpu_dir = joinpath(m5.options.outdir, \ - 'fs/sys/module/amdgpu/parameters') + amdgpu_dir = joinpath(m5.options.outdir, "fs/sys/module/amdgpu/parameters") remake_dir(amdgpu_dir) # Fiji reported VM size in GB. Used to reserve an allocation from CPU # to implement SVM (i.e. GPUVM64 pointers and X86 pointers agree) - file_append((amdgpu_dir, 'vm_size'), 256) + file_append((amdgpu_dir, "vm_size"), 256) # Ripped from real Fiji platform to appease KMT version checks - file_append((topology_dir, 'generation_id'), 2) + file_append((topology_dir, "generation_id"), 2) # Set up system properties. Regiter as ast-rocm server - sys_prop = 'platform_oem 35498446626881\n' + \ - 'platform_id 71791775140929\n' + \ - 'platform_rev 2\n' - file_append((topology_dir, 'system_properties'), sys_prop) + sys_prop = ( + "platform_oem 35498446626881\n" + + "platform_id 71791775140929\n" + + "platform_rev 2\n" + ) + file_append((topology_dir, "system_properties"), sys_prop) # Populate the topology tree # Our dGPU system is two nodes. Node 0 is a CPU and Node 1 is a dGPU - node_dir = joinpath(topology_dir, 'nodes/0') + node_dir = joinpath(topology_dir, "nodes/0") remake_dir(node_dir) # Register as a CPU - file_append((node_dir, 'gpu_id'), 0) - file_append((node_dir, 'name'), '') + file_append((node_dir, "gpu_id"), 0) + file_append((node_dir, "name"), "") # CPU links. Only thing that matters is we tell the runtime that GPU is # connected through PCIe to CPU socket 0. 
io_links = 1 - io_dir = joinpath(node_dir, 'io_links/0') + io_dir = joinpath(node_dir, "io_links/0") remake_dir(io_dir) - io_prop = 'type 2\n' + \ - 'version_major 0\n' + \ - 'version_minor 0\n' + \ - 'node_from 0\n' + \ - 'node_to 1\n' + \ - 'weight 20\n' + \ - 'min_latency 0\n' + \ - 'max_latency 0\n' + \ - 'min_bandwidth 0\n' + \ - 'max_bandwidth 0\n' + \ - 'recommended_transfer_size 0\n' + \ - 'flags 13\n' - file_append((io_dir, 'properties'), io_prop) + io_prop = ( + "type 2\n" + + "version_major 0\n" + + "version_minor 0\n" + + "node_from 0\n" + + "node_to 1\n" + + "weight 20\n" + + "min_latency 0\n" + + "max_latency 0\n" + + "min_bandwidth 0\n" + + "max_bandwidth 0\n" + + "recommended_transfer_size 0\n" + + "flags 13\n" + ) + file_append((io_dir, "properties"), io_prop) # Populate CPU node properties - node_prop = 'cpu_cores_count %s\n' % options.num_cpus + \ - 'simd_count 0\n' + \ - 'mem_banks_count 1\n' + \ - 'caches_count 0\n' + \ - 'io_links_count %s\n' % io_links + \ - 'cpu_core_id_base 0\n' + \ - 'simd_id_base 0\n' + \ - 'max_waves_per_simd 0\n' + \ - 'lds_size_in_kb 0\n' + \ - 'gds_size_in_kb 0\n' + \ - 'wave_front_size 64\n' + \ - 'array_count 0\n' + \ - 'simd_arrays_per_engine 0\n' + \ - 'cu_per_simd_array 0\n' + \ - 'simd_per_cu 0\n' + \ - 'max_slots_scratch_cu 0\n' + \ - 'vendor_id 0\n' + \ - 'device_id 0\n' + \ - 'location_id 0\n' + \ - 'drm_render_minor 0\n' + \ - 'max_engine_clk_ccompute 3400\n' + node_prop = ( + "cpu_cores_count %s\n" % options.num_cpus + + "simd_count 0\n" + + "mem_banks_count 1\n" + + "caches_count 0\n" + + "io_links_count %s\n" % io_links + + "cpu_core_id_base 0\n" + + "simd_id_base 0\n" + + "max_waves_per_simd 0\n" + + "lds_size_in_kb 0\n" + + "gds_size_in_kb 0\n" + + "wave_front_size 64\n" + + "array_count 0\n" + + "simd_arrays_per_engine 0\n" + + "cu_per_simd_array 0\n" + + "simd_per_cu 0\n" + + "max_slots_scratch_cu 0\n" + + "vendor_id 0\n" + + "device_id 0\n" + + "location_id 0\n" + + "drm_render_minor 0\n" + + 
"max_engine_clk_ccompute 3400\n" + ) - file_append((node_dir, 'properties'), node_prop) + file_append((node_dir, "properties"), node_prop) # CPU memory reporting - mem_dir = joinpath(node_dir, 'mem_banks/0') + mem_dir = joinpath(node_dir, "mem_banks/0") remake_dir(mem_dir) # Heap type value taken from real system, heap type values: # https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/roc-4.0.x/include/hsakmttypes.h#L317 - mem_prop = 'heap_type 0\n' + \ - 'size_in_bytes 33704329216\n' + \ - 'flags 0\n' + \ - 'width 72\n' + \ - 'mem_clk_max 2400\n' + mem_prop = ( + "heap_type 0\n" + + "size_in_bytes 33704329216\n" + + "flags 0\n" + + "width 72\n" + + "mem_clk_max 2400\n" + ) - file_append((mem_dir, 'properties'), mem_prop) + file_append((mem_dir, "properties"), mem_prop) # Build the GPU node - node_dir = joinpath(topology_dir, 'nodes/1') + node_dir = joinpath(topology_dir, "nodes/1") remake_dir(node_dir) # Register as a Fiji - file_append((node_dir, 'gpu_id'), 50156) - file_append((node_dir, 'name'), 'Fiji\n') + file_append((node_dir, "gpu_id"), 50156) + file_append((node_dir, "name"), "Fiji\n") # Should be the same as the render driver filename (dri/renderD) drm_num = 128 @@ -346,97 +371,108 @@ def createFijiTopology(options): # GPU links. Only thing that matters is we tell the runtime that GPU is # connected through PCIe to CPU socket 0. 
io_links = 1 - io_dir = joinpath(node_dir, 'io_links/0') + io_dir = joinpath(node_dir, "io_links/0") remake_dir(io_dir) - io_prop = 'type 2\n' + \ - 'version_major 0\n' + \ - 'version_minor 0\n' + \ - 'node_from 1\n' + \ - 'node_to 0\n' + \ - 'weight 20\n' + \ - 'min_latency 0\n' + \ - 'max_latency 0\n' + \ - 'min_bandwidth 0\n' + \ - 'max_bandwidth 0\n' + \ - 'recommended_transfer_size 0\n' + \ - 'flags 1\n' - file_append((io_dir, 'properties'), io_prop) + io_prop = ( + "type 2\n" + + "version_major 0\n" + + "version_minor 0\n" + + "node_from 1\n" + + "node_to 0\n" + + "weight 20\n" + + "min_latency 0\n" + + "max_latency 0\n" + + "min_bandwidth 0\n" + + "max_bandwidth 0\n" + + "recommended_transfer_size 0\n" + + "flags 1\n" + ) + file_append((io_dir, "properties"), io_prop) # Populate GPU node properties - node_prop = 'cpu_cores_count 0\n' + \ - 'simd_count %s\n' \ - % (options.num_compute_units * options.simds_per_cu) + \ - 'mem_banks_count 1\n' + \ - 'caches_count %s\n' % caches + \ - 'io_links_count %s\n' % io_links + \ - 'cpu_core_id_base 0\n' + \ - 'simd_id_base 2147487744\n' + \ - 'max_waves_per_simd %s\n' % options.wfs_per_simd + \ - 'lds_size_in_kb %s\n' % int(options.lds_size / 1024) + \ - 'gds_size_in_kb 0\n' + \ - 'wave_front_size %s\n' % options.wf_size + \ - 'array_count 4\n' + \ - 'simd_arrays_per_engine %s\n' % options.sa_per_complex + \ - 'cu_per_simd_array %s\n' % options.cu_per_sa + \ - 'simd_per_cu %s\n' % options.simds_per_cu + \ - 'max_slots_scratch_cu 32\n' + \ - 'vendor_id 4098\n' + \ - 'device_id 29440\n' + \ - 'location_id 512\n' + \ - 'drm_render_minor %s\n' % drm_num + \ - 'max_engine_clk_fcompute %s\n' \ - % int(toFrequency(options.gpu_clock) / 1e6) + \ - 'local_mem_size 4294967296\n' + \ - 'fw_version 730\n' + \ - 'capability 4736\n' + \ - 'max_engine_clk_ccompute %s\n' \ - % int(toFrequency(options.CPUClock) / 1e6) + node_prop = ( + "cpu_cores_count 0\n" + + "simd_count %s\n" + % (options.num_compute_units * options.simds_per_cu) + + 
"mem_banks_count 1\n" + + "caches_count %s\n" % caches + + "io_links_count %s\n" % io_links + + "cpu_core_id_base 0\n" + + "simd_id_base 2147487744\n" + + "max_waves_per_simd %s\n" % options.wfs_per_simd + + "lds_size_in_kb %s\n" % int(options.lds_size / 1024) + + "gds_size_in_kb 0\n" + + "wave_front_size %s\n" % options.wf_size + + "array_count 4\n" + + "simd_arrays_per_engine %s\n" % options.sa_per_complex + + "cu_per_simd_array %s\n" % options.cu_per_sa + + "simd_per_cu %s\n" % options.simds_per_cu + + "max_slots_scratch_cu 32\n" + + "vendor_id 4098\n" + + "device_id 29440\n" + + "location_id 512\n" + + "drm_render_minor %s\n" % drm_num + + "max_engine_clk_fcompute %s\n" + % int(toFrequency(options.gpu_clock) / 1e6) + + "local_mem_size 4294967296\n" + + "fw_version 730\n" + + "capability 4736\n" + + "max_engine_clk_ccompute %s\n" + % int(toFrequency(options.CPUClock) / 1e6) + ) - file_append((node_dir, 'properties'), node_prop) + file_append((node_dir, "properties"), node_prop) # Fiji HBM reporting # TODO: Extract size, clk, and width from sim paramters - mem_dir = joinpath(node_dir, 'mem_banks/0') + mem_dir = joinpath(node_dir, "mem_banks/0") remake_dir(mem_dir) # Heap type value taken from real system, heap type values: # https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/roc-4.0.x/include/hsakmttypes.h#L317 - mem_prop = 'heap_type 1\n' + \ - 'size_in_bytes 4294967296\n' + \ - 'flags 0\n' + \ - 'width 4096\n' + \ - 'mem_clk_max 500\n' + mem_prop = ( + "heap_type 1\n" + + "size_in_bytes 4294967296\n" + + "flags 0\n" + + "width 4096\n" + + "mem_clk_max 500\n" + ) - file_append((mem_dir, 'properties'), mem_prop) + file_append((mem_dir, "properties"), mem_prop) def createCarrizoTopology(options): - topology_dir = joinpath(m5.options.outdir, \ - 'fs/sys/devices/virtual/kfd/kfd/topology') + topology_dir = joinpath( + m5.options.outdir, "fs/sys/devices/virtual/kfd/kfd/topology" + ) remake_dir(topology_dir) # Ripped from real Kaveri platform to appease kmt 
version checks # Set up generation_id - file_append((topology_dir, 'generation_id'), 1) + file_append((topology_dir, "generation_id"), 1) # Set up system properties - sys_prop = 'platform_oem 2314885673410447169\n' + \ - 'platform_id 35322352389441\n' + \ - 'platform_rev 1\n' - file_append((topology_dir, 'system_properties'), sys_prop) + sys_prop = ( + "platform_oem 2314885673410447169\n" + + "platform_id 35322352389441\n" + + "platform_rev 1\n" + ) + file_append((topology_dir, "system_properties"), sys_prop) # Populate the topology tree # TODO: Just the bare minimum to pass for now - node_dir = joinpath(topology_dir, 'nodes/0') + node_dir = joinpath(topology_dir, "nodes/0") remake_dir(node_dir) # must show valid kaveri gpu id or massive meltdown - file_append((node_dir, 'gpu_id'), 2765) + file_append((node_dir, "gpu_id"), 2765) - gfx_dict = { "gfx801": {"name": "Carrizo\n", "id": 39028}, - "gfx902": {"name": "Raven\n", "id": 5597}} + gfx_dict = { + "gfx801": {"name": "Carrizo\n", "id": 39028}, + "gfx902": {"name": "Raven\n", "id": 5597}, + } # must have marketing name - file_append((node_dir, 'name'), gfx_dict[options.gfx_version]["name"]) + file_append((node_dir, "name"), gfx_dict[options.gfx_version]["name"]) mem_banks_cnt = 1 @@ -447,46 +483,50 @@ def createCarrizoTopology(options): # populate global node properties # NOTE: SIMD count triggers a valid GPU agent creation - node_prop = 'cpu_cores_count %s\n' % options.num_cpus + \ - 'simd_count %s\n' \ - % (options.num_compute_units * options.simds_per_cu) + \ - 'mem_banks_count %s\n' % mem_banks_cnt + \ - 'caches_count 0\n' + \ - 'io_links_count 0\n' + \ - 'cpu_core_id_base 16\n' + \ - 'simd_id_base 2147483648\n' + \ - 'max_waves_per_simd %s\n' % options.wfs_per_simd + \ - 'lds_size_in_kb %s\n' % int(options.lds_size / 1024) + \ - 'gds_size_in_kb 0\n' + \ - 'wave_front_size %s\n' % options.wf_size + \ - 'array_count 1\n' + \ - 'simd_arrays_per_engine %s\n' % options.sa_per_complex + \ - 'cu_per_simd_array %s\n' 
% options.cu_per_sa + \ - 'simd_per_cu %s\n' % options.simds_per_cu + \ - 'max_slots_scratch_cu 32\n' + \ - 'vendor_id 4098\n' + \ - 'device_id %s\n' % device_id + \ - 'location_id 8\n' + \ - 'drm_render_minor %s\n' % drm_num + \ - 'max_engine_clk_fcompute %s\n' \ - % int(toFrequency(options.gpu_clock) / 1e6) + \ - 'local_mem_size 0\n' + \ - 'fw_version 699\n' + \ - 'capability 4738\n' + \ - 'max_engine_clk_ccompute %s\n' \ - % int(toFrequency(options.CPUClock) / 1e6) + node_prop = ( + "cpu_cores_count %s\n" % options.num_cpus + + "simd_count %s\n" + % (options.num_compute_units * options.simds_per_cu) + + "mem_banks_count %s\n" % mem_banks_cnt + + "caches_count 0\n" + + "io_links_count 0\n" + + "cpu_core_id_base 16\n" + + "simd_id_base 2147483648\n" + + "max_waves_per_simd %s\n" % options.wfs_per_simd + + "lds_size_in_kb %s\n" % int(options.lds_size / 1024) + + "gds_size_in_kb 0\n" + + "wave_front_size %s\n" % options.wf_size + + "array_count 1\n" + + "simd_arrays_per_engine %s\n" % options.sa_per_complex + + "cu_per_simd_array %s\n" % options.cu_per_sa + + "simd_per_cu %s\n" % options.simds_per_cu + + "max_slots_scratch_cu 32\n" + + "vendor_id 4098\n" + + "device_id %s\n" % device_id + + "location_id 8\n" + + "drm_render_minor %s\n" % drm_num + + "max_engine_clk_fcompute %s\n" + % int(toFrequency(options.gpu_clock) / 1e6) + + "local_mem_size 0\n" + + "fw_version 699\n" + + "capability 4738\n" + + "max_engine_clk_ccompute %s\n" + % int(toFrequency(options.CPUClock) / 1e6) + ) - file_append((node_dir, 'properties'), node_prop) + file_append((node_dir, "properties"), node_prop) for i in range(mem_banks_cnt): - mem_dir = joinpath(node_dir, f'mem_banks/{i}') + mem_dir = joinpath(node_dir, f"mem_banks/{i}") remake_dir(mem_dir) # Heap type value taken from real system, heap type values: # https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/roc-4.0.x/include/hsakmttypes.h#L317 - mem_prop = f'heap_type 0\n' + \ - f'size_in_bytes 
{toMemorySize(options.mem_size)}' + \ - f'flags 0\n' + \ - f'width 64\n' + \ - f'mem_clk_max 1600\n' - file_append((mem_dir, 'properties'), mem_prop) + mem_prop = ( + f"heap_type 0\n" + + f"size_in_bytes {toMemorySize(options.mem_size)}" + + f"flags 0\n" + + f"width 64\n" + + f"mem_clk_max 1600\n" + ) + file_append((mem_dir, "properties"), mem_prop) diff --git a/configs/example/lupv/README.md b/configs/example/lupv/README.md index d5895db4af..4f1e33c520 100644 --- a/configs/example/lupv/README.md +++ b/configs/example/lupv/README.md @@ -41,4 +41,4 @@ m5term localhost 3456 ``` This should allow you to run busybox, in which you can see the LupIO device at -work! \ No newline at end of file +work! diff --git a/configs/example/lupv/run_lupv.py b/configs/example/lupv/run_lupv.py index e87d3925f9..0056cf8bb4 100644 --- a/configs/example/lupv/run_lupv.py +++ b/configs/example/lupv/run_lupv.py @@ -49,8 +49,7 @@ import argparse # Run a check to ensure the right version of gem5 is being used. requires(isa_required=ISA.RISCV) -from gem5.components.cachehierarchies.classic.\ - private_l1_private_l2_cache_hierarchy import ( +from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( PrivateL1PrivateL2CacheHierarchy, ) @@ -108,6 +107,7 @@ board.set_kernel_disk_workload( print("Running with ISA: " + processor.get_isa().name) print() root = Root(full_system=True, system=board) +board._pre_instantiate() m5.instantiate() print("Beginning simulation!") diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py index 6dd73ef15c..a50644b2b1 100644 --- a/configs/example/memcheck.py +++ b/configs/example/memcheck.py @@ -44,20 +44,30 @@ import m5 from m5.objects import * parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + formatter_class=argparse.ArgumentDefaultsHelpFormatter +) -parser.add_argument("-a", "--atomic", action="store_true", - help="Use atomic (non-timing) mode") -parser.add_argument("-b", 
"--blocking", action="store_true", - help="Use blocking caches") -parser.add_argument("-m", "--maxtick", type=int, default=m5.MaxTick, - metavar="T", - help="Stop after T ticks") -parser.add_argument("-p", "--prefetchers", action="store_true", - help="Use prefetchers") -parser.add_argument("-s", "--stridepref", action="store_true", - help="Use strided prefetchers") +parser.add_argument( + "-a", "--atomic", action="store_true", help="Use atomic (non-timing) mode" +) +parser.add_argument( + "-b", "--blocking", action="store_true", help="Use blocking caches" +) +parser.add_argument( + "-m", + "--maxtick", + type=int, + default=m5.MaxTick, + metavar="T", + help="Stop after T ticks", +) +parser.add_argument( + "-p", "--prefetchers", action="store_true", help="Use prefetchers" +) +parser.add_argument( + "-s", "--stridepref", action="store_true", help="Use strided prefetchers" +) # This example script has a lot in common with the memtest.py in that # it is designed to stress tests the memory system. However, this @@ -89,107 +99,134 @@ parser.add_argument("-s", "--stridepref", action="store_true", # and linear address streams to ensure that the prefetchers will # trigger. By default prefetchers are off. 
-parser.add_argument("-c", "--caches", type=str, default="3:2", - help="Colon-separated cache hierarchy specification, " - "see script comments for details ") -parser.add_argument("-t", "--testers", type=str, default="1:0:2", - help="Colon-separated tester hierarchy specification, " - "see script comments for details ") -parser.add_argument("-r", "--random", action="store_true", - help="Generate a random tree topology") -parser.add_argument("--sys-clock", action="store", type=str, - default='1GHz', - help = """Top-level clock for blocks running at system - speed""") +parser.add_argument( + "-c", + "--caches", + type=str, + default="3:2", + help="Colon-separated cache hierarchy specification, " + "see script comments for details ", +) +parser.add_argument( + "-t", + "--testers", + type=str, + default="1:0:2", + help="Colon-separated tester hierarchy specification, " + "see script comments for details ", +) +parser.add_argument( + "-r", + "--random", + action="store_true", + help="Generate a random tree topology", +) +parser.add_argument( + "--sys-clock", + action="store", + type=str, + default="1GHz", + help="""Top-level clock for blocks running at system + speed""", +) args = parser.parse_args() # Start by parsing the command line args and do some basic sanity # checking if args.random: - # Generate a tree with a valid number of testers - tree_depth = random.randint(1, 4) - cachespec = [random.randint(1, 3) for i in range(tree_depth)] - testerspec = [random.randint(1, 3) for i in range(tree_depth + 1)] - print("Generated random tree -c", ':'.join(map(str, cachespec)), - "-t", ':'.join(map(str, testerspec))) + # Generate a tree with a valid number of testers + tree_depth = random.randint(1, 4) + cachespec = [random.randint(1, 3) for i in range(tree_depth)] + testerspec = [random.randint(1, 3) for i in range(tree_depth + 1)] + print( + "Generated random tree -c", + ":".join(map(str, cachespec)), + "-t", + ":".join(map(str, testerspec)), + ) else: - try: - cachespec = 
[int(x) for x in args.caches.split(':')] - testerspec = [int(x) for x in args.testers.split(':')] - except: - print("Error: Unable to parse caches or testers option") - sys.exit(1) + try: + cachespec = [int(x) for x in args.caches.split(":")] + testerspec = [int(x) for x in args.testers.split(":")] + except: + print("Error: Unable to parse caches or testers option") + sys.exit(1) - if len(cachespec) < 1: - print("Error: Must have at least one level of caches") - sys.exit(1) + if len(cachespec) < 1: + print("Error: Must have at least one level of caches") + sys.exit(1) - if len(cachespec) != len(testerspec) - 1: - print("Error: Testers must have one element more than caches") - sys.exit(1) + if len(cachespec) != len(testerspec) - 1: + print("Error: Testers must have one element more than caches") + sys.exit(1) - if testerspec[-1] == 0: - print("Error: Must have testers at the uppermost level") - sys.exit(1) + if testerspec[-1] == 0: + print("Error: Must have testers at the uppermost level") + sys.exit(1) - for t in testerspec: - if t < 0: - print("Error: Cannot have a negative number of testers") - sys.exit(1) + for t in testerspec: + if t < 0: + print("Error: Cannot have a negative number of testers") + sys.exit(1) - for c in cachespec: - if c < 1: - print("Error: Must have 1 or more caches at each level") - sys.exit(1) + for c in cachespec: + if c < 1: + print("Error: Must have 1 or more caches at each level") + sys.exit(1) # Determine the tester multiplier for each level as the string # elements are per subsystem and it fans out multiplier = [1] for c in cachespec: - if c < 1: - print("Error: Must have at least one cache per level") - multiplier.append(multiplier[-1] * c) + if c < 1: + print("Error: Must have at least one cache per level") + multiplier.append(multiplier[-1] * c) numtesters = 0 for t, m in zip(testerspec, multiplier): - numtesters += t * m + numtesters += t * m # Define a prototype L1 cache that we scale for all successive levels -proto_l1 = 
Cache(size = '32kB', assoc = 4, - tag_latency = 1, data_latency = 1, response_latency = 1, - tgts_per_mshr = 8) +proto_l1 = Cache( + size="32kB", + assoc=4, + tag_latency=1, + data_latency=1, + response_latency=1, + tgts_per_mshr=8, +) if args.blocking: - proto_l1.mshrs = 1 + proto_l1.mshrs = 1 else: - proto_l1.mshrs = 4 + proto_l1.mshrs = 4 if args.prefetchers: - proto_l1.prefetcher = TaggedPrefetcher() + proto_l1.prefetcher = TaggedPrefetcher() elif args.stridepref: - proto_l1.prefetcher = StridePrefetcher() + proto_l1.prefetcher = StridePrefetcher() cache_proto = [proto_l1] # Now add additional cache levels (if any) by scaling L1 params, the # first element is Ln, and the last element L1 for scale in cachespec[:-1]: - # Clone previous level and update params - prev = cache_proto[0] - next = prev() - next.size = prev.size * scale - next.tag_latency = prev.tag_latency * 10 - next.data_latency = prev.data_latency * 10 - next.response_latency = prev.response_latency * 10 - next.assoc = prev.assoc * scale - next.mshrs = prev.mshrs * scale - cache_proto.insert(0, next) + # Clone previous level and update params + prev = cache_proto[0] + next = prev() + next.size = prev.size * scale + next.tag_latency = prev.tag_latency * 10 + next.data_latency = prev.data_latency * 10 + next.response_latency = prev.response_latency * 10 + next.assoc = prev.assoc * scale + next.mshrs = prev.mshrs * scale + cache_proto.insert(0, next) # Create a config to be used by all the traffic generators cfg_file_name = "memcheck.cfg" -cfg_file_path = os.path.dirname(__file__) + "/" +cfg_file_name -cfg_file = open(cfg_file_path, 'w') +cfg_file_path = os.path.dirname(__file__) + "/" + cfg_file_name +cfg_file = open(cfg_file_path, "w") # Three states, with random, linear and idle behaviours. 
The random # and linear states access memory in the range [0 : 16 Mbyte] with 8 @@ -207,15 +244,16 @@ cfg_file.write("TRANSITION 2 1 0.5\n") cfg_file.close() # Make a prototype for the tester to be used throughout -proto_tester = TrafficGen(config_file = cfg_file_path) +proto_tester = TrafficGen(config_file=cfg_file_path) # Set up the system along with a DRAM controller -system = System(physmem = MemCtrl(dram = DDR3_1600_8x8())) +system = System(physmem=MemCtrl(dram=DDR3_1600_8x8())) -system.voltage_domain = VoltageDomain(voltage = '1V') +system.voltage_domain = VoltageDomain(voltage="1V") -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) system.memchecker = MemChecker() @@ -225,79 +263,82 @@ next_subsys_index = [0] * (len(cachespec) + 1) # Recursive function to create a sub-tree of the cache and tester # hierarchy def make_cache_level(ncaches, prototypes, level, next_cache): - global next_subsys_index, proto_l1, testerspec, proto_tester + global next_subsys_index, proto_l1, testerspec, proto_tester - index = next_subsys_index[level] - next_subsys_index[level] += 1 + index = next_subsys_index[level] + next_subsys_index[level] += 1 - # Create a subsystem to contain the crossbar and caches, and - # any testers - subsys = SubSystem() - setattr(system, 'l%dsubsys%d' % (level, index), subsys) + # Create a subsystem to contain the crossbar and caches, and + # any testers + subsys = SubSystem() + setattr(system, "l%dsubsys%d" % (level, index), subsys) - # The levels are indexing backwards through the list - ntesters = testerspec[len(cachespec) - level] + # The levels are indexing backwards through the list + ntesters = testerspec[len(cachespec) - level] - testers = [proto_tester() for i in range(ntesters)] - checkers = [MemCheckerMonitor(memchecker = system.memchecker) \ - for i in range(ntesters)] - if ntesters: - 
subsys.tester = testers - subsys.checkers = checkers + testers = [proto_tester() for i in range(ntesters)] + checkers = [ + MemCheckerMonitor(memchecker=system.memchecker) + for i in range(ntesters) + ] + if ntesters: + subsys.tester = testers + subsys.checkers = checkers - if level != 0: - # Create a crossbar and add it to the subsystem, note that - # we do this even with a single element on this level - xbar = L2XBar(width = 32) - subsys.xbar = xbar - if next_cache: - xbar.mem_side_ports = next_cache.cpu_side + if level != 0: + # Create a crossbar and add it to the subsystem, note that + # we do this even with a single element on this level + xbar = L2XBar(width=32) + subsys.xbar = xbar + if next_cache: + xbar.mem_side_ports = next_cache.cpu_side - # Create and connect the caches, both the ones fanning out - # to create the tree, and the ones used to connect testers - # on this level - tree_caches = [prototypes[0]() for i in range(ncaches[0])] - tester_caches = [proto_l1() for i in range(ntesters)] + # Create and connect the caches, both the ones fanning out + # to create the tree, and the ones used to connect testers + # on this level + tree_caches = [prototypes[0]() for i in range(ncaches[0])] + tester_caches = [proto_l1() for i in range(ntesters)] - subsys.cache = tester_caches + tree_caches - for cache in tree_caches: - cache.mem_side = xbar.cpu_side_ports - make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache) - for tester, checker, cache in zip(testers, checkers, tester_caches): - tester.port = checker.cpu_side_port - checker.mem_side_port = cache.cpu_side - cache.mem_side = xbar.cpu_side_ports - else: - if not next_cache: - print("Error: No next-level cache at top level") - sys.exit(1) + subsys.cache = tester_caches + tree_caches + for cache in tree_caches: + cache.mem_side = xbar.cpu_side_ports + make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache) + for tester, checker, cache in zip(testers, checkers, tester_caches): + tester.port = 
checker.cpu_side_port + checker.mem_side_port = cache.cpu_side + cache.mem_side = xbar.cpu_side_ports + else: + if not next_cache: + print("Error: No next-level cache at top level") + sys.exit(1) + + if ntesters > 1: + # Create a crossbar and add it to the subsystem + xbar = L2XBar(width=32) + subsys.xbar = xbar + xbar.mem_side_ports = next_cache.cpu_side + for tester, checker in zip(testers, checkers): + tester.port = checker.cpu_side_port + checker.mem_side_port = xbar.cpu_side_ports + else: + # Single tester + testers[0].port = checkers[0].cpu_side_port + checkers[0].mem_side_port = next_cache.cpu_side - if ntesters > 1: - # Create a crossbar and add it to the subsystem - xbar = L2XBar(width = 32) - subsys.xbar = xbar - xbar.mem_side_ports = next_cache.cpu_side - for tester, checker in zip(testers, checkers): - tester.port = checker.cpu_side_port - checker.mem_side_port = xbar.cpu_side_ports - else: - # Single tester - testers[0].port = checkers[0].cpu_side_port - checkers[0].mem_side_port = next_cache.cpu_side # Top level call to create the cache hierarchy, bottom up make_cache_level(cachespec, cache_proto, len(cachespec), None) # Connect the lowest level crossbar to the memory -last_subsys = getattr(system, 'l%dsubsys0' % len(cachespec)) +last_subsys = getattr(system, "l%dsubsys0" % len(cachespec)) last_subsys.xbar.mem_side_ports = system.physmem.port last_subsys.xbar.point_of_coherency = True -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) if args.atomic: - root.system.mem_mode = 'atomic' + root.system.mem_mode = "atomic" else: - root.system.mem_mode = 'timing' + root.system.mem_mode = "timing" # The system port is never used in the tester so merely connect it # to avoid problems @@ -309,4 +350,4 @@ m5.instantiate() # Simulate until program terminates exit_event = m5.simulate(args.maxtick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) +print("Exiting @ tick", m5.curTick(), "because", 
exit_event.getCause()) diff --git a/configs/example/memtest.py b/configs/example/memtest.py index 1c3ef042bc..58d762dc60 100644 --- a/configs/example/memtest.py +++ b/configs/example/memtest.py @@ -52,17 +52,26 @@ from m5.objects import * # and testers not only at the L1s, but also at the L2s, L3s etc. parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + formatter_class=argparse.ArgumentDefaultsHelpFormatter +) -parser.add_argument("-a", "--atomic", action="store_true", - help="Use atomic (non-timing) mode") -parser.add_argument("-b", "--blocking", action="store_true", - help="Use blocking caches") -parser.add_argument("-l", "--maxloads", metavar="N", default=0, - help="Stop after N loads") -parser.add_argument("-m", "--maxtick", type=int, default=m5.MaxTick, - metavar="T", - help="Stop after T ticks") +parser.add_argument( + "-a", "--atomic", action="store_true", help="Use atomic (non-timing) mode" +) +parser.add_argument( + "-b", "--blocking", action="store_true", help="Use blocking caches" +) +parser.add_argument( + "-l", "--maxloads", metavar="N", default=0, help="Stop after N loads" +) +parser.add_argument( + "-m", + "--maxtick", + type=int, + default=m5.MaxTick, + metavar="T", + help="Stop after T ticks", +) # The tree specification consists of two colon-separated lists of one # or more integers, one for the caches, and one for the testers. The @@ -75,147 +84,197 @@ parser.add_argument("-m", "--maxtick", type=int, default=m5.MaxTick, # cache string as there should always be testers attached to the # uppermost caches. 
-parser.add_argument("-c", "--caches", type=str, default="2:2:1", - help="Colon-separated cache hierarchy specification, " - "see script comments for details ") -parser.add_argument("--noncoherent-cache", action="store_true", - help="Adds a non-coherent, last-level cache") -parser.add_argument("-t", "--testers", type=str, default="1:1:0:2", - help="Colon-separated tester hierarchy specification, " - "see script comments for details ") -parser.add_argument("-f", "--functional", type=int, default=10, - metavar="PCT", - help="Target percentage of functional accesses ") -parser.add_argument("-u", "--uncacheable", type=int, default=10, - metavar="PCT", - help="Target percentage of uncacheable accesses ") -parser.add_argument("-r", "--random", action="store_true", - help="Generate a random tree topology") -parser.add_argument("--progress", type=int, default=100000, - metavar="NLOADS", - help="Progress message interval ") -parser.add_argument("--sys-clock", action="store", type=str, - default='1GHz', - help="""Top-level clock for blocks running at system - speed""") +parser.add_argument( + "-c", + "--caches", + type=str, + default="2:2:1", + help="Colon-separated cache hierarchy specification, " + "see script comments for details ", +) +parser.add_argument( + "--noncoherent-cache", + action="store_true", + help="Adds a non-coherent, last-level cache", +) +parser.add_argument( + "-t", + "--testers", + type=str, + default="1:1:0:2", + help="Colon-separated tester hierarchy specification, " + "see script comments for details ", +) +parser.add_argument( + "-f", + "--functional", + type=int, + default=10, + metavar="PCT", + help="Target percentage of functional accesses ", +) +parser.add_argument( + "-u", + "--uncacheable", + type=int, + default=10, + metavar="PCT", + help="Target percentage of uncacheable accesses ", +) +parser.add_argument( + "-r", + "--random", + action="store_true", + help="Generate a random tree topology", +) +parser.add_argument( + "--progress", + 
type=int, + default=100000, + metavar="NLOADS", + help="Progress message interval ", +) +parser.add_argument( + "--sys-clock", + action="store", + type=str, + default="1GHz", + help="""Top-level clock for blocks running at system + speed""", +) args = parser.parse_args() # Get the total number of testers def numtesters(cachespec, testerspec): - # Determine the tester multiplier for each level as the - # elements are per subsystem and it fans out - multiplier = [1] - for c in cachespec: - multiplier.append(multiplier[-1] * c) + # Determine the tester multiplier for each level as the + # elements are per subsystem and it fans out + multiplier = [1] + for c in cachespec: + multiplier.append(multiplier[-1] * c) - total = 0 - for t, m in zip(testerspec, multiplier): - total += t * m + total = 0 + for t, m in zip(testerspec, multiplier): + total += t * m + + return total - return total block_size = 64 # Start by parsing the command line args and do some basic sanity # checking if args.random: - # Generate a tree with a valid number of testers - while True: - tree_depth = random.randint(1, 4) - cachespec = [random.randint(1, 3) for i in range(tree_depth)] - testerspec = [random.randint(1, 3) for i in range(tree_depth + 1)] - if numtesters(cachespec, testerspec) < block_size: - break + # Generate a tree with a valid number of testers + while True: + tree_depth = random.randint(1, 4) + cachespec = [random.randint(1, 3) for i in range(tree_depth)] + testerspec = [random.randint(1, 3) for i in range(tree_depth + 1)] + if numtesters(cachespec, testerspec) < block_size: + break - print("Generated random tree -c", ':'.join(map(str, cachespec)), - "-t", ':'.join(map(str, testerspec))) + print( + "Generated random tree -c", + ":".join(map(str, cachespec)), + "-t", + ":".join(map(str, testerspec)), + ) else: - try: - cachespec = [int(x) for x in args.caches.split(':')] - testerspec = [int(x) for x in args.testers.split(':')] - except: - print("Error: Unable to parse caches or 
testers option") - sys.exit(1) + try: + cachespec = [int(x) for x in args.caches.split(":")] + testerspec = [int(x) for x in args.testers.split(":")] + except: + print("Error: Unable to parse caches or testers option") + sys.exit(1) - if len(cachespec) < 1: - print("Error: Must have at least one level of caches") - sys.exit(1) + if len(cachespec) < 1: + print("Error: Must have at least one level of caches") + sys.exit(1) - if len(cachespec) != len(testerspec) - 1: - print("Error: Testers must have one element more than caches") - sys.exit(1) + if len(cachespec) != len(testerspec) - 1: + print("Error: Testers must have one element more than caches") + sys.exit(1) - if testerspec[-1] == 0: - print("Error: Must have testers at the uppermost level") - sys.exit(1) + if testerspec[-1] == 0: + print("Error: Must have testers at the uppermost level") + sys.exit(1) - for t in testerspec: - if t < 0: - print("Error: Cannot have a negative number of testers") - sys.exit(1) + for t in testerspec: + if t < 0: + print("Error: Cannot have a negative number of testers") + sys.exit(1) - for c in cachespec: - if c < 1: - print("Error: Must have 1 or more caches at each level") - sys.exit(1) + for c in cachespec: + if c < 1: + print("Error: Must have 1 or more caches at each level") + sys.exit(1) - if numtesters(cachespec, testerspec) > block_size: - print("Error: Limited to %s testers because of false sharing" - % (block_size)) - sys.exit(1) + if numtesters(cachespec, testerspec) > block_size: + print( + "Error: Limited to %s testers because of false sharing" + % (block_size) + ) + sys.exit(1) # Define a prototype L1 cache that we scale for all successive levels -proto_l1 = Cache(size = '32kB', assoc = 4, - tag_latency = 1, data_latency = 1, response_latency = 1, - tgts_per_mshr = 8, clusivity = 'mostly_incl', - writeback_clean = True) +proto_l1 = Cache( + size="32kB", + assoc=4, + tag_latency=1, + data_latency=1, + response_latency=1, + tgts_per_mshr=8, + clusivity="mostly_incl", + 
writeback_clean=True, +) if args.blocking: - proto_l1.mshrs = 1 + proto_l1.mshrs = 1 else: - proto_l1.mshrs = 4 + proto_l1.mshrs = 4 cache_proto = [proto_l1] # Now add additional cache levels (if any) by scaling L1 params, the # first element is Ln, and the last element L1 for scale in cachespec[:-1]: - # Clone previous level and update params - prev = cache_proto[0] - next = prev() - next.size = prev.size * scale - next.tag_latency = prev.tag_latency * 10 - next.data_latency = prev.data_latency * 10 - next.response_latency = prev.response_latency * 10 - next.assoc = prev.assoc * scale - next.mshrs = prev.mshrs * scale + # Clone previous level and update params + prev = cache_proto[0] + next = prev() + next.size = prev.size * scale + next.tag_latency = prev.tag_latency * 10 + next.data_latency = prev.data_latency * 10 + next.response_latency = prev.response_latency * 10 + next.assoc = prev.assoc * scale + next.mshrs = prev.mshrs * scale - # Swap the inclusivity/exclusivity at each level. L2 is mostly - # exclusive with respect to L1, L3 mostly inclusive, L4 mostly - # exclusive etc. - next.writeback_clean = not prev.writeback_clean - if (prev.clusivity.value == 'mostly_incl'): - next.clusivity = 'mostly_excl' - else: - next.clusivity = 'mostly_incl' + # Swap the inclusivity/exclusivity at each level. L2 is mostly + # exclusive with respect to L1, L3 mostly inclusive, L4 mostly + # exclusive etc. 
+ next.writeback_clean = not prev.writeback_clean + if prev.clusivity.value == "mostly_incl": + next.clusivity = "mostly_excl" + else: + next.clusivity = "mostly_incl" - cache_proto.insert(0, next) + cache_proto.insert(0, next) # Make a prototype for the tester to be used throughout -proto_tester = MemTest(max_loads = args.maxloads, - percent_functional = args.functional, - percent_uncacheable = args.uncacheable, - progress_interval = args.progress) +proto_tester = MemTest( + max_loads=args.maxloads, + percent_functional=args.functional, + percent_uncacheable=args.uncacheable, + progress_interval=args.progress, +) # Set up the system along with a simple memory and reference memory -system = System(physmem = SimpleMemory(), - cache_line_size = block_size) +system = System(physmem=SimpleMemory(), cache_line_size=block_size) -system.voltage_domain = VoltageDomain(voltage = '1V') +system.voltage_domain = VoltageDomain(voltage="1V") -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # For each level, track the next subsys index to use next_subsys_index = [0] * (len(cachespec) + 1) @@ -223,90 +282,98 @@ next_subsys_index = [0] * (len(cachespec) + 1) # Recursive function to create a sub-tree of the cache and tester # hierarchy def make_cache_level(ncaches, prototypes, level, next_cache): - global next_subsys_index, proto_l1, testerspec, proto_tester + global next_subsys_index, proto_l1, testerspec, proto_tester - index = next_subsys_index[level] - next_subsys_index[level] += 1 + index = next_subsys_index[level] + next_subsys_index[level] += 1 - # Create a subsystem to contain the crossbar and caches, and - # any testers - subsys = SubSystem() - setattr(system, 'l%dsubsys%d' % (level, index), subsys) + # Create a subsystem to contain the crossbar and caches, and + # any testers + subsys = SubSystem() + setattr(system, 
"l%dsubsys%d" % (level, index), subsys) - # The levels are indexing backwards through the list - ntesters = testerspec[len(cachespec) - level] + # The levels are indexing backwards through the list + ntesters = testerspec[len(cachespec) - level] - # Scale the progress threshold as testers higher up in the tree - # (smaller level) get a smaller portion of the overall bandwidth, - # and also make the interval of packet injection longer for the - # testers closer to the memory (larger level) to prevent them - # hogging all the bandwidth - limit = (len(cachespec) - level + 1) * 100000000 - testers = [proto_tester(interval = 10 * (level * level + 1), - progress_check = limit) \ - for i in range(ntesters)] - if ntesters: - subsys.tester = testers + # Scale the progress threshold as testers higher up in the tree + # (smaller level) get a smaller portion of the overall bandwidth, + # and also make the interval of packet injection longer for the + # testers closer to the memory (larger level) to prevent them + # hogging all the bandwidth + limit = (len(cachespec) - level + 1) * 100000000 + testers = [ + proto_tester(interval=10 * (level * level + 1), progress_check=limit) + for i in range(ntesters) + ] + if ntesters: + subsys.tester = testers - if level != 0: - # Create a crossbar and add it to the subsystem, note that - # we do this even with a single element on this level - xbar = L2XBar() - subsys.xbar = xbar - if next_cache: - xbar.mem_side_ports = next_cache.cpu_side + if level != 0: + # Create a crossbar and add it to the subsystem, note that + # we do this even with a single element on this level + xbar = L2XBar() + subsys.xbar = xbar + if next_cache: + xbar.mem_side_ports = next_cache.cpu_side - # Create and connect the caches, both the ones fanning out - # to create the tree, and the ones used to connect testers - # on this level - tree_caches = [prototypes[0]() for i in range(ncaches[0])] - tester_caches = [proto_l1() for i in range(ntesters)] + # Create and 
connect the caches, both the ones fanning out + # to create the tree, and the ones used to connect testers + # on this level + tree_caches = [prototypes[0]() for i in range(ncaches[0])] + tester_caches = [proto_l1() for i in range(ntesters)] - subsys.cache = tester_caches + tree_caches - for cache in tree_caches: - cache.mem_side = xbar.cpu_side_ports - make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache) - for tester, cache in zip(testers, tester_caches): - tester.port = cache.cpu_side - cache.mem_side = xbar.cpu_side_ports - else: - if not next_cache: - print("Error: No next-level cache at top level") - sys.exit(1) + subsys.cache = tester_caches + tree_caches + for cache in tree_caches: + cache.mem_side = xbar.cpu_side_ports + make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache) + for tester, cache in zip(testers, tester_caches): + tester.port = cache.cpu_side + cache.mem_side = xbar.cpu_side_ports + else: + if not next_cache: + print("Error: No next-level cache at top level") + sys.exit(1) + + if ntesters > 1: + # Create a crossbar and add it to the subsystem + xbar = L2XBar() + subsys.xbar = xbar + xbar.mem_side_ports = next_cache.cpu_side + for tester in testers: + tester.port = xbar.cpu_side_ports + else: + # Single tester + testers[0].port = next_cache.cpu_side - if ntesters > 1: - # Create a crossbar and add it to the subsystem - xbar = L2XBar() - subsys.xbar = xbar - xbar.mem_side_ports = next_cache.cpu_side - for tester in testers: - tester.port = xbar.cpu_side_ports - else: - # Single tester - testers[0].port = next_cache.cpu_side # Top level call to create the cache hierarchy, bottom up make_cache_level(cachespec, cache_proto, len(cachespec), None) # Connect the lowest level crossbar to the last-level cache and memory # controller -last_subsys = getattr(system, 'l%dsubsys0' % len(cachespec)) +last_subsys = getattr(system, "l%dsubsys0" % len(cachespec)) last_subsys.xbar.point_of_coherency = True if args.noncoherent_cache: - 
system.llc = NoncoherentCache(size = '16MB', assoc = 16, tag_latency = 10, - data_latency = 10, sequential_access = True, - response_latency = 20, tgts_per_mshr = 8, - mshrs = 64) - last_subsys.xbar.mem_side_ports = system.llc.cpu_side - system.llc.mem_side = system.physmem.port + system.llc = NoncoherentCache( + size="16MB", + assoc=16, + tag_latency=10, + data_latency=10, + sequential_access=True, + response_latency=20, + tgts_per_mshr=8, + mshrs=64, + ) + last_subsys.xbar.mem_side_ports = system.llc.cpu_side + system.llc.mem_side = system.physmem.port else: - last_subsys.xbar.mem_side_ports = system.physmem.port + last_subsys.xbar.mem_side_ports = system.physmem.port -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) if args.atomic: - root.system.mem_mode = 'atomic' + root.system.mem_mode = "atomic" else: - root.system.mem_mode = 'timing' + root.system.mem_mode = "timing" # The system port is never used in the tester so merely connect it # to avoid problems @@ -318,4 +385,4 @@ m5.instantiate() # Simulate until program terminates exit_event = m5.simulate(args.maxtick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/example/noc_config/2x4.py b/configs/example/noc_config/2x4.py index 2d10da636a..3dd2403799 100644 --- a/configs/example/noc_config/2x4.py +++ b/configs/example/noc_config/2x4.py @@ -47,35 +47,43 @@ class NoC_Params(CHI_config.NoC_Params): num_rows = 2 num_cols = 4 + # Specialization of nodes to define bindings for each CHI node type # needed by CustomMesh. 
# The default types are defined in CHI_Node and their derivatives in # configs/ruby/CHI_config.py + class CHI_RNF(CHI_config.CHI_RNF): class NoC_Params(CHI_config.CHI_RNF.NoC_Params): router_list = [1, 2, 5, 6] + class CHI_HNF(CHI_config.CHI_HNF): class NoC_Params(CHI_config.CHI_HNF.NoC_Params): router_list = [1, 2, 5, 6] + class CHI_MN(CHI_config.CHI_MN): class NoC_Params(CHI_config.CHI_MN.NoC_Params): router_list = [4] + class CHI_SNF_MainMem(CHI_config.CHI_SNF_MainMem): class NoC_Params(CHI_config.CHI_SNF_MainMem.NoC_Params): router_list = [0, 4] + class CHI_SNF_BootMem(CHI_config.CHI_SNF_BootMem): class NoC_Params(CHI_config.CHI_SNF_BootMem.NoC_Params): router_list = [3] + class CHI_RNI_DMA(CHI_config.CHI_RNI_DMA): class NoC_Params(CHI_config.CHI_RNI_DMA.NoC_Params): router_list = [7] + class CHI_RNI_IO(CHI_config.CHI_RNI_IO): class NoC_Params(CHI_config.CHI_RNI_IO.NoC_Params): router_list = [7] diff --git a/configs/example/read_config.py b/configs/example/read_config.py index 5e64a9a841..b52a73d1fa 100644 --- a/configs/example/read_config.py +++ b/configs/example/read_config.py @@ -56,42 +56,56 @@ import m5 import m5.ticks as ticks sim_object_classes_by_name = { - cls.__name__: cls for cls in list(m5.objects.__dict__.values()) - if inspect.isclass(cls) and issubclass(cls, m5.objects.SimObject) } + cls.__name__: cls + for cls in list(m5.objects.__dict__.values()) + if inspect.isclass(cls) and issubclass(cls, m5.objects.SimObject) +} # Add some parsing functions to Param classes to handle reading in .ini # file elements. 
This could be moved into src/python/m5/params.py if # reading .ini files from Python proves to be useful -def no_parser(cls, flags, param): - raise Exception('Can\'t parse string: %s for parameter' - ' class: %s' % (str(param), cls.__name__)) -def simple_parser(suffix='', cast=lambda i: i): +def no_parser(cls, flags, param): + raise Exception( + "Can't parse string: %s for parameter" + " class: %s" % (str(param), cls.__name__) + ) + + +def simple_parser(suffix="", cast=lambda i: i): def body(cls, flags, param): return cls(cast(param + suffix)) + return body + # def tick_parser(cast=m5.objects.Latency): # lambda i: i): def tick_parser(cast=lambda i: i): def body(cls, flags, param): old_param = param - ret = cls(cast(str(param) + 't')) + ret = cls(cast(str(param) + "t")) return ret + return body + def addr_range_parser(cls, flags, param): sys.stdout.flush() - _param = param.split(':') + _param = param.split(":") (start, end) = _param[0:2] if len(_param) == 2: return m5.objects.AddrRange(start=int(start), end=int(end)) else: assert len(_param) > 2 intlv_match = _param[2] - masks = [ int(m) for m in _param[3:] ] - return m5.objects.AddrRange(start=int(start), end=int(end), - masks=masks, intlvMatch=int(intlv_match)) + masks = [int(m) for m in _param[3:]] + return m5.objects.AddrRange( + start=int(start), + end=int(end), + masks=masks, + intlvMatch=int(intlv_match), + ) def memory_bandwidth_parser(cls, flags, param): @@ -100,30 +114,32 @@ def memory_bandwidth_parser(cls, flags, param): value = 1.0 / float(param) # Convert to byte/s value = ticks.fromSeconds(value) - return cls('%fB/s' % value) + return cls("%fB/s" % value) + # These parameters have trickier parsing from .ini files than might be # expected param_parsers = { - 'Bool': simple_parser(), - 'ParamValue': no_parser, - 'NumericParamValue': simple_parser(cast=int), - 'TickParamValue': tick_parser(), - 'Frequency': tick_parser(cast=m5.objects.Latency), - 'Current': simple_parser(suffix='A'), - 'Voltage': 
simple_parser(suffix='V'), - 'Enum': simple_parser(), - 'MemorySize': simple_parser(suffix='B'), - 'MemorySize32': simple_parser(suffix='B'), - 'AddrRange': addr_range_parser, - 'String': simple_parser(), - 'MemoryBandwidth': memory_bandwidth_parser, - 'Time': simple_parser(), - 'EthernetAddr': simple_parser() - } + "Bool": simple_parser(), + "ParamValue": no_parser, + "NumericParamValue": simple_parser(cast=int), + "TickParamValue": tick_parser(), + "Frequency": tick_parser(cast=m5.objects.Latency), + "Current": simple_parser(suffix="A"), + "Voltage": simple_parser(suffix="V"), + "Enum": simple_parser(), + "MemorySize": simple_parser(suffix="B"), + "MemorySize32": simple_parser(suffix="B"), + "AddrRange": addr_range_parser, + "String": simple_parser(), + "MemoryBandwidth": memory_bandwidth_parser, + "Time": simple_parser(), + "EthernetAddr": simple_parser(), +} for name, parser in list(param_parsers.items()): - setattr(m5.params.__dict__[name], 'parse_ini', classmethod(parser)) + setattr(m5.params.__dict__[name], "parse_ini", classmethod(parser)) + class PortConnection(object): """This class is similar to m5.params.PortRef but with just enough @@ -136,7 +152,7 @@ class PortConnection(object): @classmethod def from_string(cls, str): - m = re.match('(.*)\.([^.\[]+)(\[(\d+)\])?', str) + m = re.match("(.*)\.([^.\[]+)(\[(\d+)\])?", str) object_name, port_name, whole_index, index = m.groups() if index is not None: index = int(index) @@ -146,11 +162,14 @@ class PortConnection(object): return PortConnection(object_name, port_name, index) def __str__(self): - return '%s.%s[%d]' % (self.object_name, self.port_name, self.index) + return "%s.%s[%d]" % (self.object_name, self.port_name, self.index) def __cmp__(self, right): - return cmp((self.object_name, self.port_name, self.index), - (right.object_name, right.port_name, right.index)) + return cmp( + (self.object_name, self.port_name, self.index), + (right.object_name, right.port_name, right.index), + ) + def to_list(v): 
"""Convert any non list to a singleton list""" @@ -159,8 +178,10 @@ def to_list(v): else: return [v] + class ConfigManager(object): """Manager for parsing a Root configuration from a config file""" + def __init__(self, config): self.config = config self.objects_by_name = {} @@ -170,17 +191,19 @@ class ConfigManager(object): """Find and configure (with just non-SimObject parameters) a single object""" - if object_name == 'Null': + if object_name == "Null": return NULL if object_name in self.objects_by_name: return self.objects_by_name[object_name] - object_type = self.config.get_param(object_name, 'type') + object_type = self.config.get_param(object_name, "type") if object_type not in sim_object_classes_by_name: - raise Exception('No SimObject type %s is available to' - ' build: %s' % (object_type, object_name)) + raise Exception( + "No SimObject type %s is available to" + " build: %s" % (object_type, object_name) + ) object_class = sim_object_classes_by_name[object_type] @@ -189,15 +212,19 @@ class ConfigManager(object): for param_name, param in list(object_class._params.items()): if issubclass(param.ptype, m5.params.ParamValue): if isinstance(param, m5.params.VectorParamDesc): - param_values = self.config.get_param_vector(object_name, - param_name) + param_values = self.config.get_param_vector( + object_name, param_name + ) - param_value = [ param.ptype.parse_ini(self.flags, value) - for value in param_values ] + param_value = [ + param.ptype.parse_ini(self.flags, value) + for value in param_values + ] else: param_value = param.ptype.parse_ini( - self.flags, self.config.get_param(object_name, - param_name)) + self.flags, + self.config.get_param(object_name, param_name), + ) parsed_params[param_name] = param_value @@ -211,26 +238,35 @@ class ConfigManager(object): parameters. 
This relies on all referenced objects having been created""" - if object_name == 'Null': + if object_name == "Null": return NULL for param_name, param in list(obj.__class__._params.items()): if issubclass(param.ptype, m5.objects.SimObject): if isinstance(param, m5.params.VectorParamDesc): - param_values = self.config.get_param_vector(object_name, - param_name) + param_values = self.config.get_param_vector( + object_name, param_name + ) - setattr(obj, param_name, - [ self.objects_by_name[name] - if name != 'Null' else m5.params.NULL - for name in param_values ]) + setattr( + obj, + param_name, + [ + self.objects_by_name[name] + if name != "Null" + else m5.params.NULL + for name in param_values + ], + ) else: - param_value = self.config.get_param(object_name, - param_name) + param_value = self.config.get_param( + object_name, param_name + ) - if param_value != 'Null': - setattr(obj, param_name, self.objects_by_name[ - param_value]) + if param_value != "Null": + setattr( + obj, param_name, self.objects_by_name[param_value] + ) return obj @@ -242,12 +278,13 @@ class ConfigManager(object): for child_name, child_paths in children: param = obj.__class__._params.get(child_name, None) - if child_name == 'Null': + if child_name == "Null": continue if isinstance(child_paths, list): - child_list = [ self.objects_by_name[path] - for path in child_paths ] + child_list = [ + self.objects_by_name[path] for path in child_paths + ] else: child_list = self.objects_by_name[child_paths] @@ -261,7 +298,7 @@ class ConfigManager(object): def parse_port_name(self, port): """Parse the name of a port""" - m = re.match('(.*)\.([^.\[]+)(\[(\d+)\])?', port) + m = re.match("(.*)\.([^.\[]+)(\[(\d+)\])?", port) peer, peer_port, whole_index, index = m.groups() if index is not None: index = int(index) @@ -275,7 +312,7 @@ class ConfigManager(object): Returns a list of (PortConnection, PortConnection) with unordered (wrt. 
requestor/responder) connection information""" - if object_name == 'Null': + if object_name == "Null": return NULL parsed_ports = [] @@ -284,9 +321,12 @@ class ConfigManager(object): peers = self.config.get_port_peers(object_name, port_name) for index, peer in zip(list(range(0, len(peers))), peers): - parsed_ports.append(( - PortConnection(object_name, port.name, index), - PortConnection.from_string(peer))) + parsed_ports.append( + ( + PortConnection(object_name, port.name, index), + PortConnection.from_string(peer), + ) + ) return parsed_ports @@ -300,16 +340,16 @@ class ConfigManager(object): # has a suitable port index port_bind_indices = {} for from_port, to_port in connections: - port_bind_indices[ - (from_port.object_name, from_port.port_name)] = 0 + port_bind_indices[(from_port.object_name, from_port.port_name)] = 0 def port_has_correct_index(port): - return port_bind_indices[ - (port.object_name, port.port_name)] == port.index + return ( + port_bind_indices[(port.object_name, port.port_name)] + == port.index + ) def increment_port_index(port): - port_bind_indices[ - (port.object_name, port.port_name)] += 1 + port_bind_indices[(port.object_name, port.port_name)] += 1 # Step through the sorted connections. Exactly one of # each (responder,requestor) and (requestor,responder) pairs will be @@ -325,8 +365,9 @@ class ConfigManager(object): for connection in sorted(connections): from_port, to_port = connection - if (port_has_correct_index(from_port) and - port_has_correct_index(to_port)): + if port_has_correct_index(from_port) and port_has_correct_index( + to_port + ): connections_to_make.append((from_port, to_port)) @@ -336,15 +377,18 @@ class ConfigManager(object): # Exactly half of the connections (ie. 
all of them, one per # direction) must now have been made if (len(connections_to_make) * 2) != len(connections): - raise Exception('Port bindings can\'t be ordered') + raise Exception("Port bindings can't be ordered") # Actually do the binding for from_port, to_port in connections_to_make: from_object = self.objects_by_name[from_port.object_name] to_object = self.objects_by_name[to_port.object_name] - setattr(from_object, from_port.port_name, - getattr(to_object, to_port.port_name)) + setattr( + from_object, + from_port.port_name, + getattr(to_object, to_port.port_name), + ) def find_all_objects(self): """Find and build all SimObjects from the config file and connect @@ -356,7 +400,7 @@ class ConfigManager(object): self.find_object(object_name) # Add children to objects in the hierarchy from root - self.fill_in_children('root', self.find_object('root')) + self.fill_in_children("root", self.find_object("root")) # Now fill in SimObject-valued parameters in the knowledge that # this won't be interpreted as becoming the parent of objects @@ -373,6 +417,7 @@ class ConfigManager(object): # bind them self.bind_ports(connections) + class ConfigFile(object): def get_flags(self): return set() @@ -406,6 +451,7 @@ class ConfigFile(object): object.port(\[index\])?) 
of the port object_name.port_name""" pass + class ConfigIniFile(ConfigFile): def __init__(self): self.parser = configparser.ConfigParser() @@ -423,19 +469,19 @@ class ConfigIniFile(ConfigFile): return self.parser.get(object_name, param_name).split() def get_object_children(self, object_name): - if self.parser.has_option(object_name, 'children'): - children = self.parser.get(object_name, 'children') + if self.parser.has_option(object_name, "children"): + children = self.parser.get(object_name, "children") child_names = children.split() else: child_names = [] def make_path(child_name): - if object_name == 'root': + if object_name == "root": return child_name else: - return '%s.%s' % (object_name, child_name) + return "%s.%s" % (object_name, child_name) - return [ (name, make_path(name)) for name in child_names ] + return [(name, make_path(name)) for name in child_names] def get_port_peers(self, object_name, port_name): if self.parser.has_option(object_name, port_name): @@ -444,16 +490,17 @@ class ConfigIniFile(ConfigFile): else: return [] + class ConfigJsonFile(ConfigFile): def __init__(self): pass def is_sim_object(self, node): - return isinstance(node, dict) and 'path' in node + return isinstance(node, dict) and "path" in node def find_all_objects(self, node): if self.is_sim_object(node): - self.object_dicts[node['path']] = node + self.object_dicts[node["path"]] = node if isinstance(node, list): for elem in node: @@ -463,7 +510,7 @@ class ConfigJsonFile(ConfigFile): self.find_all_objects(elem) def load(self, config_file): - root = json.load(open(config_file, 'r')) + root = json.load(open(config_file, "r")) self.object_dicts = {} self.find_all_objects(root) @@ -474,7 +521,7 @@ class ConfigJsonFile(ConfigFile): if node is None: return "Null" elif self.is_sim_object(node): - return node['path'] + return node["path"] else: return str(node) @@ -486,7 +533,7 @@ class ConfigJsonFile(ConfigFile): def get_param_vector(self, object_name, param_name): obj = 
self.object_dicts[object_name] - return [ self.parse_param_string(p) for p in obj[param_name] ] + return [self.parse_param_string(p) for p in obj[param_name]] def get_object_children(self, object_name): """It is difficult to tell which elements are children in the @@ -500,10 +547,13 @@ class ConfigJsonFile(ConfigFile): children = [] for name, node in list(obj.items()): if self.is_sim_object(node): - children.append((name, node['path'])) - elif isinstance(node, list) and node != [] and all([ - self.is_sim_object(e) for e in node ]): - children.append((name, [ e['path'] for e in node ])) + children.append((name, node["path"])) + elif ( + isinstance(node, list) + and node != [] + and all([self.is_sim_object(e) for e in node]) + ): + children.append((name, [e["path"] for e in node])) return children @@ -513,23 +563,34 @@ class ConfigJsonFile(ConfigFile): obj = self.object_dicts[object_name] peers = [] - if port_name in obj and 'peer' in obj[port_name] and \ - 'role' in obj[port_name]: - peers = to_list(obj[port_name]['peer']) + if ( + port_name in obj + and "peer" in obj[port_name] + and "role" in obj[port_name] + ): + peers = to_list(obj[port_name]["peer"]) return peers + parser = argparse.ArgumentParser() -parser.add_argument('config_file', metavar='config-file.ini', - help='.ini configuration file to load and run') -parser.add_argument('--checkpoint-dir', type=str, default=None, - help='A checkpoint to directory to restore when starting ' - 'the simulation') +parser.add_argument( + "config_file", + metavar="config-file.ini", + help=".ini configuration file to load and run", +) +parser.add_argument( + "--checkpoint-dir", + type=str, + default=None, + help="A checkpoint to directory to restore when starting " + "the simulation", +) args = parser.parse_args(sys.argv[1:]) -if args.config_file.endswith('.ini'): +if args.config_file.endswith(".ini"): config = ConfigIniFile() config.load(args.config_file) else: @@ -545,4 +606,4 @@ mgr.find_all_objects() 
m5.instantiate(args.checkpoint_dir) exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/example/riscv/fs_linux.py b/configs/example/riscv/fs_linux.py index 570ef22a63..1a98126e92 100644 --- a/configs/example/riscv/fs_linux.py +++ b/configs/example/riscv/fs_linux.py @@ -48,7 +48,7 @@ from m5.objects import * from m5.util import addToPath, fatal, warn from m5.util.fdthelper import * -addToPath('../../') +addToPath("../../") from ruby import Ruby @@ -89,17 +89,23 @@ from common import Options # ----------------------- DTB Generation Function ---------------------- # + def generateMemNode(state, mem_range): node = FdtNode("memory@%x" % int(mem_range.start)) node.append(FdtPropertyStrings("device_type", ["memory"])) - node.append(FdtPropertyWords("reg", - state.addrCells(mem_range.start) + - state.sizeCells(mem_range.size()) )) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(mem_range.start) + + state.sizeCells(mem_range.size()), + ) + ) return node + def generateDtb(system): state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1) - root = FdtNode('/') + root = FdtNode("/") root.append(state.addrCellsProperty()) root.append(state.sizeCellsProperty()) root.appendCompatible(["riscv-virtio"]) @@ -118,20 +124,29 @@ def generateDtb(system): fdt = Fdt() fdt.add_rootnode(root) - fdt.writeDtsFile(path.join(m5.options.outdir, 'device.dts')) - fdt.writeDtbFile(path.join(m5.options.outdir, 'device.dtb')) + fdt.writeDtsFile(path.join(m5.options.outdir, "device.dts")) + fdt.writeDtbFile(path.join(m5.options.outdir, "device.dtb")) + # ----------------------------- Add Options ---------------------------- # parser = argparse.ArgumentParser() Options.addCommonOptions(parser) Options.addFSOptions(parser) -parser.add_argument("--bare-metal", action="store_true", - help="Provide the raw system without the linux specific 
bits") -parser.add_argument("--dtb-filename", action="store", type=str, - help="Specifies device tree blob file to use with device-tree-"\ - "enabled kernels") -parser.add_argument("--virtio-rng", action="store_true", - help="Enable VirtIORng device") +parser.add_argument( + "--bare-metal", + action="store_true", + help="Provide the raw system without the linux specific bits", +) +parser.add_argument( + "--dtb-filename", + action="store", + type=str, + help="Specifies device tree blob file to use with device-tree-" + "enabled kernels", +) +parser.add_argument( + "--virtio-rng", action="store_true", help="Enable VirtIORng device" +) # ---------------------------- Parse Options --------------------------- # args = parser.parse_args() @@ -145,8 +160,12 @@ np = args.num_cpus # ---------------------------- Setup System ---------------------------- # # Default Setup system = System() -mdesc = SysConfig(disks=args.disk_image, rootdev=args.root_device, - mem=args.mem_size, os_type=args.os_type) +mdesc = SysConfig( + disks=args.disk_image, + rootdev=args.root_device, + mem=args.mem_size, + os_type=args.os_type, +) system.mem_mode = mem_mode system.mem_ranges = [AddrRange(start=0x80000000, size=mdesc.mem())] @@ -177,19 +196,16 @@ if args.disk_image: vio=VirtIOBlock(image=image), interrupt_id=0x8, pio_size=4096, - pio_addr=0x10008000 + pio_addr=0x10008000, ) # VirtIORng if args.virtio_rng: system.platform.rng = RiscvMmioVirtIO( - vio=VirtIORng(), - interrupt_id=0x8, - pio_size=4096, - pio_addr=0x10007000 + vio=VirtIORng(), interrupt_id=0x8, pio_size=4096, pio_addr=0x10007000 ) -system.bridge = Bridge(delay='50ns') +system.bridge = Bridge(delay="50ns") system.bridge.mem_side_port = system.iobus.cpu_side_ports system.bridge.cpu_side_port = system.membus.mem_side_ports system.bridge.ranges = system.platform._off_chip_ranges() @@ -205,19 +221,20 @@ system.platform.setNumCores(np) system.cache_line_size = args.cacheline_size # Create a top-level voltage domain 
-system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) # Create a source clock for the system and set the clock period -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # Create a CPU voltage domain system.cpu_voltage_domain = VoltageDomain() # Create a source clock for the CPUs and set the clock period -system.cpu_clk_domain = SrcClockDomain(clock = args.cpu_clock, - voltage_domain = - system.cpu_voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock=args.cpu_clock, voltage_domain=system.cpu_voltage_domain +) system.workload.object_file = args.kernel @@ -227,16 +244,17 @@ if args.script is not None: system.init_param = args.init_param -system.cpu = [CPUClass(clk_domain=system.cpu_clk_domain, cpu_id=i) - for i in range(np)] +system.cpu = [ + CPUClass(clk_domain=system.cpu_clk_domain, cpu_id=i) for i in range(np) +] if args.caches or args.l2cache: # By default the IOCache runs at the system clock - system.iocache = IOCache(addr_ranges = system.mem_ranges) + system.iocache = IOCache(addr_ranges=system.mem_ranges) system.iocache.cpu_side = system.iobus.mem_side_ports system.iocache.mem_side = system.membus.cpu_side_ports elif not args.external_memory_system: - system.iobridge = Bridge(delay='50ns', ranges = system.mem_ranges) + system.iobridge = Bridge(delay="50ns", ranges=system.mem_ranges) system.iobridge.cpu_side_port = system.iobus.mem_side_ports system.iobridge.mem_side_port = system.membus.cpu_side_ports @@ -258,16 +276,16 @@ for i in range(np): system.cpu[i].branchPred = bpClass() if args.indirect_bp_type: IndirectBPClass = ObjectList.indirect_bp_list.get( - args.indirect_bp_type) - system.cpu[i].branchPred.indirectBranchPred = \ - IndirectBPClass() + args.indirect_bp_type + ) + system.cpu[i].branchPred.indirectBranchPred = 
IndirectBPClass() system.cpu[i].createThreads() # ----------------------------- PMA Checker ---------------------------- # uncacheable_range = [ *system.platform._on_chip_ranges(), - *system.platform._off_chip_ranges() + *system.platform._off_chip_ranges(), ] # PMA checker can be defined at system-level (system.pma_checker) @@ -284,26 +302,26 @@ if not args.bare_metal: else: generateDtb(system) system.workload.dtb_filename = path.join( - m5.options.outdir, 'device.dtb') + m5.options.outdir, "device.dtb" + ) # Default DTB address if bbl is bulit with --with-dts option - system.workload.dtb_addr = 0x87e00000 + system.workload.dtb_addr = 0x87E00000 -# Linux boot command flags + # Linux boot command flags if args.command_line: system.workload.command_line = args.command_line else: - kernel_cmd = [ - "console=ttyS0", - "root=/dev/vda", - "ro" - ] + kernel_cmd = ["console=ttyS0", "root=/dev/vda", "ro"] system.workload.command_line = " ".join(kernel_cmd) # ---------------------------- Default Setup --------------------------- # -if args.elastic_trace_en and args.checkpoint_restore == None and \ - not args.fast_forward: +if ( + args.elastic_trace_en + and args.checkpoint_restore == None + and not args.fast_forward +): CpuConfig.config_etrace(CPUClass, system.cpu, args) CacheConfig.config_cache(args, system) diff --git a/configs/example/ruby_direct_test.py b/configs/example/ruby_direct_test.py index 048fb5fd75..edbeed4cc7 100644 --- a/configs/example/ruby_direct_test.py +++ b/configs/example/ruby_direct_test.py @@ -31,7 +31,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -addToPath('../') +addToPath("../") from common import Options from ruby import Ruby @@ -44,16 +44,28 @@ m5_root = os.path.dirname(config_root) parser = argparse.ArgumentParser() Options.addNoISAOptions(parser) -parser.add_argument("--requests", metavar="N", default=100, - help="Stop after N requests") -parser.add_argument("-f", "--wakeup_freq", metavar="N", 
default=10, - help="Wakeup every N cycles") -parser.add_argument("--test-type", default="SeriesGetx", - choices = ["SeriesGetx", "SeriesGets", "SeriesGetMixed", - "Invalidate"], - help = "Type of test") -parser.add_argument("--percent-writes", type=int, default=100, - help="percentage of accesses that should be writes") +parser.add_argument( + "--requests", metavar="N", default=100, help="Stop after N requests" +) +parser.add_argument( + "-f", + "--wakeup_freq", + metavar="N", + default=10, + help="Wakeup every N cycles", +) +parser.add_argument( + "--test-type", + default="SeriesGetx", + choices=["SeriesGetx", "SeriesGets", "SeriesGetMixed", "Invalidate"], + help="Type of test", +) +parser.add_argument( + "--percent-writes", + type=int, + default=100, + help="percentage of accesses that should be writes", +) # # Add the ruby specific and protocol specific args @@ -65,47 +77,53 @@ args = parser.parse_args() # Select the direct test generator # if args.test_type == "SeriesGetx": - generator = SeriesRequestGenerator(num_cpus = args.num_cpus, - percent_writes = 100) + generator = SeriesRequestGenerator( + num_cpus=args.num_cpus, percent_writes=100 + ) elif args.test_type == "SeriesGets": - generator = SeriesRequestGenerator(num_cpus = args.num_cpus, - percent_writes = 0) + generator = SeriesRequestGenerator( + num_cpus=args.num_cpus, percent_writes=0 + ) elif args.test_type == "SeriesGetMixed": - generator = SeriesRequestGenerator(num_cpus = args.num_cpus, - percent_writes = args.percent_writes) + generator = SeriesRequestGenerator( + num_cpus=args.num_cpus, percent_writes=args.percent_writes + ) elif args.test_type == "Invalidate": - generator = InvalidateGenerator(num_cpus = args.num_cpus) + generator = InvalidateGenerator(num_cpus=args.num_cpus) else: print("Error: unknown direct test generator") sys.exit(1) # Create the M5 system. 
-system = System(mem_ranges = [AddrRange(args.mem_size)]) +system = System(mem_ranges=[AddrRange(args.mem_size)]) # Create a top-level voltage domain and clock domain -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # Create the ruby random tester -system.cpu = RubyDirectedTester(requests_to_complete = args.requests, - generator = generator) +system.cpu = RubyDirectedTester( + requests_to_complete=args.requests, generator=generator +) # the ruby tester reuses num_cpus to specify the # number of cpu ports connected to the tester object, which # is stored in system.cpu. because there is only ever one # tester object, num_cpus is not necessarily equal to the # size of system.cpu -cpu_list = [ system.cpu ] * args.num_cpus +cpu_list = [system.cpu] * args.num_cpus Ruby.create_system(args, False, system, cpus=cpu_list) # Since Ruby runs at an independent frequency, create a seperate clock -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) -assert(args.num_cpus == len(system.ruby._cpu_ports)) +assert args.num_cpus == len(system.ruby._cpu_ports) for ruby_port in system.ruby._cpu_ports: # @@ -117,11 +135,11 @@ for ruby_port in system.ruby._cpu_ports: # run simulation # ----------------------- -root = Root( full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" # Not much point in this being higher than the L1 latency -m5.ticks.setGlobalFrequency('1ns') +m5.ticks.setGlobalFrequency("1ns") # instantiate configuration m5.instantiate() @@ 
-129,4 +147,4 @@ m5.instantiate() # simulate until program terminates exit_event = m5.simulate(args.abs_max_tick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/example/ruby_gpu_random_test.py b/configs/example/ruby_gpu_random_test.py index 029a97d42c..25d5a51892 100644 --- a/configs/example/ruby_gpu_random_test.py +++ b/configs/example/ruby_gpu_random_test.py @@ -33,7 +33,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -addToPath('../') +addToPath("../") from common import Options from ruby import Ruby @@ -46,41 +46,68 @@ Options.addNoISAOptions(parser) Ruby.define_options(parser) # GPU Ruby tester options -parser.add_argument("--cache-size", default="small", - choices=["small", "large"], - help="Cache sizes to use. Small encourages races between \ +parser.add_argument( + "--cache-size", + default="small", + choices=["small", "large"], + help="Cache sizes to use. Small encourages races between \ requests and writebacks. Large stresses write-through \ - and/or write-back GPU caches.") -parser.add_argument("--system-size", default="small", - choices=["small", "medium", "large"], - help="This option defines how many CUs, CPUs and cache \ - components in the test system.") -parser.add_argument("--address-range", default="small", - choices=["small", "large"], - help="This option defines the number of atomic \ + and/or write-back GPU caches.", +) +parser.add_argument( + "--system-size", + default="small", + choices=["small", "medium", "large"], + help="This option defines how many CUs, CPUs and cache \ + components in the test system.", +) +parser.add_argument( + "--address-range", + default="small", + choices=["small", "large"], + help="This option defines the number of atomic \ locations that affects the working set's size. \ A small number of atomic locations encourage more \ races among threads. 
The large option stresses cache \ - resources.") -parser.add_argument("--episode-length", default="short", - choices=["short", "medium", "long"], - help="This option defines the number of LDs and \ + resources.", +) +parser.add_argument( + "--episode-length", + default="short", + choices=["short", "medium", "long"], + help="This option defines the number of LDs and \ STs in an episode. The small option encourages races \ between the start and end of an episode. The long \ option encourages races between LDs and STs in the \ - same episode.") -parser.add_argument("--test-length", type=int, default=1, - help="The number of episodes to be executed by each \ + same episode.", +) +parser.add_argument( + "--test-length", + type=int, + default=1, + help="The number of episodes to be executed by each \ wavefront. This determines the maximum number, i.e., \ - val X #WFs, of episodes to be executed in the test.") -parser.add_argument("--debug-tester", action='store_true', - help="This option will turn on DRF checker") -parser.add_argument("--random-seed", type=int, default=0, - help="Random seed number. Default value (i.e., 0) means \ - using runtime-specific value") + val X #WFs, of episodes to be executed in the test.", +) +parser.add_argument( + "--debug-tester", + action="store_true", + help="This option will turn on DRF checker", +) +parser.add_argument( + "--random-seed", + type=int, + default=0, + help="Random seed number. 
Default value (i.e., 0) means \ + using runtime-specific value", +) parser.add_argument("--log-file", type=str, default="gpu-ruby-test.log") -parser.add_argument("--num-dmas", type=int, default=None, - help="The number of DMA engines to use in tester config.") +parser.add_argument( + "--num-dmas", + type=int, + default=None, + help="The number of DMA engines to use in tester config.", +) args = parser.parse_args() @@ -89,21 +116,21 @@ args = parser.parse_args() # 0: small cache # 1: large cache # -if (args.cache_size == "small"): - args.tcp_size="256B" - args.tcp_assoc=2 - args.tcc_size="1kB" - args.tcc_assoc=2 -elif (args.cache_size == "large"): - args.tcp_size="256kB" - args.tcp_assoc=16 - args.tcc_size="1024kB" - args.tcc_assoc=16 +if args.cache_size == "small": + args.tcp_size = "256B" + args.tcp_assoc = 2 + args.tcc_size = "1kB" + args.tcc_assoc = 2 +elif args.cache_size == "large": + args.tcp_size = "256kB" + args.tcp_assoc = 16 + args.tcc_size = "1024kB" + args.tcc_assoc = 16 # # Set up system size - 3 options # -if (args.system_size == "small"): +if args.system_size == "small": # 1 CU, 1 CPU, 1 SQC, 1 Scalar args.wf_size = 1 args.wavefronts_per_cu = 1 @@ -112,7 +139,7 @@ if (args.system_size == "small"): args.cu_per_sqc = 1 args.cu_per_scalar_cache = 1 args.num_compute_units = 1 -elif (args.system_size == "medium"): +elif args.system_size == "medium": # 4 CUs, 4 CPUs, 1 SQCs, 1 Scalars args.wf_size = 16 args.wavefronts_per_cu = 4 @@ -121,7 +148,7 @@ elif (args.system_size == "medium"): args.cu_per_sqc = 4 args.cu_per_scalar_cache = 4 args.num_compute_units = 4 -elif (args.system_size == "large"): +elif args.system_size == "large": # 8 CUs, 4 CPUs, 1 SQCs, 1 Scalars args.wf_size = 32 args.wavefronts_per_cu = 4 @@ -132,11 +159,11 @@ elif (args.system_size == "large"): args.num_compute_units = 8 # Number of DMA engines -if not(args.num_dmas is None): +if not (args.num_dmas is None): n_DMAs = args.num_dmas # currently the tester does not support requests 
returned as # aliased, thus we need num_dmas to be 0 for it - if not(args.num_dmas == 0): + if not (args.num_dmas == 0): print("WARNING: num_dmas != 0 not supported with VIPER") # @@ -145,11 +172,11 @@ if not(args.num_dmas is None): # level 1: large # Each location corresponds to a 4-byte piece of data # -args.mem_size = '1024MB' -if (args.address_range == "small"): +args.mem_size = "1024MB" +if args.address_range == "small": num_atomic_locs = 10 num_regular_locs_per_atomic_loc = 10000 -elif (args.address_range == "large"): +elif args.address_range == "large": num_atomic_locs = 100 num_regular_locs_per_atomic_loc = 100000 @@ -159,11 +186,11 @@ elif (args.address_range == "large"): # 1: 100 actions # 2: 500 actions # -if (args.episode_length == "short"): +if args.episode_length == "short": eps_length = 10 -elif (args.episode_length == "medium"): +elif args.episode_length == "medium": eps_length = 100 -elif (args.episode_length == "long"): +elif args.episode_length == "long": eps_length = 500 # @@ -190,29 +217,30 @@ n_WFs = n_CUs * args.wavefronts_per_cu max_episodes = args.test_length * n_WFs # Number of SQC and Scalar caches -assert(n_CUs % args.cu_per_sqc == 0) +assert n_CUs % args.cu_per_sqc == 0 n_SQCs = n_CUs // args.cu_per_sqc args.num_sqc = n_SQCs -assert(args.cu_per_scalar_cache != 0) +assert args.cu_per_scalar_cache != 0 n_Scalars = n_CUs // args.cu_per_scalar_cache args.num_scalar_cache = n_Scalars # # Create GPU Ruby random tester # -tester = ProtocolTester(cus_per_sqc = args.cu_per_sqc, - cus_per_scalar = args.cu_per_scalar_cache, - wavefronts_per_cu = args.wavefronts_per_cu, - workitems_per_wavefront = args.wf_size, - num_atomic_locations = num_atomic_locs, - num_normal_locs_per_atomic = \ - num_regular_locs_per_atomic_loc, - max_num_episodes = max_episodes, - episode_length = eps_length, - debug_tester = args.debug_tester, - random_seed = args.random_seed, - log_file = args.log_file) +tester = ProtocolTester( + cus_per_sqc=args.cu_per_sqc, + 
cus_per_scalar=args.cu_per_scalar_cache, + wavefronts_per_cu=args.wavefronts_per_cu, + workitems_per_wavefront=args.wf_size, + num_atomic_locations=num_atomic_locs, + num_normal_locs_per_atomic=num_regular_locs_per_atomic_loc, + max_num_episodes=max_episodes, + episode_length=eps_length, + debug_tester=args.debug_tester, + random_seed=args.random_seed, + log_file=args.log_file, +) # # Create a gem5 system. Note that the memory object isn't actually used by the @@ -220,14 +248,17 @@ tester = ProtocolTester(cus_per_sqc = args.cu_per_sqc, # checks. The system doesn't have real CPUs or CUs. It just has a tester that # has physical ports to be connected to Ruby # -system = System(cpu = tester, - mem_ranges = [AddrRange(args.mem_size)], - cache_line_size = args.cacheline_size, - mem_mode = 'timing') +system = System( + cpu=tester, + mem_ranges=[AddrRange(args.mem_size)], + cache_line_size=args.cacheline_size, + mem_mode="timing", +) -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # # Command processor is not needed for the tester since we don't run real @@ -254,11 +285,14 @@ if n_DMAs > 0: # is stored in system.cpu. 
because there is only ever one # tester object, num_cpus is not necessarily equal to the # size of system.cpu -cpu_list = [ system.cpu ] * args.num_cpus -Ruby.create_system(args, full_system = False, - system = system, - dma_ports = system.dma_devices if n_DMAs > 0 else [], - cpus = cpu_list) +cpu_list = [system.cpu] * args.num_cpus +Ruby.create_system( + args, + full_system=False, + system=system, + dma_ports=system.dma_devices if n_DMAs > 0 else [], + cpus=cpu_list, +) # # The tester is most effective when randomization is turned on and @@ -267,7 +301,7 @@ Ruby.create_system(args, full_system = False, system.ruby.randomization = True # Assert that we got the right number of Ruby ports -assert(len(system.ruby._cpu_ports) == n_CUs + n_SQCs + n_Scalars) +assert len(system.ruby._cpu_ports) == n_CUs + n_SQCs + n_Scalars # # Attach Ruby ports to the tester in the order: @@ -290,7 +324,7 @@ for i, ruby_port in enumerate(system.ruby._cpu_ports): if i < n_CUs: tester.cu_vector_ports = ruby_port.in_ports tester.cu_token_ports = ruby_port.gmTokenPort - tester.max_cu_tokens = 4*n_WFs + tester.max_cu_tokens = 4 * n_WFs elif i < (n_CUs + n_SQCs): tester.cu_sqc_ports = ruby_port.in_ports else: @@ -305,15 +339,16 @@ for i, ruby_port in enumerate(system.ruby._cpu_ports): # dma_ports = [] for i in range(n_DMAs): - dma_cntrl = getattr(system, 'dma_cntrl' + str(i)) + dma_cntrl = getattr(system, "dma_cntrl" + str(i)) dma_ports.append(dma_cntrl.dma_sequencer.in_ports) tester.dma_ports = dma_ports # # Common variables for all types of threads # -thread_clock = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +thread_clock = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) g_thread_idx = 0 # @@ -327,10 +362,14 @@ tester.cpu_threads = [] dma_threads = [] print("Creating %i DMAs" % n_DMAs) for dma_idx in range(n_DMAs): - dma_threads.append(DmaThread(thread_id = g_thread_idx, - num_lanes = 1, clk_domain = thread_clock, - deadlock_threshold = \ 
- tester_deadlock_threshold)) + dma_threads.append( + DmaThread( + thread_id=g_thread_idx, + num_lanes=1, + clk_domain=thread_clock, + deadlock_threshold=tester_deadlock_threshold, + ) + ) g_thread_idx += 1 tester.dma_threads = dma_threads @@ -338,26 +377,31 @@ tester.dma_threads = dma_threads # Create GPU wavefronts # wavefronts = [] -print("Creating %i WFs attached to %i CUs" % \ - (n_CUs * tester.wavefronts_per_cu, n_CUs)) +print( + "Creating %i WFs attached to %i CUs" + % (n_CUs * tester.wavefronts_per_cu, n_CUs) +) for cu_idx in range(n_CUs): for wf_idx in range(tester.wavefronts_per_cu): - wavefronts.append(GpuWavefront(thread_id = g_thread_idx, - cu_id = cu_idx, - num_lanes = args.wf_size, - clk_domain = thread_clock, - deadlock_threshold = \ - tester_deadlock_threshold)) + wavefronts.append( + GpuWavefront( + thread_id=g_thread_idx, + cu_id=cu_idx, + num_lanes=args.wf_size, + clk_domain=thread_clock, + deadlock_threshold=tester_deadlock_threshold, + ) + ) g_thread_idx += 1 tester.wavefronts = wavefronts # # Run simulation # -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # Not much point in this being higher than the L1 latency -m5.ticks.setGlobalFrequency('1ns') +m5.ticks.setGlobalFrequency("1ns") # Instantiate configuration m5.instantiate() @@ -365,5 +409,5 @@ m5.instantiate() # Simulate until tester completes exit_event = m5.simulate() -print('Exiting tick: ', m5.curTick()) -print('Exiting because ', exit_event.getCause()) +print("Exiting tick: ", m5.curTick()) +print("Exiting because ", exit_event.getCause()) diff --git a/configs/example/ruby_mem_test.py b/configs/example/ruby_mem_test.py index b16b295f0f..c90950107e 100644 --- a/configs/example/ruby_mem_test.py +++ b/configs/example/ruby_mem_test.py @@ -31,7 +31,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -addToPath('../') +addToPath("../") from common import Options from ruby import Ruby @@ -41,19 +41,32 
@@ config_path = os.path.dirname(os.path.abspath(__file__)) config_root = os.path.dirname(config_path) parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + formatter_class=argparse.ArgumentDefaultsHelpFormatter +) Options.addNoISAOptions(parser) -parser.add_argument("--maxloads", metavar="N", default=0, - help="Stop after N loads") -parser.add_argument("--progress", type=int, default=1000, - metavar="NLOADS", - help="Progress message interval ") +parser.add_argument( + "--maxloads", metavar="N", default=0, help="Stop after N loads" +) +parser.add_argument( + "--progress", + type=int, + default=1000, + metavar="NLOADS", + help="Progress message interval ", +) parser.add_argument("--num-dmas", type=int, default=0, help="# of dma testers") -parser.add_argument("--functional", type=int, default=0, - help="percentage of accesses that should be functional") -parser.add_argument("--suppress-func-errors", action="store_true", - help="suppress panic when functional accesses fail") +parser.add_argument( + "--functional", + type=int, + default=0, + help="percentage of accesses that should be functional", +) +parser.add_argument( + "--suppress-func-errors", + action="store_true", + help="suppress panic when functional accesses fail", +) # # Add the ruby specific and protocol specific options @@ -66,44 +79,55 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. 
# -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 block_size = 64 if args.num_cpus > block_size: - print("Error: Number of testers %d limited to %d because of false sharing" - % (args.num_cpus, block_size)) - sys.exit(1) + print( + "Error: Number of testers %d limited to %d because of false sharing" + % (args.num_cpus, block_size) + ) + sys.exit(1) # # Currently ruby does not support atomic or uncacheable accesses # -cpus = [ MemTest(max_loads = args.maxloads, - percent_functional = args.functional, - percent_uncacheable = 0, - progress_interval = args.progress, - suppress_func_errors = args.suppress_func_errors) \ - for i in range(args.num_cpus) ] +cpus = [ + MemTest( + max_loads=args.maxloads, + percent_functional=args.functional, + percent_uncacheable=0, + progress_interval=args.progress, + suppress_func_errors=args.suppress_func_errors, + ) + for i in range(args.num_cpus) +] -system = System(cpu = cpus, - clk_domain = SrcClockDomain(clock = args.sys_clock), - mem_ranges = [AddrRange(args.mem_size)]) +system = System( + cpu=cpus, + clk_domain=SrcClockDomain(clock=args.sys_clock), + mem_ranges=[AddrRange(args.mem_size)], +) if args.num_dmas > 0: - dmas = [ MemTest(max_loads = args.maxloads, - percent_functional = 0, - percent_uncacheable = 0, - progress_interval = args.progress, - suppress_func_errors = - not args.suppress_func_errors) \ - for i in range(args.num_dmas) ] + dmas = [ + MemTest( + max_loads=args.maxloads, + percent_functional=0, + percent_uncacheable=0, + progress_interval=args.progress, + suppress_func_errors=not args.suppress_func_errors, + ) + for i in range(args.num_dmas) + ] system.dma_devices = dmas else: dmas = [] @@ -111,15 +135,17 @@ else: dma_ports = [] for (i, dma) 
in enumerate(dmas): dma_ports.append(dma.test) -Ruby.create_system(args, False, system, dma_ports = dma_ports) +Ruby.create_system(args, False, system, dma_ports=dma_ports) # Create a top-level voltage domain and clock domain -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # Create a seperate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) # # The tester is most effective when randomization is turned on and @@ -127,7 +153,7 @@ system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, # system.ruby.randomization = True -assert(len(cpus) == len(system.ruby._cpu_ports)) +assert len(cpus) == len(system.ruby._cpu_ports) for (i, cpu) in enumerate(cpus): # @@ -145,11 +171,11 @@ for (i, cpu) in enumerate(cpus): # run simulation # ----------------------- -root = Root( full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" # Not much point in this being higher than the L1 latency -m5.ticks.setGlobalFrequency('1ns') +m5.ticks.setGlobalFrequency("1ns") # instantiate configuration m5.instantiate() @@ -157,4 +183,4 @@ m5.instantiate() # simulate until program terminates exit_event = m5.simulate(args.abs_max_tick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/example/ruby_random_test.py b/configs/example/ruby_random_test.py index edc8ee8f98..816864be91 100644 --- 
a/configs/example/ruby_random_test.py +++ b/configs/example/ruby_random_test.py @@ -31,7 +31,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -addToPath('../') +addToPath("../") from common import Options from ruby import Ruby @@ -44,19 +44,29 @@ m5_root = os.path.dirname(config_root) parser = argparse.ArgumentParser() Options.addNoISAOptions(parser) -parser.add_argument("--maxloads", metavar="N", default=100, - help="Stop after N loads") -parser.add_argument("-f", "--wakeup_freq", metavar="N", default=10, - help="Wakeup every N cycles") +parser.add_argument( + "--maxloads", metavar="N", default=100, help="Stop after N loads" +) +parser.add_argument( + "-f", + "--wakeup_freq", + metavar="N", + default=10, + help="Wakeup every N cycles", +) # # Add the ruby specific and protocol specific options # Ruby.define_options(parser) -exec(compile( \ - open(os.path.join(config_root, "common", "Options.py")).read(), \ - os.path.join(config_root, "common", "Options.py"), 'exec')) +exec( + compile( + open(os.path.join(config_root, "common", "Options.py")).read(), + os.path.join(config_root, "common", "Options.py"), + "exec", + ) +) args = parser.parse_args() @@ -64,14 +74,14 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. 
# -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 # # Create the ruby random tester @@ -79,39 +89,43 @@ args.l3_assoc=2 # Check the protocol check_flush = False -if buildEnv['PROTOCOL'] == 'MOESI_hammer': +if buildEnv["PROTOCOL"] == "MOESI_hammer": check_flush = True -tester = RubyTester(check_flush = check_flush, - checks_to_complete = args.maxloads, - wakeup_frequency = args.wakeup_freq) +tester = RubyTester( + check_flush=check_flush, + checks_to_complete=args.maxloads, + wakeup_frequency=args.wakeup_freq, +) # # Create the M5 system. Note that the Memory Object isn't # actually used by the rubytester, but is included to support the # M5 memory size == Ruby memory size checks # -system = System(cpu = tester, mem_ranges = [AddrRange(args.mem_size)]) +system = System(cpu=tester, mem_ranges=[AddrRange(args.mem_size)]) # Create a top-level voltage domain and clock domain -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # the ruby tester reuses num_cpus to specify the # number of cpu ports connected to the tester object, which # is stored in system.cpu. 
because there is only ever one # tester object, num_cpus is not necessarily equal to the # size of system.cpu -cpu_list = [ system.cpu ] * args.num_cpus +cpu_list = [system.cpu] * args.num_cpus Ruby.create_system(args, False, system, cpus=cpu_list) # Create a seperate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) -assert(args.num_cpus == len(system.ruby._cpu_ports)) +assert args.num_cpus == len(system.ruby._cpu_ports) tester.num_cpus = len(system.ruby._cpu_ports) @@ -145,11 +159,11 @@ for ruby_port in system.ruby._cpu_ports: # run simulation # ----------------------- -root = Root( full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" # Not much point in this being higher than the L1 latency -m5.ticks.setGlobalFrequency('1ns') +m5.ticks.setGlobalFrequency("1ns") # instantiate configuration m5.instantiate() @@ -157,4 +171,4 @@ m5.instantiate() # simulate until program terminates exit_event = m5.simulate(args.abs_max_tick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/example/sc_main.py b/configs/example/sc_main.py index ef7746a3a2..4a1482d87d 100755 --- a/configs/example/sc_main.py +++ b/configs/example/sc_main.py @@ -41,4 +41,4 @@ cause = m5.simulate(m5.MaxTick).getCause() result = m5.systemc.sc_main_result() if result.code != 0: - m5.util.panic('sc_main return code was %d.' % result.code) + m5.util.panic("sc_main return code was %d." 
% result.code) diff --git a/configs/example/se.py b/configs/example/se.py index 3a8203da2e..2372cf0efe 100644 --- a/configs/example/se.py +++ b/configs/example/se.py @@ -49,8 +49,10 @@ from m5.defines import buildEnv from m5.objects import * from m5.params import NULL from m5.util import addToPath, fatal, warn +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa -addToPath('../') +addToPath("../") from ruby import Ruby @@ -64,6 +66,7 @@ from common.FileSystemConfig import config_filesystem from common.Caches import * from common.cpu2000 import * + def get_processes(args): """Interprets provided args and returns a list of processes""" @@ -73,25 +76,25 @@ def get_processes(args): errouts = [] pargs = [] - workloads = args.cmd.split(';') + workloads = args.cmd.split(";") if args.input != "": - inputs = args.input.split(';') + inputs = args.input.split(";") if args.output != "": - outputs = args.output.split(';') + outputs = args.output.split(";") if args.errout != "": - errouts = args.errout.split(';') + errouts = args.errout.split(";") if args.options != "": - pargs = args.options.split(';') + pargs = args.options.split(";") idx = 0 for wrkld in workloads: - process = Process(pid = 100 + idx) + process = Process(pid=100 + idx) process.executable = wrkld process.cwd = os.getcwd() process.gid = os.getgid() if args.env: - with open(args.env, 'r') as f: + with open(args.env, "r") as f: process.env = [line.rstrip() for line in f] if len(pargs) > idx: @@ -110,7 +113,7 @@ def get_processes(args): idx += 1 if args.smt: - assert(args.cpu_type == "DerivO3CPU") + assert args.cpu_type == "DerivO3CPU" return multiprocesses, idx else: return multiprocesses, 1 @@ -120,7 +123,7 @@ parser = argparse.ArgumentParser() Options.addCommonOptions(parser) Options.addSEOptions(parser) -if '--ruby' in sys.argv: +if "--ruby" in sys.argv: Ruby.define_options(parser) args = parser.parse_args() @@ -136,17 +139,25 @@ if args.bench: for app in apps: try: - if buildEnv['TARGET_ISA'] 
== 'arm': - exec("workload = %s('arm_%s', 'linux', '%s')" % ( - app, args.arm_iset, args.spec_input)) + if get_runtime_isa() == ISA.ARM: + exec( + "workload = %s('arm_%s', 'linux', '%s')" + % (app, args.arm_iset, args.spec_input) + ) else: - exec("workload = %s(buildEnv['TARGET_ISA', 'linux', '%s')" % ( - app, args.spec_input)) + # TARGET_ISA has been removed, but this is missing a ], so it + # has incorrect syntax and wasn't being used anyway. + exec( + "workload = %s(buildEnv['TARGET_ISA', 'linux', '%s')" + % (app, args.spec_input) + ) multiprocesses.append(workload.makeProcess()) except: - print("Unable to find workload for %s: %s" % - (buildEnv['TARGET_ISA'], app), - file=sys.stderr) + print( + "Unable to find workload for %s: %s" + % (get_runtime_isa().name(), app), + file=sys.stderr, + ) sys.exit(1) elif args.cmd: multiprocesses, numThreads = get_processes(args) @@ -164,28 +175,31 @@ if args.smt and args.num_cpus > 1: np = args.num_cpus mp0_path = multiprocesses[0].executable -system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)], - mem_mode = test_mem_mode, - mem_ranges = [AddrRange(args.mem_size)], - cache_line_size = args.cacheline_size) +system = System( + cpu=[CPUClass(cpu_id=i) for i in range(np)], + mem_mode=test_mem_mode, + mem_ranges=[AddrRange(args.mem_size)], + cache_line_size=args.cacheline_size, +) if numThreads > 1: system.multi_thread = True # Create a top-level voltage domain -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) # Create a source clock for the system and set the clock period -system.clk_domain = SrcClockDomain(clock = args.sys_clock, - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock=args.sys_clock, voltage_domain=system.voltage_domain +) # Create a CPU voltage domain system.cpu_voltage_domain = VoltageDomain() # Create a separate clock domain for the CPUs -system.cpu_clk_domain = SrcClockDomain(clock = 
args.cpu_clock, - voltage_domain = - system.cpu_voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock=args.cpu_clock, voltage_domain=system.cpu_voltage_domain +) # If elastic tracing is enabled, then configure the cpu and attach the elastic # trace probe @@ -198,9 +212,9 @@ for cpu in system.cpu: cpu.clk_domain = system.cpu_clk_domain if ObjectList.is_kvm_cpu(CPUClass) or ObjectList.is_kvm_cpu(FutureClass): - if buildEnv['TARGET_ISA'] == 'x86': + if buildEnv["USE_X86_ISA"]: system.kvm_vm = KvmVM() - system.m5ops_base = 0xffff0000 + system.m5ops_base = 0xFFFF0000 for process in multiprocesses: process.useArchPT = True process.kvmInSE = True @@ -233,18 +247,20 @@ for i in range(np): system.cpu[i].branchPred = bpClass() if args.indirect_bp_type: - indirectBPClass = \ - ObjectList.indirect_bp_list.get(args.indirect_bp_type) + indirectBPClass = ObjectList.indirect_bp_list.get( + args.indirect_bp_type + ) system.cpu[i].branchPred.indirectBranchPred = indirectBPClass() system.cpu[i].createThreads() if args.ruby: Ruby.create_system(args, False, system) - assert(args.num_cpus == len(system.ruby._cpu_ports)) + assert args.num_cpus == len(system.ruby._cpu_ports) - system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) + system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain + ) for i in range(np): ruby_port = system.ruby._cpu_ports[i] @@ -268,5 +284,5 @@ system.workload = SEWorkload.init_compatible(mp0_path) if args.wait_gdb: system.workload.wait_for_remote_gdb = True -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) Simulation.run(args, root, system, FutureClass) diff --git a/configs/example/sst/arm_fs.py b/configs/example/sst/arm_fs.py index d6c1635eb4..bee4be1118 100644 --- a/configs/example/sst/arm_fs.py +++ b/configs/example/sst/arm_fs.py @@ -42,14 +42,15 @@ from os import path import m5 from m5.objects import * 
-m5.util.addToPath('../..') +m5.util.addToPath("../..") from common import SysPaths + class ArmSstSystem(ArmSystem): def __init__(self, cpu_clock_rate, **kwargs): super(ArmSstSystem, self).__init__(**kwargs) - self.voltage_domain=VoltageDomain(voltage="1.0V") + self.voltage_domain = VoltageDomain(voltage="1.0V") self.clk_domain = SrcClockDomain( clock=cpu_clock_rate, voltage_domain=Parent.voltage_domain ) @@ -62,24 +63,28 @@ class ArmSstSystem(ArmSystem): # Since the latency from CPU to the bus was set in SST, # additional latency is undesirable. self.membus = NoncoherentXBar( - frontend_latency=0, forward_latency=0, - response_latency=0, header_latency=0, width=64) + frontend_latency=0, + forward_latency=0, + response_latency=0, + header_latency=0, + width=64, + ) self.membus.badaddr_responder = BadAddr() - self.membus.default = \ - self.membus.badaddr_responder.pio + self.membus.default = self.membus.badaddr_responder.pio _my_ranges = [ - AddrRange(0, size='64MiB'), - AddrRange(0x80000000, size='16GiB') + AddrRange(0, size="64MiB"), + AddrRange(0x80000000, size="16GiB"), ] self.memory_outgoing_bridge = OutgoingRequestBridge( - physical_address_ranges=_my_ranges) + physical_address_ranges=_my_ranges + ) self.memory_outgoing_bridge.port = self.membus.mem_side_ports self.cpu = [TimingSimpleCPU(cpu_id=0)] - self.mem_mode = 'timing' + self.mem_mode = "timing" for cpu in self.cpu: cpu.createThreads() @@ -87,9 +92,10 @@ class ArmSstSystem(ArmSystem): cpu.dcache_port = self.membus.cpu_side_ports cpu.mmu.connectWalkerPorts( - self.membus.cpu_side_ports, self.membus.cpu_side_ports) + self.membus.cpu_side_ports, self.membus.cpu_side_ports + ) - self.bridge = Bridge(delay='50ns') + self.bridge = Bridge(delay="50ns") self.bridge.mem_side_port = self.iobus.cpu_side_ports self.bridge.cpu_side_port = self.membus.mem_side_ports @@ -106,7 +112,8 @@ class ArmSstSystem(ArmSystem): size_in_range = min(mem_size, mem_range.size()) mem_ranges.append( - AddrRange(start=mem_range.start, 
size=size_in_range)) + AddrRange(start=mem_range.start, size=size_in_range) + ) mem_size -= size_in_range if mem_size == 0: @@ -114,13 +121,14 @@ class ArmSstSystem(ArmSystem): raise ValueError("memory size too big for platform capabilities") + def createArmPlatform(system): class VExpress_GEM5_V1_SST(VExpress_GEM5_V1): bootmem = SubSystem() system.platform = VExpress_GEM5_V1_SST() - if hasattr(system.platform.gic, 'cpu_addr'): + if hasattr(system.platform.gic, "cpu_addr"): system.gic_cpu_addr = system.platform.gic.cpu_addr system.platform.attachOnChipIO(system.membus, system.bridge) @@ -128,11 +136,14 @@ def createArmPlatform(system): system.platform.setupBootLoader(system, SysPaths.binary) + parser = argparse.ArgumentParser() -parser.add_argument('--kernel', help='Path to the Kernel') -parser.add_argument('--cpu-clock-rate', type=str, help='CPU clock rate, e.g. 3GHz') -parser.add_argument('--memory-size', type=str, help='Memory size, e.g. 4GiB') -parser.add_argument('--root-device', type=str, default='/dev/vda') +parser.add_argument("--kernel", help="Path to the Kernel") +parser.add_argument( + "--cpu-clock-rate", type=str, help="CPU clock rate, e.g. 3GHz" +) +parser.add_argument("--memory-size", type=str, help="Memory size, e.g. 
4GiB") +parser.add_argument("--root-device", type=str, default="/dev/vda") args = parser.parse_args() system = ArmSstSystem(args.cpu_clock_rate) @@ -140,7 +151,7 @@ system = ArmSstSystem(args.cpu_clock_rate) # Setup Linux workload system.workload = ArmFsLinux() system.workload.object_file = args.kernel -system.workload.dtb_filename = path.join(m5.options.outdir, 'system.dtb') +system.workload.dtb_filename = path.join(m5.options.outdir, "system.dtb") system.workload.addr_check = False # Create RealView platform diff --git a/configs/example/sst/riscv_fs.py b/configs/example/sst/riscv_fs.py index b5a6cc64b8..fb22f29190 100644 --- a/configs/example/sst/riscv_fs.py +++ b/configs/example/sst/riscv_fs.py @@ -33,21 +33,27 @@ from gem5.resources.resource import Resource import argparse + def generateMemNode(state, mem_range): node = FdtNode("memory@%x" % int(mem_range.start)) node.append(FdtPropertyStrings("device_type", ["memory"])) - node.append(FdtPropertyWords("reg", - state.addrCells(mem_range.start) + - state.sizeCells(mem_range.size()) )) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(mem_range.start) + + state.sizeCells(mem_range.size()), + ) + ) return node + def generateDtb(system): """ Autogenerate DTB. Arguments are the folder where the DTB will be stored, and the name of the DTB file. 
""" state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1) - root = FdtNode('/') + root = FdtNode("/") root.append(state.addrCellsProperty()) root.append(state.sizeCellsProperty()) root.appendCompatible(["riscv-virtio"]) @@ -66,19 +72,22 @@ def generateDtb(system): fdt = Fdt() fdt.add_rootnode(root) - fdt.writeDtsFile(path.join(m5.options.outdir, 'device.dts')) - fdt.writeDtbFile(path.join(m5.options.outdir, 'device.dtb')) + fdt.writeDtsFile(path.join(m5.options.outdir, "device.dts")) + fdt.writeDtbFile(path.join(m5.options.outdir, "device.dtb")) + def createHiFivePlatform(system): # Since the latency from CPU to the bus was set in SST, additional latency # is undesirable. system.membus = NoncoherentXBar( - frontend_latency = 0, forward_latency = 0, response_latency = 0, - header_latency = 0, width = 64 + frontend_latency=0, + forward_latency=0, + response_latency=0, + header_latency=0, + width=64, ) system.membus.badaddr_responder = BadAddr() - system.membus.default = \ - system.membus.badaddr_responder.pio + system.membus.default = system.membus.badaddr_responder.pio system.memory_outgoing_bridge = OutgoingRequestBridge() system.memory_outgoing_bridge.port = system.membus.mem_side_ports @@ -88,8 +97,8 @@ def createHiFivePlatform(system): cpu.dcache_port = system.membus.cpu_side_ports cpu.mmu.connectWalkerPorts( - system.membus.cpu_side_ports, system.membus.cpu_side_ports) - + system.membus.cpu_side_ports, system.membus.cpu_side_ports + ) system.platform = HiFive() @@ -106,7 +115,7 @@ def createHiFivePlatform(system): ) system.iobus = IOXBar() - system.bridge = Bridge(delay='50ns') + system.bridge = Bridge(delay="50ns") system.bridge.mem_side_port = system.iobus.cpu_side_ports system.bridge.cpu_side_port = system.membus.mem_side_ports system.bridge.ranges = system.platform._off_chip_ranges() @@ -118,10 +127,12 @@ def createHiFivePlatform(system): system.platform.attachPlic() + parser = argparse.ArgumentParser() -parser.add_argument('--cpu-clock-rate', type=str, 
- help='CPU clock rate, e.g. 3GHz') -parser.add_argument('--memory-size', type=str, help='Memory size, e.g. 4GiB') +parser.add_argument( + "--cpu-clock-rate", type=str, help="CPU clock rate, e.g. 3GHz" +) +parser.add_argument("--memory-size", type=str, help="Memory size, e.g. 4GiB") args = parser.parse_args() cpu_clock_rate = args.cpu_clock_rate @@ -140,7 +151,7 @@ system.clk_domain = SrcClockDomain( system.mem_ranges = [AddrRange(start=0x80000000, size=memory_size)] system.cpu = [TimingSimpleCPU(cpu_id=i) for i in range(1)] -system.mem_mode = 'timing' +system.mem_mode = "timing" createHiFivePlatform(system) @@ -150,8 +161,8 @@ generateDtb(system) system.workload = RiscvLinux() system.workload.addr_check = False system.workload.object_file = bbl_path -system.workload.dtb_filename = path.join(m5.options.outdir, 'device.dtb') -system.workload.dtb_addr = 0x87e00000 +system.workload.dtb_filename = path.join(m5.options.outdir, "device.dtb") +system.workload.dtb_addr = 0x87E00000 kernel_cmd = [ # specifying Linux kernel boot options "console=ttyS0" @@ -161,5 +172,4 @@ system.workload.command_line = " ".join(kernel_cmd) for cpu in system.cpu: cpu.createInterruptController() -root = Root(full_system = True, system = system) - +root = Root(full_system=True, system=system) diff --git a/configs/learning_gem5/part1/caches.py b/configs/learning_gem5/part1/caches.py index e4aaa3c087..9bb06ab2e6 100644 --- a/configs/learning_gem5/part1/caches.py +++ b/configs/learning_gem5/part1/caches.py @@ -36,13 +36,14 @@ import m5 from m5.objects import Cache # Add the common scripts to our path -m5.util.addToPath('../../') +m5.util.addToPath("../../") from common import SimpleOpts # Some specific options for caches # For all options see src/mem/cache/BaseCache.py + class L1Cache(Cache): """Simple L1 Cache with default values""" @@ -63,17 +64,19 @@ class L1Cache(Cache): def connectCPU(self, cpu): """Connect this cache's port to a CPU-side port - This must be defined in a subclass""" + This 
must be defined in a subclass""" raise NotImplementedError + class L1ICache(L1Cache): """Simple L1 instruction cache with default values""" # Set the default size - size = '16kB' + size = "16kB" - SimpleOpts.add_option('--l1i_size', - help="L1 instruction cache size. Default: %s" % size) + SimpleOpts.add_option( + "--l1i_size", help="L1 instruction cache size. Default: %s" % size + ) def __init__(self, opts=None): super(L1ICache, self).__init__(opts) @@ -85,14 +88,16 @@ class L1ICache(L1Cache): """Connect this cache's port to a CPU icache port""" self.cpu_side = cpu.icache_port + class L1DCache(L1Cache): """Simple L1 data cache with default values""" # Set the default size - size = '64kB' + size = "64kB" - SimpleOpts.add_option('--l1d_size', - help="L1 data cache size. Default: %s" % size) + SimpleOpts.add_option( + "--l1d_size", help="L1 data cache size. Default: %s" % size + ) def __init__(self, opts=None): super(L1DCache, self).__init__(opts) @@ -104,11 +109,12 @@ class L1DCache(L1Cache): """Connect this cache's port to a CPU dcache port""" self.cpu_side = cpu.dcache_port + class L2Cache(Cache): """Simple L2 Cache with default values""" # Default parameters - size = '256kB' + size = "256kB" assoc = 8 tag_latency = 20 data_latency = 20 @@ -116,7 +122,9 @@ class L2Cache(Cache): mshrs = 20 tgts_per_mshr = 12 - SimpleOpts.add_option('--l2_size', help="L2 cache size. Default: %s" % size) + SimpleOpts.add_option( + "--l2_size", help="L2 cache size. Default: %s" % size + ) def __init__(self, opts=None): super(L2Cache, self).__init__() diff --git a/configs/learning_gem5/part1/simple-arm.py b/configs/learning_gem5/part1/simple-arm.py new file mode 100644 index 0000000000..62f7645c5a --- /dev/null +++ b/configs/learning_gem5/part1/simple-arm.py @@ -0,0 +1,78 @@ +# Copyright (c) 2015 Jason Power +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This is the ARM equivalent to `simple.py` (which is designed to run using the +X86 ISA). More detailed documentation can be found in `simple.py`. 
+""" + +import m5 +from m5.objects import * + +system = System() + +system.clk_domain = SrcClockDomain() +system.clk_domain.clock = "1GHz" +system.clk_domain.voltage_domain = VoltageDomain() + +system.mem_mode = "timing" +system.mem_ranges = [AddrRange("512MB")] +system.cpu = ArmTimingSimpleCPU() + +system.membus = SystemXBar() + +system.cpu.icache_port = system.membus.cpu_side_ports +system.cpu.dcache_port = system.membus.cpu_side_ports + +system.cpu.createInterruptController() + +system.mem_ctrl = MemCtrl() +system.mem_ctrl.dram = DDR3_1600_8x8() +system.mem_ctrl.dram.range = system.mem_ranges[0] +system.mem_ctrl.port = system.membus.mem_side_ports + +system.system_port = system.membus.cpu_side_ports + +thispath = os.path.dirname(os.path.realpath(__file__)) +binary = os.path.join( + thispath, + "../../../", + "tests/test-progs/hello/bin/arm/linux/hello", +) + +system.workload = SEWorkload.init_compatible(binary) + +process = Process() +process.cmd = [binary] +system.cpu.workload = process +system.cpu.createThreads() + +root = Root(full_system=False, system=system) +m5.instantiate() + +print("Beginning simulation!") +exit_event = m5.simulate() +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part1/simple-riscv.py b/configs/learning_gem5/part1/simple-riscv.py new file mode 100644 index 0000000000..f05ca4ab50 --- /dev/null +++ b/configs/learning_gem5/part1/simple-riscv.py @@ -0,0 +1,78 @@ +# Copyright (c) 2015 Jason Power +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This is the RISCV equivalent to `simple.py` (which is designed to run using the +X86 ISA). More detailed documentation can be found in `simple.py`. 
+""" + +import m5 +from m5.objects import * + +system = System() + +system.clk_domain = SrcClockDomain() +system.clk_domain.clock = "1GHz" +system.clk_domain.voltage_domain = VoltageDomain() + +system.mem_mode = "timing" +system.mem_ranges = [AddrRange("512MB")] +system.cpu = RiscvTimingSimpleCPU() + +system.membus = SystemXBar() + +system.cpu.icache_port = system.membus.cpu_side_ports +system.cpu.dcache_port = system.membus.cpu_side_ports + +system.cpu.createInterruptController() + +system.mem_ctrl = MemCtrl() +system.mem_ctrl.dram = DDR3_1600_8x8() +system.mem_ctrl.dram.range = system.mem_ranges[0] +system.mem_ctrl.port = system.membus.mem_side_ports + +system.system_port = system.membus.cpu_side_ports + +thispath = os.path.dirname(os.path.realpath(__file__)) +binary = os.path.join( + thispath, + "../../../", + "tests/test-progs/hello/bin/riscv/linux/hello", +) + +system.workload = SEWorkload.init_compatible(binary) + +process = Process() +process.cmd = [binary] +system.cpu.workload = process +system.cpu.createThreads() + +root = Root(full_system=False, system=system) +m5.instantiate() + +print("Beginning simulation!") +exit_event = m5.simulate() +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part1/simple.py b/configs/learning_gem5/part1/simple.py index 7810c3d418..e36cd78c8e 100644 --- a/configs/learning_gem5/part1/simple.py +++ b/configs/learning_gem5/part1/simple.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015 Jason Power # All rights reserved. # @@ -33,10 +32,15 @@ learning_gem5 book for more information about this script. IMPORTANT: If you modify this file, it's likely that the Learning gem5 book also needs to be updated. For now, email Jason +This script uses the X86 ISA. `simple-arm.py` and `simple-riscv.py` may be +referenced as examples of scripts which utilize the ARM and RISC-V ISAs +respectively. 
+ """ # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * @@ -45,15 +49,17 @@ system = System() # Set the clock frequency of the system (and all of its children) system.clk_domain = SrcClockDomain() -system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() # Set up the system -system.mem_mode = 'timing' # Use timing accesses -system.mem_ranges = [AddrRange('512MB')] # Create an address range +system.mem_mode = "timing" # Use timing accesses +system.mem_ranges = [AddrRange("512MB")] # Create an address range # Create a simple CPU -system.cpu = TimingSimpleCPU() +# You can use ISA-specific CPU models for different workloads: +# `RiscvTimingSimpleCPU`, `ArmTimingSimpleCPU`. +system.cpu = X86TimingSimpleCPU() # Create a memory bus, a system crossbar, in this case system.membus = SystemXBar() @@ -65,12 +71,12 @@ system.cpu.dcache_port = system.membus.cpu_side_ports # create the interrupt controller for the CPU and connect to the membus system.cpu.createInterruptController() -# For x86 only, make sure the interrupts are connected to the memory -# Note: these are directly connected to the memory bus and are not cached -if m5.defines.buildEnv['TARGET_ISA'] == "x86": - system.cpu.interrupts[0].pio = system.membus.mem_side_ports - system.cpu.interrupts[0].int_requestor = system.membus.cpu_side_ports - system.cpu.interrupts[0].int_responder = system.membus.mem_side_ports +# For X86 only we make sure the interrupts care connect to memory. +# Note: these are directly connected to the memory bus and are not cached. +# For other ISA you should remove the following three lines. 
+system.cpu.interrupts[0].pio = system.membus.mem_side_ports +system.cpu.interrupts[0].int_requestor = system.membus.cpu_side_ports +system.cpu.interrupts[0].int_responder = system.membus.mem_side_ports # Create a DDR3 memory controller and connect it to the membus system.mem_ctrl = MemCtrl() @@ -81,14 +87,15 @@ system.mem_ctrl.port = system.membus.mem_side_ports # Connect the system up to the membus system.system_port = system.membus.cpu_side_ports -# get ISA for the binary to run. -isa = str(m5.defines.buildEnv['TARGET_ISA']).lower() - -# Default to running 'hello', use the compiled ISA to find the binary -# grab the specific path to the binary +# Here we set the X86 "hello world" binary. With other ISAs you must specify +# workloads compiled to those ISAs. Other "hello world" binaries for other ISAs +# can be found in "tests/test-progs/hello". thispath = os.path.dirname(os.path.realpath(__file__)) -binary = os.path.join(thispath, '../../../', - 'tests/test-progs/hello/bin/', isa, 'linux/hello') +binary = os.path.join( + thispath, + "../../../", + "tests/test-progs/hello/bin/x86/linux/hello", +) system.workload = SEWorkload.init_compatible(binary) @@ -102,10 +109,10 @@ system.cpu.workload = process system.cpu.createThreads() # set up the root SimObject and start the simulation -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # instantiate all of the objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part1/two_level.py b/configs/learning_gem5/part1/two_level.py index 7a7956c57f..8aa7dd7e83 100644 --- a/configs/learning_gem5/part1/two_level.py +++ b/configs/learning_gem5/part1/two_level.py @@ -40,11 +40,13 @@ IMPORTANT: If you modify this file, it's likely that the Learning 
gem5 book # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * +from gem5.runtime import get_runtime_isa # Add the common scripts to our path -m5.util.addToPath('../../') +m5.util.addToPath("../../") # import the caches which we made from caches import * @@ -52,17 +54,17 @@ from caches import * # import the SimpleOpts module from common import SimpleOpts -# get ISA for the default binary to run. This is mostly for simple testing -isa = str(m5.defines.buildEnv['TARGET_ISA']).lower() - # Default to running 'hello', use the compiled ISA to find the binary # grab the specific path to the binary thispath = os.path.dirname(os.path.realpath(__file__)) -default_binary = os.path.join(thispath, '../../../', - 'tests/test-progs/hello/bin/', isa, 'linux/hello') +default_binary = os.path.join( + thispath, + "../../../", + "tests/test-progs/hello/bin/x86/linux/hello", +) # Binary to execute -SimpleOpts.add_option("binary", nargs='?', default=default_binary) +SimpleOpts.add_option("binary", nargs="?", default=default_binary) # Finalize the arguments and grab the args so we can pass it on to our objects args = SimpleOpts.parse_args() @@ -72,15 +74,15 @@ system = System() # Set the clock frequency of the system (and all of its children) system.clk_domain = SrcClockDomain() -system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() # Set up the system -system.mem_mode = 'timing' # Use timing accesses -system.mem_ranges = [AddrRange('512MB')] # Create an address range +system.mem_mode = "timing" # Use timing accesses +system.mem_ranges = [AddrRange("512MB")] # Create an address range # Create a simple CPU -system.cpu = TimingSimpleCPU() +system.cpu = X86TimingSimpleCPU() # Create an L1 instruction and data cache system.cpu.icache = L1ICache(args) @@ -109,13 +111,9 @@ system.l2cache.connectMemSideBus(system.membus) # create the interrupt controller for 
the CPU system.cpu.createInterruptController() - -# For x86 only, make sure the interrupts are connected to the memory -# Note: these are directly connected to the memory bus and are not cached -if m5.defines.buildEnv['TARGET_ISA'] == "x86": - system.cpu.interrupts[0].pio = system.membus.mem_side_ports - system.cpu.interrupts[0].int_requestor = system.membus.cpu_side_ports - system.cpu.interrupts[0].int_responder = system.membus.mem_side_ports +system.cpu.interrupts[0].pio = system.membus.mem_side_ports +system.cpu.interrupts[0].int_requestor = system.membus.cpu_side_ports +system.cpu.interrupts[0].int_responder = system.membus.mem_side_ports # Connect the system up to the membus system.system_port = system.membus.cpu_side_ports @@ -138,10 +136,10 @@ system.cpu.workload = process system.cpu.createThreads() # set up the root SimObject and start the simulation -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # instantiate all of the objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part2/hello_goodbye.py b/configs/learning_gem5/part2/hello_goodbye.py index f5ac46bdd1..e4b70ba3ae 100644 --- a/configs/learning_gem5/part2/hello_goodbye.py +++ b/configs/learning_gem5/part2/hello_goodbye.py @@ -36,19 +36,20 @@ IMPORTANT: If you modify this file, it's likely that the Learning gem5 book # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * # set up the root SimObject and start the simulation -root = Root(full_system = False) +root = Root(full_system=False) # Create an instantiation of the simobject you created -root.hello = HelloObject(time_to_wait = '2us', number_of_fires = 5) -root.hello.goodbye_object = 
GoodbyeObject(buffer_size='100B') +root.hello = HelloObject(time_to_wait="2us", number_of_fires=5) +root.hello.goodbye_object = GoodbyeObject(buffer_size="100B") # instantiate all of the objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part2/run_simple.py b/configs/learning_gem5/part2/run_simple.py index 4d3a253d38..be5f6ee7b8 100644 --- a/configs/learning_gem5/part2/run_simple.py +++ b/configs/learning_gem5/part2/run_simple.py @@ -35,11 +35,12 @@ system. Since there are no events, this "simulation" should finish immediately # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * # set up the root SimObject and start the simulation -root = Root(full_system = False) +root = Root(full_system=False) # Create an instantiation of the simobject you created root.hello = SimpleObject() @@ -49,4 +50,4 @@ m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part2/simple_cache.py b/configs/learning_gem5/part2/simple_cache.py index dc3a162aa1..4228956126 100644 --- a/configs/learning_gem5/part2/simple_cache.py +++ b/configs/learning_gem5/part2/simple_cache.py @@ -33,6 +33,7 @@ This config file assumes that the x86 ISA was built. 
# import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * @@ -41,21 +42,21 @@ system = System() # Set the clock frequency of the system (and all of its children) system.clk_domain = SrcClockDomain() -system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() # Set up the system -system.mem_mode = 'timing' # Use timing accesses -system.mem_ranges = [AddrRange('512MB')] # Create an address range +system.mem_mode = "timing" # Use timing accesses +system.mem_ranges = [AddrRange("512MB")] # Create an address range # Create a simple CPU -system.cpu = TimingSimpleCPU() +system.cpu = X86TimingSimpleCPU() # Create a memory bus, a coherent crossbar, in this case system.membus = SystemXBar() # Create a simple cache -system.cache = SimpleCache(size='1kB') +system.cache = SimpleCache(size="1kB") # Connect the I and D cache ports of the CPU to the memobj. # Since cpu_side is a vector port, each time one of these is connected, it will @@ -86,8 +87,9 @@ process = Process() # Set the command # grab the specific path to the binary thispath = os.path.dirname(os.path.realpath(__file__)) -binpath = os.path.join(thispath, '../../../', - 'tests/test-progs/hello/bin/x86/linux/hello') +binpath = os.path.join( + thispath, "../../../", "tests/test-progs/hello/bin/x86/linux/hello" +) # cmd is a list which begins with the executable (like argv) process.cmd = [binpath] # Set the cpu to use the process as its workload and create thread contexts @@ -97,10 +99,10 @@ system.cpu.createThreads() system.workload = SEWorkload.init_compatible(binpath) # set up the root SimObject and start the simulation -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # instantiate all of the objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), 
exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part2/simple_memobj.py b/configs/learning_gem5/part2/simple_memobj.py index eaceae9069..20f4362b81 100644 --- a/configs/learning_gem5/part2/simple_memobj.py +++ b/configs/learning_gem5/part2/simple_memobj.py @@ -33,6 +33,7 @@ This config file assumes that the x86 ISA was built. # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * @@ -41,15 +42,15 @@ system = System() # Set the clock frequency of the system (and all of its children) system.clk_domain = SrcClockDomain() -system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() # Set up the system -system.mem_mode = 'timing' # Use timing accesses -system.mem_ranges = [AddrRange('512MB')] # Create an address range +system.mem_mode = "timing" # Use timing accesses +system.mem_ranges = [AddrRange("512MB")] # Create an address range # Create a simple CPU -system.cpu = TimingSimpleCPU() +system.cpu = X86TimingSimpleCPU() # Create the simple memory object system.memobj = SimpleMemobj() @@ -84,8 +85,9 @@ process = Process() # Set the command # grab the specific path to the binary thispath = os.path.dirname(os.path.realpath(__file__)) -binpath = os.path.join(thispath, '../../../', - 'tests/test-progs/hello/bin/x86/linux/hello') +binpath = os.path.join( + thispath, "../../../", "tests/test-progs/hello/bin/x86/linux/hello" +) # cmd is a list which begins with the executable (like argv) process.cmd = [binpath] # Set the cpu to use the process as its workload and create thread contexts @@ -95,10 +97,10 @@ system.cpu.createThreads() system.workload = SEWorkload.init_compatible(binpath) # set up the root SimObject and start the simulation -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # instantiate all of the 
objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())) +print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause())) diff --git a/configs/learning_gem5/part3/msi_caches.py b/configs/learning_gem5/part3/msi_caches.py index 957adf2baf..13b2a11b1a 100644 --- a/configs/learning_gem5/part3/msi_caches.py +++ b/configs/learning_gem5/part3/msi_caches.py @@ -42,19 +42,19 @@ from m5.util import fatal, panic from m5.objects import * -class MyCacheSystem(RubySystem): +class MyCacheSystem(RubySystem): def __init__(self): - if buildEnv['PROTOCOL'] != 'MSI': + if buildEnv["PROTOCOL"] != "MSI": fatal("This system assumes MSI from learning gem5!") super(MyCacheSystem, self).__init__() def setup(self, system, cpus, mem_ctrls): """Set up the Ruby cache subsystem. Note: This can't be done in the - constructor because many of these items require a pointer to the - ruby system (self). This causes infinite recursion in initialize() - if we do this in the __init__. + constructor because many of these items require a pointer to the + ruby system (self). This causes infinite recursion in initialize() + if we do this in the __init__. """ # Ruby's global network. self.network = MyNetwork(self) @@ -70,22 +70,26 @@ class MyCacheSystem(RubySystem): # customized depending on the topology/network requirements. # Create one controller for each L1 cache (and the cache mem obj.) # Create a single directory controller (Really the memory cntrl) - self.controllers = \ - [L1Cache(system, self, cpu) for cpu in cpus] + \ - [DirController(self, system.mem_ranges, mem_ctrls)] + self.controllers = [L1Cache(system, self, cpu) for cpu in cpus] + [ + DirController(self, system.mem_ranges, mem_ctrls) + ] # Create one sequencer per CPU. In many systems this is more # complicated since you have to create sequencers for DMA controllers # and other controllers, too. 
- self.sequencers = [RubySequencer(version = i, - # I/D cache is combined and grab from ctrl - dcache = self.controllers[i].cacheMemory, - clk_domain = self.controllers[i].clk_domain, - ) for i in range(len(cpus))] + self.sequencers = [ + RubySequencer( + version=i, + # I/D cache is combined and grab from ctrl + dcache=self.controllers[i].cacheMemory, + clk_domain=self.controllers[i].clk_domain, + ) + for i in range(len(cpus)) + ] # We know that we put the controllers in an order such that the first # N of them are the L1 caches which need a sequencer pointer - for i,c in enumerate(self.controllers[0:len(self.sequencers)]): + for i, c in enumerate(self.controllers[0 : len(self.sequencers)]): c.sequencer = self.sequencers[i] self.num_of_sequencers = len(self.sequencers) @@ -101,29 +105,30 @@ class MyCacheSystem(RubySystem): system.system_port = self.sys_port_proxy.in_ports # Connect the cpu's cache, interrupt, and TLB ports to Ruby - for i,cpu in enumerate(cpus): + for i, cpu in enumerate(cpus): self.sequencers[i].connectCpuPorts(cpu) class L1Cache(L1Cache_Controller): _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 def __init__(self, system, ruby_system, cpu): """CPUs are needed to grab the clock domain and system is needed for - the cache block size. + the cache block size. 
""" super(L1Cache, self).__init__() self.version = self.versionCount() # This is the cache memory object that stores the cache data and tags - self.cacheMemory = RubyCache(size = '16kB', - assoc = 8, - start_index_bit = self.getBlockSizeBits(system)) + self.cacheMemory = RubyCache( + size="16kB", assoc=8, start_index_bit=self.getBlockSizeBits(system) + ) self.clk_domain = cpu.clk_domain self.send_evictions = self.sendEvicts(cpu) self.ruby_system = ruby_system @@ -137,19 +142,17 @@ class L1Cache(L1Cache_Controller): def sendEvicts(self, cpu): """True if the CPU model or ISA requires sending evictions from caches - to the CPU. Two scenarios warrant forwarding evictions to the CPU: - 1. The O3 model must keep the LSQ coherent with the caches - 2. The x86 mwait instruction is built on top of coherence - 3. The local exclusive monitor in ARM systems + to the CPU. Two scenarios warrant forwarding evictions to the CPU: + 1. The O3 model must keep the LSQ coherent with the caches + 2. The x86 mwait instruction is built on top of coherence + 3. The local exclusive monitor in ARM systems + + As this is an X86 simulation we return True. """ - if type(cpu) is DerivO3CPU or \ - buildEnv['TARGET_ISA'] in ('x86', 'arm'): - return True - return False + return True def connectQueues(self, ruby_system): - """Connect all of the queues for this controller. - """ + """Connect all of the queues for this controller.""" # mandatoryQueue is a special variable. It is used by the sequencer to # send RubyRequests from the CPU (or other processor). It isn't # explicitly connected to anything. @@ -160,26 +163,27 @@ class L1Cache(L1Cache_Controller): # mean the same thing as normal gem5 ports. If a MessageBuffer # is a "to" buffer (i.e., out) then you use the "out_port", # otherwise, the in_port. 
- self.requestToDir = MessageBuffer(ordered = True) + self.requestToDir = MessageBuffer(ordered=True) self.requestToDir.out_port = ruby_system.network.in_port - self.responseToDirOrSibling = MessageBuffer(ordered = True) + self.responseToDirOrSibling = MessageBuffer(ordered=True) self.responseToDirOrSibling.out_port = ruby_system.network.in_port - self.forwardFromDir = MessageBuffer(ordered = True) + self.forwardFromDir = MessageBuffer(ordered=True) self.forwardFromDir.in_port = ruby_system.network.out_port - self.responseFromDirOrSibling = MessageBuffer(ordered = True) + self.responseFromDirOrSibling = MessageBuffer(ordered=True) self.responseFromDirOrSibling.in_port = ruby_system.network.out_port + class DirController(Directory_Controller): _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 def __init__(self, ruby_system, ranges, mem_ctrls): - """ranges are the memory ranges assigned to this controller. 
- """ + """ranges are the memory ranges assigned to this controller.""" if len(mem_ctrls) > 1: panic("This cache system can only be connected to one mem ctrl") super(DirController, self).__init__() @@ -192,14 +196,14 @@ class DirController(Directory_Controller): self.connectQueues(ruby_system) def connectQueues(self, ruby_system): - self.requestFromCache = MessageBuffer(ordered = True) + self.requestFromCache = MessageBuffer(ordered=True) self.requestFromCache.in_port = ruby_system.network.out_port - self.responseFromCache = MessageBuffer(ordered = True) + self.responseFromCache = MessageBuffer(ordered=True) self.responseFromCache.in_port = ruby_system.network.out_port - self.responseToCache = MessageBuffer(ordered = True) + self.responseToCache = MessageBuffer(ordered=True) self.responseToCache.out_port = ruby_system.network.in_port - self.forwardToCache = MessageBuffer(ordered = True) + self.forwardToCache = MessageBuffer(ordered=True) self.forwardToCache.out_port = ruby_system.network.in_port # These are other special message buffers. They are used to send @@ -209,9 +213,9 @@ class DirController(Directory_Controller): self.requestToMemory = MessageBuffer() self.responseFromMemory = MessageBuffer() + class MyNetwork(SimpleNetwork): - """A simple point-to-point network. This doesn't not use garnet. - """ + """A simple point-to-point network. This doesn't not use garnet.""" def __init__(self, ruby_system): super(MyNetwork, self).__init__() @@ -220,25 +224,28 @@ class MyNetwork(SimpleNetwork): def connectControllers(self, controllers): """Connect all of the controllers to routers and connec the routers - together in a point-to-point network. + together in a point-to-point network. """ # Create one router/switch per controller in the system - self.routers = [Switch(router_id = i) for i in range(len(controllers))] + self.routers = [Switch(router_id=i) for i in range(len(controllers))] # Make a link from each controller to the router. 
The link goes # externally to the network. - self.ext_links = [SimpleExtLink(link_id=i, ext_node=c, - int_node=self.routers[i]) - for i, c in enumerate(controllers)] + self.ext_links = [ + SimpleExtLink(link_id=i, ext_node=c, int_node=self.routers[i]) + for i, c in enumerate(controllers) + ] # Make an "internal" link (internal to the network) between every pair # of routers. link_count = 0 - self.int_links = [] + int_links = [] for ri in self.routers: for rj in self.routers: - if ri == rj: continue # Don't connect a router to itself! + if ri == rj: + continue # Don't connect a router to itself! link_count += 1 - self.int_links.append(SimpleIntLink(link_id = link_count, - src_node = ri, - dst_node = rj)) + int_links.append( + SimpleIntLink(link_id=link_count, src_node=ri, dst_node=rj) + ) + self.int_links = int_links diff --git a/configs/learning_gem5/part3/ruby_caches_MI_example.py b/configs/learning_gem5/part3/ruby_caches_MI_example.py index b67e6b1791..8c25a9b2d9 100644 --- a/configs/learning_gem5/part3/ruby_caches_MI_example.py +++ b/configs/learning_gem5/part3/ruby_caches_MI_example.py @@ -44,19 +44,19 @@ from m5.util import fatal, panic from m5.objects import * -class MyCacheSystem(RubySystem): +class MyCacheSystem(RubySystem): def __init__(self): - if buildEnv['PROTOCOL'] != 'MI_example': + if buildEnv["PROTOCOL"] != "MI_example": fatal("This system assumes MI_example!") super(MyCacheSystem, self).__init__() def setup(self, system, cpus, mem_ctrls): """Set up the Ruby cache subsystem. Note: This can't be done in the - constructor because many of these items require a pointer to the - ruby system (self). This causes infinite recursion in initialize() - if we do this in the __init__. + constructor because many of these items require a pointer to the + ruby system (self). This causes infinite recursion in initialize() + if we do this in the __init__. """ # Ruby's global network. 
self.network = MyNetwork(self) @@ -70,20 +70,24 @@ class MyCacheSystem(RubySystem): # customized depending on the topology/network requirements. # Create one controller for each L1 cache (and the cache mem obj.) # Create a single directory controller (Really the memory cntrl) - self.controllers = \ - [L1Cache(system, self, cpu) for cpu in cpus] + \ - [DirController(self, system.mem_ranges, mem_ctrls)] + self.controllers = [L1Cache(system, self, cpu) for cpu in cpus] + [ + DirController(self, system.mem_ranges, mem_ctrls) + ] # Create one sequencer per CPU. In many systems this is more # complicated since you have to create sequencers for DMA controllers # and other controllers, too. - self.sequencers = [RubySequencer(version = i, - # I/D cache is combined and grab from ctrl - dcache = self.controllers[i].cacheMemory, - clk_domain = self.controllers[i].clk_domain, - ) for i in range(len(cpus))] + self.sequencers = [ + RubySequencer( + version=i, + # I/D cache is combined and grab from ctrl + dcache=self.controllers[i].cacheMemory, + clk_domain=self.controllers[i].clk_domain, + ) + for i in range(len(cpus)) + ] - for i,c in enumerate(self.controllers[0:len(cpus)]): + for i, c in enumerate(self.controllers[0 : len(cpus)]): c.sequencer = self.sequencers[i] self.num_of_sequencers = len(self.sequencers) @@ -99,28 +103,30 @@ class MyCacheSystem(RubySystem): system.system_port = self.sys_port_proxy.in_ports # Connect the cpu's cache, interrupt, and TLB ports to Ruby - for i,cpu in enumerate(cpus): + for i, cpu in enumerate(cpus): self.sequencers[i].connectCpuPorts(cpu) + class L1Cache(L1Cache_Controller): _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 def __init__(self, system, ruby_system, cpu): """CPUs are needed to grab the clock domain and system is needed for - the cache block size. + the cache block size. 
""" super(L1Cache, self).__init__() self.version = self.versionCount() # This is the cache memory object that stores the cache data and tags - self.cacheMemory = RubyCache(size = '16kB', - assoc = 8, - start_index_bit = self.getBlockSizeBits(system)) + self.cacheMemory = RubyCache( + size="16kB", assoc=8, start_index_bit=self.getBlockSizeBits(system) + ) self.clk_domain = cpu.clk_domain self.send_evictions = self.sendEvicts(cpu) self.ruby_system = ruby_system @@ -134,40 +140,39 @@ class L1Cache(L1Cache_Controller): def sendEvicts(self, cpu): """True if the CPU model or ISA requires sending evictions from caches - to the CPU. Two scenarios warrant forwarding evictions to the CPU: - 1. The O3 model must keep the LSQ coherent with the caches - 2. The x86 mwait instruction is built on top of coherence - 3. The local exclusive monitor in ARM systems + to the CPU. Two scenarios warrant forwarding evictions to the CPU: + 1. The O3 model must keep the LSQ coherent with the caches + 2. The x86 mwait instruction is built on top of coherence + 3. The local exclusive monitor in ARM systems + + As this is an X86 simulation we return True. """ - if type(cpu) is DerivO3CPU or \ - buildEnv['TARGET_ISA'] in ('x86', 'arm'): - return True - return False + return True def connectQueues(self, ruby_system): - """Connect all of the queues for this controller. 
- """ + """Connect all of the queues for this controller.""" self.mandatoryQueue = MessageBuffer() - self.requestFromCache = MessageBuffer(ordered = True) + self.requestFromCache = MessageBuffer(ordered=True) self.requestFromCache.out_port = ruby_system.network.in_port - self.responseFromCache = MessageBuffer(ordered = True) + self.responseFromCache = MessageBuffer(ordered=True) self.responseFromCache.out_port = ruby_system.network.in_port - self.forwardToCache = MessageBuffer(ordered = True) + self.forwardToCache = MessageBuffer(ordered=True) self.forwardToCache.in_port = ruby_system.network.out_port - self.responseToCache = MessageBuffer(ordered = True) + self.responseToCache = MessageBuffer(ordered=True) self.responseToCache.in_port = ruby_system.network.out_port + class DirController(Directory_Controller): _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 def __init__(self, ruby_system, ranges, mem_ctrls): - """ranges are the memory ranges assigned to this controller. 
- """ + """ranges are the memory ranges assigned to this controller.""" if len(mem_ctrls) > 1: panic("This cache system can only be connected to one mem ctrl") super(DirController, self).__init__() @@ -180,23 +185,23 @@ class DirController(Directory_Controller): self.connectQueues(ruby_system) def connectQueues(self, ruby_system): - self.requestToDir = MessageBuffer(ordered = True) + self.requestToDir = MessageBuffer(ordered=True) self.requestToDir.in_port = ruby_system.network.out_port - self.dmaRequestToDir = MessageBuffer(ordered = True) + self.dmaRequestToDir = MessageBuffer(ordered=True) self.dmaRequestToDir.in_port = ruby_system.network.out_port self.responseFromDir = MessageBuffer() self.responseFromDir.out_port = ruby_system.network.in_port - self.dmaResponseFromDir = MessageBuffer(ordered = True) + self.dmaResponseFromDir = MessageBuffer(ordered=True) self.dmaResponseFromDir.out_port = ruby_system.network.in_port self.forwardFromDir = MessageBuffer() self.forwardFromDir.out_port = ruby_system.network.in_port self.requestToMemory = MessageBuffer() self.responseFromMemory = MessageBuffer() + class MyNetwork(SimpleNetwork): - """A simple point-to-point network. This doesn't not use garnet. - """ + """A simple point-to-point network. This doesn't not use garnet.""" def __init__(self, ruby_system): super(MyNetwork, self).__init__() @@ -205,25 +210,28 @@ class MyNetwork(SimpleNetwork): def connectControllers(self, controllers): """Connect all of the controllers to routers and connec the routers - together in a point-to-point network. + together in a point-to-point network. """ # Create one router/switch per controller in the system - self.routers = [Switch(router_id = i) for i in range(len(controllers))] + self.routers = [Switch(router_id=i) for i in range(len(controllers))] # Make a link from each controller to the router. The link goes # externally to the network. 
- self.ext_links = [SimpleExtLink(link_id=i, ext_node=c, - int_node=self.routers[i]) - for i, c in enumerate(controllers)] + self.ext_links = [ + SimpleExtLink(link_id=i, ext_node=c, int_node=self.routers[i]) + for i, c in enumerate(controllers) + ] # Make an "internal" link (internal to the network) between every pair # of routers. link_count = 0 - self.int_links = [] + int_links = [] for ri in self.routers: for rj in self.routers: - if ri == rj: continue # Don't connect a router to itself! + if ri == rj: + continue # Don't connect a router to itself! link_count += 1 - self.int_links.append(SimpleIntLink(link_id = link_count, - src_node = ri, - dst_node = rj)) + int_links.append( + SimpleIntLink(link_id=link_count, src_node=ri, dst_node=rj) + ) + self.int_links = int_links diff --git a/configs/learning_gem5/part3/ruby_test.py b/configs/learning_gem5/part3/ruby_test.py index 05096ec53a..d0cc1be613 100644 --- a/configs/learning_gem5/part3/ruby_test.py +++ b/configs/learning_gem5/part3/ruby_test.py @@ -36,6 +36,7 @@ IMPORTANT: If you modify this file, it's likely that the Learning gem5 book # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * @@ -46,17 +47,17 @@ system = System() # Set the clock frequency of the system (and all of its children) system.clk_domain = SrcClockDomain() -system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() # Set up the system -system.mem_mode = 'timing' # Use timing accesses -system.mem_ranges = [AddrRange('512MB')] # Create an address range +system.mem_mode = "timing" # Use timing accesses +system.mem_ranges = [AddrRange("512MB")] # Create an address range # Create the tester -system.tester = RubyTester(checks_to_complete = 100, - wakeup_frequency = 10, - num_cpus = 2) +system.tester = RubyTester( + checks_to_complete=100, wakeup_frequency=10, num_cpus=2 +) # Create a simple memory controller and 
connect it to the membus system.mem_ctrl = SimpleMemory(latency="50ns", bandwidth="0GB/s") @@ -67,16 +68,16 @@ system.caches = TestCacheSystem() system.caches.setup(system, system.tester, [system.mem_ctrl]) # set up the root SimObject and start the simulation -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # Not much point in this being higher than the L1 latency -m5.ticks.setGlobalFrequency('1ns') +m5.ticks.setGlobalFrequency("1ns") # instantiate all of the objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick {} because {}'.format( - m5.curTick(), exit_event.getCause()) - ) +print( + "Exiting @ tick {} because {}".format(m5.curTick(), exit_event.getCause()) +) diff --git a/configs/learning_gem5/part3/simple_ruby.py b/configs/learning_gem5/part3/simple_ruby.py index 3d9fe9a970..b62a7195c8 100644 --- a/configs/learning_gem5/part3/simple_ruby.py +++ b/configs/learning_gem5/part3/simple_ruby.py @@ -39,11 +39,12 @@ IMPORTANT: If you modify this file, it's likely that the Learning gem5 book # import the m5 (gem5) library created when gem5 is built import m5 + # import all of the SimObjects from m5.objects import * # Needed for running C++ threads -m5.util.addToPath('../../') +m5.util.addToPath("../../") from common.FileSystemConfig import config_filesystem # You can import ruby_caches_MI_example to use the MI_example protocol instead @@ -55,15 +56,15 @@ system = System() # Set the clock frequency of the system (and all of its children) system.clk_domain = SrcClockDomain() -system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() # Set up the system -system.mem_mode = 'timing' # Use timing accesses -system.mem_ranges = [AddrRange('512MB')] # Create an address range +system.mem_mode = "timing" # Use timing accesses +system.mem_ranges = [AddrRange("512MB")] # Create an address range # Create 
a pair of simple CPUs -system.cpu = [TimingSimpleCPU() for i in range(2)] +system.cpu = [X86TimingSimpleCPU() for i in range(2)] # Create a DDR3 memory controller and connect it to the membus system.mem_ctrl = MemCtrl() @@ -78,14 +79,14 @@ for cpu in system.cpu: system.caches = MyCacheSystem() system.caches.setup(system, system.cpu, [system.mem_ctrl]) -# get ISA for the binary to run. -isa = str(m5.defines.buildEnv['TARGET_ISA']).lower() - # Run application and use the compiled ISA to find the binary # grab the specific path to the binary thispath = os.path.dirname(os.path.realpath(__file__)) -binary = os.path.join(thispath, '../../../', 'tests/test-progs/threads/bin/', - isa, 'linux/threads') +binary = os.path.join( + thispath, + "../../../", + "tests/test-progs/threads/bin/x86/linux/threads", +) # Create a process for a simple "multi-threaded" application process = Process() @@ -103,12 +104,12 @@ system.workload = SEWorkload.init_compatible(binary) config_filesystem(system) # set up the root SimObject and start the simulation -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # instantiate all of the objects we've created above m5.instantiate() print("Beginning simulation!") exit_event = m5.simulate() -print('Exiting @ tick {} because {}'.format( - m5.curTick(), exit_event.getCause()) - ) +print( + "Exiting @ tick {} because {}".format(m5.curTick(), exit_event.getCause()) +) diff --git a/configs/learning_gem5/part3/test_caches.py b/configs/learning_gem5/part3/test_caches.py index 00a1fe3dab..7b0ce52dad 100644 --- a/configs/learning_gem5/part3/test_caches.py +++ b/configs/learning_gem5/part3/test_caches.py @@ -42,21 +42,21 @@ from m5.objects import * from msi_caches import L1Cache, DirController, MyNetwork -class TestCacheSystem(RubySystem): +class TestCacheSystem(RubySystem): def __init__(self): - if buildEnv['PROTOCOL'] != 'MSI': + if buildEnv["PROTOCOL"] != "MSI": fatal("This system assumes MSI from learning 
gem5!") super(TestCacheSystem, self).__init__() def setup(self, system, tester, mem_ctrls): """Set up the Ruby cache subsystem. Note: This can't be done in the - constructor because many of these items require a pointer to the - ruby system (self). This causes infinite recursion in initialize() - if we do this in the __init__. - Setting up for running the RubyRandomTester is a little different - than when we're using CPUs. + constructor because many of these items require a pointer to the + ruby system (self). This causes infinite recursion in initialize() + if we do this in the __init__. + Setting up for running the RubyRandomTester is a little different + than when we're using CPUs. """ num_testers = tester.num_cpus @@ -67,17 +67,21 @@ class TestCacheSystem(RubySystem): self.number_of_virtual_networks = 3 self.network.number_of_virtual_networks = 3 - self.controllers = \ - [L1Cache(system, self, self) for i in range(num_testers)] + \ - [DirController(self, system.mem_ranges, mem_ctrls)] + self.controllers = [ + L1Cache(system, self, self) for i in range(num_testers) + ] + [DirController(self, system.mem_ranges, mem_ctrls)] - self.sequencers = [RubySequencer(version = i, - # I/D cache is combined and grab from ctrl - dcache = self.controllers[i].cacheMemory, - clk_domain = self.clk_domain, - ) for i in range(num_testers)] + self.sequencers = [ + RubySequencer( + version=i, + # I/D cache is combined and grab from ctrl + dcache=self.controllers[i].cacheMemory, + clk_domain=self.clk_domain, + ) + for i in range(num_testers) + ] - for i,c in enumerate(self.controllers[0:len(self.sequencers)]): + for i, c in enumerate(self.controllers[0 : len(self.sequencers)]): c.sequencer = self.sequencers[i] self.num_of_sequencers = len(self.sequencers) diff --git a/configs/network/Network.py b/configs/network/Network.py index e5a86f6893..a5334741c0 100644 --- a/configs/network/Network.py +++ b/configs/network/Network.py @@ -30,69 +30,106 @@ from m5.objects import * from m5.defines 
import buildEnv from m5.util import addToPath, fatal, warn + def define_options(parser): # By default, ruby uses the simple timing cpu parser.set_defaults(cpu_type="TimingSimpleCPU") parser.add_argument( - "--topology", type=str, default="Crossbar", - help="check configs/topologies for complete set") + "--topology", + type=str, + default="Crossbar", + help="check configs/topologies for complete set", + ) parser.add_argument( - "--mesh-rows", type=int, default=0, - help="the number of rows in the mesh topology") + "--mesh-rows", + type=int, + default=0, + help="the number of rows in the mesh topology", + ) parser.add_argument( - "--network", default="simple", - choices=['simple', 'garnet'], - help="""'simple'|'garnet' (garnet2.0 will be deprecated.)""") + "--network", + default="simple", + choices=["simple", "garnet"], + help="""'simple'|'garnet' (garnet2.0 will be deprecated.)""", + ) parser.add_argument( - "--router-latency", action="store", type=int, + "--router-latency", + action="store", + type=int, default=1, help="""number of pipeline stages in the garnet router. Has to be >= 1. Can be over-ridden on a per router basis - in the topology file.""") + in the topology file.""", + ) parser.add_argument( - "--link-latency", action="store", type=int, default=1, + "--link-latency", + action="store", + type=int, + default=1, help="""latency of each link the simple/garnet networks. Has to be >= 1. 
Can be over-ridden on a per link basis - in the topology file.""") + in the topology file.""", + ) parser.add_argument( - "--link-width-bits", action="store", type=int, + "--link-width-bits", + action="store", + type=int, default=128, - help="width in bits for all links inside garnet.") + help="width in bits for all links inside garnet.", + ) parser.add_argument( - "--vcs-per-vnet", action="store", type=int, default=4, + "--vcs-per-vnet", + action="store", + type=int, + default=4, help="""number of virtual channels per virtual network - inside garnet network.""") + inside garnet network.""", + ) parser.add_argument( - "--routing-algorithm", action="store", type=int, + "--routing-algorithm", + action="store", + type=int, default=0, help="""routing algorithm in network. 0: weight-based table 1: XY (for Mesh. see garnet/RoutingUnit.cc) - 2: Custom (see garnet/RoutingUnit.cc""") + 2: Custom (see garnet/RoutingUnit.cc""", + ) parser.add_argument( - "--network-fault-model", action="store_true", + "--network-fault-model", + action="store_true", default=False, help="""enable network fault model: - see src/mem/ruby/network/fault_model/""") + see src/mem/ruby/network/fault_model/""", + ) parser.add_argument( - "--garnet-deadlock-threshold", action="store", - type=int, default=50000, - help="network-level deadlock threshold.") - parser.add_argument("--simple-physical-channels", action="store_true", + "--garnet-deadlock-threshold", + action="store", + type=int, + default=50000, + help="network-level deadlock threshold.", + ) + parser.add_argument( + "--simple-physical-channels", + action="store_true", default=False, help="""SimpleNetwork links uses a separate physical - channel for each virtual network""") + channel for each virtual network""", + ) + def create_network(options, ruby): # Allow legacy users to use garnet through garnet2.0 option # until next gem5 release. if options.network == "garnet2.0": - warn("Usage of option 'garnet2.0' will be depracated. 
" \ - "Please use 'garnet' for using the latest garnet " \ - "version. Current version: 3.0") + warn( + "Usage of option 'garnet2.0' will be depracated. " + "Please use 'garnet' for using the latest garnet " + "version. Current version: 3.0" + ) options.network = "garnet" # Set the network classes based on the command line options @@ -112,11 +149,18 @@ def create_network(options, ruby): # Instantiate the network object # so that the controllers can connect to it. - network = NetworkClass(ruby_system = ruby, topology = options.topology, - routers = [], ext_links = [], int_links = [], netifs = []) + network = NetworkClass( + ruby_system=ruby, + topology=options.topology, + routers=[], + ext_links=[], + int_links=[], + netifs=[], + ) return (network, IntLinkClass, ExtLinkClass, RouterClass, InterfaceClass) + def init_network(options, network, InterfaceClass): if options.network == "garnet": @@ -129,79 +173,109 @@ def init_network(options, network, InterfaceClass): # Create Bridges and connect them to the corresponding links for intLink in network.int_links: intLink.src_net_bridge = NetworkBridge( - link = intLink.network_link, - vtype = 'OBJECT_LINK', - width = intLink.src_node.width) + link=intLink.network_link, + vtype="OBJECT_LINK", + width=intLink.src_node.width, + ) intLink.src_cred_bridge = NetworkBridge( - link = intLink.credit_link, - vtype = 'LINK_OBJECT', - width = intLink.src_node.width) + link=intLink.credit_link, + vtype="LINK_OBJECT", + width=intLink.src_node.width, + ) intLink.dst_net_bridge = NetworkBridge( - link = intLink.network_link, - vtype = 'LINK_OBJECT', - width = intLink.dst_node.width) + link=intLink.network_link, + vtype="LINK_OBJECT", + width=intLink.dst_node.width, + ) intLink.dst_cred_bridge = NetworkBridge( - link = intLink.credit_link, - vtype = 'OBJECT_LINK', - width = intLink.dst_node.width) + link=intLink.credit_link, + vtype="OBJECT_LINK", + width=intLink.dst_node.width, + ) for extLink in network.ext_links: ext_net_bridges = [] - 
ext_net_bridges.append(NetworkBridge(link = - extLink.network_links[0], - vtype = 'OBJECT_LINK', - width = extLink.width)) - ext_net_bridges.append(NetworkBridge(link = - extLink.network_links[1], - vtype = 'LINK_OBJECT', - width = extLink.width)) + ext_net_bridges.append( + NetworkBridge( + link=extLink.network_links[0], + vtype="OBJECT_LINK", + width=extLink.width, + ) + ) + ext_net_bridges.append( + NetworkBridge( + link=extLink.network_links[1], + vtype="LINK_OBJECT", + width=extLink.width, + ) + ) extLink.ext_net_bridge = ext_net_bridges ext_credit_bridges = [] - ext_credit_bridges.append(NetworkBridge(link = - extLink.credit_links[0], - vtype = 'LINK_OBJECT', - width = extLink.width)) - ext_credit_bridges.append(NetworkBridge(link = - extLink.credit_links[1], - vtype = 'OBJECT_LINK', - width = extLink.width)) + ext_credit_bridges.append( + NetworkBridge( + link=extLink.credit_links[0], + vtype="LINK_OBJECT", + width=extLink.width, + ) + ) + ext_credit_bridges.append( + NetworkBridge( + link=extLink.credit_links[1], + vtype="OBJECT_LINK", + width=extLink.width, + ) + ) extLink.ext_cred_bridge = ext_credit_bridges int_net_bridges = [] - int_net_bridges.append(NetworkBridge(link = - extLink.network_links[0], - vtype = 'LINK_OBJECT', - width = extLink.int_node.width)) - int_net_bridges.append(NetworkBridge(link = - extLink.network_links[1], - vtype = 'OBJECT_LINK', - width = extLink.int_node.width)) + int_net_bridges.append( + NetworkBridge( + link=extLink.network_links[0], + vtype="LINK_OBJECT", + width=extLink.int_node.width, + ) + ) + int_net_bridges.append( + NetworkBridge( + link=extLink.network_links[1], + vtype="OBJECT_LINK", + width=extLink.int_node.width, + ) + ) extLink.int_net_bridge = int_net_bridges int_cred_bridges = [] - int_cred_bridges.append(NetworkBridge(link = - extLink.credit_links[0], - vtype = 'OBJECT_LINK', - width = extLink.int_node.width)) - int_cred_bridges.append(NetworkBridge(link = - extLink.credit_links[1], - vtype = 'LINK_OBJECT', 
- width = extLink.int_node.width)) + int_cred_bridges.append( + NetworkBridge( + link=extLink.credit_links[0], + vtype="OBJECT_LINK", + width=extLink.int_node.width, + ) + ) + int_cred_bridges.append( + NetworkBridge( + link=extLink.credit_links[1], + vtype="LINK_OBJECT", + width=extLink.int_node.width, + ) + ) extLink.int_cred_bridge = int_cred_bridges if options.network == "simple": if options.simple_physical_channels: - network.physical_vnets_channels = \ - [1] * int(network.number_of_virtual_networks) + network.physical_vnets_channels = [1] * int( + network.number_of_virtual_networks + ) network.setup_buffers() if InterfaceClass != None: - netifs = [InterfaceClass(id=i) \ - for (i,n) in enumerate(network.ext_links)] + netifs = [ + InterfaceClass(id=i) for (i, n) in enumerate(network.ext_links) + ] network.netifs = netifs if options.network_fault_model: - assert(options.network == "garnet") + assert options.network == "garnet" network.enable_fault_model = True network.fault_model = FaultModel() diff --git a/configs/nvm/sweep.py b/configs/nvm/sweep.py index 09371f97fd..b569cb35b5 100644 --- a/configs/nvm/sweep.py +++ b/configs/nvm/sweep.py @@ -41,7 +41,7 @@ from m5.objects import * from m5.util import addToPath from m5.stats import periodicStatDump -addToPath('../') +addToPath("../") from common import ObjectList from common import MemConfig @@ -53,28 +53,41 @@ from common import MemConfig parser = argparse.ArgumentParser() -nvm_generators = { - "NVM" : lambda x: x.createNvm, -} +nvm_generators = {"NVM": lambda x: x.createNvm} # Use a single-channel DDR3-1600 x64 (8x8 topology) by default -parser.add_argument("--nvm-type", default="NVM_2400_1x64", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") +parser.add_argument( + "--nvm-type", + default="NVM_2400_1x64", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", +) -parser.add_argument("--nvm-ranks", "-r", type=int, default=1, - help = "Number of ranks to iterate 
across") +parser.add_argument( + "--nvm-ranks", + "-r", + type=int, + default=1, + help="Number of ranks to iterate across", +) -parser.add_argument("--rd_perc", type=int, default=100, - help = "Percentage of read commands") +parser.add_argument( + "--rd_perc", type=int, default=100, help="Percentage of read commands" +) -parser.add_argument("--mode", default="NVM", - choices=nvm_generators.keys(), - help = "NVM: Random traffic") +parser.add_argument( + "--mode", + default="NVM", + choices=nvm_generators.keys(), + help="NVM: Random traffic", +) -parser.add_argument("--addr-map", - choices=ObjectList.dram_addr_map_list.get_names(), - default="RoRaBaCoCh", help = "NVM address map policy") +parser.add_argument( + "--addr-map", + choices=ObjectList.dram_addr_map_list.get_names(), + default="RoRaBaCoCh", + help="NVM address map policy", +) args = parser.parse_args() @@ -84,13 +97,13 @@ args = parser.parse_args() # start with the system itself, using a multi-layer 2.0 GHz # crossbar, delivering 64 bytes / 3 cycles (one header cycle) # which amounts to 42.7 GByte/s per layer and thus per port -system = System(membus = IOXBar(width = 32)) -system.clk_domain = SrcClockDomain(clock = '2.0GHz', - voltage_domain = - VoltageDomain(voltage = '1V')) +system = System(membus=IOXBar(width=32)) +system.clk_domain = SrcClockDomain( + clock="2.0GHz", voltage_domain=VoltageDomain(voltage="1V") +) # we are fine with 256 MB memory for now -mem_range = AddrRange('512MB') +mem_range = AddrRange("512MB") system.mem_ranges = [mem_range] # do not worry about reserving space for the backing store @@ -127,14 +140,21 @@ period = 250000000 nbr_banks = system.mem_ctrls[0].dram.banks_per_rank.value # determine the burst length in bytes -burst_size = int((system.mem_ctrls[0].dram.devices_per_rank.value * - system.mem_ctrls[0].dram.device_bus_width.value * - system.mem_ctrls[0].dram.burst_length.value) / 8) +burst_size = int( + ( + system.mem_ctrls[0].dram.devices_per_rank.value + * 
system.mem_ctrls[0].dram.device_bus_width.value + * system.mem_ctrls[0].dram.burst_length.value + ) + / 8 +) # next, get the page size in bytes -buffer_size = system.mem_ctrls[0].dram.devices_per_rank.value * \ - system.mem_ctrls[0].dram.device_rowbuffer_size.value +buffer_size = ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_rowbuffer_size.value +) # match the maximum bandwidth of the memory, the parameter is in seconds # and we need it in ticks (ps) @@ -164,27 +184,42 @@ system.system_port = system.membus.cpu_side_ports periodicStatDump(period) # run Forrest, run! -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() + def trace(): addr_map = ObjectList.dram_addr_map_list.get(args.addr_map) generator = nvm_generators[args.mode](system.tgen) for stride_size in range(burst_size, max_stride + 1, burst_size): for bank in range(1, nbr_banks + 1): num_seq_pkts = int(math.ceil(float(stride_size) / burst_size)) - yield generator(period, - 0, max_addr, burst_size, int(itt), int(itt), - args.rd_perc, 0, - num_seq_pkts, buffer_size, nbr_banks, bank, - addr_map, args.dram_ranks) + yield generator( + period, + 0, + max_addr, + burst_size, + int(itt), + int(itt), + args.rd_perc, + 0, + num_seq_pkts, + buffer_size, + nbr_banks, + bank, + addr_map, + args.dram_ranks, + ) yield system.tgen.createExit(0) + system.tgen.start(trace()) m5.simulate() -print("NVM sweep with burst: %d, banks: %d, max stride: %d" % - (burst_size, nbr_banks, max_stride)) +print( + "NVM sweep with burst: %d, banks: %d, max stride: %d" + % (burst_size, nbr_banks, max_stride) +) diff --git a/configs/nvm/sweep_hybrid.py b/configs/nvm/sweep_hybrid.py index 6bccdef1fa..d1e2994268 100644 --- a/configs/nvm/sweep_hybrid.py +++ b/configs/nvm/sweep_hybrid.py @@ -41,7 +41,7 @@ from m5.objects import * from m5.util import addToPath from m5.stats import 
periodicStatDump -addToPath('../') +addToPath("../") from common import ObjectList from common import MemConfig @@ -53,38 +53,60 @@ from common import MemConfig parser = argparse.ArgumentParser() -hybrid_generators = { - "HYBRID" : lambda x: x.createHybrid, -} +hybrid_generators = {"HYBRID": lambda x: x.createHybrid} # Use a single-channel DDR3-1600 x64 (8x8 topology) by default -parser.add_argument("--nvm-type", default="NVM_2400_1x64", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") +parser.add_argument( + "--nvm-type", + default="NVM_2400_1x64", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", +) -parser.add_argument("--mem-type", default="DDR4_2400_16x4", - choices=ObjectList.mem_list.get_names(), - help = "type of memory to use") +parser.add_argument( + "--mem-type", + default="DDR4_2400_16x4", + choices=ObjectList.mem_list.get_names(), + help="type of memory to use", +) -parser.add_argument("--nvm-ranks", "-n", type=int, default=1, - help = "Number of ranks to iterate across") +parser.add_argument( + "--nvm-ranks", + "-n", + type=int, + default=1, + help="Number of ranks to iterate across", +) -parser.add_argument("--mem-ranks", "-r", type=int, default=2, - help = "Number of ranks to iterate across") +parser.add_argument( + "--mem-ranks", + "-r", + type=int, + default=2, + help="Number of ranks to iterate across", +) -parser.add_argument("--rd-perc", type=int, default=100, - help = "Percentage of read commands") +parser.add_argument( + "--rd-perc", type=int, default=100, help="Percentage of read commands" +) -parser.add_argument("--nvm-perc", type=int, default=100, - help = "Percentage of NVM commands") +parser.add_argument( + "--nvm-perc", type=int, default=100, help="Percentage of NVM commands" +) -parser.add_argument("--mode", default="HYBRID", - choices=hybrid_generators.keys(), - help = "Hybrid: Random DRAM + NVM traffic") +parser.add_argument( + "--mode", + default="HYBRID", + 
choices=hybrid_generators.keys(), + help="Hybrid: Random DRAM + NVM traffic", +) -parser.add_argument("--addr-map", - choices=ObjectList.dram_addr_map_list.get_names(), - default="RoRaBaCoCh", help = "NVM address map policy") +parser.add_argument( + "--addr-map", + choices=ObjectList.dram_addr_map_list.get_names(), + default="RoRaBaCoCh", + help="NVM address map policy", +) args = parser.parse_args() @@ -94,16 +116,18 @@ args = parser.parse_args() # start with the system itself, using a multi-layer 2.0 GHz # crossbar, delivering 64 bytes / 3 cycles (one header cycle) # which amounts to 42.7 GByte/s per layer and thus per port -system = System(membus = IOXBar(width = 32)) -system.clk_domain = SrcClockDomain(clock = '2.0GHz', - voltage_domain = - VoltageDomain(voltage = '1V')) +system = System(membus=IOXBar(width=32)) +system.clk_domain = SrcClockDomain( + clock="2.0GHz", voltage_domain=VoltageDomain(voltage="1V") +) # set 2 ranges, the first, smaller range for DDR # the second, larger (1024) range for NVM # the NVM range starts directly after the DRAM range -system.mem_ranges = [AddrRange('128MB'), - AddrRange(Addr('128MB'), size ='1024MB')] +system.mem_ranges = [ + AddrRange("128MB"), + AddrRange(Addr("128MB"), size="1024MB"), +] # do not worry about reserving space for the backing store system.mmap_using_noreserve = True @@ -144,33 +168,52 @@ period = 250000000 nbr_banks_dram = system.mem_ctrls[0].dram.banks_per_rank.value # determine the burst length in bytes -burst_size_dram = int((system.mem_ctrls[0].dram.devices_per_rank.value * - system.mem_ctrls[0].dram.device_bus_width.value * - system.mem_ctrls[0].dram.burst_length.value) / 8) +burst_size_dram = int( + ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_bus_width.value + * system.mem_ctrls[0].dram.burst_length.value + ) + / 8 +) # next, get the page size in bytes -page_size_dram = system.mem_ctrls[0].dram.devices_per_rank.value * \ - 
system.mem_ctrls[0].dram.device_rowbuffer_size.value +page_size_dram = ( + system.mem_ctrls[0].dram.devices_per_rank.value + * system.mem_ctrls[0].dram.device_rowbuffer_size.value +) # get the number of regions nbr_banks_nvm = system.mem_ctrls[0].nvm.banks_per_rank.value # determine the burst length in bytes -burst_size_nvm = int((system.mem_ctrls[0].nvm.devices_per_rank.value * - system.mem_ctrls[0].nvm.device_bus_width.value * - system.mem_ctrls[0].nvm.burst_length.value) / 8) +burst_size_nvm = int( + ( + system.mem_ctrls[0].nvm.devices_per_rank.value + * system.mem_ctrls[0].nvm.device_bus_width.value + * system.mem_ctrls[0].nvm.burst_length.value + ) + / 8 +) burst_size = max(burst_size_dram, burst_size_nvm) # next, get the page size in bytes -buffer_size_nvm = system.mem_ctrls[0].nvm.devices_per_rank.value * \ - system.mem_ctrls[0].nvm.device_rowbuffer_size.value +buffer_size_nvm = ( + system.mem_ctrls[0].nvm.devices_per_rank.value + * system.mem_ctrls[0].nvm.device_rowbuffer_size.value +) # match the maximum bandwidth of the memory, the parameter is in seconds # and we need it in ticks (ps) -itt = min(system.mem_ctrls[0].dram.tBURST.value, - system.mem_ctrls[0].nvm.tBURST.value) * 1000000000000 +itt = ( + min( + system.mem_ctrls[0].dram.tBURST.value, + system.mem_ctrls[0].nvm.tBURST.value, + ) + * 1000000000000 +) # assume we start at 0 for DRAM max_addr_dram = system.mem_ranges[0].end @@ -198,32 +241,49 @@ system.system_port = system.membus.cpu_side_ports periodicStatDump(period) # run Forrest, run! 
-root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() + def trace(): addr_map = ObjectList.dram_addr_map_list.get(args.addr_map) generator = hybrid_generators[args.mode](system.tgen) for stride_size in range(burst_size, max_stride + 1, burst_size): - num_seq_pkts_dram = int(math.ceil(float(stride_size) / - burst_size_dram)) + num_seq_pkts_dram = int( + math.ceil(float(stride_size) / burst_size_dram) + ) num_seq_pkts_nvm = int(math.ceil(float(stride_size) / burst_size_nvm)) - yield generator(period, - 0, max_addr_dram, burst_size_dram, - min_addr_nvm, max_addr_nvm, burst_size_nvm, - int(itt), int(itt), - args.rd_perc, 0, - num_seq_pkts_dram, page_size_dram, - nbr_banks_dram, nbr_banks_dram, - num_seq_pkts_nvm, buffer_size_nvm, - nbr_banks_nvm, nbr_banks_nvm, - addr_map, args.mem_ranks, - args.nvm_ranks, args.nvm_perc) + yield generator( + period, + 0, + max_addr_dram, + burst_size_dram, + min_addr_nvm, + max_addr_nvm, + burst_size_nvm, + int(itt), + int(itt), + args.rd_perc, + 0, + num_seq_pkts_dram, + page_size_dram, + nbr_banks_dram, + nbr_banks_dram, + num_seq_pkts_nvm, + buffer_size_nvm, + nbr_banks_nvm, + nbr_banks_nvm, + addr_map, + args.mem_ranks, + args.nvm_ranks, + args.nvm_perc, + ) yield system.tgen.createExit(0) + system.tgen.start(trace()) m5.simulate() diff --git a/configs/ruby/AMD_Base_Constructor.py b/configs/ruby/AMD_Base_Constructor.py index 93907a33a7..030b45cbb6 100644 --- a/configs/ruby/AMD_Base_Constructor.py +++ b/configs/ruby/AMD_Base_Constructor.py @@ -34,7 +34,7 @@ from m5.defines import buildEnv from m5.util import addToPath, convert from .CntrlBase import * -addToPath('../') +addToPath("../") from topologies.Cluster import Cluster @@ -44,23 +44,27 @@ from topologies.Cluster import Cluster class L1Cache(RubyCache): latency = 1 resourceStalls = False + def create(self, size, assoc, options): self.size = 
MemorySize(size) self.assoc = assoc self.replacement_policy = TreePLRURP() + # # Note: the L2 Cache latency is not currently used # class L2Cache(RubyCache): latency = 10 resourceStalls = False + def create(self, size, assoc, options): self.size = MemorySize(size) self.assoc = assoc self.replacement_policy = TreePLRURP() -class CPCntrl(AMD_Base_Controller, CntrlBase): + +class CPCntrl(AMD_Base_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() self.cntrl_id = self.cntrlCount() @@ -96,16 +100,20 @@ class CPCntrl(AMD_Base_Controller, CntrlBase): if options.recycle_latency: self.recycle_latency = options.recycle_latency + def define_options(parser): parser.add_argument("--cpu-to-dir-latency", type=int, default=15) + def construct(options, system, ruby_system): - if buildEnv['PROTOCOL'] != 'GPU_VIPER': - panic("This script requires VIPER based protocols \ - to be built.") + if buildEnv["PROTOCOL"] != "GPU_VIPER": + panic( + "This script requires VIPER based protocols \ + to be built." + ) cpu_sequencers = [] cpuCluster = None - cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s + cpuCluster = Cluster(name="CPU Cluster", extBW=8, intBW=8) # 16 GB/s for i in range((options.num_cpus + 1) // 2): cp_cntrl = CPCntrl() diff --git a/configs/ruby/CHI.py b/configs/ruby/CHI.py index c94dc943f9..df97b923ae 100644 --- a/configs/ruby/CHI.py +++ b/configs/ruby/CHI.py @@ -38,33 +38,42 @@ from m5.objects import * from m5.defines import buildEnv from .Ruby import create_topology + def define_options(parser): - parser.add_argument("--chi-config", action="store", type=str, - default=None, - help="NoC config. parameters and bindings. " - "Required for CustomMesh topology") + parser.add_argument( + "--chi-config", + action="store", + type=str, + default=None, + help="NoC config. parameters and bindings. 
" + "Required for CustomMesh topology", + ) parser.add_argument("--enable-dvm", default=False, action="store_true") + def read_config_file(file): - ''' Read file as a module and return it ''' + """Read file as a module and return it""" import types import importlib.machinery - loader = importlib.machinery.SourceFileLoader('chi_configs', file) + + loader = importlib.machinery.SourceFileLoader("chi_configs", file) chi_configs = types.ModuleType(loader.name) loader.exec_module(chi_configs) return chi_configs -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'CHI': +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "CHI": m5.panic("This script requires the CHI build") if options.num_dirs < 1: - m5.fatal('--num-dirs must be at least 1') + m5.fatal("--num-dirs must be at least 1") if options.num_l3caches < 1: - m5.fatal('--num-l3caches must be at least 1') + m5.fatal("--num-l3caches must be at least 1") if full_system and options.enable_dvm: if len(cpus) <= 1: @@ -76,8 +85,8 @@ def create_system(options, full_system, system, dma_ports, bootmem, # read specialized classes from config file if provided if options.chi_config: chi_defs = read_config_file(options.chi_config) - elif options.topology == 'CustomMesh': - m5.fatal('--noc-config must be provided if topology is CustomMesh') + elif options.topology == "CustomMesh": + m5.fatal("--noc-config must be provided if topology is CustomMesh") else: # Use the defaults from CHI_config from . import CHI_config as chi_defs @@ -93,7 +102,6 @@ def create_system(options, full_system, system, dma_ports, bootmem, CHI_RNI_DMA = chi_defs.CHI_RNI_DMA CHI_RNI_IO = chi_defs.CHI_RNI_IO - # Declare caches and controller types used by the protocol # Notice tag and data accesses are not concurrent, so the a cache hit # latency = tag + data + response latencies. 
@@ -127,7 +135,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, assoc = options.l3_assoc # other functions use system.cache_line_size assuming it has been set - assert(system.cache_line_size.value == options.cacheline_size) + assert system.cache_line_size.value == options.cacheline_size cpu_sequencers = [] mem_cntrls = [] @@ -138,10 +146,17 @@ def create_system(options, full_system, system, dma_ports, bootmem, all_cntrls = [] # Creates on RNF per cpu with priv l2 caches - assert(len(cpus) == options.num_cpus) - ruby_system.rnf = [ CHI_RNF([cpu], ruby_system, L1ICache, L1DCache, - system.cache_line_size.value) - for cpu in cpus ] + assert len(cpus) == options.num_cpus + ruby_system.rnf = [ + CHI_RNF( + [cpu], + ruby_system, + L1ICache, + L1DCache, + system.cache_line_size.value, + ) + for cpu in cpus + ] for rnf in ruby_system.rnf: rnf.addPrivL2Cache(L2Cache) cpu_sequencers.extend(rnf.getSequencers()) @@ -150,20 +165,20 @@ def create_system(options, full_system, system, dma_ports, bootmem, network_cntrls.extend(rnf.getNetworkSideControllers()) # Creates one Misc Node - ruby_system.mn = [ CHI_MN(ruby_system, [cpu.l1d for cpu in cpus]) ] + ruby_system.mn = [CHI_MN(ruby_system, [cpu.l1d for cpu in cpus])] for mn in ruby_system.mn: all_cntrls.extend(mn.getAllControllers()) network_nodes.append(mn) network_cntrls.extend(mn.getNetworkSideControllers()) - assert(mn.getAllControllers() == mn.getNetworkSideControllers()) + assert mn.getAllControllers() == mn.getNetworkSideControllers() # Look for other memories other_memories = [] if bootmem: other_memories.append(bootmem) - if getattr(system, 'sram', None): - other_memories.append(getattr(system, 'sram', None)) - on_chip_mem_ports = getattr(system, '_on_chip_mem_ports', None) + if getattr(system, "sram", None): + other_memories.append(getattr(system, "sram", None)) + on_chip_mem_ports = getattr(system, "_on_chip_mem_ports", None) if on_chip_mem_ports: other_memories.extend([p.simobj for p in 
on_chip_mem_ports]) @@ -174,15 +189,16 @@ def create_system(options, full_system, system, dma_ports, bootmem, sysranges.append(m.range) hnf_list = [i for i in range(options.num_l3caches)] - CHI_HNF.createAddrRanges(sysranges, system.cache_line_size.value, - hnf_list) - ruby_system.hnf = [ CHI_HNF(i, ruby_system, HNFCache, None) - for i in range(options.num_l3caches) ] + CHI_HNF.createAddrRanges(sysranges, system.cache_line_size.value, hnf_list) + ruby_system.hnf = [ + CHI_HNF(i, ruby_system, HNFCache, None) + for i in range(options.num_l3caches) + ] for hnf in ruby_system.hnf: network_nodes.append(hnf) network_cntrls.extend(hnf.getNetworkSideControllers()) - assert(hnf.getAllControllers() == hnf.getNetworkSideControllers()) + assert hnf.getAllControllers() == hnf.getNetworkSideControllers() all_cntrls.extend(hnf.getAllControllers()) hnf_dests.extend(hnf.getAllControllers()) @@ -190,31 +206,34 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Notice we don't define a Directory_Controller type so we don't use # create_directories shared by other protocols. 
- ruby_system.snf = [ CHI_SNF_MainMem(ruby_system, None, None) - for i in range(options.num_dirs) ] + ruby_system.snf = [ + CHI_SNF_MainMem(ruby_system, None, None) + for i in range(options.num_dirs) + ] for snf in ruby_system.snf: network_nodes.append(snf) network_cntrls.extend(snf.getNetworkSideControllers()) - assert(snf.getAllControllers() == snf.getNetworkSideControllers()) + assert snf.getAllControllers() == snf.getNetworkSideControllers() mem_cntrls.extend(snf.getAllControllers()) all_cntrls.extend(snf.getAllControllers()) mem_dests.extend(snf.getAllControllers()) if len(other_memories) > 0: - ruby_system.rom_snf = [ CHI_SNF_BootMem(ruby_system, None, m) - for m in other_memories ] + ruby_system.rom_snf = [ + CHI_SNF_BootMem(ruby_system, None, m) for m in other_memories + ] for snf in ruby_system.rom_snf: network_nodes.append(snf) network_cntrls.extend(snf.getNetworkSideControllers()) all_cntrls.extend(snf.getAllControllers()) mem_dests.extend(snf.getAllControllers()) - # Creates the controller for dma ports and io if len(dma_ports) > 0: - ruby_system.dma_rni = [ CHI_RNI_DMA(ruby_system, dma_port, None) - for dma_port in dma_ports ] + ruby_system.dma_rni = [ + CHI_RNI_DMA(ruby_system, dma_port, None) for dma_port in dma_ports + ] for rni in ruby_system.dma_rni: network_nodes.append(rni) network_cntrls.extend(rni.getNetworkSideControllers()) @@ -226,7 +245,6 @@ def create_system(options, full_system, system, dma_ports, bootmem, network_cntrls.extend(ruby_system.io_rni.getNetworkSideControllers()) all_cntrls.extend(ruby_system.io_rni.getAllControllers()) - # Assign downstream destinations for rnf in ruby_system.rnf: rnf.setDownstream(hnf_dests) @@ -248,17 +266,18 @@ def create_system(options, full_system, system, dma_ports, bootmem, ruby_system.network.control_msg_size = params.cntrl_msg_size ruby_system.network.data_msg_size = params.data_width - ruby_system.network.buffer_size = params.router_buffer_size + if options.network == "simple": + 
ruby_system.network.buffer_size = params.router_buffer_size # Incorporate the params into options so it's propagated to # makeTopology and create_topology the parent scripts for k in dir(params): - if not k.startswith('__'): + if not k.startswith("__"): setattr(options, k, getattr(params, k)) - if options.topology == 'CustomMesh': + if options.topology == "CustomMesh": topology = create_topology(network_nodes, options) - elif options.topology in ['Crossbar', 'Pt2Pt']: + elif options.topology in ["Crossbar", "Pt2Pt"]: topology = create_topology(network_cntrls, options) else: m5.fatal("%s not supported!" % options.topology) diff --git a/configs/ruby/CHI_config.py b/configs/ruby/CHI_config.py index a4b01cad8f..6d2084bc7b 100644 --- a/configs/ruby/CHI_config.py +++ b/configs/ruby/CHI_config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 ARM Limited +# Copyright (c) 2021,2022 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -''' +""" Definitions for CHI nodes and controller types. These are used by create_system in configs/ruby/CHI.py or may be used in custom configuration scripts. When used with create_system, the user may provide an additional @@ -43,18 +43,21 @@ defined here. When using the CustomMesh topology, --chi-config must be provided with specialization of the NoC_Param classes defining the NoC dimensions and node to router binding. See configs/example/noc_config/2x4.py for an example. -''' +""" import math import m5 from m5.objects import * + class Versions: - ''' + """ Helper class to obtain unique ids for a given controller class. These are passed as the 'version' parameter when creating the controller. 
- ''' + """ + _seqs = 0 + @classmethod def getSeqId(cls): val = cls._seqs @@ -62,6 +65,7 @@ class Versions: return val _version = {} + @classmethod def getVersion(cls, tp): if tp not in cls._version: @@ -72,11 +76,12 @@ class Versions: class NoC_Params: - ''' + """ Default parameters for the interconnect. The value of data_width is also used to set the data_channel_size for all CHI controllers. (see configs/ruby/CHI.py) - ''' + """ + router_link_latency = 1 node_link_latency = 1 router_latency = 1 @@ -86,16 +91,17 @@ class NoC_Params: cross_links = [] cross_link_latency = 0 + class CHI_Node(SubSystem): - ''' + """ Base class with common functions for setting up Cache or Memory controllers that are part of a CHI RNF, RNFI, HNF, or SNF nodes. Notice getNetworkSideControllers and getAllControllers must be implemented in the derived classes. - ''' + """ class NoC_Params: - ''' + """ NoC config. parameters and bindings required for CustomMesh topology. Maps 'num_nodes_per_router' CHI nodes to each router provided in @@ -104,7 +110,8 @@ class CHI_Node(SubSystem): If 'num_nodes_per_router' is left undefined, we circulate around 'router_list' until all nodes are mapped. 
See 'distributeNodes' in configs/topologies/CustomMesh.py - ''' + """ + num_nodes_per_router = None router_list = None @@ -114,30 +121,30 @@ class CHI_Node(SubSystem): self._network = ruby_system.network def getNetworkSideControllers(self): - ''' + """ Returns all ruby controllers that need to be connected to the network - ''' + """ raise NotImplementedError() def getAllControllers(self): - ''' + """ Returns all ruby controllers associated with this node - ''' + """ raise NotImplementedError() def setDownstream(self, cntrls): - ''' + """ Sets cntrls as the downstream list of all controllers in this node - ''' + """ for c in self.getNetworkSideControllers(): c.downstream_destinations = cntrls def connectController(self, cntrl): - ''' + """ Creates and configures the messages buffers for the CHI input/output ports that connect to the network - ''' + """ cntrl.reqOut = MessageBuffer() cntrl.rspOut = MessageBuffer() cntrl.snpOut = MessageBuffer() @@ -162,48 +169,63 @@ class CHI_Node(SubSystem): class TriggerMessageBuffer(MessageBuffer): - ''' + """ MessageBuffer for triggering internal controller events. These buffers should not be affected by the Ruby tester randomization and allow poping messages enqueued in the same cycle. - ''' - randomization = 'disabled' + """ + + randomization = "disabled" allow_zero_latency = True + class OrderedTriggerMessageBuffer(TriggerMessageBuffer): ordered = True + +class MemCtrlMessageBuffer(MessageBuffer): + """ + MessageBuffer exchanging messages with the memory + These buffers should also not be affected by the Ruby tester randomization. + """ + + randomization = "disabled" + ordered = True + + class CHI_Cache_Controller(Cache_Controller): - ''' + """ Default parameters for a Cache controller The Cache_Controller can also be used as a DMA requester or as a pure directory if all cache allocation policies are disabled. 
- ''' + """ def __init__(self, ruby_system): super(CHI_Cache_Controller, self).__init__( - version = Versions.getVersion(Cache_Controller), - ruby_system = ruby_system, - mandatoryQueue = MessageBuffer(), - prefetchQueue = MessageBuffer(), - triggerQueue = TriggerMessageBuffer(), - retryTriggerQueue = OrderedTriggerMessageBuffer(), - replTriggerQueue = OrderedTriggerMessageBuffer(), - reqRdy = TriggerMessageBuffer(), - snpRdy = TriggerMessageBuffer()) + version=Versions.getVersion(Cache_Controller), + ruby_system=ruby_system, + mandatoryQueue=MessageBuffer(), + prefetchQueue=MessageBuffer(), + triggerQueue=TriggerMessageBuffer(), + retryTriggerQueue=OrderedTriggerMessageBuffer(), + replTriggerQueue=OrderedTriggerMessageBuffer(), + reqRdy=TriggerMessageBuffer(), + snpRdy=TriggerMessageBuffer(), + ) # Set somewhat large number since we really a lot on internal # triggers. To limit the controller performance, tweak other # params such as: input port buffer size, cache banks, and output # port latency - self.transitions_per_cycle = 128 + self.transitions_per_cycle = 1024 # This should be set to true in the data cache controller to enable # timeouts on unique lines when a store conditional fails self.sc_lock_enabled = False + class CHI_L1Controller(CHI_Cache_Controller): - ''' + """ Default parameters for a L1 Cache controller - ''' + """ def __init__(self, ruby_system, sequencer, cache, prefetcher): super(CHI_L1Controller, self).__init__(ruby_system) @@ -235,10 +257,11 @@ class CHI_L1Controller(CHI_Cache_Controller): self.unify_repl_TBEs = False + class CHI_L2Controller(CHI_Cache_Controller): - ''' + """ Default parameters for a L2 Cache controller - ''' + """ def __init__(self, ruby_system, cache, prefetcher): super(CHI_L2Controller, self).__init__(ruby_system) @@ -265,14 +288,15 @@ class CHI_L2Controller(CHI_Cache_Controller): self.number_of_TBEs = 32 self.number_of_repl_TBEs = 32 self.number_of_snoop_TBEs = 16 - self.number_of_DVM_TBEs = 1 # should not receive any 
dvm - self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm + self.number_of_DVM_TBEs = 1 # should not receive any dvm + self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm self.unify_repl_TBEs = False + class CHI_HNFController(CHI_Cache_Controller): - ''' + """ Default parameters for a coherent home node (HNF) cache controller - ''' + """ def __init__(self, ruby_system, cache, prefetcher, addr_ranges): super(CHI_HNFController, self).__init__(ruby_system) @@ -299,27 +323,29 @@ class CHI_HNFController(CHI_Cache_Controller): # Some reasonable default TBE params self.number_of_TBEs = 32 self.number_of_repl_TBEs = 32 - self.number_of_snoop_TBEs = 1 # should not receive any snoop - self.number_of_DVM_TBEs = 1 # should not receive any dvm - self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm + self.number_of_snoop_TBEs = 1 # should not receive any snoop + self.number_of_DVM_TBEs = 1 # should not receive any dvm + self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm self.unify_repl_TBEs = False -class CHI_MNController(MiscNode_Controller): - ''' - Default parameters for a Misc Node - ''' - def __init__(self, ruby_system, addr_range, l1d_caches, - early_nonsync_comp): +class CHI_MNController(MiscNode_Controller): + """ + Default parameters for a Misc Node + """ + + def __init__( + self, ruby_system, addr_range, l1d_caches, early_nonsync_comp + ): super(CHI_MNController, self).__init__( - version = Versions.getVersion(MiscNode_Controller), - ruby_system = ruby_system, - mandatoryQueue = MessageBuffer(), - triggerQueue = TriggerMessageBuffer(), - retryTriggerQueue = TriggerMessageBuffer(), - schedRspTriggerQueue = TriggerMessageBuffer(), - reqRdy = TriggerMessageBuffer(), - snpRdy = TriggerMessageBuffer(), + version=Versions.getVersion(MiscNode_Controller), + ruby_system=ruby_system, + mandatoryQueue=MessageBuffer(), + triggerQueue=TriggerMessageBuffer(), + retryTriggerQueue=TriggerMessageBuffer(), + 
schedRspTriggerQueue=TriggerMessageBuffer(), + reqRdy=TriggerMessageBuffer(), + snpRdy=TriggerMessageBuffer(), ) # Set somewhat large number since we really a lot on internal # triggers. To limit the controller performance, tweak other @@ -335,19 +361,22 @@ class CHI_MNController(MiscNode_Controller): # "upstream_destinations" = targets for DVM snoops self.upstream_destinations = l1d_caches + class CHI_DMAController(CHI_Cache_Controller): - ''' + """ Default parameters for a DMA controller - ''' + """ def __init__(self, ruby_system, sequencer): super(CHI_DMAController, self).__init__(ruby_system) self.sequencer = sequencer + class DummyCache(RubyCache): dataAccessLatency = 0 tagAccessLatency = 1 size = "128" assoc = 1 + self.use_prefetcher = False self.cache = DummyCache() self.sequencer.dcache = NULL @@ -370,37 +399,39 @@ class CHI_DMAController(CHI_Cache_Controller): self.send_evictions = False self.number_of_TBEs = 16 self.number_of_repl_TBEs = 1 - self.number_of_snoop_TBEs = 1 # should not receive any snoop - self.number_of_DVM_TBEs = 1 # should not receive any dvm - self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm + self.number_of_snoop_TBEs = 1 # should not receive any snoop + self.number_of_DVM_TBEs = 1 # should not receive any dvm + self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm self.unify_repl_TBEs = False + class CPUSequencerWrapper: - ''' + """ Other generic configuration scripts assume a matching number of sequencers and cpus. This wraps the instruction and data sequencer so they are compatible with the other scripts. 
This assumes all scripts are using connectCpuPorts/connectIOPorts to bind ports - ''' + """ def __init__(self, iseq, dseq): # use this style due to __setattr__ override below - self.__dict__['inst_seq'] = iseq - self.__dict__['data_seq'] = dseq - self.__dict__['support_data_reqs'] = True - self.__dict__['support_inst_reqs'] = True + self.__dict__["inst_seq"] = iseq + self.__dict__["data_seq"] = dseq + self.__dict__["support_data_reqs"] = True + self.__dict__["support_inst_reqs"] = True # Compatibility with certain scripts that wire up ports # without connectCpuPorts - self.__dict__['in_ports'] = dseq.in_ports + self.__dict__["in_ports"] = dseq.in_ports def connectCpuPorts(self, cpu): - assert(isinstance(cpu, BaseCPU)) + assert isinstance(cpu, BaseCPU) cpu.icache_port = self.inst_seq.in_ports for p in cpu._cached_ports: - if str(p) != 'icache_port': - exec('cpu.%s = self.data_seq.in_ports' % p) + if str(p) != "icache_port": + exec("cpu.%s = self.data_seq.in_ports" % p) cpu.connectUncachedPorts( - self.data_seq.in_ports, self.data_seq.interrupt_out_port) + self.data_seq.in_ports, self.data_seq.interrupt_out_port + ) def connectIOPorts(self, piobus): self.data_seq.connectIOPorts(piobus) @@ -409,18 +440,25 @@ class CPUSequencerWrapper: setattr(self.inst_seq, name, value) setattr(self.data_seq, name, value) + class CHI_RNF(CHI_Node): - ''' + """ Defines a CHI request node. 
Notice all contollers and sequencers are set as children of the cpus, so this object acts more like a proxy for seting things up and has no topology significance unless the cpus are set as its children at the top level - ''' + """ - def __init__(self, cpus, ruby_system, - l1Icache_type, l1Dcache_type, - cache_line_size, - l1Iprefetcher_type=None, l1Dprefetcher_type=None): + def __init__( + self, + cpus, + ruby_system, + l1Icache_type, + l1Dcache_type, + cache_line_size, + l1Iprefetcher_type=None, + l1Dprefetcher_type=None, + ): super(CHI_RNF, self).__init__(ruby_system) self._block_size_bits = int(math.log(cache_line_size, 2)) @@ -437,33 +475,40 @@ class CHI_RNF(CHI_Node): # First creates L1 caches and sequencers for cpu in self._cpus: - cpu.inst_sequencer = RubySequencer(version = Versions.getSeqId(), - ruby_system = ruby_system) - cpu.data_sequencer = RubySequencer(version = Versions.getSeqId(), - ruby_system = ruby_system) + cpu.inst_sequencer = RubySequencer( + version=Versions.getSeqId(), ruby_system=ruby_system + ) + cpu.data_sequencer = RubySequencer( + version=Versions.getSeqId(), ruby_system=ruby_system + ) - self._seqs.append(CPUSequencerWrapper(cpu.inst_sequencer, - cpu.data_sequencer)) + self._seqs.append( + CPUSequencerWrapper(cpu.inst_sequencer, cpu.data_sequencer) + ) # caches - l1i_cache = l1Icache_type(start_index_bit = self._block_size_bits, - is_icache = True) + l1i_cache = l1Icache_type( + start_index_bit=self._block_size_bits, is_icache=True + ) - l1d_cache = l1Dcache_type(start_index_bit = self._block_size_bits, - is_icache = False) + l1d_cache = l1Dcache_type( + start_index_bit=self._block_size_bits, is_icache=False + ) # Placeholders for future prefetcher support if l1Iprefetcher_type != None or l1Dprefetcher_type != None: - m5.fatal('Prefetching not supported yet') + m5.fatal("Prefetching not supported yet") l1i_pf = NULL l1d_pf = NULL # cache controllers - cpu.l1i = CHI_L1Controller(ruby_system, cpu.inst_sequencer, - l1i_cache, l1i_pf) + 
cpu.l1i = CHI_L1Controller( + ruby_system, cpu.inst_sequencer, l1i_cache, l1i_pf + ) - cpu.l1d = CHI_L1Controller(ruby_system, cpu.data_sequencer, - l1d_cache, l1d_pf) + cpu.l1d = CHI_L1Controller( + ruby_system, cpu.data_sequencer, l1d_cache, l1d_pf + ) cpu.inst_sequencer.dcache = NULL cpu.data_sequencer.dcache = cpu.l1d.cache @@ -496,10 +541,11 @@ class CHI_RNF(CHI_Node): def addPrivL2Cache(self, cache_type, pf_type=None): self._ll_cntrls = [] for cpu in self._cpus: - l2_cache = cache_type(start_index_bit = self._block_size_bits, - is_icache = False) + l2_cache = cache_type( + start_index_bit=self._block_size_bits, is_icache=False + ) if pf_type != None: - m5.fatal('Prefetching not supported yet') + m5.fatal("Prefetching not supported yet") l2_pf = NULL cpu.l2 = CHI_L2Controller(self._ruby_system, l2_cache, l2_pf) @@ -515,18 +561,20 @@ class CHI_RNF(CHI_Node): class CHI_HNF(CHI_Node): - ''' + """ Encapsulates an HNF cache/directory controller. Before the first controller is created, the class method CHI_HNF.createAddrRanges must be called before creating any CHI_HNF object to set-up the interleaved address ranges used by the HNFs - ''' + """ class NoC_Params(CHI_Node.NoC_Params): - '''HNFs may also define the 'pairing' parameter to allow pairing''' + """HNFs may also define the 'pairing' parameter to allow pairing""" + pairing = None _addr_ranges = {} + @classmethod def createAddrRanges(cls, sys_mem_ranges, cache_line_size, hnfs): # Create the HNFs interleaved addr ranges @@ -536,16 +584,19 @@ class CHI_HNF(CHI_Node): for i, hnf in enumerate(hnfs): ranges = [] for r in sys_mem_ranges: - addr_range = AddrRange(r.start, size = r.size(), - intlvHighBit = numa_bit, - intlvBits = llc_bits, - intlvMatch = i) + addr_range = AddrRange( + r.start, + size=r.size(), + intlvHighBit=numa_bit, + intlvBits=llc_bits, + intlvMatch=i, + ) ranges.append(addr_range) cls._addr_ranges[hnf] = (ranges, numa_bit) @classmethod def getAddrRanges(cls, hnf_idx): - 
assert(len(cls._addr_ranges) != 0) + assert len(cls._addr_ranges) != 0 return cls._addr_ranges[hnf_idx] # The CHI controller can be a child of this object or another if @@ -553,13 +604,14 @@ class CHI_HNF(CHI_Node): def __init__(self, hnf_idx, ruby_system, llcache_type, parent): super(CHI_HNF, self).__init__(ruby_system) - addr_ranges,intlvHighBit = self.getAddrRanges(hnf_idx) + addr_ranges, intlvHighBit = self.getAddrRanges(hnf_idx) # All ranges should have the same interleaving - assert(len(addr_ranges) >= 1) + assert len(addr_ranges) >= 1 - ll_cache = llcache_type(start_index_bit = intlvHighBit + 1) - self._cntrl = CHI_HNFController(ruby_system, ll_cache, NULL, - addr_ranges) + ll_cache = llcache_type(start_index_bit=intlvHighBit + 1) + self._cntrl = CHI_HNFController( + ruby_system, ll_cache, NULL, addr_ranges + ) if parent == None: self.cntrl = self._cntrl @@ -576,14 +628,14 @@ class CHI_HNF(CHI_Node): class CHI_MN(CHI_Node): - ''' + """ Encapsulates a Misc Node controller. - ''' + """ class NoC_Params(CHI_Node.NoC_Params): - '''HNFs may also define the 'pairing' parameter to allow pairing''' - pairing = None + """HNFs may also define the 'pairing' parameter to allow pairing""" + pairing = None # The CHI controller can be a child of this object or another if # 'parent' if specified @@ -591,10 +643,11 @@ class CHI_MN(CHI_Node): super(CHI_MN, self).__init__(ruby_system) # MiscNode has internal address range starting at 0 - addr_range = AddrRange(0, size = "1kB") + addr_range = AddrRange(0, size="1kB") - self._cntrl = CHI_MNController(ruby_system, addr_range, l1d_caches, - early_nonsync_comp) + self._cntrl = CHI_MNController( + ruby_system, addr_range, l1d_caches, early_nonsync_comp + ) self.cntrl = self._cntrl @@ -609,10 +662,11 @@ class CHI_MN(CHI_Node): def getNetworkSideControllers(self): return [self._cntrl] + class CHI_SNF_Base(CHI_Node): - ''' + """ Creates CHI node controllers for the memory controllers - ''' + """ # The CHI controller can be a child of 
this object or another if # 'parent' if specified @@ -620,12 +674,21 @@ class CHI_SNF_Base(CHI_Node): super(CHI_SNF_Base, self).__init__(ruby_system) self._cntrl = Memory_Controller( - version = Versions.getVersion(Memory_Controller), - ruby_system = ruby_system, - triggerQueue = TriggerMessageBuffer(), - responseFromMemory = MessageBuffer(), - requestToMemory = MessageBuffer(ordered = True), - reqRdy = TriggerMessageBuffer()) + version=Versions.getVersion(Memory_Controller), + ruby_system=ruby_system, + triggerQueue=TriggerMessageBuffer(), + responseFromMemory=MemCtrlMessageBuffer(), + requestToMemory=MemCtrlMessageBuffer(), + reqRdy=TriggerMessageBuffer(), + transitions_per_cycle=1024, + ) + + # The Memory_Controller implementation deallocates the TBE for + # write requests when they are queue up to memory. The size of this + # buffer must be limited to prevent unlimited outstanding writes. + self._cntrl.requestToMemory.buffer_size = ( + int(self._cntrl.to_memory_controller_latency) + 1 + ) self.connectController(self._cntrl) @@ -643,46 +706,51 @@ class CHI_SNF_Base(CHI_Node): def getMemRange(self, mem_ctrl): # TODO need some kind of transparent API for # MemCtrl+DRAM vs SimpleMemory - if hasattr(mem_ctrl, 'range'): + if hasattr(mem_ctrl, "range"): return mem_ctrl.range else: return mem_ctrl.dram.range + class CHI_SNF_BootMem(CHI_SNF_Base): - ''' + """ Create the SNF for the boot memory - ''' + """ def __init__(self, ruby_system, parent, bootmem): super(CHI_SNF_BootMem, self).__init__(ruby_system, parent) self._cntrl.memory_out_port = bootmem.port self._cntrl.addr_ranges = self.getMemRange(bootmem) -class CHI_SNF_MainMem(CHI_SNF_Base): - ''' - Create the SNF for a list main memory controllers - ''' - def __init__(self, ruby_system, parent, mem_ctrl = None): +class CHI_SNF_MainMem(CHI_SNF_Base): + """ + Create the SNF for a list main memory controllers + """ + + def __init__(self, ruby_system, parent, mem_ctrl=None): super(CHI_SNF_MainMem, 
self).__init__(ruby_system, parent) if mem_ctrl: self._cntrl.memory_out_port = mem_ctrl.port self._cntrl.addr_ranges = self.getMemRange(mem_ctrl) # else bind ports and range later + class CHI_RNI_Base(CHI_Node): - ''' + """ Request node without cache / DMA - ''' + """ # The CHI controller can be a child of this object or another if # 'parent' if specified def __init__(self, ruby_system, parent): super(CHI_RNI_Base, self).__init__(ruby_system) - self._sequencer = RubySequencer(version = Versions.getSeqId(), - ruby_system = ruby_system, - clk_domain = ruby_system.clk_domain) + self._sequencer = RubySequencer( + version=Versions.getSeqId(), + ruby_system=ruby_system, + clk_domain=ruby_system.clk_domain, + ) self._cntrl = CHI_DMAController(ruby_system, self._sequencer) if parent: @@ -698,20 +766,22 @@ class CHI_RNI_Base(CHI_Node): def getNetworkSideControllers(self): return [self._cntrl] + class CHI_RNI_DMA(CHI_RNI_Base): - ''' + """ DMA controller wiredup to a given dma port - ''' + """ def __init__(self, ruby_system, dma_port, parent): super(CHI_RNI_DMA, self).__init__(ruby_system, parent) - assert(dma_port != None) + assert dma_port != None self._sequencer.in_ports = dma_port + class CHI_RNI_IO(CHI_RNI_Base): - ''' + """ DMA controller wiredup to ruby_system IO port - ''' + """ def __init__(self, ruby_system, parent): super(CHI_RNI_IO, self).__init__(ruby_system, parent) diff --git a/configs/ruby/CntrlBase.py b/configs/ruby/CntrlBase.py index d27e7f197a..674ef8f699 100644 --- a/configs/ruby/CntrlBase.py +++ b/configs/ruby/CntrlBase.py @@ -24,8 +24,10 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ class CntrlBase: _seqs = 0 + @classmethod def seqCount(cls): # Use SeqCount not class since we need global count @@ -33,6 +35,7 @@ class CntrlBase: return CntrlBase._seqs - 1 _cntrls = 0 + @classmethod def cntrlCount(cls): # Use CntlCount not class since we need global count @@ -40,7 +43,8 @@ class CntrlBase: return CntrlBase._cntrls - 1 _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 diff --git a/configs/ruby/GPU_VIPER.py b/configs/ruby/GPU_VIPER.py index dc99429b47..ee8d570498 100644 --- a/configs/ruby/GPU_VIPER.py +++ b/configs/ruby/GPU_VIPER.py @@ -38,13 +38,15 @@ from common import ObjectList from common import MemConfig from common import FileSystemConfig -addToPath('../') +addToPath("../") from topologies.Cluster import Cluster from topologies.Crossbar import Crossbar + class CntrlBase: _seqs = 0 + @classmethod def seqCount(cls): # Use SeqCount not class since we need global count @@ -52,6 +54,7 @@ class CntrlBase: return CntrlBase._seqs - 1 _cntrls = 0 + @classmethod def cntrlCount(cls): # Use CntlCount not class since we need global count @@ -59,34 +62,39 @@ class CntrlBase: return CntrlBase._cntrls - 1 _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 + class L1Cache(RubyCache): resourceStalls = False dataArrayBanks = 2 tagArrayBanks = 2 dataAccessLatency = 1 tagAccessLatency = 1 + def create(self, size, assoc, options): self.size = MemorySize(size) self.assoc = assoc self.replacement_policy = TreePLRURP() + class L2Cache(RubyCache): resourceStalls = False assoc = 16 dataArrayBanks = 16 tagArrayBanks = 16 + def create(self, size, assoc, options): self.size = MemorySize(size) self.assoc = assoc self.replacement_policy = TreePLRURP() -class CPCntrl(CorePair_Controller, CntrlBase): 
+class CPCntrl(CorePair_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() @@ -121,29 +129,35 @@ class CPCntrl(CorePair_Controller, CntrlBase): if options.recycle_latency: self.recycle_latency = options.recycle_latency + class TCPCache(RubyCache): size = "16kB" assoc = 16 - dataArrayBanks = 16 #number of data banks - tagArrayBanks = 16 #number of tag banks + dataArrayBanks = 16 # number of data banks + tagArrayBanks = 16 # number of tag banks dataAccessLatency = 4 tagAccessLatency = 1 + def create(self, options): self.size = MemorySize(options.tcp_size) self.assoc = options.tcp_assoc self.resourceStalls = options.no_tcc_resource_stalls self.replacement_policy = TreePLRURP() -class TCPCntrl(TCP_Controller, CntrlBase): +class TCPCntrl(TCP_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() - self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency, - dataAccessLatency = options.TCP_latency) + self.L1cache = TCPCache( + tagAccessLatency=options.TCP_latency, + dataAccessLatency=options.TCP_latency, + ) self.L1cache.resourceStalls = options.no_resource_stalls self.L1cache.create(options) self.issue_latency = 1 + # TCP_Controller inherits this from RubyController + self.mandatory_queue_latency = options.mandatory_queue_latency self.coalescer = VIPERCoalescer() self.coalescer.version = self.seqCount() @@ -153,10 +167,10 @@ class TCPCntrl(TCP_Controller, CntrlBase): self.coalescer.support_inst_reqs = False self.coalescer.is_cpu_sequencer = False if options.tcp_deadlock_threshold: - self.coalescer.deadlock_threshold = \ - options.tcp_deadlock_threshold - self.coalescer.max_coalesces_per_cycle = \ + self.coalescer.deadlock_threshold = options.tcp_deadlock_threshold + self.coalescer.max_coalesces_per_cycle = ( options.max_coalesces_per_cycle + ) self.sequencer = RubySequencer() self.sequencer.version = self.seqCount() @@ -167,6 +181,11 @@ class 
TCPCntrl(TCP_Controller, CntrlBase): self.use_seq_not_coal = False self.ruby_system = ruby_system + if hasattr(options, "gpu_clock") and hasattr(options, "gpu_voltage"): + self.clk_domain = SrcClockDomain( + clock=options.gpu_clock, + voltage_domain=VoltageDomain(voltage=options.gpu_voltage), + ) if options.recycle_latency: self.recycle_latency = options.recycle_latency @@ -174,8 +193,10 @@ class TCPCntrl(TCP_Controller, CntrlBase): def createCP(self, options, ruby_system, system): self.version = self.versionCount() - self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency, - dataAccessLatency = options.TCP_latency) + self.L1cache = TCPCache( + tagAccessLatency=options.TCP_latency, + dataAccessLatency=options.TCP_latency, + ) self.L1cache.resourceStalls = options.no_resource_stalls self.L1cache.create(options) self.issue_latency = 1 @@ -201,6 +222,7 @@ class TCPCntrl(TCP_Controller, CntrlBase): if options.recycle_latency: self.recycle_latency = options.recycle_latency + class SQCCache(RubyCache): dataArrayBanks = 8 tagArrayBanks = 8 @@ -212,8 +234,8 @@ class SQCCache(RubyCache): self.assoc = options.sqc_assoc self.replacement_policy = TreePLRURP() -class SQCCntrl(SQC_Controller, CntrlBase): +class SQCCntrl(SQC_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() @@ -229,38 +251,47 @@ class SQCCntrl(SQC_Controller, CntrlBase): self.sequencer.support_data_reqs = False self.sequencer.is_cpu_sequencer = False if options.sqc_deadlock_threshold: - self.sequencer.deadlock_threshold = \ - options.sqc_deadlock_threshold + self.sequencer.deadlock_threshold = options.sqc_deadlock_threshold self.ruby_system = ruby_system + if hasattr(options, "gpu_clock") and hasattr(options, "gpu_voltage"): + self.clk_domain = SrcClockDomain( + clock=options.gpu_clock, + voltage_domain=VoltageDomain(voltage=options.gpu_voltage), + ) if options.recycle_latency: self.recycle_latency = options.recycle_latency + class TCC(RubyCache): 
size = MemorySize("256kB") assoc = 16 dataAccessLatency = 8 tagAccessLatency = 2 resourceStalls = True + def create(self, options): self.assoc = options.tcc_assoc - if hasattr(options, 'bw_scalor') and options.bw_scalor > 0: - s = options.num_compute_units - tcc_size = s * 128 - tcc_size = str(tcc_size)+'kB' - self.size = MemorySize(tcc_size) - self.dataArrayBanks = 64 - self.tagArrayBanks = 64 + if hasattr(options, "bw_scalor") and options.bw_scalor > 0: + s = options.num_compute_units + tcc_size = s * 128 + tcc_size = str(tcc_size) + "kB" + self.size = MemorySize(tcc_size) + self.dataArrayBanks = 64 + self.tagArrayBanks = 64 else: - self.size = MemorySize(options.tcc_size) - self.dataArrayBanks = 256 / options.num_tccs #number of data banks - self.tagArrayBanks = 256 / options.num_tccs #number of tag banks + self.size = MemorySize(options.tcc_size) + self.dataArrayBanks = ( + 256 / options.num_tccs + ) # number of data banks + self.tagArrayBanks = 256 / options.num_tccs # number of tag banks self.size.value = self.size.value / options.num_tccs - if ((self.size.value / int(self.assoc)) < 128): + if (self.size.value / int(self.assoc)) < 128: self.size.value = int(128 * self.assoc) - self.start_index_bit = math.log(options.cacheline_size, 2) + \ - math.log(options.num_tccs, 2) + self.start_index_bit = math.log(options.cacheline_size, 2) + math.log( + options.num_tccs, 2 + ) self.replacement_policy = TreePLRURP() @@ -272,10 +303,16 @@ class TCCCntrl(TCC_Controller, CntrlBase): self.L2cache.resourceStalls = options.no_tcc_resource_stalls self.ruby_system = ruby_system + if hasattr(options, "gpu_clock") and hasattr(options, "gpu_voltage"): + self.clk_domain = SrcClockDomain( + clock=options.gpu_clock, + voltage_domain=VoltageDomain(voltage=options.gpu_voltage), + ) if options.recycle_latency: self.recycle_latency = options.recycle_latency + class L3Cache(RubyCache): dataArrayBanks = 16 tagArrayBanks = 16 @@ -293,20 +330,30 @@ class L3Cache(RubyCache): 
self.resourceStalls = False self.replacement_policy = TreePLRURP() + class L3Cntrl(L3Cache_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() self.L3cache = L3Cache() self.L3cache.create(options, ruby_system, system) - self.l3_response_latency = max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency) + self.l3_response_latency = max( + self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency + ) self.ruby_system = ruby_system if options.recycle_latency: self.recycle_latency = options.recycle_latency - def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir, - req_to_l3, probe_to_l3, resp_to_l3): + def connectWireBuffers( + self, + req_to_dir, + resp_to_dir, + l3_unblock_to_dir, + req_to_l3, + probe_to_l3, + resp_to_l3, + ): self.reqToDir = req_to_dir self.respToDir = resp_to_dir self.l3UnblockToDir = l3_unblock_to_dir @@ -314,6 +361,7 @@ class L3Cntrl(L3Cache_Controller, CntrlBase): self.probeToL3 = probe_to_l3 self.respToL3 = resp_to_l3 + class DirCntrl(Directory_Controller, CntrlBase): def create(self, options, dir_ranges, ruby_system, system): self.version = self.versionCount() @@ -326,8 +374,10 @@ class DirCntrl(Directory_Controller, CntrlBase): self.L3CacheMemory = L3Cache() self.L3CacheMemory.create(options, ruby_system, system) - self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency, - self.L3CacheMemory.tagAccessLatency) + self.l3_hit_latency = max( + self.L3CacheMemory.dataAccessLatency, + self.L3CacheMemory.tagAccessLatency, + ) self.number_of_TBEs = options.num_tbes @@ -336,8 +386,15 @@ class DirCntrl(Directory_Controller, CntrlBase): if options.recycle_latency: self.recycle_latency = options.recycle_latency - def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir, - req_to_l3, probe_to_l3, resp_to_l3): + def connectWireBuffers( + self, + req_to_dir, + resp_to_dir, + l3_unblock_to_dir, + req_to_l3, + probe_to_l3, + resp_to_l3, + ): 
self.reqToDir = req_to_dir self.respToDir = resp_to_dir self.l3UnblockToDir = l3_unblock_to_dir @@ -352,48 +409,87 @@ def define_options(parser): parser.add_argument("--l3-tag-latency", type=int, default=15) parser.add_argument("--cpu-to-dir-latency", type=int, default=120) parser.add_argument("--gpu-to-dir-latency", type=int, default=120) - parser.add_argument("--no-resource-stalls", action="store_false", - default=True) - parser.add_argument("--no-tcc-resource-stalls", action="store_false", - default=True) + parser.add_argument( + "--no-resource-stalls", action="store_false", default=True + ) + parser.add_argument( + "--no-tcc-resource-stalls", action="store_false", default=True + ) parser.add_argument("--use-L3-on-WT", action="store_true", default=False) parser.add_argument("--num-tbes", type=int, default=256) parser.add_argument("--l2-latency", type=int, default=50) # load to use - parser.add_argument("--num-tccs", type=int, default=1, - help="number of TCC banks in the GPU") - parser.add_argument("--sqc-size", type=str, default='32kB', - help="SQC cache size") - parser.add_argument("--sqc-assoc", type=int, default=8, - help="SQC cache assoc") - parser.add_argument("--sqc-deadlock-threshold", type=int, - help="Set the SQC deadlock threshold to some value") + parser.add_argument( + "--num-tccs", + type=int, + default=1, + help="number of TCC banks in the GPU", + ) + parser.add_argument( + "--sqc-size", type=str, default="32kB", help="SQC cache size" + ) + parser.add_argument( + "--sqc-assoc", type=int, default=8, help="SQC cache assoc" + ) + parser.add_argument( + "--sqc-deadlock-threshold", + type=int, + help="Set the SQC deadlock threshold to some value", + ) - parser.add_argument("--WB_L1", action="store_true", default=False, - help="writeback L1") - parser.add_argument("--WB_L2", action="store_true", default=False, - help="writeback L2") - parser.add_argument("--TCP_latency", type=int, default=4, - help="TCP latency") - parser.add_argument("--TCC_latency", 
type=int, default=16, - help="TCC latency") - parser.add_argument("--tcc-size", type=str, default='256kB', - help="agregate tcc size") - parser.add_argument("--tcc-assoc", type=int, default=16, - help="tcc assoc") - parser.add_argument("--tcp-size", type=str, default='16kB', - help="tcp size") - parser.add_argument("--tcp-assoc", type=int, default=16, - help="tcp assoc") - parser.add_argument("--tcp-deadlock-threshold", type=int, - help="Set the TCP deadlock threshold to some value") - parser.add_argument("--max-coalesces-per-cycle", type=int, default=1, - help="Maximum insts that may coalesce in a cycle") + parser.add_argument( + "--WB_L1", action="store_true", default=False, help="writeback L1" + ) + parser.add_argument( + "--WB_L2", action="store_true", default=False, help="writeback L2" + ) + parser.add_argument( + "--TCP_latency", + type=int, + default=4, + help="In combination with the number of banks for the " + "TCP, this determines how many requests can happen " + "per cycle (i.e., the bandwidth)", + ) + parser.add_argument( + "--mandatory_queue_latency", + type=int, + default=1, + help="Hit latency for TCP", + ) + parser.add_argument( + "--TCC_latency", type=int, default=16, help="TCC latency" + ) + parser.add_argument( + "--tcc-size", type=str, default="256kB", help="agregate tcc size" + ) + parser.add_argument("--tcc-assoc", type=int, default=16, help="tcc assoc") + parser.add_argument( + "--tcp-size", type=str, default="16kB", help="tcp size" + ) + parser.add_argument("--tcp-assoc", type=int, default=16, help="tcp assoc") + parser.add_argument( + "--tcp-deadlock-threshold", + type=int, + help="Set the TCP deadlock threshold to some value", + ) + parser.add_argument( + "--max-coalesces-per-cycle", + type=int, + default=1, + help="Maximum insts that may coalesce in a cycle", + ) + + parser.add_argument( + "--noL1", action="store_true", default=False, help="bypassL1" + ) + parser.add_argument( + "--scalar-buffer-size", + type=int, + default=128, + 
help="Size of the mandatory queue in the GPU scalar " + "cache controller", + ) - parser.add_argument("--noL1", action="store_true", default=False, - help="bypassL1") - parser.add_argument("--scalar-buffer-size", type=int, default=128, - help="Size of the mandatory queue in the GPU scalar " - "cache controller") def construct_dirs(options, system, ruby_system, network): @@ -415,20 +511,23 @@ def construct_dirs(options, system, ruby_system, network): for i in range(options.num_dirs): dir_ranges = [] for r in system.mem_ranges: - addr_range = m5.objects.AddrRange(r.start, size = r.size(), - intlvHighBit = numa_bit, - intlvBits = dir_bits, - intlvMatch = i) + addr_range = m5.objects.AddrRange( + r.start, + size=r.size(), + intlvHighBit=numa_bit, + intlvBits=dir_bits, + intlvMatch=i, + ) dir_ranges.append(addr_range) - dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits) + dir_cntrl = DirCntrl(noTCCdir=True, TCC_select_num_bits=TCC_bits) dir_cntrl.create(options, dir_ranges, ruby_system, system) dir_cntrl.number_of_TBEs = options.num_tbes dir_cntrl.useL3OnWT = options.use_L3_on_WT # the number_of_TBEs is inclusive of TBEs below # Connect the Directory controller to the ruby network - dir_cntrl.requestFromCores = MessageBuffer(ordered = True) + dir_cntrl.requestFromCores = MessageBuffer(ordered=True) dir_cntrl.requestFromCores.in_port = network.out_port dir_cntrl.responseFromCores = MessageBuffer() @@ -443,10 +542,10 @@ def construct_dirs(options, system, ruby_system, network): dir_cntrl.responseToCore = MessageBuffer() dir_cntrl.responseToCore.out_port = network.in_port - dir_cntrl.triggerQueue = MessageBuffer(ordered = True) - dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True) - dir_cntrl.requestToMemory = MessageBuffer(ordered = True) - dir_cntrl.responseFromMemory = MessageBuffer(ordered = True) + dir_cntrl.triggerQueue = MessageBuffer(ordered=True) + dir_cntrl.L3triggerQueue = MessageBuffer(ordered=True) + dir_cntrl.requestToMemory = 
MessageBuffer(ordered=True) + dir_cntrl.responseFromMemory = MessageBuffer(ordered=True) dir_cntrl.requestFromDMA = MessageBuffer(ordered=True) dir_cntrl.requestFromDMA.in_port = network.out_port @@ -459,6 +558,7 @@ def construct_dirs(options, system, ruby_system, network): return dir_cntrl_nodes + def construct_gpudirs(options, system, ruby_system, network): dir_cntrl_nodes = [] @@ -473,22 +573,24 @@ def construct_gpudirs(options, system, ruby_system, network): block_size_bits = int(math.log(options.cacheline_size, 2)) numa_bit = block_size_bits + dir_bits - 1 - gpu_mem_range = AddrRange(0, size = options.dgpu_mem_size) + gpu_mem_range = AddrRange(0, size=options.dgpu_mem_size) for i in range(options.dgpu_num_dirs): - addr_range = m5.objects.AddrRange(gpu_mem_range.start, - size = gpu_mem_range.size(), - intlvHighBit = numa_bit, - intlvBits = dir_bits, - intlvMatch = i, - xorHighBit = xor_low_bit) + addr_range = m5.objects.AddrRange( + gpu_mem_range.start, + size=gpu_mem_range.size(), + intlvHighBit=numa_bit, + intlvBits=dir_bits, + intlvMatch=i, + xorHighBit=xor_low_bit, + ) - dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits) + dir_cntrl = DirCntrl(noTCCdir=True, TCC_select_num_bits=TCC_bits) dir_cntrl.create(options, [addr_range], ruby_system, system) dir_cntrl.number_of_TBEs = options.num_tbes dir_cntrl.useL3OnWT = False # Connect the Directory controller to the ruby network - dir_cntrl.requestFromCores = MessageBuffer(ordered = True) + dir_cntrl.requestFromCores = MessageBuffer(ordered=True) dir_cntrl.requestFromCores.in_port = network.out_port dir_cntrl.responseFromCores = MessageBuffer() @@ -503,8 +605,8 @@ def construct_gpudirs(options, system, ruby_system, network): dir_cntrl.responseToCore = MessageBuffer() dir_cntrl.responseToCore.out_port = network.in_port - dir_cntrl.triggerQueue = MessageBuffer(ordered = True) - dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True) + dir_cntrl.triggerQueue = MessageBuffer(ordered=True) + 
dir_cntrl.L3triggerQueue = MessageBuffer(ordered=True) dir_cntrl.requestToMemory = MessageBuffer() dir_cntrl.responseFromMemory = MessageBuffer() @@ -519,11 +621,16 @@ def construct_gpudirs(options, system, ruby_system, network): # Create memory controllers too mem_type = ObjectList.mem_list.get(options.dgpu_mem_type) - dram_intf = MemConfig.create_mem_intf(mem_type, gpu_mem_range, i, - int(math.log(options.dgpu_num_dirs, 2)), options.cacheline_size, - xor_low_bit) + dram_intf = MemConfig.create_mem_intf( + mem_type, + gpu_mem_range, + i, + int(math.log(options.dgpu_num_dirs, 2)), + options.cacheline_size, + xor_low_bit, + ) if issubclass(mem_type, DRAMInterface): - mem_ctrl = m5.objects.MemCtrl(dram = dram_intf) + mem_ctrl = m5.objects.MemCtrl(dram=dram_intf) else: mem_ctrl = dram_intf @@ -540,6 +647,7 @@ def construct_gpudirs(options, system, ruby_system, network): return dir_cntrl_nodes, mem_ctrls + def construct_corepairs(options, system, ruby_system, network): cpu_sequencers = [] @@ -573,12 +681,13 @@ def construct_corepairs(options, system, ruby_system, network): cp_cntrl.responseToCore.in_port = network.out_port cp_cntrl.mandatoryQueue = MessageBuffer() - cp_cntrl.triggerQueue = MessageBuffer(ordered = True) + cp_cntrl.triggerQueue = MessageBuffer(ordered=True) cp_cntrl_nodes.append(cp_cntrl) return (cpu_sequencers, cp_cntrl_nodes) + def construct_tcps(options, system, ruby_system, network): tcp_sequencers = [] @@ -589,9 +698,9 @@ def construct_tcps(options, system, ruby_system, network): for i in range(options.num_compute_units): - tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, - issue_latency = 1, - number_of_TBEs = 2560) + tcp_cntrl = TCPCntrl( + TCC_select_num_bits=TCC_bits, issue_latency=1, number_of_TBEs=2560 + ) # TBEs set to max outstanding requests tcp_cntrl.create(options, ruby_system, system) tcp_cntrl.WB = options.WB_L1 @@ -607,25 +716,26 @@ def construct_tcps(options, system, ruby_system, network): tcp_cntrl_nodes.append(tcp_cntrl) # 
Connect the TCP controller to the ruby network - tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True) + tcp_cntrl.requestFromTCP = MessageBuffer(ordered=True) tcp_cntrl.requestFromTCP.out_port = network.in_port - tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True) + tcp_cntrl.responseFromTCP = MessageBuffer(ordered=True) tcp_cntrl.responseFromTCP.out_port = network.in_port tcp_cntrl.unblockFromCore = MessageBuffer() tcp_cntrl.unblockFromCore.out_port = network.in_port - tcp_cntrl.probeToTCP = MessageBuffer(ordered = True) + tcp_cntrl.probeToTCP = MessageBuffer(ordered=True) tcp_cntrl.probeToTCP.in_port = network.out_port - tcp_cntrl.responseToTCP = MessageBuffer(ordered = True) + tcp_cntrl.responseToTCP = MessageBuffer(ordered=True) tcp_cntrl.responseToTCP.in_port = network.out_port tcp_cntrl.mandatoryQueue = MessageBuffer() return (tcp_sequencers, tcp_cntrl_nodes) + def construct_sqcs(options, system, ruby_system, network): sqc_sequencers = [] @@ -636,7 +746,7 @@ def construct_sqcs(options, system, ruby_system, network): for i in range(options.num_sqc): - sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) + sqc_cntrl = SQCCntrl(TCC_select_num_bits=TCC_bits) sqc_cntrl.create(options, ruby_system, system) exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i) @@ -647,19 +757,20 @@ def construct_sqcs(options, system, ruby_system, network): sqc_cntrl_nodes.append(sqc_cntrl) # Connect the SQC controller to the ruby network - sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True) + sqc_cntrl.requestFromSQC = MessageBuffer(ordered=True) sqc_cntrl.requestFromSQC.out_port = network.in_port - sqc_cntrl.probeToSQC = MessageBuffer(ordered = True) + sqc_cntrl.probeToSQC = MessageBuffer(ordered=True) sqc_cntrl.probeToSQC.in_port = network.out_port - sqc_cntrl.responseToSQC = MessageBuffer(ordered = True) + sqc_cntrl.responseToSQC = MessageBuffer(ordered=True) sqc_cntrl.responseToSQC.in_port = network.out_port sqc_cntrl.mandatoryQueue = MessageBuffer() return 
(sqc_sequencers, sqc_cntrl_nodes) + def construct_scalars(options, system, ruby_system, network): scalar_sequencers = [] @@ -669,28 +780,30 @@ def construct_scalars(options, system, ruby_system, network): TCC_bits = int(math.log(options.num_tccs, 2)) for i in range(options.num_scalar_cache): - scalar_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) + scalar_cntrl = SQCCntrl(TCC_select_num_bits=TCC_bits) scalar_cntrl.create(options, ruby_system, system) - exec('ruby_system.scalar_cntrl%d = scalar_cntrl' % i) + exec("ruby_system.scalar_cntrl%d = scalar_cntrl" % i) scalar_sequencers.append(scalar_cntrl.sequencer) scalar_cntrl_nodes.append(scalar_cntrl) - scalar_cntrl.requestFromSQC = MessageBuffer(ordered = True) + scalar_cntrl.requestFromSQC = MessageBuffer(ordered=True) scalar_cntrl.requestFromSQC.out_port = network.in_port - scalar_cntrl.probeToSQC = MessageBuffer(ordered = True) + scalar_cntrl.probeToSQC = MessageBuffer(ordered=True) scalar_cntrl.probeToSQC.in_port = network.out_port - scalar_cntrl.responseToSQC = MessageBuffer(ordered = True) + scalar_cntrl.responseToSQC = MessageBuffer(ordered=True) scalar_cntrl.responseToSQC.in_port = network.out_port - scalar_cntrl.mandatoryQueue = \ - MessageBuffer(buffer_size=options.scalar_buffer_size) + scalar_cntrl.mandatoryQueue = MessageBuffer( + buffer_size=options.scalar_buffer_size + ) return (scalar_sequencers, scalar_cntrl_nodes) + def construct_cmdprocs(options, system, ruby_system, network): cmdproc_sequencers = [] @@ -704,9 +817,9 @@ def construct_cmdprocs(options, system, ruby_system, network): tcp_ID = options.num_compute_units + i sqc_ID = options.num_sqc + i - tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, - issue_latency = 1, - number_of_TBEs = 2560) + tcp_cntrl = TCPCntrl( + TCC_select_num_bits=TCC_bits, issue_latency=1, number_of_TBEs=2560 + ) # TBEs set to max outstanding requests tcp_cntrl.createCP(options, ruby_system, system) tcp_cntrl.WB = options.WB_L1 @@ -722,24 +835,24 @@ def 
construct_cmdprocs(options, system, ruby_system, network): cmdproc_cntrl_nodes.append(tcp_cntrl) # Connect the CP (TCP) controllers to the ruby network - tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True) + tcp_cntrl.requestFromTCP = MessageBuffer(ordered=True) tcp_cntrl.requestFromTCP.out_port = network.in_port - tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True) + tcp_cntrl.responseFromTCP = MessageBuffer(ordered=True) tcp_cntrl.responseFromTCP.out_port = network.in_port - tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True) + tcp_cntrl.unblockFromCore = MessageBuffer(ordered=True) tcp_cntrl.unblockFromCore.out_port = network.in_port - tcp_cntrl.probeToTCP = MessageBuffer(ordered = True) + tcp_cntrl.probeToTCP = MessageBuffer(ordered=True) tcp_cntrl.probeToTCP.in_port = network.out_port - tcp_cntrl.responseToTCP = MessageBuffer(ordered = True) + tcp_cntrl.responseToTCP = MessageBuffer(ordered=True) tcp_cntrl.responseToTCP.in_port = network.out_port tcp_cntrl.mandatoryQueue = MessageBuffer() - sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) + sqc_cntrl = SQCCntrl(TCC_select_num_bits=TCC_bits) sqc_cntrl.create(options, ruby_system, system) exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID) @@ -751,13 +864,14 @@ def construct_cmdprocs(options, system, ruby_system, network): return (cmdproc_sequencers, cmdproc_cntrl_nodes) + def construct_tccs(options, system, ruby_system, network): tcc_cntrl_nodes = [] for i in range(options.num_tccs): - tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency) + tcc_cntrl = TCCCntrl(l2_response_latency=options.TCC_latency) tcc_cntrl.create(options, ruby_system, system) tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency tcc_cntrl.l2_response_latency = options.TCC_latency @@ -767,10 +881,10 @@ def construct_tccs(options, system, ruby_system, network): # the number_of_TBEs is inclusive of TBEs below # Connect the TCC controllers to the ruby network - tcc_cntrl.requestFromTCP = 
MessageBuffer(ordered = True) + tcc_cntrl.requestFromTCP = MessageBuffer(ordered=True) tcc_cntrl.requestFromTCP.in_port = network.out_port - tcc_cntrl.responseToCore = MessageBuffer(ordered = True) + tcc_cntrl.responseToCore = MessageBuffer(ordered=True) tcc_cntrl.responseToCore.out_port = network.in_port tcc_cntrl.probeFromNB = MessageBuffer() @@ -779,7 +893,7 @@ def construct_tccs(options, system, ruby_system, network): tcc_cntrl.responseFromNB = MessageBuffer() tcc_cntrl.responseFromNB.in_port = network.out_port - tcc_cntrl.requestToNB = MessageBuffer(ordered = True) + tcc_cntrl.requestToNB = MessageBuffer(ordered=True) tcc_cntrl.requestToNB.out_port = network.in_port tcc_cntrl.responseToNB = MessageBuffer() @@ -788,15 +902,17 @@ def construct_tccs(options, system, ruby_system, network): tcc_cntrl.unblockToNB = MessageBuffer() tcc_cntrl.unblockToNB.out_port = network.in_port - tcc_cntrl.triggerQueue = MessageBuffer(ordered = True) + tcc_cntrl.triggerQueue = MessageBuffer(ordered=True) exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i) return tcc_cntrl_nodes -def create_system(options, full_system, system, dma_devices, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'GPU_VIPER': + +def create_system( + options, full_system, system, dma_devices, bootmem, ruby_system, cpus +): + if buildEnv["PROTOCOL"] != "GPU_VIPER": panic("This script requires the GPU_VIPER protocol to be built.") cpu_sequencers = [] @@ -813,28 +929,28 @@ def create_system(options, full_system, system, dma_devices, bootmem, cpuCluster = None gpuCluster = None - if hasattr(options, 'bw_scalor') and options.bw_scalor > 0: - #Assuming a 2GHz clock + if hasattr(options, "bw_scalor") and options.bw_scalor > 0: + # Assuming a 2GHz clock crossbar_bw = 16 * options.num_compute_units * options.bw_scalor - mainCluster = Cluster(intBW = crossbar_bw) - cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw) - gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw) + mainCluster = 
Cluster(intBW=crossbar_bw) + cpuCluster = Cluster(extBW=crossbar_bw, intBW=crossbar_bw) + gpuCluster = Cluster(extBW=crossbar_bw, intBW=crossbar_bw) else: - mainCluster = Cluster(intBW = 8) # 16 GB/s - cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s - gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s - + mainCluster = Cluster(intBW=8) # 16 GB/s + cpuCluster = Cluster(extBW=8, intBW=8) # 16 GB/s + gpuCluster = Cluster(extBW=8, intBW=8) # 16 GB/s # Create CPU directory controllers - dir_cntrl_nodes = \ - construct_dirs(options, system, ruby_system, ruby_system.network) + dir_cntrl_nodes = construct_dirs( + options, system, ruby_system, ruby_system.network + ) for dir_cntrl in dir_cntrl_nodes: mainCluster.add(dir_cntrl) - # Create CPU core pairs - (cp_sequencers, cp_cntrl_nodes) = \ - construct_corepairs(options, system, ruby_system, ruby_system.network) + (cp_sequencers, cp_cntrl_nodes) = construct_corepairs( + options, system, ruby_system, ruby_system.network + ) cpu_sequencers.extend(cp_sequencers) for cp_cntrl in cp_cntrl_nodes: cpuCluster.add(cp_cntrl) @@ -842,110 +958,131 @@ def create_system(options, full_system, system, dma_devices, bootmem, # Register CPUs and caches for each CorePair and directory (SE mode only) if not full_system: for i in range((options.num_cpus + 1) // 2): - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = \ - range(options.num_cpus), - core_id = i*2, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=range(options.num_cpus), + core_id=i * 2, + thread_siblings=[], + ) - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = \ - range(options.num_cpus), - core_id = i*2+1, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=range(options.num_cpus), + core_id=i * 2 + 1, + thread_siblings=[], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Instruction', - size = options.l1i_size, - 
line_size = options.cacheline_size, - assoc = options.l1i_assoc, - cpus = [i*2, i*2+1]) + FileSystemConfig.register_cache( + level=0, + idu_type="Instruction", + size=options.l1i_size, + line_size=options.cacheline_size, + assoc=options.l1i_assoc, + cpus=[i * 2, i * 2 + 1], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Data', - size = options.l1d_size, - line_size = options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i*2]) + FileSystemConfig.register_cache( + level=0, + idu_type="Data", + size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i * 2], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Data', - size = options.l1d_size, - line_size = options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i*2+1]) + FileSystemConfig.register_cache( + level=0, + idu_type="Data", + size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i * 2 + 1], + ) - FileSystemConfig.register_cache(level = 1, - idu_type = 'Unified', - size = options.l2_size, - line_size = options.cacheline_size, - assoc = options.l2_assoc, - cpus = [i*2, i*2+1]) + FileSystemConfig.register_cache( + level=1, + idu_type="Unified", + size=options.l2_size, + line_size=options.cacheline_size, + assoc=options.l2_assoc, + cpus=[i * 2, i * 2 + 1], + ) for i in range(options.num_dirs): - FileSystemConfig.register_cache(level = 2, - idu_type = 'Unified', - size = options.l3_size, - line_size = options.cacheline_size, - assoc = options.l3_assoc, - cpus = [n for n in - range(options.num_cpus)]) + FileSystemConfig.register_cache( + level=2, + idu_type="Unified", + size=options.l3_size, + line_size=options.cacheline_size, + assoc=options.l3_assoc, + cpus=[n for n in range(options.num_cpus)], + ) # Create TCPs - (tcp_sequencers, tcp_cntrl_nodes) = \ - construct_tcps(options, system, ruby_system, ruby_system.network) + (tcp_sequencers, tcp_cntrl_nodes) = construct_tcps( + options, system, 
ruby_system, ruby_system.network + ) cpu_sequencers.extend(tcp_sequencers) for tcp_cntrl in tcp_cntrl_nodes: gpuCluster.add(tcp_cntrl) # Create SQCs - (sqc_sequencers, sqc_cntrl_nodes) = \ - construct_sqcs(options, system, ruby_system, ruby_system.network) + (sqc_sequencers, sqc_cntrl_nodes) = construct_sqcs( + options, system, ruby_system, ruby_system.network + ) cpu_sequencers.extend(sqc_sequencers) for sqc_cntrl in sqc_cntrl_nodes: gpuCluster.add(sqc_cntrl) # Create Scalars - (scalar_sequencers, scalar_cntrl_nodes) = \ - construct_scalars(options, system, ruby_system, ruby_system.network) + (scalar_sequencers, scalar_cntrl_nodes) = construct_scalars( + options, system, ruby_system, ruby_system.network + ) cpu_sequencers.extend(scalar_sequencers) for scalar_cntrl in scalar_cntrl_nodes: gpuCluster.add(scalar_cntrl) # Create command processors - (cmdproc_sequencers, cmdproc_cntrl_nodes) = \ - construct_cmdprocs(options, system, ruby_system, ruby_system.network) + (cmdproc_sequencers, cmdproc_cntrl_nodes) = construct_cmdprocs( + options, system, ruby_system, ruby_system.network + ) cpu_sequencers.extend(cmdproc_sequencers) for cmdproc_cntrl in cmdproc_cntrl_nodes: gpuCluster.add(cmdproc_cntrl) # Create TCCs - tcc_cntrl_nodes = \ - construct_tccs(options, system, ruby_system, ruby_system.network) + tcc_cntrl_nodes = construct_tccs( + options, system, ruby_system, ruby_system.network + ) for tcc_cntrl in tcc_cntrl_nodes: gpuCluster.add(tcc_cntrl) for i, dma_device in enumerate(dma_devices): dma_seq = DMASequencer(version=i, ruby_system=ruby_system) - dma_cntrl = DMA_Controller(version=i, dma_sequencer=dma_seq, - ruby_system=ruby_system) - exec('system.dma_cntrl%d = dma_cntrl' % i) + dma_cntrl = DMA_Controller( + version=i, dma_sequencer=dma_seq, ruby_system=ruby_system + ) + exec("system.dma_cntrl%d = dma_cntrl" % i) # IDE doesn't have a .type but seems like everything else does. 
- if not hasattr(dma_device, 'type'): - exec('system.dma_cntrl%d.dma_sequencer.in_ports = dma_device' % i) - elif dma_device.type == 'MemTest': - exec('system.dma_cntrl%d.dma_sequencer.in_ports = dma_devices.test' - % i) + if not hasattr(dma_device, "type"): + exec("system.dma_cntrl%d.dma_sequencer.in_ports = dma_device" % i) + elif dma_device.type == "MemTest": + exec( + "system.dma_cntrl%d.dma_sequencer.in_ports = dma_devices.test" + % i + ) else: - exec('system.dma_cntrl%d.dma_sequencer.in_ports = dma_device.dma' - % i) + exec( + "system.dma_cntrl%d.dma_sequencer.in_ports = dma_device.dma" + % i + ) dma_cntrl.requestToDir = MessageBuffer(buffer_size=0) dma_cntrl.requestToDir.out_port = ruby_system.network.in_port dma_cntrl.responseFromDir = MessageBuffer(buffer_size=0) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port - dma_cntrl.mandatoryQueue = MessageBuffer(buffer_size = 0) + dma_cntrl.mandatoryQueue = MessageBuffer(buffer_size=0) gpuCluster.add(dma_cntrl) # Add cpu/gpu clusters to main cluster diff --git a/configs/ruby/Garnet_standalone.py b/configs/ruby/Garnet_standalone.py index f8e21fcf2f..ba5216eb24 100644 --- a/configs/ruby/Garnet_standalone.py +++ b/configs/ruby/Garnet_standalone.py @@ -34,14 +34,18 @@ from .Ruby import create_topology, create_directories # # Declare caches used by the protocol # -class L1Cache(RubyCache): pass +class L1Cache(RubyCache): + pass + def define_options(parser): return -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'Garnet_standalone': + +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + if buildEnv["PROTOCOL"] != "Garnet_standalone": panic("This script requires Garnet_standalone protocol to be built.") cpu_sequencers = [] @@ -49,7 +53,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # The Garnet_standalone protocol does not support fs nor dma # - assert(dma_ports == 
[]) + assert dma_ports == [] # # The ruby network creation expects the list of nodes in the system to be @@ -69,19 +73,18 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Only one cache exists for this protocol, so by default use the L1D # config parameters. # - cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc) + cache = L1Cache(size=options.l1d_size, assoc=options.l1d_assoc) # # Only one unified L1 cache exists. Can cache instructions and data. # - l1_cntrl = L1Cache_Controller(version = i, - cacheMemory = cache, - ruby_system = ruby_system) + l1_cntrl = L1Cache_Controller( + version=i, cacheMemory=cache, ruby_system=ruby_system + ) - cpu_seq = RubySequencer(dcache = cache, - garnet_standalone = True, - ruby_system = ruby_system) + cpu_seq = RubySequencer( + dcache=cache, garnet_standalone=True, ruby_system=ruby_system + ) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) @@ -97,7 +100,8 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.forwardFromCache = MessageBuffer() mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) @@ -107,7 +111,6 @@ def create_system(options, full_system, system, dma_ports, bootmem, dir_cntrl.forwardToDir = MessageBuffer() dir_cntrl.responseToDir = MessageBuffer() - all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes ruby_system.network.number_of_virtual_networks = 3 topology = create_topology(all_cntrls, options) diff --git a/configs/ruby/MESI_Three_Level.py b/configs/ruby/MESI_Three_Level.py index c184e57e10..70f9c82723 100644 --- a/configs/ruby/MESI_Three_Level.py +++ b/configs/ruby/MESI_Three_Level.py @@ -38,15 +38,26 @@ from common import FileSystemConfig # # Declare caches used by the protocol # -class L0Cache(RubyCache): pass 
-class L1Cache(RubyCache): pass -class L2Cache(RubyCache): pass +class L0Cache(RubyCache): + pass + + +class L1Cache(RubyCache): + pass + + +class L2Cache(RubyCache): + pass + def define_options(parser): parser.add_argument( - "--num-clusters", type=int, default=1, + "--num-clusters", + type=int, + default=1, help="number of clusters in a design in which there are shared\ - caches private to clusters") + caches private to clusters", + ) parser.add_argument("--l0i_size", type=str, default="4096B") parser.add_argument("--l0d_size", type=str, default="4096B") parser.add_argument("--l0i_assoc", type=int, default=1) @@ -55,16 +66,23 @@ def define_options(parser): parser.add_argument("--l1_transitions_per_cycle", type=int, default=32) parser.add_argument("--l2_transitions_per_cycle", type=int, default=4) parser.add_argument( - "--enable-prefetch", action="store_true", default=False, - help="Enable Ruby hardware prefetcher") + "--enable-prefetch", + action="store_true", + default=False, + help="Enable Ruby hardware prefetcher", + ) return -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'MESI_Three_Level': - fatal("This script requires the MESI_Three_Level protocol to be\ - built.") +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MESI_Three_Level": + fatal( + "This script requires the MESI_Three_Level protocol to be\ + built." 
+ ) cpu_sequencers = [] @@ -79,10 +97,10 @@ def create_system(options, full_system, system, dma_ports, bootmem, l2_cntrl_nodes = [] dma_cntrl_nodes = [] - assert (options.num_cpus % options.num_clusters == 0) + assert options.num_cpus % options.num_clusters == 0 num_cpus_per_cluster = options.num_cpus // options.num_clusters - assert (options.num_l2caches % options.num_clusters == 0) + assert options.num_l2caches % options.num_clusters == 0 num_l2caches_per_cluster = options.num_l2caches // options.num_clusters l2_bits = int(math.log(num_l2caches_per_cluster, 2)) @@ -98,63 +116,79 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l0i_cache = L0Cache(size = options.l0i_size, - assoc = options.l0i_assoc, - is_icache = True, - start_index_bit = block_size_bits, - replacement_policy = LRURP()) + l0i_cache = L0Cache( + size=options.l0i_size, + assoc=options.l0i_assoc, + is_icache=True, + start_index_bit=block_size_bits, + replacement_policy=LRURP(), + ) - l0d_cache = L0Cache(size = options.l0d_size, - assoc = options.l0d_assoc, - is_icache = False, - start_index_bit = block_size_bits, - replacement_policy = LRURP()) + l0d_cache = L0Cache( + size=options.l0d_size, + assoc=options.l0d_assoc, + is_icache=False, + start_index_bit=block_size_bits, + replacement_policy=LRURP(), + ) clk_domain = cpus[i].clk_domain # Ruby prefetcher prefetcher = RubyPrefetcher( num_streams=16, - unit_filter = 256, - nonunit_filter = 256, - train_misses = 5, - num_startup_pfs = 4, - cross_page = True + unit_filter=256, + nonunit_filter=256, + train_misses=5, + num_startup_pfs=4, + cross_page=True, ) l0_cntrl = L0Cache_Controller( - version = i * num_cpus_per_cluster + j, - Icache = l0i_cache, Dcache = l0d_cache, - transitions_per_cycle = options.l0_transitions_per_cycle, - prefetcher = prefetcher, - enable_prefetch = options.enable_prefetch, - send_evictions = send_evicts(options), - clk_domain = clk_domain, - 
ruby_system = ruby_system) + version=i * num_cpus_per_cluster + j, + Icache=l0i_cache, + Dcache=l0d_cache, + transitions_per_cycle=options.l0_transitions_per_cycle, + prefetcher=prefetcher, + enable_prefetch=options.enable_prefetch, + send_evictions=send_evicts(options), + clk_domain=clk_domain, + ruby_system=ruby_system, + ) - cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j, - clk_domain = clk_domain, - dcache = l0d_cache, - ruby_system = ruby_system) + cpu_seq = RubySequencer( + version=i * num_cpus_per_cluster + j, + clk_domain=clk_domain, + dcache=l0d_cache, + ruby_system=ruby_system, + ) l0_cntrl.sequencer = cpu_seq - l1_cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits, - is_icache = False) + l1_cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + is_icache=False, + ) l1_cntrl = L1Cache_Controller( - version = i * num_cpus_per_cluster + j, - cache = l1_cache, l2_select_num_bits = l2_bits, - cluster_id = i, - transitions_per_cycle = options.l1_transitions_per_cycle, - ruby_system = ruby_system) + version=i * num_cpus_per_cluster + j, + cache=l1_cache, + l2_select_num_bits=l2_bits, + cluster_id=i, + transitions_per_cycle=options.l1_transitions_per_cycle, + ruby_system=ruby_system, + ) - exec("ruby_system.l0_cntrl%d = l0_cntrl" - % ( i * num_cpus_per_cluster + j)) - exec("ruby_system.l1_cntrl%d = l1_cntrl" - % ( i * num_cpus_per_cluster + j)) + exec( + "ruby_system.l0_cntrl%d = l0_cntrl" + % (i * num_cpus_per_cluster + j) + ) + exec( + "ruby_system.l1_cntrl%d = l1_cntrl" + % (i * num_cpus_per_cluster + j) + ) # # Add controllers and sequencers to the appropriate lists @@ -166,9 +200,9 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Connect the L0 and L1 controllers l0_cntrl.prefetchQueue = MessageBuffer() l0_cntrl.mandatoryQueue = MessageBuffer() - l0_cntrl.bufferToL1 = MessageBuffer(ordered = True) + 
l0_cntrl.bufferToL1 = MessageBuffer(ordered=True) l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1 - l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True) + l0_cntrl.bufferFromL1 = MessageBuffer(ordered=True) l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1 # Connect the L1 controllers and the network @@ -184,28 +218,36 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.responseFromL2 = MessageBuffer() l1_cntrl.responseFromL2.in_port = ruby_system.network.out_port - for j in range(num_l2caches_per_cluster): - l2_cache = L2Cache(size = options.l2_size, - assoc = options.l2_assoc, - start_index_bit = l2_index_start) + l2_cache = L2Cache( + size=options.l2_size, + assoc=options.l2_assoc, + start_index_bit=l2_index_start, + ) l2_cntrl = L2Cache_Controller( - version = i * num_l2caches_per_cluster + j, - L2cache = l2_cache, cluster_id = i, - transitions_per_cycle =\ - options.l2_transitions_per_cycle, - ruby_system = ruby_system) + version=i * num_l2caches_per_cluster + j, + L2cache=l2_cache, + cluster_id=i, + transitions_per_cycle=options.l2_transitions_per_cycle, + ruby_system=ruby_system, + ) - exec("ruby_system.l2_cntrl%d = l2_cntrl" - % (i * num_l2caches_per_cluster + j)) + exec( + "ruby_system.l2_cntrl%d = l2_cntrl" + % (i * num_l2caches_per_cluster + j) + ) l2_cntrl_nodes.append(l2_cntrl) # Connect the L2 controllers and the network l2_cntrl.DirRequestFromL2Cache = MessageBuffer() - l2_cntrl.DirRequestFromL2Cache.out_port = ruby_system.network.in_port + l2_cntrl.DirRequestFromL2Cache.out_port = ( + ruby_system.network.in_port + ) l2_cntrl.L1RequestFromL2Cache = MessageBuffer() - l2_cntrl.L1RequestFromL2Cache.out_port = ruby_system.network.in_port + l2_cntrl.L1RequestFromL2Cache.out_port = ( + ruby_system.network.in_port + ) l2_cntrl.responseFromL2Cache = MessageBuffer() l2_cntrl.responseFromL2Cache.out_port = ruby_system.network.in_port @@ -220,10 +262,12 @@ def create_system(options, full_system, system, dma_ports, bootmem, # the ruby system # 
clk_divider value is a fix to pass regression. ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain = ruby_system.clk_domain, clk_divider = 3) + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) @@ -242,12 +286,14 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # Create the Ruby objects associated with the dma controller # - dma_seq = DMASequencer(version = i, ruby_system = ruby_system) + dma_seq = DMASequencer(version=i, ruby_system=ruby_system) - dma_cntrl = DMA_Controller(version = i, - dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) exec("ruby_system.dma_cntrl%d.dma_sequencer.in_ports = dma_port" % i) @@ -255,29 +301,33 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Connect the dma controller to the network dma_cntrl.mandatoryQueue = MessageBuffer() - dma_cntrl.responseFromDir = MessageBuffer(ordered = True) + dma_cntrl.responseFromDir = MessageBuffer(ordered=True) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port dma_cntrl.requestToDir = MessageBuffer() dma_cntrl.requestToDir.out_port = ruby_system.network.in_port - all_cntrls = l0_cntrl_nodes + \ - l1_cntrl_nodes + \ - l2_cntrl_nodes + \ - dir_cntrl_nodes + \ - dma_cntrl_nodes + all_cntrls = ( + l0_cntrl_nodes + + l1_cntrl_nodes + + l2_cntrl_nodes + + dir_cntrl_nodes + + dma_cntrl_nodes + ) # Create the io controller and the sequencer if full_system: io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) 
ruby_system._io_port = io_seq - io_controller = DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network io_controller.mandatoryQueue = MessageBuffer() - io_controller.responseFromDir = MessageBuffer(ordered = True) + io_controller.responseFromDir = MessageBuffer(ordered=True) io_controller.responseFromDir.in_port = ruby_system.network.out_port io_controller.requestToDir = MessageBuffer() io_controller.requestToDir.out_port = ruby_system.network.in_port @@ -287,41 +337,56 @@ def create_system(options, full_system, system, dma_ports, bootmem, else: for i in range(options.num_clusters): for j in range(num_cpus_per_cluster): - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = range(options.num_cpus), - core_id = i*num_cpus_per_cluster+j, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=range(options.num_cpus), + core_id=i * num_cpus_per_cluster + j, + thread_siblings=[], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Instruction', - size = options.l0i_size, - line_size =\ - options.cacheline_size, - assoc = 1, - cpus = [i*num_cpus_per_cluster+j]) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Data', - size = options.l0d_size, - line_size =\ - options.cacheline_size, - assoc = 1, - cpus = [i*num_cpus_per_cluster+j]) + FileSystemConfig.register_cache( + level=0, + idu_type="Instruction", + size=options.l0i_size, + line_size=options.cacheline_size, + assoc=1, + cpus=[i * num_cpus_per_cluster + j], + ) + FileSystemConfig.register_cache( + level=0, + idu_type="Data", + size=options.l0d_size, + line_size=options.cacheline_size, + assoc=1, + cpus=[i * num_cpus_per_cluster + j], + ) - FileSystemConfig.register_cache(level = 1, - idu_type = 
'Unified', - size = options.l1d_size, - line_size = options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i*num_cpus_per_cluster+j]) + FileSystemConfig.register_cache( + level=1, + idu_type="Unified", + size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i * num_cpus_per_cluster + j], + ) - FileSystemConfig.register_cache(level = 2, - idu_type = 'Unified', - size = str(MemorySize(options.l2_size) * \ - num_l2caches_per_cluster)+'B', - line_size = options.cacheline_size, - assoc = options.l2_assoc, - cpus = [n for n in range(i*num_cpus_per_cluster, \ - (i+1)*num_cpus_per_cluster)]) + FileSystemConfig.register_cache( + level=2, + idu_type="Unified", + size=str( + MemorySize(options.l2_size) * num_l2caches_per_cluster + ) + + "B", + line_size=options.cacheline_size, + assoc=options.l2_assoc, + cpus=[ + n + for n in range( + i * num_cpus_per_cluster, + (i + 1) * num_cpus_per_cluster, + ) + ], + ) ruby_system.network.number_of_virtual_networks = 3 topology = create_topology(all_cntrls, options) diff --git a/configs/ruby/MESI_Three_Level_HTM.py b/configs/ruby/MESI_Three_Level_HTM.py index 974cd7e932..883db9800e 100644 --- a/configs/ruby/MESI_Three_Level_HTM.py +++ b/configs/ruby/MESI_Three_Level_HTM.py @@ -38,14 +38,26 @@ from common import FileSystemConfig # # Declare caches used by the protocol # -class L0Cache(RubyCache): pass -class L1Cache(RubyCache): pass -class L2Cache(RubyCache): pass +class L0Cache(RubyCache): + pass + + +class L1Cache(RubyCache): + pass + + +class L2Cache(RubyCache): + pass + def define_options(parser): - parser.add_argument("--num-clusters", type=int, default=1, - help = "number of clusters in a design in which there are shared\ - caches private to clusters") + parser.add_argument( + "--num-clusters", + type=int, + default=1, + help="number of clusters in a design in which there are shared\ + caches private to clusters", + ) parser.add_argument("--l0i_size", type=str, default="4096B") 
parser.add_argument("--l0d_size", type=str, default="4096B") parser.add_argument("--l0i_assoc", type=int, default=1) @@ -54,16 +66,23 @@ def define_options(parser): parser.add_argument("--l1_transitions_per_cycle", type=int, default=32) parser.add_argument("--l2_transitions_per_cycle", type=int, default=4) parser.add_argument( - "--enable-prefetch", action="store_true", default=False, - help="Enable Ruby hardware prefetcher") + "--enable-prefetch", + action="store_true", + default=False, + help="Enable Ruby hardware prefetcher", + ) return -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'MESI_Three_Level_HTM': - fatal("This script requires the MESI_Three_Level protocol to be\ - built.") +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MESI_Three_Level_HTM": + fatal( + "This script requires the MESI_Three_Level protocol to be\ + built." + ) cpu_sequencers = [] @@ -78,10 +97,10 @@ def create_system(options, full_system, system, dma_ports, bootmem, l2_cntrl_nodes = [] dma_cntrl_nodes = [] - assert (options.num_cpus % options.num_clusters == 0) + assert options.num_cpus % options.num_clusters == 0 num_cpus_per_cluster = options.num_cpus // options.num_clusters - assert (options.num_l2caches % options.num_clusters == 0) + assert options.num_l2caches % options.num_clusters == 0 num_l2caches_per_cluster = options.num_l2caches // options.num_clusters l2_bits = int(math.log(num_l2caches_per_cluster, 2)) @@ -97,63 +116,79 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l0i_cache = L0Cache(size = options.l0i_size, - assoc = options.l0i_assoc, - is_icache = True, - start_index_bit = block_size_bits, - replacement_policy = LRURP()) + l0i_cache = L0Cache( + size=options.l0i_size, + assoc=options.l0i_assoc, + is_icache=True, + 
start_index_bit=block_size_bits, + replacement_policy=LRURP(), + ) - l0d_cache = L0Cache(size = options.l0d_size, - assoc = options.l0d_assoc, - is_icache = False, - start_index_bit = block_size_bits, - replacement_policy = LRURP()) + l0d_cache = L0Cache( + size=options.l0d_size, + assoc=options.l0d_assoc, + is_icache=False, + start_index_bit=block_size_bits, + replacement_policy=LRURP(), + ) clk_domain = cpus[i].clk_domain # Ruby prefetcher prefetcher = RubyPrefetcher( num_streams=16, - unit_filter = 256, - nonunit_filter = 256, - train_misses = 5, - num_startup_pfs = 4, - cross_page = True + unit_filter=256, + nonunit_filter=256, + train_misses=5, + num_startup_pfs=4, + cross_page=True, ) l0_cntrl = L0Cache_Controller( - version = i * num_cpus_per_cluster + j, - Icache = l0i_cache, Dcache = l0d_cache, - transitions_per_cycle = options.l0_transitions_per_cycle, - prefetcher = prefetcher, - enable_prefetch = options.enable_prefetch, - send_evictions = send_evicts(options), - clk_domain = clk_domain, - ruby_system = ruby_system) + version=i * num_cpus_per_cluster + j, + Icache=l0i_cache, + Dcache=l0d_cache, + transitions_per_cycle=options.l0_transitions_per_cycle, + prefetcher=prefetcher, + enable_prefetch=options.enable_prefetch, + send_evictions=send_evicts(options), + clk_domain=clk_domain, + ruby_system=ruby_system, + ) - cpu_seq = RubyHTMSequencer(version = i * num_cpus_per_cluster + j, - clk_domain = clk_domain, - dcache = l0d_cache, - ruby_system = ruby_system) + cpu_seq = RubyHTMSequencer( + version=i * num_cpus_per_cluster + j, + clk_domain=clk_domain, + dcache=l0d_cache, + ruby_system=ruby_system, + ) l0_cntrl.sequencer = cpu_seq - l1_cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits, - is_icache = False) + l1_cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + is_icache=False, + ) l1_cntrl = L1Cache_Controller( - version = i * num_cpus_per_cluster 
+ j, - cache = l1_cache, l2_select_num_bits = l2_bits, - cluster_id = i, - transitions_per_cycle = options.l1_transitions_per_cycle, - ruby_system = ruby_system) + version=i * num_cpus_per_cluster + j, + cache=l1_cache, + l2_select_num_bits=l2_bits, + cluster_id=i, + transitions_per_cycle=options.l1_transitions_per_cycle, + ruby_system=ruby_system, + ) - exec("ruby_system.l0_cntrl%d = l0_cntrl" - % ( i * num_cpus_per_cluster + j)) - exec("ruby_system.l1_cntrl%d = l1_cntrl" - % ( i * num_cpus_per_cluster + j)) + exec( + "ruby_system.l0_cntrl%d = l0_cntrl" + % (i * num_cpus_per_cluster + j) + ) + exec( + "ruby_system.l1_cntrl%d = l1_cntrl" + % (i * num_cpus_per_cluster + j) + ) # # Add controllers and sequencers to the appropriate lists @@ -165,9 +200,9 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Connect the L0 and L1 controllers l0_cntrl.prefetchQueue = MessageBuffer() l0_cntrl.mandatoryQueue = MessageBuffer() - l0_cntrl.bufferToL1 = MessageBuffer(ordered = True) + l0_cntrl.bufferToL1 = MessageBuffer(ordered=True) l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1 - l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True) + l0_cntrl.bufferFromL1 = MessageBuffer(ordered=True) l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1 # Connect the L1 controllers and the network @@ -183,28 +218,36 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.responseFromL2 = MessageBuffer() l1_cntrl.responseFromL2.in_port = ruby_system.network.out_port - for j in range(num_l2caches_per_cluster): - l2_cache = L2Cache(size = options.l2_size, - assoc = options.l2_assoc, - start_index_bit = l2_index_start) + l2_cache = L2Cache( + size=options.l2_size, + assoc=options.l2_assoc, + start_index_bit=l2_index_start, + ) l2_cntrl = L2Cache_Controller( - version = i * num_l2caches_per_cluster + j, - L2cache = l2_cache, cluster_id = i, - transitions_per_cycle =\ - options.l2_transitions_per_cycle, - ruby_system = ruby_system) + version=i * 
num_l2caches_per_cluster + j, + L2cache=l2_cache, + cluster_id=i, + transitions_per_cycle=options.l2_transitions_per_cycle, + ruby_system=ruby_system, + ) - exec("ruby_system.l2_cntrl%d = l2_cntrl" - % (i * num_l2caches_per_cluster + j)) + exec( + "ruby_system.l2_cntrl%d = l2_cntrl" + % (i * num_l2caches_per_cluster + j) + ) l2_cntrl_nodes.append(l2_cntrl) # Connect the L2 controllers and the network l2_cntrl.DirRequestFromL2Cache = MessageBuffer() - l2_cntrl.DirRequestFromL2Cache.out_port = ruby_system.network.in_port + l2_cntrl.DirRequestFromL2Cache.out_port = ( + ruby_system.network.in_port + ) l2_cntrl.L1RequestFromL2Cache = MessageBuffer() - l2_cntrl.L1RequestFromL2Cache.out_port = ruby_system.network.in_port + l2_cntrl.L1RequestFromL2Cache.out_port = ( + ruby_system.network.in_port + ) l2_cntrl.responseFromL2Cache = MessageBuffer() l2_cntrl.responseFromL2Cache.out_port = ruby_system.network.in_port @@ -219,10 +262,12 @@ def create_system(options, full_system, system, dma_ports, bootmem, # the ruby system # clk_divider value is a fix to pass regression. 
ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain = ruby_system.clk_domain, clk_divider = 3) + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) @@ -241,12 +286,14 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # Create the Ruby objects associated with the dma controller # - dma_seq = DMASequencer(version = i, ruby_system = ruby_system) + dma_seq = DMASequencer(version=i, ruby_system=ruby_system) - dma_cntrl = DMA_Controller(version = i, - dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) exec("ruby_system.dma_cntrl%d.dma_sequencer.in_ports = dma_port" % i) @@ -254,29 +301,33 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Connect the dma controller to the network dma_cntrl.mandatoryQueue = MessageBuffer() - dma_cntrl.responseFromDir = MessageBuffer(ordered = True) + dma_cntrl.responseFromDir = MessageBuffer(ordered=True) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port dma_cntrl.requestToDir = MessageBuffer() dma_cntrl.requestToDir.out_port = ruby_system.network.in_port - all_cntrls = l0_cntrl_nodes + \ - l1_cntrl_nodes + \ - l2_cntrl_nodes + \ - dir_cntrl_nodes + \ - dma_cntrl_nodes + all_cntrls = ( + l0_cntrl_nodes + + l1_cntrl_nodes + + l2_cntrl_nodes + + dir_cntrl_nodes + + dma_cntrl_nodes + ) # Create the io controller and the sequencer if full_system: io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) ruby_system._io_port = io_seq - io_controller = 
DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network io_controller.mandatoryQueue = MessageBuffer() - io_controller.responseFromDir = MessageBuffer(ordered = True) + io_controller.responseFromDir = MessageBuffer(ordered=True) io_controller.responseFromDir.in_port = ruby_system.network.out_port io_controller.requestToDir = MessageBuffer() io_controller.requestToDir.out_port = ruby_system.network.in_port @@ -286,41 +337,56 @@ def create_system(options, full_system, system, dma_ports, bootmem, else: for i in range(options.num_clusters): for j in range(num_cpus_per_cluster): - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = range(options.num_cpus), - core_id = i*num_cpus_per_cluster+j, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=range(options.num_cpus), + core_id=i * num_cpus_per_cluster + j, + thread_siblings=[], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Instruction', - size = options.l0i_size, - line_size =\ - options.cacheline_size, - assoc = 1, - cpus = [i*num_cpus_per_cluster+j]) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Data', - size = options.l0d_size, - line_size =\ - options.cacheline_size, - assoc = 1, - cpus = [i*num_cpus_per_cluster+j]) + FileSystemConfig.register_cache( + level=0, + idu_type="Instruction", + size=options.l0i_size, + line_size=options.cacheline_size, + assoc=1, + cpus=[i * num_cpus_per_cluster + j], + ) + FileSystemConfig.register_cache( + level=0, + idu_type="Data", + size=options.l0d_size, + line_size=options.cacheline_size, + assoc=1, + cpus=[i * num_cpus_per_cluster + j], + ) - FileSystemConfig.register_cache(level = 1, - idu_type = 'Unified', - size = options.l1d_size, - line_size 
= options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i*num_cpus_per_cluster+j]) + FileSystemConfig.register_cache( + level=1, + idu_type="Unified", + size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i * num_cpus_per_cluster + j], + ) - FileSystemConfig.register_cache(level = 2, - idu_type = 'Unified', - size = str(MemorySize(options.l2_size) * \ - num_l2caches_per_cluster)+'B', - line_size = options.cacheline_size, - assoc = options.l2_assoc, - cpus = [n for n in range(i*num_cpus_per_cluster, \ - (i+1)*num_cpus_per_cluster)]) + FileSystemConfig.register_cache( + level=2, + idu_type="Unified", + size=str( + MemorySize(options.l2_size) * num_l2caches_per_cluster + ) + + "B", + line_size=options.cacheline_size, + assoc=options.l2_assoc, + cpus=[ + n + for n in range( + i * num_cpus_per_cluster, + (i + 1) * num_cpus_per_cluster, + ) + ], + ) ruby_system.network.number_of_virtual_networks = 3 topology = create_topology(all_cntrls, options) diff --git a/configs/ruby/MESI_Two_Level.py b/configs/ruby/MESI_Two_Level.py index e1bb9e77e2..80a823bc52 100644 --- a/configs/ruby/MESI_Two_Level.py +++ b/configs/ruby/MESI_Two_Level.py @@ -35,16 +35,23 @@ from .Ruby import send_evicts # # Declare caches used by the protocol # -class L1Cache(RubyCache): pass -class L2Cache(RubyCache): pass +class L1Cache(RubyCache): + pass + + +class L2Cache(RubyCache): + pass + def define_options(parser): return -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'MESI_Two_Level': +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MESI_Two_Level": fatal("This script requires the MESI_Two_Level protocol to be built.") cpu_sequencers = [] @@ -69,33 +76,42 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l1i_cache = 
L1Cache(size = options.l1i_size, - assoc = options.l1i_assoc, - start_index_bit = block_size_bits, - is_icache = True) - l1d_cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits, - is_icache = False) + l1i_cache = L1Cache( + size=options.l1i_size, + assoc=options.l1i_assoc, + start_index_bit=block_size_bits, + is_icache=True, + ) + l1d_cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + is_icache=False, + ) prefetcher = RubyPrefetcher() clk_domain = cpus[i].clk_domain - l1_cntrl = L1Cache_Controller(version = i, L1Icache = l1i_cache, - L1Dcache = l1d_cache, - l2_select_num_bits = l2_bits, - send_evictions = send_evicts(options), - prefetcher = prefetcher, - ruby_system = ruby_system, - clk_domain = clk_domain, - transitions_per_cycle = options.ports, - enable_prefetch = False) - - cpu_seq = RubySequencer(version = i, - dcache = l1d_cache, clk_domain = clk_domain, - ruby_system = ruby_system) + l1_cntrl = L1Cache_Controller( + version=i, + L1Icache=l1i_cache, + L1Dcache=l1d_cache, + l2_select_num_bits=l2_bits, + send_evictions=send_evicts(options), + prefetcher=prefetcher, + ruby_system=ruby_system, + clk_domain=clk_domain, + transitions_per_cycle=options.ports, + enable_prefetch=False, + ) + cpu_seq = RubySequencer( + version=i, + dcache=l1d_cache, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) @@ -120,21 +136,24 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.responseToL1Cache = MessageBuffer() l1_cntrl.responseToL1Cache.in_port = ruby_system.network.out_port - l2_index_start = block_size_bits + l2_bits for i in range(options.num_l2caches): # # First create the Ruby objects associated with this cpu # - l2_cache = L2Cache(size = options.l2_size, - assoc = options.l2_assoc, - start_index_bit = l2_index_start) + l2_cache = L2Cache( + 
size=options.l2_size, + assoc=options.l2_assoc, + start_index_bit=l2_index_start, + ) - l2_cntrl = L2Cache_Controller(version = i, - L2cache = l2_cache, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + l2_cntrl = L2Cache_Controller( + version=i, + L2cache=l2_cache, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.l2_cntrl%d = l2_cntrl" % i) l2_cntrl_nodes.append(l2_cntrl) @@ -154,16 +173,16 @@ def create_system(options, full_system, system, dma_ports, bootmem, l2_cntrl.responseToL2Cache = MessageBuffer() l2_cntrl.responseToL2Cache.in_port = ruby_system.network.out_port - # Run each of the ruby memory controllers at a ratio of the frequency of # the ruby system # clk_divider value is a fix to pass regression. ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain = ruby_system.clk_domain, - clk_divider = 3) + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) @@ -178,44 +197,47 @@ def create_system(options, full_system, system, dma_ports, bootmem, dir_cntrl.requestToMemory = MessageBuffer() dir_cntrl.responseFromMemory = MessageBuffer() - for i, dma_port in enumerate(dma_ports): # Create the Ruby objects associated with the dma controller - dma_seq = DMASequencer(version = i, ruby_system = ruby_system, - in_ports = dma_port) + dma_seq = DMASequencer( + version=i, ruby_system=ruby_system, in_ports=dma_port + ) - dma_cntrl = DMA_Controller(version = i, dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) 
dma_cntrl_nodes.append(dma_cntrl) # Connect the dma controller to the network dma_cntrl.mandatoryQueue = MessageBuffer() - dma_cntrl.responseFromDir = MessageBuffer(ordered = True) + dma_cntrl.responseFromDir = MessageBuffer(ordered=True) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port dma_cntrl.requestToDir = MessageBuffer() dma_cntrl.requestToDir.out_port = ruby_system.network.in_port - all_cntrls = l1_cntrl_nodes + \ - l2_cntrl_nodes + \ - dir_cntrl_nodes + \ - dma_cntrl_nodes + all_cntrls = ( + l1_cntrl_nodes + l2_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes + ) # Create the io controller and the sequencer if full_system: - io_seq = DMASequencer(version = len(dma_ports), - ruby_system = ruby_system) + io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) ruby_system._io_port = io_seq - io_controller = DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network io_controller.mandatoryQueue = MessageBuffer() - io_controller.responseFromDir = MessageBuffer(ordered = True) + io_controller.responseFromDir = MessageBuffer(ordered=True) io_controller.responseFromDir.in_port = ruby_system.network.out_port io_controller.requestToDir = MessageBuffer() io_controller.requestToDir.out_port = ruby_system.network.in_port diff --git a/configs/ruby/MI_example.py b/configs/ruby/MI_example.py index a46a9e99d1..0ccfd75506 100644 --- a/configs/ruby/MI_example.py +++ b/configs/ruby/MI_example.py @@ -35,15 +35,19 @@ from .Ruby import send_evicts # # Declare caches used by the protocol # -class L1Cache(RubyCache): pass +class L1Cache(RubyCache): + pass + def define_options(parser): return -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 
'MI_example': +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MI_example": panic("This script requires the MI_example protocol to be built.") cpu_sequencers = [] @@ -68,22 +72,30 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Only one cache exists for this protocol, so by default use the L1D # config parameters. # - cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits) - + cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + ) clk_domain = cpus[i].clk_domain # Only one unified L1 cache exists. Can cache instructions and data. - l1_cntrl = L1Cache_Controller(version=i, cacheMemory=cache, - send_evictions=send_evicts(options), - transitions_per_cycle=options.ports, - clk_domain=clk_domain, - ruby_system=ruby_system) + l1_cntrl = L1Cache_Controller( + version=i, + cacheMemory=cache, + send_evictions=send_evicts(options), + transitions_per_cycle=options.ports, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) - cpu_seq = RubySequencer(version=i, dcache=cache, - clk_domain=clk_domain, ruby_system=ruby_system) + cpu_seq = RubySequencer( + version=i, + dcache=cache, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) @@ -94,59 +106,60 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Connect the L1 controllers and the network l1_cntrl.mandatoryQueue = MessageBuffer() - l1_cntrl.requestFromCache = MessageBuffer(ordered = True) + l1_cntrl.requestFromCache = MessageBuffer(ordered=True) l1_cntrl.requestFromCache.out_port = ruby_system.network.in_port - l1_cntrl.responseFromCache = MessageBuffer(ordered = True) + l1_cntrl.responseFromCache = MessageBuffer(ordered=True) l1_cntrl.responseFromCache.out_port = ruby_system.network.in_port - l1_cntrl.forwardToCache = 
MessageBuffer(ordered = True) + l1_cntrl.forwardToCache = MessageBuffer(ordered=True) l1_cntrl.forwardToCache.in_port = ruby_system.network.out_port - l1_cntrl.responseToCache = MessageBuffer(ordered = True) + l1_cntrl.responseToCache = MessageBuffer(ordered=True) l1_cntrl.responseToCache.in_port = ruby_system.network.out_port phys_mem_size = sum([r.size() for r in system.mem_ranges]) - assert(phys_mem_size % options.num_dirs == 0) + assert phys_mem_size % options.num_dirs == 0 mem_module_size = phys_mem_size / options.num_dirs # Run each of the ruby memory controllers at a ratio of the frequency of # the ruby system. # clk_divider value is a fix to pass regression. ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain=ruby_system.clk_domain, - clk_divider=3) + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) for dir_cntrl in dir_cntrl_nodes: # Connect the directory controllers and the network - dir_cntrl.requestToDir = MessageBuffer(ordered = True) + dir_cntrl.requestToDir = MessageBuffer(ordered=True) dir_cntrl.requestToDir.in_port = ruby_system.network.out_port - dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True) + dir_cntrl.dmaRequestToDir = MessageBuffer(ordered=True) dir_cntrl.dmaRequestToDir.in_port = ruby_system.network.out_port dir_cntrl.responseFromDir = MessageBuffer() dir_cntrl.responseFromDir.out_port = ruby_system.network.in_port - dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True) + dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered=True) dir_cntrl.dmaResponseFromDir.out_port = ruby_system.network.in_port dir_cntrl.forwardFromDir = MessageBuffer() dir_cntrl.forwardFromDir.out_port = ruby_system.network.in_port dir_cntrl.requestToMemory = MessageBuffer() 
dir_cntrl.responseFromMemory = MessageBuffer() - for i, dma_port in enumerate(dma_ports): # # Create the Ruby objects associated with the dma controller # - dma_seq = DMASequencer(version = i, - ruby_system = ruby_system) + dma_seq = DMASequencer(version=i, ruby_system=ruby_system) - dma_cntrl = DMA_Controller(version = i, - dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) exec("ruby_system.dma_cntrl%d.dma_sequencer.in_ports = dma_port" % i) @@ -156,7 +169,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, dma_cntrl.mandatoryQueue = MessageBuffer() dma_cntrl.requestToDir = MessageBuffer() dma_cntrl.requestToDir.out_port = ruby_system.network.in_port - dma_cntrl.responseFromDir = MessageBuffer(ordered = True) + dma_cntrl.responseFromDir = MessageBuffer(ordered=True) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes @@ -165,16 +178,18 @@ def create_system(options, full_system, system, dma_ports, bootmem, if full_system: io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) ruby_system._io_port = io_seq - io_controller = DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network io_controller.mandatoryQueue = MessageBuffer() io_controller.requestToDir = MessageBuffer() io_controller.requestToDir.out_port = ruby_system.network.in_port - io_controller.responseFromDir = MessageBuffer(ordered = True) + io_controller.responseFromDir = MessageBuffer(ordered=True) 
io_controller.responseFromDir.in_port = ruby_system.network.out_port all_cntrls = all_cntrls + [io_controller] diff --git a/configs/ruby/MOESI_AMD_Base.py b/configs/ruby/MOESI_AMD_Base.py index 12b87712fc..30c7678f08 100644 --- a/configs/ruby/MOESI_AMD_Base.py +++ b/configs/ruby/MOESI_AMD_Base.py @@ -36,13 +36,15 @@ from .Ruby import create_topology from .Ruby import send_evicts from common import FileSystemConfig -addToPath('../') +addToPath("../") from topologies.Cluster import Cluster from topologies.Crossbar import Crossbar + class CntrlBase: _seqs = 0 + @classmethod def seqCount(cls): # Use SeqCount not class since we need global count @@ -50,6 +52,7 @@ class CntrlBase: return CntrlBase._seqs - 1 _cntrls = 0 + @classmethod def cntrlCount(cls): # Use CntlCount not class since we need global count @@ -57,34 +60,41 @@ class CntrlBase: return CntrlBase._cntrls - 1 _version = 0 + @classmethod def versionCount(cls): - cls._version += 1 # Use count for this particular type + cls._version += 1 # Use count for this particular type return cls._version - 1 + class L1DCache(RubyCache): resourceStalls = False + def create(self, options): self.size = MemorySize(options.l1d_size) self.assoc = options.l1d_assoc self.replacement_policy = TreePLRURP() + class L1ICache(RubyCache): resourceStalls = False + def create(self, options): self.size = MemorySize(options.l1i_size) self.assoc = options.l1i_assoc self.replacement_policy = TreePLRURP() + class L2Cache(RubyCache): resourceStalls = False + def create(self, options): self.size = MemorySize(options.l2_size) self.assoc = options.l2_assoc self.replacement_policy = TreePLRURP() -class CPCntrl(CorePair_Controller, CntrlBase): +class CPCntrl(CorePair_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() @@ -122,6 +132,7 @@ class CPCntrl(CorePair_Controller, CntrlBase): if options.recycle_latency: self.recycle_latency = options.recycle_latency + class L3Cache(RubyCache): assoc = 8 
dataArrayBanks = 256 @@ -139,21 +150,30 @@ class L3Cache(RubyCache): self.resourceStalls = options.no_resource_stalls self.replacement_policy = TreePLRURP() + class L3Cntrl(L3Cache_Controller, CntrlBase): def create(self, options, ruby_system, system): self.version = self.versionCount() self.L3cache = L3Cache() self.L3cache.create(options, ruby_system, system) - self.l3_response_latency = max(self.L3cache.dataAccessLatency, - self.L3cache.tagAccessLatency) + self.l3_response_latency = max( + self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency + ) self.ruby_system = ruby_system if options.recycle_latency: self.recycle_latency = options.recycle_latency - def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir, - req_to_l3, probe_to_l3, resp_to_l3): + def connectWireBuffers( + self, + req_to_dir, + resp_to_dir, + l3_unblock_to_dir, + req_to_l3, + probe_to_l3, + resp_to_l3, + ): self.reqToDir = req_to_dir self.respToDir = resp_to_dir self.l3UnblockToDir = l3_unblock_to_dir @@ -161,6 +181,7 @@ class L3Cntrl(L3Cache_Controller, CntrlBase): self.probeToL3 = probe_to_l3 self.respToL3 = resp_to_l3 + class DirCntrl(Directory_Controller, CntrlBase): def create(self, options, dir_ranges, ruby_system, system): self.version = self.versionCount() @@ -173,8 +194,10 @@ class DirCntrl(Directory_Controller, CntrlBase): self.L3CacheMemory = L3Cache() self.L3CacheMemory.create(options, ruby_system, system) - self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency, - self.L3CacheMemory.tagAccessLatency) + self.l3_hit_latency = max( + self.L3CacheMemory.dataAccessLatency, + self.L3CacheMemory.tagAccessLatency, + ) self.number_of_TBEs = options.num_tbes @@ -185,8 +208,15 @@ class DirCntrl(Directory_Controller, CntrlBase): self.CPUonly = True - def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir, - req_to_l3, probe_to_l3, resp_to_l3): + def connectWireBuffers( + self, + req_to_dir, + resp_to_dir, + l3_unblock_to_dir, + req_to_l3, + 
probe_to_l3, + resp_to_l3, + ): self.reqToDir = req_to_dir self.respToDir = resp_to_dir self.l3UnblockToDir = l3_unblock_to_dir @@ -194,19 +224,23 @@ class DirCntrl(Directory_Controller, CntrlBase): self.probeToL3 = probe_to_l3 self.respToL3 = resp_to_l3 + def define_options(parser): parser.add_argument("--num-subcaches", type=int, default=4) parser.add_argument("--l3-data-latency", type=int, default=20) parser.add_argument("--l3-tag-latency", type=int, default=15) parser.add_argument("--cpu-to-dir-latency", type=int, default=15) - parser.add_argument("--no-resource-stalls", action="store_false", - default=True) + parser.add_argument( + "--no-resource-stalls", action="store_false", default=True + ) parser.add_argument("--num-tbes", type=int, default=256) - parser.add_argument("--l2-latency", type=int, default=50) # load to use + parser.add_argument("--l2-latency", type=int, default=50) # load to use -def create_system(options, full_system, system, dma_devices, bootmem, - ruby_system): - if buildEnv['PROTOCOL'] != 'MOESI_AMD_Base': + +def create_system( + options, full_system, system, dma_devices, bootmem, ruby_system +): + if buildEnv["PROTOCOL"] != "MOESI_AMD_Base": panic("This script requires the MOESI_AMD_Base protocol.") cpu_sequencers = [] @@ -230,7 +264,7 @@ def create_system(options, full_system, system, dma_devices, bootmem, # This is the base crossbar that connects the L3s, Dirs, and cpu # Cluster - mainCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s + mainCluster = Cluster(extBW=512, intBW=512) # 1 TB/s if options.numa_high_bit: numa_bit = options.numa_high_bit @@ -245,18 +279,20 @@ def create_system(options, full_system, system, dma_devices, bootmem, for i in range(options.num_dirs): dir_ranges = [] for r in system.mem_ranges: - addr_range = m5.objects.AddrRange(r.start, size = r.size(), - intlvHighBit = numa_bit, - intlvBits = dir_bits, - intlvMatch = i) + addr_range = m5.objects.AddrRange( + r.start, + size=r.size(), + intlvHighBit=numa_bit, + 
intlvBits=dir_bits, + intlvMatch=i, + ) dir_ranges.append(addr_range) - - dir_cntrl = DirCntrl(TCC_select_num_bits = 0) + dir_cntrl = DirCntrl(TCC_select_num_bits=0) dir_cntrl.create(options, dir_ranges, ruby_system, system) # Connect the Directory controller to the ruby network - dir_cntrl.requestFromCores = MessageBuffer(ordered = True) + dir_cntrl.requestFromCores = MessageBuffer(ordered=True) dir_cntrl.requestFromCores.in_port = ruby_system.network.out_port dir_cntrl.responseFromCores = MessageBuffer() @@ -271,8 +307,8 @@ def create_system(options, full_system, system, dma_devices, bootmem, dir_cntrl.responseToCore = MessageBuffer() dir_cntrl.responseToCore.out_port = ruby_system.network.in_port - dir_cntrl.triggerQueue = MessageBuffer(ordered = True) - dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True) + dir_cntrl.triggerQueue = MessageBuffer(ordered=True) + dir_cntrl.L3triggerQueue = MessageBuffer(ordered=True) dir_cntrl.requestToMemory = MessageBuffer() dir_cntrl.responseFromMemory = MessageBuffer() @@ -286,10 +322,10 @@ def create_system(options, full_system, system, dma_devices, bootmem, # level config files, such as the ruby_random_tester, will get confused if # the number of cpus does not equal the number of sequencers. Thus make # sure that an even number of cpus is specified. 
- assert((options.num_cpus % 2) == 0) + assert (options.num_cpus % 2) == 0 # For an odd number of CPUs, still create the right number of controllers - cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s + cpuCluster = Cluster(extBW=512, intBW=512) # 1 TB/s for i in range((options.num_cpus + 1) // 2): cp_cntrl = CPCntrl() @@ -318,64 +354,75 @@ def create_system(options, full_system, system, dma_devices, bootmem, cp_cntrl.responseToCore.in_port = ruby_system.network.out_port cp_cntrl.mandatoryQueue = MessageBuffer() - cp_cntrl.triggerQueue = MessageBuffer(ordered = True) + cp_cntrl.triggerQueue = MessageBuffer(ordered=True) cpuCluster.add(cp_cntrl) # Register CPUs and caches for each CorePair and directory (SE mode only) if not full_system: for i in range((options.num_cpus + 1) // 2): - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = - range(options.num_cpus), - core_id = i*2, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=range(options.num_cpus), + core_id=i * 2, + thread_siblings=[], + ) - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = - range(options.num_cpus), - core_id = i*2+1, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=range(options.num_cpus), + core_id=i * 2 + 1, + thread_siblings=[], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Instruction', - size = options.l1i_size, - line_size = options.cacheline_size, - assoc = options.l1i_assoc, - cpus = [i*2, i*2+1]) + FileSystemConfig.register_cache( + level=0, + idu_type="Instruction", + size=options.l1i_size, + line_size=options.cacheline_size, + assoc=options.l1i_assoc, + cpus=[i * 2, i * 2 + 1], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Data', - size = options.l1d_size, - line_size = options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i*2]) + FileSystemConfig.register_cache( + level=0, + idu_type="Data", + 
size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i * 2], + ) - FileSystemConfig.register_cache(level = 0, - idu_type = 'Data', - size = options.l1d_size, - line_size = options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i*2+1]) + FileSystemConfig.register_cache( + level=0, + idu_type="Data", + size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i * 2 + 1], + ) - FileSystemConfig.register_cache(level = 1, - idu_type = 'Unified', - size = options.l2_size, - line_size = options.cacheline_size, - assoc = options.l2_assoc, - cpus = [i*2, i*2+1]) + FileSystemConfig.register_cache( + level=1, + idu_type="Unified", + size=options.l2_size, + line_size=options.cacheline_size, + assoc=options.l2_assoc, + cpus=[i * 2, i * 2 + 1], + ) for i in range(options.num_dirs): - FileSystemConfig.register_cache(level = 2, - idu_type = 'Unified', - size = options.l3_size, - line_size = options.cacheline_size, - assoc = options.l3_assoc, - cpus = [n for n in - range(options.num_cpus)]) + FileSystemConfig.register_cache( + level=2, + idu_type="Unified", + size=options.l3_size, + line_size=options.cacheline_size, + assoc=options.l3_assoc, + cpus=[n for n in range(options.num_cpus)], + ) # Assuming no DMA devices - assert(len(dma_devices) == 0) + assert len(dma_devices) == 0 # Add cpu/gpu clusters to main cluster mainCluster.add(cpuCluster) diff --git a/configs/ruby/MOESI_CMP_directory.py b/configs/ruby/MOESI_CMP_directory.py index dcab1e46e8..ead03c1693 100644 --- a/configs/ruby/MOESI_CMP_directory.py +++ b/configs/ruby/MOESI_CMP_directory.py @@ -51,18 +51,24 @@ class L1Cache(RubyCache): dataAccessLatency = 1 tagAccessLatency = 1 + class L2Cache(RubyCache): dataAccessLatency = 20 tagAccessLatency = 20 + def define_options(parser): return -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'MOESI_CMP_directory': - panic("This 
script requires the MOESI_CMP_directory protocol to be built.") +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MOESI_CMP_directory": + panic( + "This script requires the MOESI_CMP_directory protocol to be built." + ) cpu_sequencers = [] @@ -85,27 +91,37 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l1i_cache = L1Cache(size = options.l1i_size, - assoc = options.l1i_assoc, - start_index_bit = block_size_bits, - is_icache = True) - l1d_cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits, - is_icache = False) + l1i_cache = L1Cache( + size=options.l1i_size, + assoc=options.l1i_assoc, + start_index_bit=block_size_bits, + is_icache=True, + ) + l1d_cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + is_icache=False, + ) clk_domain = cpus[i].clk_domain - l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache, - L1Dcache=l1d_cache, - send_evictions=send_evicts(options), - transitions_per_cycle=options.ports, - clk_domain=clk_domain, - ruby_system=ruby_system) + l1_cntrl = L1Cache_Controller( + version=i, + L1Icache=l1i_cache, + L1Dcache=l1d_cache, + send_evictions=send_evicts(options), + transitions_per_cycle=options.ports, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) - cpu_seq = RubySequencer(version=i, - dcache=l1d_cache, clk_domain=clk_domain, - ruby_system=ruby_system) + cpu_seq = RubySequencer( + version=i, + dcache=l1d_cache, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) @@ -124,22 +140,25 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.requestToL1Cache.in_port = ruby_system.network.out_port l1_cntrl.responseToL1Cache = MessageBuffer() 
l1_cntrl.responseToL1Cache.in_port = ruby_system.network.out_port - l1_cntrl.triggerQueue = MessageBuffer(ordered = True) - + l1_cntrl.triggerQueue = MessageBuffer(ordered=True) # Create the L2s interleaved addr ranges l2_addr_ranges = [] l2_bits = int(math.log(options.num_l2caches, 2)) numa_bit = block_size_bits + l2_bits - 1 sysranges = [] + system.mem_ranges - if bootmem: sysranges.append(bootmem.range) + if bootmem: + sysranges.append(bootmem.range) for i in range(options.num_l2caches): ranges = [] for r in sysranges: - addr_range = AddrRange(r.start, size = r.size(), - intlvHighBit = numa_bit, - intlvBits = l2_bits, - intlvMatch = i) + addr_range = AddrRange( + r.start, + size=r.size(), + intlvHighBit=numa_bit, + intlvBits=l2_bits, + intlvMatch=i, + ) ranges.append(addr_range) l2_addr_ranges.append(ranges) @@ -147,22 +166,28 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l2_cache = L2Cache(size = options.l2_size, - assoc = options.l2_assoc, - start_index_bit = block_size_bits + l2_bits) + l2_cache = L2Cache( + size=options.l2_size, + assoc=options.l2_assoc, + start_index_bit=block_size_bits + l2_bits, + ) - l2_cntrl = L2Cache_Controller(version = i, - L2cache = l2_cache, - transitions_per_cycle = options.ports, - ruby_system = ruby_system, - addr_ranges = l2_addr_ranges[i]) + l2_cntrl = L2Cache_Controller( + version=i, + L2cache=l2_cache, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + addr_ranges=l2_addr_ranges[i], + ) exec("ruby_system.l2_cntrl%d = l2_cntrl" % i) l2_cntrl_nodes.append(l2_cntrl) # Connect the L2 controllers and the network l2_cntrl.GlobalRequestFromL2Cache = MessageBuffer() - l2_cntrl.GlobalRequestFromL2Cache.out_port = ruby_system.network.in_port + l2_cntrl.GlobalRequestFromL2Cache.out_port = ( + ruby_system.network.in_port + ) l2_cntrl.L1RequestFromL2Cache = MessageBuffer() l2_cntrl.L1RequestFromL2Cache.out_port = 
ruby_system.network.in_port l2_cntrl.responseFromL2Cache = MessageBuffer() @@ -174,18 +199,18 @@ def create_system(options, full_system, system, dma_ports, bootmem, l2_cntrl.L1RequestToL2Cache.in_port = ruby_system.network.out_port l2_cntrl.responseToL2Cache = MessageBuffer() l2_cntrl.responseToL2Cache.in_port = ruby_system.network.out_port - l2_cntrl.triggerQueue = MessageBuffer(ordered = True) + l2_cntrl.triggerQueue = MessageBuffer(ordered=True) # Run each of the ruby memory controllers at a ratio of the frequency of # the ruby system. # clk_divider value is a fix to pass regression. ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain=ruby_system.clk_domain, - clk_divider=3) - + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) @@ -201,21 +226,22 @@ def create_system(options, full_system, system, dma_ports, bootmem, dir_cntrl.forwardFromDir.out_port = ruby_system.network.in_port dir_cntrl.requestToMemory = MessageBuffer() dir_cntrl.responseFromMemory = MessageBuffer() - dir_cntrl.triggerQueue = MessageBuffer(ordered = True) - + dir_cntrl.triggerQueue = MessageBuffer(ordered=True) for i, dma_port in enumerate(dma_ports): # # Create the Ruby objects associated with the dma controller # - dma_seq = DMASequencer(version = i, - ruby_system = ruby_system, - in_ports = dma_port) + dma_seq = DMASequencer( + version=i, ruby_system=ruby_system, in_ports=dma_port + ) - dma_cntrl = DMA_Controller(version = i, - dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) 
dma_cntrl_nodes.append(dma_cntrl) @@ -228,21 +254,21 @@ def create_system(options, full_system, system, dma_ports, bootmem, dma_cntrl.reqToDir.out_port = ruby_system.network.in_port dma_cntrl.respToDir = MessageBuffer() dma_cntrl.respToDir.out_port = ruby_system.network.in_port - dma_cntrl.triggerQueue = MessageBuffer(ordered = True) + dma_cntrl.triggerQueue = MessageBuffer(ordered=True) - - all_cntrls = l1_cntrl_nodes + \ - l2_cntrl_nodes + \ - dir_cntrl_nodes + \ - dma_cntrl_nodes + all_cntrls = ( + l1_cntrl_nodes + l2_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes + ) # Create the io controller and the sequencer if full_system: io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) ruby_system._io_port = io_seq - io_controller = DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network @@ -253,7 +279,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, io_controller.reqToDir.out_port = ruby_system.network.in_port io_controller.respToDir = MessageBuffer() io_controller.respToDir.out_port = ruby_system.network.in_port - io_controller.triggerQueue = MessageBuffer(ordered = True) + io_controller.triggerQueue = MessageBuffer(ordered=True) all_cntrls = all_cntrls + [io_controller] diff --git a/configs/ruby/MOESI_CMP_token.py b/configs/ruby/MOESI_CMP_token.py index d23c5ed52f..a610db5076 100644 --- a/configs/ruby/MOESI_CMP_token.py +++ b/configs/ruby/MOESI_CMP_token.py @@ -35,27 +35,44 @@ from .Ruby import send_evicts # # Declare caches used by the protocol # -class L1Cache(RubyCache): pass -class L2Cache(RubyCache): pass +class L1Cache(RubyCache): + pass + + +class L2Cache(RubyCache): + pass + def define_options(parser): parser.add_argument( - "--l1-retries", type=int, default=1, - 
help="Token_CMP: # of l1 retries before going persistent") + "--l1-retries", + type=int, + default=1, + help="Token_CMP: # of l1 retries before going persistent", + ) parser.add_argument( - "--timeout-latency", type=int, default=300, - help="Token_CMP: cycles until issuing again"); + "--timeout-latency", + type=int, + default=300, + help="Token_CMP: cycles until issuing again", + ) parser.add_argument( - "--disable-dyn-timeouts", action="store_true", - help="Token_CMP: disable dyanimc timeouts, use fixed latency instead") + "--disable-dyn-timeouts", + action="store_true", + help="Token_CMP: disable dyanimc timeouts, use fixed latency instead", + ) parser.add_argument( - "--allow-atomic-migration", action="store_true", - help="allow migratory sharing for atomic only accessed blocks") + "--allow-atomic-migration", + action="store_true", + help="allow migratory sharing for atomic only accessed blocks", + ) -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'MOESI_CMP_token': +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MOESI_CMP_token": panic("This script requires the MOESI_CMP_token protocol to be built.") # @@ -86,34 +103,41 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l1i_cache = L1Cache(size = options.l1i_size, - assoc = options.l1i_assoc, - start_index_bit = block_size_bits) - l1d_cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits) + l1i_cache = L1Cache( + size=options.l1i_size, + assoc=options.l1i_assoc, + start_index_bit=block_size_bits, + ) + l1d_cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + ) clk_domain = cpus[i].clk_domain - l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache, - L1Dcache=l1d_cache, - 
l2_select_num_bits=l2_bits, - N_tokens=n_tokens, - retry_threshold=options.l1_retries, - fixed_timeout_latency=\ - options.timeout_latency, - dynamic_timeout_enabled=\ - not options.disable_dyn_timeouts, - no_mig_atomic=not \ - options.allow_atomic_migration, - send_evictions=send_evicts(options), - transitions_per_cycle=options.ports, - clk_domain=clk_domain, - ruby_system=ruby_system) + l1_cntrl = L1Cache_Controller( + version=i, + L1Icache=l1i_cache, + L1Dcache=l1d_cache, + l2_select_num_bits=l2_bits, + N_tokens=n_tokens, + retry_threshold=options.l1_retries, + fixed_timeout_latency=options.timeout_latency, + dynamic_timeout_enabled=not options.disable_dyn_timeouts, + no_mig_atomic=not options.allow_atomic_migration, + send_evictions=send_evicts(options), + transitions_per_cycle=options.ports, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) - cpu_seq = RubySequencer(version=i, - dcache=l1d_cache, clk_domain=clk_domain, - ruby_system=ruby_system) + cpu_seq = RubySequencer( + version=i, + dcache=l1d_cache, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) @@ -127,7 +151,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.requestFromL1Cache.out_port = ruby_system.network.in_port l1_cntrl.responseFromL1Cache = MessageBuffer() l1_cntrl.responseFromL1Cache.out_port = ruby_system.network.in_port - l1_cntrl.persistentFromL1Cache = MessageBuffer(ordered = True) + l1_cntrl.persistentFromL1Cache = MessageBuffer(ordered=True) l1_cntrl.persistentFromL1Cache.out_port = ruby_system.network.in_port l1_cntrl.mandatoryQueue = MessageBuffer() @@ -135,32 +159,37 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.requestToL1Cache.in_port = ruby_system.network.out_port l1_cntrl.responseToL1Cache = MessageBuffer() l1_cntrl.responseToL1Cache.in_port = ruby_system.network.out_port - l1_cntrl.persistentToL1Cache = MessageBuffer(ordered = 
True) + l1_cntrl.persistentToL1Cache = MessageBuffer(ordered=True) l1_cntrl.persistentToL1Cache.in_port = ruby_system.network.out_port - l2_index_start = block_size_bits + l2_bits for i in range(options.num_l2caches): # # First create the Ruby objects associated with this cpu # - l2_cache = L2Cache(size = options.l2_size, - assoc = options.l2_assoc, - start_index_bit = l2_index_start) + l2_cache = L2Cache( + size=options.l2_size, + assoc=options.l2_assoc, + start_index_bit=l2_index_start, + ) - l2_cntrl = L2Cache_Controller(version = i, - L2cache = l2_cache, - N_tokens = n_tokens, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + l2_cntrl = L2Cache_Controller( + version=i, + L2cache=l2_cache, + N_tokens=n_tokens, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.l2_cntrl%d = l2_cntrl" % i) l2_cntrl_nodes.append(l2_cntrl) # Connect the L2 controllers and the network l2_cntrl.GlobalRequestFromL2Cache = MessageBuffer() - l2_cntrl.GlobalRequestFromL2Cache.out_port = ruby_system.network.in_port + l2_cntrl.GlobalRequestFromL2Cache.out_port = ( + ruby_system.network.in_port + ) l2_cntrl.L1RequestFromL2Cache = MessageBuffer() l2_cntrl.L1RequestFromL2Cache.out_port = ruby_system.network.in_port l2_cntrl.responseFromL2Cache = MessageBuffer() @@ -172,19 +201,19 @@ def create_system(options, full_system, system, dma_ports, bootmem, l2_cntrl.L1RequestToL2Cache.in_port = ruby_system.network.out_port l2_cntrl.responseToL2Cache = MessageBuffer() l2_cntrl.responseToL2Cache.in_port = ruby_system.network.out_port - l2_cntrl.persistentToL2Cache = MessageBuffer(ordered = True) + l2_cntrl.persistentToL2Cache = MessageBuffer(ordered=True) l2_cntrl.persistentToL2Cache.in_port = ruby_system.network.out_port - # Run each of the ruby memory controllers at a ratio of the frequency of # the ruby system # clk_divider value is a fix to pass regression. 
ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain=ruby_system.clk_domain, - clk_divider=3) + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) @@ -195,63 +224,65 @@ def create_system(options, full_system, system, dma_ports, bootmem, dir_cntrl.requestToDir.in_port = ruby_system.network.out_port dir_cntrl.responseToDir = MessageBuffer() dir_cntrl.responseToDir.in_port = ruby_system.network.out_port - dir_cntrl.persistentToDir = MessageBuffer(ordered = True) + dir_cntrl.persistentToDir = MessageBuffer(ordered=True) dir_cntrl.persistentToDir.in_port = ruby_system.network.out_port - dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True) + dir_cntrl.dmaRequestToDir = MessageBuffer(ordered=True) dir_cntrl.dmaRequestToDir.in_port = ruby_system.network.out_port dir_cntrl.requestFromDir = MessageBuffer() dir_cntrl.requestFromDir.out_port = ruby_system.network.in_port dir_cntrl.responseFromDir = MessageBuffer() dir_cntrl.responseFromDir.out_port = ruby_system.network.in_port - dir_cntrl.persistentFromDir = MessageBuffer(ordered = True) + dir_cntrl.persistentFromDir = MessageBuffer(ordered=True) dir_cntrl.persistentFromDir.out_port = ruby_system.network.in_port - dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True) + dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered=True) dir_cntrl.dmaResponseFromDir.out_port = ruby_system.network.in_port dir_cntrl.requestToMemory = MessageBuffer() dir_cntrl.responseFromMemory = MessageBuffer() - for i, dma_port in enumerate(dma_ports): # # Create the Ruby objects associated with the dma controller # - dma_seq = DMASequencer(version = i, - ruby_system = ruby_system, - in_ports = dma_port) + dma_seq = DMASequencer( + version=i, 
ruby_system=ruby_system, in_ports=dma_port + ) - dma_cntrl = DMA_Controller(version = i, - dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) dma_cntrl_nodes.append(dma_cntrl) # Connect the dma controller to the network dma_cntrl.mandatoryQueue = MessageBuffer() - dma_cntrl.responseFromDir = MessageBuffer(ordered = True) + dma_cntrl.responseFromDir = MessageBuffer(ordered=True) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port dma_cntrl.reqToDirectory = MessageBuffer() dma_cntrl.reqToDirectory.out_port = ruby_system.network.in_port - all_cntrls = l1_cntrl_nodes + \ - l2_cntrl_nodes + \ - dir_cntrl_nodes + \ - dma_cntrl_nodes + all_cntrls = ( + l1_cntrl_nodes + l2_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes + ) # Create the io controller and the sequencer if full_system: io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) ruby_system._io_port = io_seq - io_controller = DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network io_controller.mandatoryQueue = MessageBuffer() - io_controller.responseFromDir = MessageBuffer(ordered = True) + io_controller.responseFromDir = MessageBuffer(ordered=True) io_controller.responseFromDir.in_port = ruby_system.network.out_port io_controller.reqToDirectory = MessageBuffer() io_controller.reqToDirectory.out_port = ruby_system.network.in_port diff --git a/configs/ruby/MOESI_hammer.py b/configs/ruby/MOESI_hammer.py index 15a6c682da..65ec11a1ad 100644 --- a/configs/ruby/MOESI_hammer.py +++ b/configs/ruby/MOESI_hammer.py @@ -36,25 +36,42 
@@ from common import FileSystemConfig # # Declare caches used by the protocol # -class L1Cache(RubyCache): pass -class L2Cache(RubyCache): pass +class L1Cache(RubyCache): + pass + + +class L2Cache(RubyCache): + pass + + # # Probe filter is a cache # -class ProbeFilter(RubyCache): pass +class ProbeFilter(RubyCache): + pass + def define_options(parser): - parser.add_argument("--allow-atomic-migration", action="store_true", - help="allow migratory sharing for atomic only accessed blocks") - parser.add_argument("--pf-on", action="store_true", - help="Hammer: enable Probe Filter") - parser.add_argument("--dir-on", action="store_true", - help="Hammer: enable Full-bit Directory") + parser.add_argument( + "--allow-atomic-migration", + action="store_true", + help="allow migratory sharing for atomic only accessed blocks", + ) + parser.add_argument( + "--pf-on", action="store_true", help="Hammer: enable Probe Filter" + ) + parser.add_argument( + "--dir-on", + action="store_true", + help="Hammer: enable Full-bit Directory", + ) -def create_system(options, full_system, system, dma_ports, bootmem, - ruby_system, cpus): - if buildEnv['PROTOCOL'] != 'MOESI_hammer': +def create_system( + options, full_system, system, dma_ports, bootmem, ruby_system, cpus +): + + if buildEnv["PROTOCOL"] != "MOESI_hammer": panic("This script requires the MOESI_hammer protocol to be built.") cpu_sequencers = [] @@ -77,31 +94,43 @@ def create_system(options, full_system, system, dma_ports, bootmem, # # First create the Ruby objects associated with this cpu # - l1i_cache = L1Cache(size = options.l1i_size, - assoc = options.l1i_assoc, - start_index_bit = block_size_bits, - is_icache = True) - l1d_cache = L1Cache(size = options.l1d_size, - assoc = options.l1d_assoc, - start_index_bit = block_size_bits) - l2_cache = L2Cache(size = options.l2_size, - assoc = options.l2_assoc, - start_index_bit = block_size_bits) + l1i_cache = L1Cache( + size=options.l1i_size, + assoc=options.l1i_assoc, + 
start_index_bit=block_size_bits, + is_icache=True, + ) + l1d_cache = L1Cache( + size=options.l1d_size, + assoc=options.l1d_assoc, + start_index_bit=block_size_bits, + ) + l2_cache = L2Cache( + size=options.l2_size, + assoc=options.l2_assoc, + start_index_bit=block_size_bits, + ) clk_domain = cpus[i].clk_domain - l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache, - L1Dcache=l1d_cache, L2cache=l2_cache, - no_mig_atomic=not \ - options.allow_atomic_migration, - send_evictions=send_evicts(options), - transitions_per_cycle=options.ports, - clk_domain=clk_domain, - ruby_system=ruby_system) + l1_cntrl = L1Cache_Controller( + version=i, + L1Icache=l1i_cache, + L1Dcache=l1d_cache, + L2cache=l2_cache, + no_mig_atomic=not options.allow_atomic_migration, + send_evictions=send_evicts(options), + transitions_per_cycle=options.ports, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) - cpu_seq = RubySequencer(version=i, - dcache=l1d_cache,clk_domain=clk_domain, - ruby_system=ruby_system) + cpu_seq = RubySequencer( + version=i, + dcache=l1d_cache, + clk_domain=clk_domain, + ruby_system=ruby_system, + ) l1_cntrl.sequencer = cpu_seq if options.recycle_latency: @@ -131,7 +160,6 @@ def create_system(options, full_system, system, dma_ports, bootmem, l1_cntrl.responseToCache = MessageBuffer() l1_cntrl.responseToCache.in_port = ruby_system.network.out_port - # # determine size and index bits for probe filter # By default, the probe filter size is configured to be twice the @@ -145,7 +173,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, if options.pf_on or options.dir_on: # if numa high bit explicitly set, make sure it does not overlap # with the probe filter index - assert(options.numa_high_bit - dir_bits > pf_bits) + assert options.numa_high_bit - dir_bits > pf_bits # set the probe filter start bit to just above the block offset pf_start_bit = block_size_bits @@ -159,17 +187,17 @@ def create_system(options, full_system, system, dma_ports, bootmem, 
# the ruby system # clk_divider value is a fix to pass regression. ruby_system.memctrl_clk_domain = DerivedClockDomain( - clk_domain=ruby_system.clk_domain, - clk_divider=3) + clk_domain=ruby_system.clk_domain, clk_divider=3 + ) mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories( - options, bootmem, ruby_system, system) + options, bootmem, ruby_system, system + ) dir_cntrl_nodes = mem_dir_cntrl_nodes[:] if rom_dir_cntrl_node is not None: dir_cntrl_nodes.append(rom_dir_cntrl_node) for dir_cntrl in dir_cntrl_nodes: - pf = ProbeFilter(size = pf_size, assoc = 4, - start_index_bit = pf_start_bit) + pf = ProbeFilter(size=pf_size, assoc=4, start_index_bit=pf_start_bit) dir_cntrl.probeFilter = pf dir_cntrl.probe_filter_enabled = options.pf_on @@ -183,10 +211,10 @@ def create_system(options, full_system, system, dma_ports, bootmem, dir_cntrl.forwardFromDir.out_port = ruby_system.network.in_port dir_cntrl.responseFromDir = MessageBuffer() dir_cntrl.responseFromDir.out_port = ruby_system.network.in_port - dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True) + dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered=True) dir_cntrl.dmaResponseFromDir.out_port = ruby_system.network.in_port - dir_cntrl.triggerQueue = MessageBuffer(ordered = True) + dir_cntrl.triggerQueue = MessageBuffer(ordered=True) dir_cntrl.unblockToDir = MessageBuffer() dir_cntrl.unblockToDir.in_port = ruby_system.network.out_port @@ -194,24 +222,25 @@ def create_system(options, full_system, system, dma_ports, bootmem, dir_cntrl.responseToDir.in_port = ruby_system.network.out_port dir_cntrl.requestToDir = MessageBuffer() dir_cntrl.requestToDir.in_port = ruby_system.network.out_port - dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True) + dir_cntrl.dmaRequestToDir = MessageBuffer(ordered=True) dir_cntrl.dmaRequestToDir.in_port = ruby_system.network.out_port dir_cntrl.requestToMemory = MessageBuffer() dir_cntrl.responseFromMemory = MessageBuffer() - for i, dma_port in enumerate(dma_ports): # 
# Create the Ruby objects associated with the dma controller # - dma_seq = DMASequencer(version = i, - ruby_system = ruby_system, - in_ports = dma_port) + dma_seq = DMASequencer( + version=i, ruby_system=ruby_system, in_ports=dma_port + ) - dma_cntrl = DMA_Controller(version = i, - dma_sequencer = dma_seq, - transitions_per_cycle = options.ports, - ruby_system = ruby_system) + dma_cntrl = DMA_Controller( + version=i, + dma_sequencer=dma_seq, + transitions_per_cycle=options.ports, + ruby_system=ruby_system, + ) exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) dma_cntrl_nodes.append(dma_cntrl) @@ -220,7 +249,7 @@ def create_system(options, full_system, system, dma_ports, bootmem, dma_cntrl.recycle_latency = options.recycle_latency # Connect the dma controller to the network - dma_cntrl.responseFromDir = MessageBuffer(ordered = True) + dma_cntrl.responseFromDir = MessageBuffer(ordered=True) dma_cntrl.responseFromDir.in_port = ruby_system.network.out_port dma_cntrl.requestToDir = MessageBuffer() dma_cntrl.requestToDir.out_port = ruby_system.network.in_port @@ -232,13 +261,15 @@ def create_system(options, full_system, system, dma_ports, bootmem, if full_system: io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system) ruby_system._io_port = io_seq - io_controller = DMA_Controller(version = len(dma_ports), - dma_sequencer = io_seq, - ruby_system = ruby_system) + io_controller = DMA_Controller( + version=len(dma_ports), + dma_sequencer=io_seq, + ruby_system=ruby_system, + ) ruby_system.io_controller = io_controller # Connect the dma controller to the network - io_controller.responseFromDir = MessageBuffer(ordered = True) + io_controller.responseFromDir = MessageBuffer(ordered=True) io_controller.responseFromDir.in_port = ruby_system.network.out_port io_controller.requestToDir = MessageBuffer() io_controller.requestToDir.out_port = ruby_system.network.in_port @@ -248,30 +279,38 @@ def create_system(options, full_system, system, dma_ports, bootmem, # Register 
configuration with filesystem else: for i in range(options.num_cpus): - FileSystemConfig.register_cpu(physical_package_id = 0, - core_siblings = [], - core_id = i, - thread_siblings = []) + FileSystemConfig.register_cpu( + physical_package_id=0, + core_siblings=[], + core_id=i, + thread_siblings=[], + ) - FileSystemConfig.register_cache(level = 1, - idu_type = 'Instruction', - size = options.l1i_size, - line_size = options.cacheline_size, - assoc = options.l1i_assoc, - cpus = [i]) - FileSystemConfig.register_cache(level = 1, - idu_type = 'Data', - size = options.l1d_size, - line_size = options.cacheline_size, - assoc = options.l1d_assoc, - cpus = [i]) + FileSystemConfig.register_cache( + level=1, + idu_type="Instruction", + size=options.l1i_size, + line_size=options.cacheline_size, + assoc=options.l1i_assoc, + cpus=[i], + ) + FileSystemConfig.register_cache( + level=1, + idu_type="Data", + size=options.l1d_size, + line_size=options.cacheline_size, + assoc=options.l1d_assoc, + cpus=[i], + ) - FileSystemConfig.register_cache(level = 2, - idu_type = 'Unified', - size = options.l2_size, - line_size = options.cacheline_size, - assoc = options.l2_assoc, - cpus = [i]) + FileSystemConfig.register_cache( + level=2, + idu_type="Unified", + size=options.l2_size, + line_size=options.cacheline_size, + assoc=options.l2_assoc, + cpus=[i], + ) ruby_system.network.number_of_virtual_networks = 6 topology = create_topology(all_cntrls, options) diff --git a/configs/ruby/Ruby.py b/configs/ruby/Ruby.py index ba94c15122..3ca7b95140 100644 --- a/configs/ruby/Ruby.py +++ b/configs/ruby/Ruby.py @@ -42,8 +42,10 @@ import m5 from m5.objects import * from m5.defines import buildEnv from m5.util import addToPath, fatal +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa -addToPath('../') +addToPath("../") from common import ObjectList from common import MemConfig @@ -52,57 +54,82 @@ from common import FileSystemConfig from topologies import * from network import Network + def 
define_options(parser): # By default, ruby uses the simple timing cpu parser.set_defaults(cpu_type="TimingSimpleCPU") parser.add_argument( - "--ruby-clock", action="store", type=str, - default='2GHz', - help="Clock for blocks running at Ruby system's speed") + "--ruby-clock", + action="store", + type=str, + default="2GHz", + help="Clock for blocks running at Ruby system's speed", + ) parser.add_argument( - "--access-backing-store", action="store_true", default=False, - help="Should ruby maintain a second copy of memory") + "--access-backing-store", + action="store_true", + default=False, + help="Should ruby maintain a second copy of memory", + ) # Options related to cache structure parser.add_argument( - "--ports", action="store", type=int, default=4, + "--ports", + action="store", + type=int, + default=4, help="used of transitions per cycle which is a proxy \ - for the number of ports.") + for the number of ports.", + ) # network options are in network/Network.py # ruby mapping options parser.add_argument( - "--numa-high-bit", type=int, default=0, + "--numa-high-bit", + type=int, + default=0, help="high order address bit to use for numa mapping. " - "0 = highest bit, not specified = lowest bit") + "0 = highest bit, not specified = lowest bit", + ) parser.add_argument( - "--interleaving-bits", type=int, default=0, - help="number of bits to specify interleaving " \ - "in directory, memory controllers and caches. " - "0 = not specified") + "--interleaving-bits", + type=int, + default=0, + help="number of bits to specify interleaving " + "in directory, memory controllers and caches. " + "0 = not specified", + ) parser.add_argument( - "--xor-low-bit", type=int, default=20, - help="hashing bit for channel selection" \ - "see MemConfig for explanation of the default"\ - "parameter. 
If set to 0, xor_high_bit is also"\ - "set to 0.") + "--xor-low-bit", + type=int, + default=20, + help="hashing bit for channel selection" + "see MemConfig for explanation of the default" + "parameter. If set to 0, xor_high_bit is also" + "set to 0.", + ) parser.add_argument( - "--recycle-latency", type=int, default=10, - help="Recycle latency for ruby controller input buffers") + "--recycle-latency", + type=int, + default=10, + help="Recycle latency for ruby controller input buffers", + ) - protocol = buildEnv['PROTOCOL'] + protocol = buildEnv["PROTOCOL"] exec("from . import %s" % protocol) eval("%s.define_options(parser)" % protocol) Network.define_options(parser) + def setup_memory_controllers(system, ruby, dir_cntrls, options): - if (options.numa_high_bit): - block_size_bits = options.numa_high_bit + 1 - \ - int(math.log(options.num_dirs, 2)) + if options.numa_high_bit: + block_size_bits = ( + options.numa_high_bit + 1 - int(math.log(options.num_dirs, 2)) + ) ruby.block_size_bytes = 2 ** (block_size_bits) else: ruby.block_size_bytes = options.cacheline_size @@ -135,16 +162,21 @@ def setup_memory_controllers(system, ruby, dir_cntrls, options): dir_ranges = [] for r in system.mem_ranges: mem_type = ObjectList.mem_list.get(options.mem_type) - dram_intf = MemConfig.create_mem_intf(mem_type, r, index, + dram_intf = MemConfig.create_mem_intf( + mem_type, + r, + index, int(math.log(options.num_dirs, 2)), - intlv_size, options.xor_low_bit) + intlv_size, + options.xor_low_bit, + ) if issubclass(mem_type, DRAMInterface): - mem_ctrl = m5.objects.MemCtrl(dram = dram_intf) + mem_ctrl = m5.objects.MemCtrl(dram=dram_intf) else: mem_ctrl = dram_intf if options.access_backing_store: - dram_intf.kvm_map=False + dram_intf.kvm_map = False mem_ctrls.append(mem_ctrl) dir_ranges.append(dram_intf.range) @@ -156,8 +188,9 @@ def setup_memory_controllers(system, ruby, dir_cntrls, options): # Enable low-power DRAM states if option is set if issubclass(mem_type, DRAMInterface): - 
mem_ctrl.dram.enable_dram_powerdown = \ - options.enable_dram_powerdown + mem_ctrl.dram.enable_dram_powerdown = ( + options.enable_dram_powerdown + ) index += 1 dir_cntrl.addr_ranges = dir_ranges @@ -169,17 +202,25 @@ def setup_memory_controllers(system, ruby, dir_cntrls, options): def create_topology(controllers, options): - """ Called from create_system in configs/ruby/.py - Must return an object which is a subclass of BaseTopology - found in configs/topologies/BaseTopology.py - This is a wrapper for the legacy topologies. + """Called from create_system in configs/ruby/.py + Must return an object which is a subclass of BaseTopology + found in configs/topologies/BaseTopology.py + This is a wrapper for the legacy topologies. """ exec("import topologies.%s as Topo" % options.topology) topology = eval("Topo.%s(controllers)" % options.topology) return topology -def create_system(options, full_system, system, piobus = None, dma_ports = [], - bootmem=None, cpus=None): + +def create_system( + options, + full_system, + system, + piobus=None, + dma_ports=[], + bootmem=None, + cpus=None, +): system.ruby = RubySystem() ruby = system.ruby @@ -188,40 +229,46 @@ def create_system(options, full_system, system, piobus = None, dma_ports = [], FileSystemConfig.config_filesystem(system, options) # Create the network object - (network, IntLinkClass, ExtLinkClass, RouterClass, InterfaceClass) = \ - Network.create_network(options, ruby) + ( + network, + IntLinkClass, + ExtLinkClass, + RouterClass, + InterfaceClass, + ) = Network.create_network(options, ruby) ruby.network = network if cpus is None: cpus = system.cpu - protocol = buildEnv['PROTOCOL'] + protocol = buildEnv["PROTOCOL"] exec("from . 
import %s" % protocol) try: - (cpu_sequencers, dir_cntrls, topology) = \ - eval("%s.create_system(options, full_system, system, dma_ports,\ + (cpu_sequencers, dir_cntrls, topology) = eval( + "%s.create_system(options, full_system, system, dma_ports,\ bootmem, ruby, cpus)" - % protocol) + % protocol + ) except: print("Error: could not create sytem for ruby protocol %s" % protocol) raise # Create the network topology - topology.makeTopology(options, network, IntLinkClass, ExtLinkClass, - RouterClass) + topology.makeTopology( + options, network, IntLinkClass, ExtLinkClass, RouterClass + ) # Register the topology elements with faux filesystem (SE mode only) if not full_system: topology.registerTopology(options) - # Initialize network based on topology Network.init_network(options, network, InterfaceClass) # Create a port proxy for connecting the system port. This is # independent of the protocol and kept in the protocol-agnostic # part (i.e. here). - sys_port_proxy = RubyPortProxy(ruby_system = ruby) + sys_port_proxy = RubyPortProxy(ruby_system=ruby) if piobus is not None: sys_port_proxy.pio_request_port = piobus.cpu_side_ports @@ -246,8 +293,10 @@ def create_system(options, full_system, system, piobus = None, dma_ports = [], # Create a backing copy of physical memory in case required if options.access_backing_store: ruby.access_backing_store = True - ruby.phys_mem = SimpleMemory(range=system.mem_ranges[0], - in_addr_map=False) + ruby.phys_mem = SimpleMemory( + range=system.mem_ranges[0], in_addr_map=False + ) + def create_directories(options, bootmem, ruby_system, system): dir_cntrl_nodes = [] @@ -271,12 +320,15 @@ def create_directories(options, bootmem, ruby_system, system): return (dir_cntrl_nodes, None) + def send_evicts(options): # currently, 2 scenarios warrant forwarding evictions to the CPU: # 1. The O3 model must keep the LSQ coherent with the caches # 2. The x86 mwait instruction is built on top of coherence invalidations # 3. 
The local exclusive monitor in ARM systems - if options.cpu_type == "DerivO3CPU" or \ - buildEnv['TARGET_ISA'] in ('x86', 'arm'): + if options.cpu_type == "DerivO3CPU" or get_runtime_isa() in ( + ISA.X86, + ISA.ARM, + ): return True return False diff --git a/configs/splash2/cluster.py b/configs/splash2/cluster.py index 46bcfbfe29..4c09eee8f8 100644 --- a/configs/splash2/cluster.py +++ b/configs/splash2/cluster.py @@ -44,26 +44,23 @@ parser = argparse.ArgumentParser() parser.add_argument("-d", "--detailed", action="store_true") parser.add_argument("-t", "--timing", action="store_true") parser.add_argument("-m", "--maxtick", type=int) -parser.add_argument("-c", "--numclusters", - help="Number of clusters", type=int) -parser.add_argument("-n", "--numcpus", - help="Number of cpus in total", type=int) -parser.add_argument("-f", "--frequency", - default = "1GHz", - help="Frequency of each CPU") -parser.add_argument("--l1size", - default = "32kB") -parser.add_argument("--l1latency", - default = 1) -parser.add_argument("--l2size", - default = "256kB") -parser.add_argument("--l2latency", - default = 10) -parser.add_argument("--rootdir", - help="ROot directory of Splash2", - default="/dist/splash2/codes/") -parser.add_argument("-b", "--benchmark", - help="Splash 2 benchmark to run") +parser.add_argument("-c", "--numclusters", help="Number of clusters", type=int) +parser.add_argument( + "-n", "--numcpus", help="Number of cpus in total", type=int +) +parser.add_argument( + "-f", "--frequency", default="1GHz", help="Frequency of each CPU" +) +parser.add_argument("--l1size", default="32kB") +parser.add_argument("--l1latency", default=1) +parser.add_argument("--l2size", default="256kB") +parser.add_argument("--l2latency", default=10) +parser.add_argument( + "--rootdir", + help="ROot directory of Splash2", + default="/dist/splash2/codes/", +) +parser.add_argument("-b", "--benchmark", help="Splash 2 benchmark to run") args = parser.parse_args() @@ -71,86 +68,112 @@ args = 
parser.parse_args() # Define Splash2 Benchmarks # ==================== class Cholesky(Process): - executable = args.rootdir + '/kernels/cholesky/CHOLESKY' - cmd = 'CHOLESKY -p' + str(args.numcpus) + ' '\ - + args.rootdir + '/kernels/cholesky/inputs/tk23.O' + executable = args.rootdir + "/kernels/cholesky/CHOLESKY" + cmd = ( + "CHOLESKY -p" + + str(args.numcpus) + + " " + + args.rootdir + + "/kernels/cholesky/inputs/tk23.O" + ) + class FFT(Process): - executable = args.rootdir + 'kernels/fft/FFT' - cmd = 'FFT -p' + str(args.numcpus) + ' -m18' + executable = args.rootdir + "kernels/fft/FFT" + cmd = "FFT -p" + str(args.numcpus) + " -m18" + class LU_contig(Process): - executable = args.rootdir + 'kernels/lu/contiguous_blocks/LU' - cmd = 'LU -p' + str(args.numcpus) + executable = args.rootdir + "kernels/lu/contiguous_blocks/LU" + cmd = "LU -p" + str(args.numcpus) + class LU_noncontig(Process): - executable = args.rootdir + 'kernels/lu/non_contiguous_blocks/LU' - cmd = 'LU -p' + str(args.numcpus) + executable = args.rootdir + "kernels/lu/non_contiguous_blocks/LU" + cmd = "LU -p" + str(args.numcpus) + class Radix(Process): - executable = args.rootdir + 'kernels/radix/RADIX' - cmd = 'RADIX -n524288 -p' + str(args.numcpus) + executable = args.rootdir + "kernels/radix/RADIX" + cmd = "RADIX -n524288 -p" + str(args.numcpus) + class Barnes(Process): - executable = args.rootdir + 'apps/barnes/BARNES' - cmd = 'BARNES' - input = args.rootdir + 'apps/barnes/input.p' + str(args.numcpus) + executable = args.rootdir + "apps/barnes/BARNES" + cmd = "BARNES" + input = args.rootdir + "apps/barnes/input.p" + str(args.numcpus) + class FMM(Process): - executable = args.rootdir + 'apps/fmm/FMM' - cmd = 'FMM' - input = args.rootdir + 'apps/fmm/inputs/input.2048.p' + str(args.numcpus) + executable = args.rootdir + "apps/fmm/FMM" + cmd = "FMM" + input = args.rootdir + "apps/fmm/inputs/input.2048.p" + str(args.numcpus) + class Ocean_contig(Process): - executable = args.rootdir + 
'apps/ocean/contiguous_partitions/OCEAN' - cmd = 'OCEAN -p' + str(args.numcpus) + executable = args.rootdir + "apps/ocean/contiguous_partitions/OCEAN" + cmd = "OCEAN -p" + str(args.numcpus) + class Ocean_noncontig(Process): - executable = args.rootdir + 'apps/ocean/non_contiguous_partitions/OCEAN' - cmd = 'OCEAN -p' + str(args.numcpus) + executable = args.rootdir + "apps/ocean/non_contiguous_partitions/OCEAN" + cmd = "OCEAN -p" + str(args.numcpus) + class Raytrace(Process): - executable = args.rootdir + 'apps/raytrace/RAYTRACE' - cmd = 'RAYTRACE -p' + str(args.numcpus) + ' ' \ - + args.rootdir + 'apps/raytrace/inputs/teapot.env' + executable = args.rootdir + "apps/raytrace/RAYTRACE" + cmd = ( + "RAYTRACE -p" + + str(args.numcpus) + + " " + + args.rootdir + + "apps/raytrace/inputs/teapot.env" + ) + class Water_nsquared(Process): - executable = args.rootdir + 'apps/water-nsquared/WATER-NSQUARED' - cmd = 'WATER-NSQUARED' - input = args.rootdir + 'apps/water-nsquared/input.p' + str(args.numcpus) + executable = args.rootdir + "apps/water-nsquared/WATER-NSQUARED" + cmd = "WATER-NSQUARED" + input = args.rootdir + "apps/water-nsquared/input.p" + str(args.numcpus) + class Water_spatial(Process): - executable = args.rootdir + 'apps/water-spatial/WATER-SPATIAL' - cmd = 'WATER-SPATIAL' - input = args.rootdir + 'apps/water-spatial/input.p' + str(args.numcpus) + executable = args.rootdir + "apps/water-spatial/WATER-SPATIAL" + cmd = "WATER-SPATIAL" + input = args.rootdir + "apps/water-spatial/input.p" + str(args.numcpus) # -------------------- # Base L1 Cache Definition # ==================== + class L1(Cache): latency = args.l1latency mshrs = 12 tgts_per_mshr = 8 + # ---------------------- # Base L2 Cache Definition # ---------------------- + class L2(Cache): latency = args.l2latency mshrs = 92 tgts_per_mshr = 16 write_buffers = 8 + # ---------------------- # Define the clusters with their cpus # ---------------------- class Cluster: pass -cpusPerCluster = 
args.numcpus/args.numclusters + +cpusPerCluster = args.numcpus / args.numclusters busFrequency = Frequency(args.frequency) busFrequency *= cpusPerCluster @@ -159,55 +182,62 @@ all_cpus = [] all_l1s = [] all_l1buses = [] if args.timing: - clusters = [ Cluster() for i in range(args.numclusters)] + clusters = [Cluster() for i in range(args.numclusters)] for j in range(args.numclusters): clusters[j].id = j for cluster in clusters: cluster.clusterbus = L2XBar(clock=busFrequency) all_l1buses += [cluster.clusterbus] - cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id, - clock=args.frequency) - for i in range(cpusPerCluster)] + cluster.cpus = [ + TimingSimpleCPU(cpu_id=i + cluster.id, clock=args.frequency) + for i in range(cpusPerCluster) + ] all_cpus += cluster.cpus - cluster.l1 = L1(size=args.l1size, assoc = 4) + cluster.l1 = L1(size=args.l1size, assoc=4) all_l1s += [cluster.l1] elif args.detailed: - clusters = [ Cluster() for i in range(args.numclusters)] + clusters = [Cluster() for i in range(args.numclusters)] for j in range(args.numclusters): clusters[j].id = j for cluster in clusters: cluster.clusterbus = L2XBar(clock=busFrequency) all_l1buses += [cluster.clusterbus] - cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id, - clock=args.frequency) - for i in range(cpusPerCluster)] + cluster.cpus = [ + DerivO3CPU(cpu_id=i + cluster.id, clock=args.frequency) + for i in range(cpusPerCluster) + ] all_cpus += cluster.cpus - cluster.l1 = L1(size=args.l1size, assoc = 4) + cluster.l1 = L1(size=args.l1size, assoc=4) all_l1s += [cluster.l1] else: - clusters = [ Cluster() for i in range(args.numclusters)] + clusters = [Cluster() for i in range(args.numclusters)] for j in range(args.numclusters): clusters[j].id = j for cluster in clusters: cluster.clusterbus = L2XBar(clock=busFrequency) all_l1buses += [cluster.clusterbus] - cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id, - clock=args.frequency) - for i in range(cpusPerCluster)] + cluster.cpus = [ + 
AtomicSimpleCPU(cpu_id=i + cluster.id, clock=args.frequency) + for i in range(cpusPerCluster) + ] all_cpus += cluster.cpus - cluster.l1 = L1(size=args.l1size, assoc = 4) + cluster.l1 = L1(size=args.l1size, assoc=4) all_l1s += [cluster.l1] # ---------------------- # Create a system, and add system wide objects # ---------------------- -system = System(cpu = all_cpus, l1_ = all_l1s, l1bus_ = all_l1buses, - physmem = SimpleMemory(), - membus = SystemXBar(clock = busFrequency)) -system.clock = '1GHz' +system = System( + cpu=all_cpus, + l1_=all_l1s, + l1bus_=all_l1buses, + physmem=SimpleMemory(), + membus=SystemXBar(clock=busFrequency), +) +system.clock = "1GHz" -system.toL2bus = L2XBar(clock = busFrequency) -system.l2 = L2(size = args.l2size, assoc = 8) +system.toL2bus = L2XBar(clock=busFrequency) +system.l2 = L2(size=args.l2size, assoc=8) # ---------------------- # Connect the L2 cache and memory together @@ -231,41 +261,43 @@ for cluster in clusters: # Define the root # ---------------------- -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # -------------------- # Pick the correct Splash2 Benchmarks # ==================== -if args.benchmark == 'Cholesky': +if args.benchmark == "Cholesky": root.workload = Cholesky() -elif args.benchmark == 'FFT': +elif args.benchmark == "FFT": root.workload = FFT() -elif args.benchmark == 'LUContig': +elif args.benchmark == "LUContig": root.workload = LU_contig() -elif args.benchmark == 'LUNoncontig': +elif args.benchmark == "LUNoncontig": root.workload = LU_noncontig() -elif args.benchmark == 'Radix': +elif args.benchmark == "Radix": root.workload = Radix() -elif args.benchmark == 'Barnes': +elif args.benchmark == "Barnes": root.workload = Barnes() -elif args.benchmark == 'FMM': +elif args.benchmark == "FMM": root.workload = FMM() -elif args.benchmark == 'OceanContig': +elif args.benchmark == "OceanContig": root.workload = Ocean_contig() -elif args.benchmark == 'OceanNoncontig': +elif 
args.benchmark == "OceanNoncontig": root.workload = Ocean_noncontig() -elif args.benchmark == 'Raytrace': +elif args.benchmark == "Raytrace": root.workload = Raytrace() -elif args.benchmark == 'WaterNSquared': +elif args.benchmark == "WaterNSquared": root.workload = Water_nsquared() -elif args.benchmark == 'WaterSpatial': +elif args.benchmark == "WaterSpatial": root.workload = Water_spatial() else: - m5.util.panic(""" + m5.util.panic( + """ The --benchmark environment variable was set to something improper. Use Cholesky, FFT, LUContig, LUNoncontig, Radix, Barnes, FMM, OceanContig, OceanNoncontig, Raytrace, WaterNSquared, or WaterSpatial -""") +""" + ) # -------------------- # Assign the workload to the cpus @@ -282,7 +314,7 @@ system.workload = SEWorkload.init_compatible(root.workload.executable) # ---------------------- if args.timing or args.detailed: - root.system.mem_mode = 'timing' + root.system.mem_mode = "timing" # instantiate configuration m5.instantiate() @@ -293,5 +325,4 @@ if args.maxtick: else: exit_event = m5.simulate(m5.MaxTick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) - +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/splash2/run.py b/configs/splash2/run.py index 9daf0d7fc5..08c11e0f5a 100644 --- a/configs/splash2/run.py +++ b/configs/splash2/run.py @@ -43,24 +43,22 @@ parser = argparse.ArgumentParser() parser.add_argument("-d", "--detailed", action="store_true") parser.add_argument("-t", "--timing", action="store_true") parser.add_argument("-m", "--maxtick", type=int) -parser.add_argument("-n", "--numcpus", - help="Number of cpus in total", type=int) -parser.add_argument("-f", "--frequency", - default = "1GHz", - help="Frequency of each CPU") -parser.add_argument("--l1size", - default = "32kB") -parser.add_argument("--l1latency", - default = "1ns") -parser.add_argument("--l2size", - default = "256kB") -parser.add_argument("--l2latency", - default = "10ns") 
-parser.add_argument("--rootdir", - help="Root directory of Splash2", - default="/dist/splash2/codes") -parser.add_argument("-b", "--benchmark", - help="Splash 2 benchmark to run") +parser.add_argument( + "-n", "--numcpus", help="Number of cpus in total", type=int +) +parser.add_argument( + "-f", "--frequency", default="1GHz", help="Frequency of each CPU" +) +parser.add_argument("--l1size", default="32kB") +parser.add_argument("--l1latency", default="1ns") +parser.add_argument("--l2size", default="256kB") +parser.add_argument("--l2latency", default="10ns") +parser.add_argument( + "--rootdir", + help="Root directory of Splash2", + default="/dist/splash2/codes", +) +parser.add_argument("-b", "--benchmark", help="Splash 2 benchmark to run") args = parser.parse_args() @@ -72,99 +70,127 @@ if not args.numcpus: # Define Splash2 Benchmarks # ==================== class Cholesky(Process): - cwd = args.rootdir + '/kernels/cholesky' - executable = args.rootdir + '/kernels/cholesky/CHOLESKY' - cmd = ['CHOLESKY', '-p' + str(args.numcpus), - args.rootdir + '/kernels/cholesky/inputs/tk23.O'] + cwd = args.rootdir + "/kernels/cholesky" + executable = args.rootdir + "/kernels/cholesky/CHOLESKY" + cmd = [ + "CHOLESKY", + "-p" + str(args.numcpus), + args.rootdir + "/kernels/cholesky/inputs/tk23.O", + ] + class FFT(Process): - cwd = args.rootdir + '/kernels/fft' - executable = args.rootdir + '/kernels/fft/FFT' - cmd = ['FFT', '-p', str(args.numcpus), '-m18'] + cwd = args.rootdir + "/kernels/fft" + executable = args.rootdir + "/kernels/fft/FFT" + cmd = ["FFT", "-p", str(args.numcpus), "-m18"] + class LU_contig(Process): - executable = args.rootdir + '/kernels/lu/contiguous_blocks/LU' - cmd = ['LU', '-p', str(args.numcpus)] - cwd = args.rootdir + '/kernels/lu/contiguous_blocks' + executable = args.rootdir + "/kernels/lu/contiguous_blocks/LU" + cmd = ["LU", "-p", str(args.numcpus)] + cwd = args.rootdir + "/kernels/lu/contiguous_blocks" + class LU_noncontig(Process): - executable = 
args.rootdir + '/kernels/lu/non_contiguous_blocks/LU' - cmd = ['LU', '-p', str(args.numcpus)] - cwd = args.rootdir + '/kernels/lu/non_contiguous_blocks' + executable = args.rootdir + "/kernels/lu/non_contiguous_blocks/LU" + cmd = ["LU", "-p", str(args.numcpus)] + cwd = args.rootdir + "/kernels/lu/non_contiguous_blocks" + class Radix(Process): - executable = args.rootdir + '/kernels/radix/RADIX' - cmd = ['RADIX', '-n524288', '-p', str(args.numcpus)] - cwd = args.rootdir + '/kernels/radix' + executable = args.rootdir + "/kernels/radix/RADIX" + cmd = ["RADIX", "-n524288", "-p", str(args.numcpus)] + cwd = args.rootdir + "/kernels/radix" + class Barnes(Process): - executable = args.rootdir + '/apps/barnes/BARNES' - cmd = ['BARNES'] - input = args.rootdir + '/apps/barnes/input.p' + str(args.numcpus) - cwd = args.rootdir + '/apps/barnes' + executable = args.rootdir + "/apps/barnes/BARNES" + cmd = ["BARNES"] + input = args.rootdir + "/apps/barnes/input.p" + str(args.numcpus) + cwd = args.rootdir + "/apps/barnes" + class FMM(Process): - executable = args.rootdir + '/apps/fmm/FMM' - cmd = ['FMM'] - if str(args.numcpus) == '1': - input = args.rootdir + '/apps/fmm/inputs/input.2048' + executable = args.rootdir + "/apps/fmm/FMM" + cmd = ["FMM"] + if str(args.numcpus) == "1": + input = args.rootdir + "/apps/fmm/inputs/input.2048" else: - input = args.rootdir + '/apps/fmm/inputs/input.2048.p' + str(args.numcpus) - cwd = args.rootdir + '/apps/fmm' + input = ( + args.rootdir + "/apps/fmm/inputs/input.2048.p" + str(args.numcpus) + ) + cwd = args.rootdir + "/apps/fmm" + class Ocean_contig(Process): - executable = args.rootdir + '/apps/ocean/contiguous_partitions/OCEAN' - cmd = ['OCEAN', '-p', str(args.numcpus)] - cwd = args.rootdir + '/apps/ocean/contiguous_partitions' + executable = args.rootdir + "/apps/ocean/contiguous_partitions/OCEAN" + cmd = ["OCEAN", "-p", str(args.numcpus)] + cwd = args.rootdir + "/apps/ocean/contiguous_partitions" + class Ocean_noncontig(Process): - 
executable = args.rootdir + '/apps/ocean/non_contiguous_partitions/OCEAN' - cmd = ['OCEAN', '-p', str(args.numcpus)] - cwd = args.rootdir + '/apps/ocean/non_contiguous_partitions' + executable = args.rootdir + "/apps/ocean/non_contiguous_partitions/OCEAN" + cmd = ["OCEAN", "-p", str(args.numcpus)] + cwd = args.rootdir + "/apps/ocean/non_contiguous_partitions" + class Raytrace(Process): - executable = args.rootdir + '/apps/raytrace/RAYTRACE' - cmd = ['RAYTRACE', '-p' + str(args.numcpus), - args.rootdir + '/apps/raytrace/inputs/teapot.env'] - cwd = args.rootdir + '/apps/raytrace' + executable = args.rootdir + "/apps/raytrace/RAYTRACE" + cmd = [ + "RAYTRACE", + "-p" + str(args.numcpus), + args.rootdir + "/apps/raytrace/inputs/teapot.env", + ] + cwd = args.rootdir + "/apps/raytrace" + class Water_nsquared(Process): - executable = args.rootdir + '/apps/water-nsquared/WATER-NSQUARED' - cmd = ['WATER-NSQUARED'] - if args.numcpus==1: - input = args.rootdir + '/apps/water-nsquared/input' + executable = args.rootdir + "/apps/water-nsquared/WATER-NSQUARED" + cmd = ["WATER-NSQUARED"] + if args.numcpus == 1: + input = args.rootdir + "/apps/water-nsquared/input" else: - input = args.rootdir + '/apps/water-nsquared/input.p' + str(args.numcpus) - cwd = args.rootdir + '/apps/water-nsquared' + input = ( + args.rootdir + "/apps/water-nsquared/input.p" + str(args.numcpus) + ) + cwd = args.rootdir + "/apps/water-nsquared" + class Water_spatial(Process): - executable = args.rootdir + '/apps/water-spatial/WATER-SPATIAL' - cmd = ['WATER-SPATIAL'] - if args.numcpus==1: - input = args.rootdir + '/apps/water-spatial/input' + executable = args.rootdir + "/apps/water-spatial/WATER-SPATIAL" + cmd = ["WATER-SPATIAL"] + if args.numcpus == 1: + input = args.rootdir + "/apps/water-spatial/input" else: - input = args.rootdir + '/apps/water-spatial/input.p' + str(args.numcpus) - cwd = args.rootdir + '/apps/water-spatial' + input = ( + args.rootdir + "/apps/water-spatial/input.p" + str(args.numcpus) + 
) + cwd = args.rootdir + "/apps/water-spatial" + # -------------------- # Base L1 Cache Definition # ==================== + class L1(Cache): latency = args.l1latency mshrs = 12 tgts_per_mshr = 8 + # ---------------------- # Base L2 Cache Definition # ---------------------- + class L2(Cache): latency = args.l2latency mshrs = 92 tgts_per_mshr = 16 write_buffers = 8 + # ---------------------- # Define the cpus # ---------------------- @@ -172,27 +198,30 @@ class L2(Cache): busFrequency = Frequency(args.frequency) if args.timing: - cpus = [TimingSimpleCPU(cpu_id = i, - clock=args.frequency) - for i in range(args.numcpus)] + cpus = [ + TimingSimpleCPU(cpu_id=i, clock=args.frequency) + for i in range(args.numcpus) + ] elif args.detailed: - cpus = [DerivO3CPU(cpu_id = i, - clock=args.frequency) - for i in range(args.numcpus)] + cpus = [ + DerivO3CPU(cpu_id=i, clock=args.frequency) for i in range(args.numcpus) + ] else: - cpus = [AtomicSimpleCPU(cpu_id = i, - clock=args.frequency) - for i in range(args.numcpus)] + cpus = [ + AtomicSimpleCPU(cpu_id=i, clock=args.frequency) + for i in range(args.numcpus) + ] # ---------------------- # Create a system, and add system wide objects # ---------------------- -system = System(cpu = cpus, physmem = SimpleMemory(), - membus = SystemXBar(clock = busFrequency)) -system.clock = '1GHz' +system = System( + cpu=cpus, physmem=SimpleMemory(), membus=SystemXBar(clock=busFrequency) +) +system.clock = "1GHz" -system.toL2bus = L2XBar(clock = busFrequency) -system.l2 = L2(size = args.l2size, assoc = 8) +system.toL2bus = L2XBar(clock=busFrequency) +system.l2 = L2(size=args.l2size, assoc=8) # ---------------------- # Connect the L2 cache and memory together @@ -207,53 +236,58 @@ system.system_port = system.membus.cpu_side_ports # Connect the L2 cache and clusters together # ---------------------- for cpu in cpus: - cpu.addPrivateSplitL1Caches(L1(size = args.l1size, assoc = 1), - L1(size = args.l1size, assoc = 4)) + cpu.addPrivateSplitL1Caches( + 
L1(size=args.l1size, assoc=1), L1(size=args.l1size, assoc=4) + ) # connect cpu level-1 caches to shared level-2 cache cpu.connectAllPorts( system.toL2bus.cpu_side_ports, system.membus.cpu_side_ports, - system.membus.mem_side_ports) + system.membus.mem_side_ports, + ) # ---------------------- # Define the root # ---------------------- -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) # -------------------- # Pick the correct Splash2 Benchmarks # ==================== -if args.benchmark == 'Cholesky': +if args.benchmark == "Cholesky": root.workload = Cholesky() -elif args.benchmark == 'FFT': +elif args.benchmark == "FFT": root.workload = FFT() -elif args.benchmark == 'LUContig': +elif args.benchmark == "LUContig": root.workload = LU_contig() -elif args.benchmark == 'LUNoncontig': +elif args.benchmark == "LUNoncontig": root.workload = LU_noncontig() -elif args.benchmark == 'Radix': +elif args.benchmark == "Radix": root.workload = Radix() -elif args.benchmark == 'Barnes': +elif args.benchmark == "Barnes": root.workload = Barnes() -elif args.benchmark == 'FMM': +elif args.benchmark == "FMM": root.workload = FMM() -elif args.benchmark == 'OceanContig': +elif args.benchmark == "OceanContig": root.workload = Ocean_contig() -elif args.benchmark == 'OceanNoncontig': +elif args.benchmark == "OceanNoncontig": root.workload = Ocean_noncontig() -elif args.benchmark == 'Raytrace': +elif args.benchmark == "Raytrace": root.workload = Raytrace() -elif args.benchmark == 'WaterNSquared': +elif args.benchmark == "WaterNSquared": root.workload = Water_nsquared() -elif args.benchmark == 'WaterSpatial': +elif args.benchmark == "WaterSpatial": root.workload = Water_spatial() else: - print("The --benchmark environment variable was set to something " - "improper. 
Use Cholesky, FFT, LUContig, LUNoncontig, Radix, " - "Barnes, FMM, OceanContig, OceanNoncontig, Raytrace, WaterNSquared, " - "or WaterSpatial", file=sys.stderr) + print( + "The --benchmark environment variable was set to something " + "improper. Use Cholesky, FFT, LUContig, LUNoncontig, Radix, " + "Barnes, FMM, OceanContig, OceanNoncontig, Raytrace, WaterNSquared, " + "or WaterSpatial", + file=sys.stderr, + ) sys.exit(1) # -------------------- @@ -270,7 +304,7 @@ system.workload = SEWorkload.init_compatible(root.workload.executable) # ---------------------- if args.timing or args.detailed: - root.system.mem_mode = 'timing' + root.system.mem_mode = "timing" # instantiate configuration m5.instantiate() @@ -281,5 +315,4 @@ if args.maxtick: else: exit_event = m5.simulate(m5.MaxTick) -print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) - +print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) diff --git a/configs/topologies/BaseTopology.py b/configs/topologies/BaseTopology.py index 848f230e9f..cdcca3f7eb 100644 --- a/configs/topologies/BaseTopology.py +++ b/configs/topologies/BaseTopology.py @@ -26,41 +26,44 @@ import m5 + class BaseTopology(object): description = "BaseTopology" def __init__(self): - """ When overriding place any objects created in - configs/ruby/.py that are needed in - makeTopology (below) here. The minimum is usually - all of the controllers created in the above file. + """When overriding place any objects created in + configs/ruby/.py that are needed in + makeTopology (below) here. The minimum is usually + all of the controllers created in the above file. """ def makeTopology(self, options, network, IntLink, ExtLink, Router): - """ Called from configs/ruby/Ruby.py - The return value is ( list(Router), list(IntLink), list(ExtLink)) - The API of this function cannot change when subclassing!! 
- Any additional information needed to create this topology should - be passed into the constructor when it's instantiated in - configs/ruby/.py + """Called from configs/ruby/Ruby.py + The return value is ( list(Router), list(IntLink), list(ExtLink)) + The API of this function cannot change when subclassing!! + Any additional information needed to create this topology should + be passed into the constructor when it's instantiated in + configs/ruby/.py """ m5.util.fatal("BaseTopology should have been overridden!!") def registerTopology(self, options): - """ Called from configs/ruby/Ruby.py - There is no return value. This should only be called in - SE mode. It is used by some topology objects to populate - the faux filesystem with accurate file contents. - No need to implement if not using FilesystemRegister - functionality. + """Called from configs/ruby/Ruby.py + There is no return value. This should only be called in + SE mode. It is used by some topology objects to populate + the faux filesystem with accurate file contents. + No need to implement if not using FilesystemRegister + functionality. """ + class SimpleTopology(BaseTopology): - """ Provides methods needed for the topologies included in Ruby before - topology changes. - These topologies are "simple" in the sense that they only use a flat - list of controllers to construct the topology. + """Provides methods needed for the topologies included in Ruby before + topology changes. + These topologies are "simple" in the sense that they only use a flat + list of controllers to construct the topology. 
""" + description = "SimpleTopology" def __init__(self, controllers): diff --git a/configs/topologies/Cluster.py b/configs/topologies/Cluster.py index 5d292c9d32..e0504f6043 100644 --- a/configs/topologies/Cluster.py +++ b/configs/topologies/Cluster.py @@ -26,11 +26,12 @@ from topologies.BaseTopology import BaseTopology + class Cluster(BaseTopology): - """ A cluster is a group of nodes which are all one hop from eachother - Clusters can also contain other clusters - When creating this kind of topology, return a single cluster (usually - the root cluster) from create_system in configs/ruby/.py + """A cluster is a group of nodes which are all one hop from eachother + Clusters can also contain other clusters + When creating this kind of topology, return a single cluster (usually + the root cluster) from create_system in configs/ruby/.py """ _num_int_links = 0 @@ -42,26 +43,28 @@ class Cluster(BaseTopology): def num_int_links(cls): cls._num_int_links += 1 return cls._num_int_links - 1 + @classmethod def num_ext_links(cls): cls._num_ext_links += 1 return cls._num_ext_links - 1 + @classmethod def num_routers(cls): cls._num_routers += 1 return cls._num_routers - 1 def __init__(self, intBW=0, extBW=0, intLatency=0, extLatency=0): - """ internalBandwidth is bandwidth of all links within the cluster - externalBandwidth is bandwidth from this cluster to any cluster - connecting to it. - internal/externalLatency are similar - **** When creating a cluster with sub-clusters, the sub-cluster - external bandwidth overrides the internal bandwidth of the - super cluster + """internalBandwidth is bandwidth of all links within the cluster + externalBandwidth is bandwidth from this cluster to any cluster + connecting to it. 
+ internal/externalLatency are similar + **** When creating a cluster with sub-clusters, the sub-cluster + external bandwidth overrides the internal bandwidth of the + super cluster """ self.nodes = [] - self.router = None # created in makeTopology + self.router = None # created in makeTopology self.intBW = intBW self.extBW = extBW self.intLatency = intLatency @@ -71,8 +74,7 @@ class Cluster(BaseTopology): self.nodes.append(node) def makeTopology(self, options, network, IntLink, ExtLink, Router): - """ Recursively make all of the links and routers - """ + """Recursively make all of the links and routers""" # make a router to connect all of the nodes self.router = Router(router_id=self.num_routers()) @@ -80,14 +82,19 @@ class Cluster(BaseTopology): for node in self.nodes: if type(node) == Cluster: - node.makeTopology(options, network, IntLink, - ExtLink, Router) + node.makeTopology(options, network, IntLink, ExtLink, Router) # connect this cluster to the router - link_out = IntLink(link_id=self.num_int_links(), src_node=self.router, - dst_node=node.router) - link_in = IntLink(link_id=self.num_int_links(), src_node=node.router, - dst_node=self.router) + link_out = IntLink( + link_id=self.num_int_links(), + src_node=self.router, + dst_node=node.router, + ) + link_in = IntLink( + link_id=self.num_int_links(), + src_node=node.router, + dst_node=self.router, + ) if node.extBW: link_out.bandwidth_factor = node.extBW @@ -111,8 +118,11 @@ class Cluster(BaseTopology): else: # node is just a controller, # connect it to the router via a ext_link - link = ExtLink(link_id=self.num_ext_links(), ext_node=node, - int_node=self.router) + link = ExtLink( + link_id=self.num_ext_links(), + ext_node=node, + int_node=self.router, + ) if self.intBW: link.bandwidth_factor = self.intBW @@ -122,5 +132,6 @@ class Cluster(BaseTopology): network.ext_links.append(link) def __len__(self): - return len([i for i in self.nodes if type(i) != Cluster]) + \ - sum([len(i) for i in self.nodes if type(i) 
== Cluster]) + return len([i for i in self.nodes if type(i) != Cluster]) + sum( + [len(i) for i in self.nodes if type(i) == Cluster] + ) diff --git a/configs/topologies/Crossbar.py b/configs/topologies/Crossbar.py index 63e90bd4d7..e0d220a0fd 100644 --- a/configs/topologies/Crossbar.py +++ b/configs/topologies/Crossbar.py @@ -29,15 +29,16 @@ from m5.objects import * from topologies.BaseTopology import SimpleTopology + class Crossbar(SimpleTopology): - description='Crossbar' + description = "Crossbar" def makeTopology(self, options, network, IntLink, ExtLink, Router): # default values for link latency and router latency. # Can be over-ridden on a per link/router basis - link_latency = options.link_latency # used by simple and garnet - router_latency = options.router_latency # only used by garnet + link_latency = options.link_latency # used by simple and garnet + router_latency = options.router_latency # only used by garnet # Create an individual router for each controller plus one more for # the centralized crossbar. The large numbers of routers are needed @@ -45,30 +46,46 @@ class Crossbar(SimpleTopology): # simple network, but internal links do. 
# For garnet, one router suffices, use CrossbarGarnet.py - routers = [Router(router_id=i) for i in range(len(self.nodes)+1)] - xbar = routers[len(self.nodes)] # the crossbar router is the last router created + routers = [Router(router_id=i) for i in range(len(self.nodes) + 1)] + xbar = routers[ + len(self.nodes) + ] # the crossbar router is the last router created network.routers = routers - ext_links = [ExtLink(link_id=i, ext_node=n, int_node=routers[i], - latency = link_latency) - for (i, n) in enumerate(self.nodes)] + ext_links = [ + ExtLink( + link_id=i, + ext_node=n, + int_node=routers[i], + latency=link_latency, + ) + for (i, n) in enumerate(self.nodes) + ] network.ext_links = ext_links link_count = len(self.nodes) int_links = [] for i in range(len(self.nodes)): - int_links.append(IntLink(link_id=(link_count+i), - src_node=routers[i], - dst_node=xbar, - latency = link_latency)) + int_links.append( + IntLink( + link_id=(link_count + i), + src_node=routers[i], + dst_node=xbar, + latency=link_latency, + ) + ) link_count += len(self.nodes) for i in range(len(self.nodes)): - int_links.append(IntLink(link_id=(link_count+i), - src_node=xbar, - dst_node=routers[i], - latency = link_latency)) + int_links.append( + IntLink( + link_id=(link_count + i), + src_node=xbar, + dst_node=routers[i], + latency=link_latency, + ) + ) network.int_links = int_links diff --git a/configs/topologies/CrossbarGarnet.py b/configs/topologies/CrossbarGarnet.py index db7dc27476..603e1dfd35 100644 --- a/configs/topologies/CrossbarGarnet.py +++ b/configs/topologies/CrossbarGarnet.py @@ -29,8 +29,9 @@ from m5.objects import * from topologies.BaseTopology import SimpleTopology + class CrossbarGarnet(SimpleTopology): - description='CrossbarGarnet' + description = "CrossbarGarnet" def makeTopology(self, options, network, IntLink, ExtLink, Router): # Create one router in Garnet. 
Internally models a crossbar and @@ -40,8 +41,10 @@ class CrossbarGarnet(SimpleTopology): xbar = Router(router_id=0) network.routers = xbar - ext_links = [ExtLink(link_id=i, ext_node=n, int_node=xbar) - for (i, n) in enumerate(self.nodes)] + ext_links = [ + ExtLink(link_id=i, ext_node=n, int_node=xbar) + for (i, n) in enumerate(self.nodes) + ] network.ext_links = ext_links int_links = [] diff --git a/configs/topologies/CustomMesh.py b/configs/topologies/CustomMesh.py index 2519bddaf0..088e4b9cfe 100644 --- a/configs/topologies/CustomMesh.py +++ b/configs/topologies/CustomMesh.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 ARM Limited +# Copyright (c) 2021,2022 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall @@ -41,23 +41,32 @@ from m5.params import * from m5.objects import * from m5.defines import buildEnv -if buildEnv['PROTOCOL'] == 'CHI': + +if buildEnv["PROTOCOL"] == "CHI": import ruby.CHI_config as CHI from topologies.BaseTopology import SimpleTopology + class CustomMesh(SimpleTopology): - description = 'CustomMesh' + description = "CustomMesh" def __init__(self, controllers): self.nodes = controllers - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- # _makeMesh - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- - def _makeMesh(self, IntLink, link_latency, num_rows, num_columns, - cross_links, cross_link_latency): + def _makeMesh( + self, + IntLink, + link_latency, + num_rows, + num_columns, + cross_links, + cross_link_latency, + ): # East->West, West->East, North->South, South->North # XY routing weights @@ -66,99 +75,124 @@ class CustomMesh(SimpleTopology): # East output to West input links for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < 
num_columns: east_out = col + (row * num_columns) west_in = (col + 1) + (row * num_columns) - llat = cross_link_latency \ - if (east_out, west_in) in cross_links \ - else link_latency - self._int_links.append(\ - IntLink(link_id=self._link_count, - src_node=self._routers[east_out], - dst_node=self._routers[west_in], - dst_inport="West", - latency = llat, - weight=link_weights[0])) + llat = ( + cross_link_latency + if (east_out, west_in) in cross_links + else link_latency + ) + self._int_links.append( + IntLink( + link_id=self._link_count, + src_node=self._routers[east_out], + dst_node=self._routers[west_in], + dst_inport="West", + latency=llat, + weight=link_weights[0], + ) + ) self._link_count += 1 # West output to East input links for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_in = col + (row * num_columns) west_out = (col + 1) + (row * num_columns) - llat = cross_link_latency \ - if (west_out, east_in) in cross_links \ - else link_latency - self._int_links.append(\ - IntLink(link_id=self._link_count, - src_node=self._routers[west_out], - dst_node=self._routers[east_in], - dst_inport="East", - latency = llat, - weight=link_weights[1])) + llat = ( + cross_link_latency + if (west_out, east_in) in cross_links + else link_latency + ) + self._int_links.append( + IntLink( + link_id=self._link_count, + src_node=self._routers[west_out], + dst_node=self._routers[east_in], + dst_inport="East", + latency=llat, + weight=link_weights[1], + ) + ) self._link_count += 1 # North output to South input links for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_out = col + (row * num_columns) south_in = col + ((row + 1) * num_columns) - llat = cross_link_latency \ - if (north_out, south_in) in cross_links \ - else link_latency - self._int_links.append(\ - IntLink(link_id=self._link_count, - src_node=self._routers[north_out], - 
dst_node=self._routers[south_in], - dst_inport="South", - latency = llat, - weight=link_weights[2])) + llat = ( + cross_link_latency + if (north_out, south_in) in cross_links + else link_latency + ) + self._int_links.append( + IntLink( + link_id=self._link_count, + src_node=self._routers[north_out], + dst_node=self._routers[south_in], + dst_inport="South", + latency=llat, + weight=link_weights[2], + ) + ) self._link_count += 1 # South output to North input links for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_in = col + (row * num_columns) south_out = col + ((row + 1) * num_columns) - llat = cross_link_latency \ - if (south_out, north_in) in cross_links \ - else link_latency - self._int_links.append(\ - IntLink(link_id=self._link_count, - src_node=self._routers[south_out], - dst_node=self._routers[north_in], - dst_inport="North", - latency = llat, - weight=link_weights[3])) + llat = ( + cross_link_latency + if (south_out, north_in) in cross_links + else link_latency + ) + self._int_links.append( + IntLink( + link_id=self._link_count, + src_node=self._routers[south_out], + dst_node=self._routers[north_in], + dst_inport="North", + latency=llat, + weight=link_weights[3], + ) + ) self._link_count += 1 - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- # distributeNodes - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- def _createRNFRouter(self, mesh_router): # Create a zero-latency router bridging node controllers # and the mesh router - node_router = self._Router(router_id = len(self._routers), - latency = 0) + node_router = self._Router(router_id=len(self._routers), latency=0) self._routers.append(node_router) # connect node_router <-> mesh router - self._int_links.append(self._IntLink( \ - 
link_id = self._link_count, - src_node = node_router, - dst_node = mesh_router, - latency = self._router_link_latency)) + self._int_links.append( + self._IntLink( + link_id=self._link_count, + src_node=node_router, + dst_node=mesh_router, + latency=self._router_link_latency, + ) + ) self._link_count += 1 - self._int_links.append(self._IntLink( \ - link_id = self._link_count, - src_node = mesh_router, - dst_node = node_router, - latency = self._router_link_latency)) + self._int_links.append( + self._IntLink( + link_id=self._link_count, + src_node=mesh_router, + dst_node=node_router, + latency=self._router_link_latency, + ) + ) self._link_count += 1 return node_router @@ -172,7 +206,9 @@ class CustomMesh(SimpleTopology): if num_nodes_per_router: # evenly distribute nodes to all listed routers - assert(len(router_idx_list)*num_nodes_per_router == len(node_list)) + assert len(router_idx_list) * num_nodes_per_router == len( + node_list + ) for idx, node in enumerate(node_list): mesh_router_idx = router_idx_list[idx // num_nodes_per_router] @@ -187,11 +223,14 @@ class CustomMesh(SimpleTopology): # connect all ctrls in the node to node_router ctrls = node.getNetworkSideControllers() for c in ctrls: - self._ext_links.append(self._ExtLink( - link_id = self._link_count, - ext_node = c, - int_node = router, - latency = self._node_link_latency)) + self._ext_links.append( + self._ExtLink( + link_id=self._link_count, + ext_node=c, + int_node=router, + latency=self._node_link_latency, + ) + ) self._link_count += 1 else: # try to circulate all nodes to all routers, some routers may be @@ -205,20 +244,23 @@ class CustomMesh(SimpleTopology): router = self._createRNFRouter(router) ctrls = node.getNetworkSideControllers() for c in ctrls: - self._ext_links.append(self._ExtLink( \ - link_id = self._link_count, - ext_node = c, - int_node = router, - latency = self._node_link_latency)) + self._ext_links.append( + self._ExtLink( + link_id=self._link_count, + ext_node=c, + int_node=router, 
+ latency=self._node_link_latency, + ) + ) self._link_count += 1 idx = (idx + 1) % len(router_idx_list) - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- # makeTopology - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- def makeTopology(self, options, network, IntLink, ExtLink, Router): - assert(buildEnv['PROTOCOL'] == 'CHI') + assert buildEnv["PROTOCOL"] == "CHI" num_rows = options.num_rows num_cols = options.num_cols @@ -228,7 +270,7 @@ class CustomMesh(SimpleTopology): self._ExtLink = ExtLink self._Router = Router - if hasattr(options, 'router_link_latency'): + if hasattr(options, "router_link_latency"): self._router_link_latency = options.router_link_latency self._node_link_latency = options.node_link_latency else: @@ -256,7 +298,7 @@ class CustomMesh(SimpleTopology): rni_io_params = None def check_same(val, curr): - assert(curr == None or curr == val) + assert curr == None or curr == val return val for n in self.nodes: @@ -282,20 +324,31 @@ class CustomMesh(SimpleTopology): rni_io_nodes.append(n) rni_io_params = check_same(type(n).NoC_Params, rni_io_params) else: - fatal('topologies.CustomMesh: {} not supported' - .format(n.__class__.__name__)) + fatal( + "topologies.CustomMesh: {} not supported".format( + n.__class__.__name__ + ) + ) # Create all mesh routers - self._routers = [Router(router_id=i, latency = options.router_latency)\ - for i in range(num_mesh_routers)] + self._routers = [ + Router(router_id=i, latency=options.router_latency) + for i in range(num_mesh_routers) + ] self._link_count = 0 self._int_links = [] self._ext_links = [] # Create all the mesh internal links. 
- self._makeMesh(IntLink, self._router_link_latency, num_rows, num_cols, - options.cross_links, options.cross_link_latency) + self._makeMesh( + IntLink, + self._router_link_latency, + num_rows, + num_cols, + options.cross_links, + options.cross_link_latency, + ) # Place CHI_RNF on the mesh self.distributeNodes(rnf_params, rnf_nodes) @@ -304,7 +357,7 @@ class CustomMesh(SimpleTopology): self.distributeNodes(hnf_params, hnf_nodes) # Place CHI_MN on the mesh - self.distributeNodes(options, mn_params, mn_nodes) + self.distributeNodes(mn_params, mn_nodes) # Place CHI_SNF_MainMem on the mesh self.distributeNodes(mem_params, mem_nodes) @@ -319,15 +372,19 @@ class CustomMesh(SimpleTopology): # Set up network.int_links = self._int_links network.ext_links = self._ext_links + # fix Routers being set as link child + for r in self._routers: + if r.has_parent(): + r.get_parent().clear_child(r.get_name()) network.routers = self._routers - pairing = getattr(options, 'pairing', None) + pairing = getattr(options, "pairing", None) if pairing != None: self._autoPairHNFandSNF(hnf_list, mem_ctrls, pairing) - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- # _autoPair - #-------------------------------------------------------------------------- + # -------------------------------------------------------------------------- def _autoPairHNFandSNF(self, cache_ctrls, mem_ctrls, pairing): # Use the pairing defined by the configuration to reassign the # memory ranges @@ -337,49 +394,57 @@ class CustomMesh(SimpleTopology): print(pairing) all_cache = [] - for c in cache_ctrls: all_cache.extend(c.getNetworkSideControllers()) + for c in cache_ctrls: + all_cache.extend(c.getNetworkSideControllers()) all_mem = [] - for c in mem_ctrls: all_mem.extend(c.getNetworkSideControllers()) + for c in mem_ctrls: + all_mem.extend(c.getNetworkSideControllers()) # checks and maps index from pairing map to 
component - assert(len(pairing) == len(all_cache)) + assert len(pairing) == len(all_cache) - def _tolist(val): return val if isinstance(val, list) else [val] + def _tolist(val): + return val if isinstance(val, list) else [val] - for m in all_mem: m._pairing = [] + for m in all_mem: + m._pairing = [] pairing_check = max(1, len(all_mem) / len(all_cache)) - for cidx,c in enumerate(all_cache): + for cidx, c in enumerate(all_cache): c._pairing = [] for midx in _tolist(pairing[cidx]): c._pairing.append(all_mem[midx]) if c not in all_mem[midx]._pairing: all_mem[midx]._pairing.append(c) - assert(len(c._pairing) == pairing_check) + assert len(c._pairing) == pairing_check if pair_debug: print(c.path()) for r in c.addr_ranges: print("%s" % r) for p in c._pairing: - print("\t"+p.path()) + print("\t" + p.path()) for r in p.addr_ranges: print("\t%s" % r) # all must be paired - for c in all_cache: assert(len(c._pairing) > 0) - for m in all_mem: assert(len(m._pairing) > 0) + for c in all_cache: + assert len(c._pairing) > 0 + for m in all_mem: + assert len(m._pairing) > 0 # only support a single range for the main memory controllers tgt_range_start = all_mem[0].addr_ranges[0].start.value for mem in all_mem: for r in mem.addr_ranges: if r.start.value != tgt_range_start: - fatal('topologies.CustomMesh: not supporting pairing of '\ - 'main memory with multiple ranges') + fatal( + "topologies.CustomMesh: not supporting pairing of " + "main memory with multiple ranges" + ) # reassign ranges for a 1 -> N paring def _rerange(src_cntrls, tgt_cntrls, fix_tgt_peer): - assert(len(tgt_cntrls) >= len(src_cntrls)) + assert len(tgt_cntrls) >= len(src_cntrls) def _rangeToBit(addr_ranges): bit = None @@ -387,14 +452,14 @@ class CustomMesh(SimpleTopology): if bit == None: bit = r.intlvMatch else: - assert(bit == r.intlvMatch) + assert bit == r.intlvMatch return bit def _getPeer(cntrl): return cntrl.memory_out_port.peer.simobj sorted_src = list(src_cntrls) - sorted_src.sort(key = lambda x: 
_rangeToBit(x.addr_ranges)) + sorted_src.sort(key=lambda x: _rangeToBit(x.addr_ranges)) # paired controllers need to have seq. interleaving match values intlvMatch = 0 @@ -414,17 +479,16 @@ class CustomMesh(SimpleTopology): new_src_mask = [] for m in src_range.masks: # TODO should mask all the way to the max range size - new_src_mask.append(m | (m*2) | (m*4) | - (m*8) | (m*16)) + new_src_mask.append( + m | (m * 2) | (m * 4) | (m * 8) | (m * 16) + ) for tgt in src._pairing: paired = False for tgt_range in tgt.addr_ranges: - if tgt_range.start.value == \ - src_range.start.value: + if tgt_range.start.value == src_range.start.value: src_range.masks = new_src_mask new_tgt_mask = [] - lsbs = len(tgt_range.masks) - \ - len(new_src_mask) + lsbs = len(tgt_range.masks) - len(new_src_mask) for i in range(lsbs): new_tgt_mask.append(tgt_range.masks[i]) for m in new_src_mask: @@ -434,9 +498,13 @@ class CustomMesh(SimpleTopology): _getPeer(tgt).range.masks = new_tgt_mask paired = True if not paired: - fatal('topologies.CustomMesh: could not ' \ - 'reassign ranges {} {}'.format( - src.path(), tgt.path())) + fatal( + "topologies.CustomMesh: could not " + "reassign ranges {} {}".format( + src.path(), tgt.path() + ) + ) + if len(all_mem) >= len(all_cache): _rerange(all_cache, all_mem, True) else: @@ -444,14 +512,12 @@ class CustomMesh(SimpleTopology): if pair_debug: print("") - for cidx,c in enumerate(all_cache): - assert(len(c._pairing) == pairing_check) + for cidx, c in enumerate(all_cache): + assert len(c._pairing) == pairing_check print(c.path()) for r in c.addr_ranges: print("%s" % r) for p in c._pairing: - print("\t"+p.path()) + print("\t" + p.path()) for r in p.addr_ranges: print("\t%s" % r) - - diff --git a/configs/topologies/MeshDirCorners_XY.py b/configs/topologies/MeshDirCorners_XY.py index b4100ff5c6..6faf340c5b 100644 --- a/configs/topologies/MeshDirCorners_XY.py +++ b/configs/topologies/MeshDirCorners_XY.py @@ -35,8 +35,9 @@ from topologies.BaseTopology import 
SimpleTopology # One L1 (and L2, depending on the protocol) are connected to each router. # XY routing is enforced (using link weights) to guarantee deadlock freedom. + class MeshDirCorners_XY(SimpleTopology): - description='MeshDirCorners_XY' + description = "MeshDirCorners_XY" def __init__(self, controllers): self.nodes = controllers @@ -49,37 +50,40 @@ class MeshDirCorners_XY(SimpleTopology): # default values for link latency and router latency. # Can be over-ridden on a per link/router basis - link_latency = options.link_latency # used by simple and garnet - router_latency = options.router_latency # only used by garnet - + link_latency = options.link_latency # used by simple and garnet + router_latency = options.router_latency # only used by garnet # First determine which nodes are cache cntrls vs. dirs vs. dma cache_nodes = [] dir_nodes = [] dma_nodes = [] for node in nodes: - if node.type == 'L1Cache_Controller' or \ - node.type == 'L2Cache_Controller': + if ( + node.type == "L1Cache_Controller" + or node.type == "L2Cache_Controller" + ): cache_nodes.append(node) - elif node.type == 'Directory_Controller': + elif node.type == "Directory_Controller": dir_nodes.append(node) - elif node.type == 'DMA_Controller': + elif node.type == "DMA_Controller": dma_nodes.append(node) # Obviously the number or rows must be <= the number of routers # and evenly divisible. Also the number of caches must be a # multiple of the number of routers and the number of directories # must be four. 
- assert(num_rows > 0 and num_rows <= num_routers) + assert num_rows > 0 and num_rows <= num_routers num_columns = int(num_routers / num_rows) - assert(num_columns * num_rows == num_routers) + assert num_columns * num_rows == num_routers caches_per_router, remainder = divmod(len(cache_nodes), num_routers) - assert(remainder == 0) - assert(len(dir_nodes) == 4) + assert remainder == 0 + assert len(dir_nodes) == 4 # Create the routers in the mesh - routers = [Router(router_id=i, latency = router_latency) \ - for i in range(num_routers)] + routers = [ + Router(router_id=i, latency=router_latency) + for i in range(num_routers) + ] network.routers = routers # link counter to set unique link ids @@ -89,24 +93,26 @@ class MeshDirCorners_XY(SimpleTopology): ext_links = [] for (i, n) in enumerate(cache_nodes): cntrl_level, router_id = divmod(i, num_routers) - assert(cntrl_level < caches_per_router) - ext_links.append(ExtLink(link_id=link_count, ext_node=n, - int_node=routers[router_id], - latency = link_latency)) + assert cntrl_level < caches_per_router + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=n, + int_node=routers[router_id], + latency=link_latency, + ) + ) link_count += 1 # NUMA Node for each quadrant # With odd columns or rows, the nodes will be unequal - numa_nodes = [ [], [], [], []] + numa_nodes = [[], [], [], []] for i in range(num_routers): - if i % num_columns < num_columns / 2 and \ - i < num_routers / 2: + if i % num_columns < num_columns / 2 and i < num_routers / 2: numa_nodes[0].append(i) - elif i % num_columns >= num_columns / 2 and \ - i < num_routers / 2: + elif i % num_columns >= num_columns / 2 and i < num_routers / 2: numa_nodes[1].append(i) - elif i % num_columns < num_columns / 2 and \ - i >= num_routers / 2: + elif i % num_columns < num_columns / 2 and i >= num_routers / 2: numa_nodes[2].append(i) else: numa_nodes[3].append(i) @@ -117,29 +123,54 @@ class MeshDirCorners_XY(SimpleTopology): num_numa_nodes += 1 # Connect the dir 
nodes to the corners. - ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[0], - int_node=routers[0], - latency = link_latency)) + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=dir_nodes[0], + int_node=routers[0], + latency=link_latency, + ) + ) link_count += 1 - ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[1], - int_node=routers[num_columns - 1], - latency = link_latency)) + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=dir_nodes[1], + int_node=routers[num_columns - 1], + latency=link_latency, + ) + ) link_count += 1 - ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[2], - int_node=routers[num_routers - num_columns], - latency = link_latency)) + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=dir_nodes[2], + int_node=routers[num_routers - num_columns], + latency=link_latency, + ) + ) link_count += 1 - ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[3], - int_node=routers[num_routers - 1], - latency = link_latency)) + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=dir_nodes[3], + int_node=routers[num_routers - 1], + latency=link_latency, + ) + ) link_count += 1 # Connect the dma nodes to router 0. These should only be DMA nodes. 
for (i, node) in enumerate(dma_nodes): - assert(node.type == 'DMA_Controller') - ext_links.append(ExtLink(link_id=link_count, ext_node=node, - int_node=routers[0], - latency = link_latency)) + assert node.type == "DMA_Controller" + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=node, + int_node=routers[0], + latency=link_latency, + ) + ) network.ext_links = ext_links @@ -149,64 +180,79 @@ class MeshDirCorners_XY(SimpleTopology): # East output to West input links (weight = 1) for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_out = col + (row * num_columns) west_in = (col + 1) + (row * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[east_out], - dst_node=routers[west_in], - src_outport="East", - dst_inport="West", - latency = link_latency, - weight=1)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[east_out], + dst_node=routers[west_in], + src_outport="East", + dst_inport="West", + latency=link_latency, + weight=1, + ) + ) link_count += 1 # West output to East input links (weight = 1) for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_in = col + (row * num_columns) west_out = (col + 1) + (row * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[west_out], - dst_node=routers[east_in], - src_outport="West", - dst_inport="East", - latency = link_latency, - weight=1)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[west_out], + dst_node=routers[east_in], + src_outport="West", + dst_inport="East", + latency=link_latency, + weight=1, + ) + ) link_count += 1 # North output to South input links (weight = 2) for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_out = col + (row * num_columns) south_in = col + ((row + 1) * num_columns) - 
int_links.append(IntLink(link_id=link_count, - src_node=routers[north_out], - dst_node=routers[south_in], - src_outport="North", - dst_inport="South", - latency = link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[north_out], + dst_node=routers[south_in], + src_outport="North", + dst_inport="South", + latency=link_latency, + weight=2, + ) + ) link_count += 1 # South output to North input links (weight = 2) for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_in = col + (row * num_columns) south_out = col + ((row + 1) * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[south_out], - dst_node=routers[north_in], - src_outport="South", - dst_inport="North", - latency = link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[south_out], + dst_node=routers[north_in], + src_outport="South", + dst_inport="North", + latency=link_latency, + weight=2, + ) + ) link_count += 1 - network.int_links = int_links # Register nodes with filesystem @@ -214,7 +260,7 @@ class MeshDirCorners_XY(SimpleTopology): i = 0 for n in numa_nodes: if n: - FileSystemConfig.register_node(n, - MemorySize(options.mem_size) // num_numa_nodes, i) + FileSystemConfig.register_node( + n, MemorySize(options.mem_size) // num_numa_nodes, i + ) i += 1 - diff --git a/configs/topologies/Mesh_XY.py b/configs/topologies/Mesh_XY.py index 8926bcd29c..94cb770750 100644 --- a/configs/topologies/Mesh_XY.py +++ b/configs/topologies/Mesh_XY.py @@ -37,8 +37,9 @@ from topologies.BaseTopology import SimpleTopology # XY routing is enforced (using link weights) # to guarantee deadlock freedom. + class Mesh_XY(SimpleTopology): - description='Mesh_XY' + description = "Mesh_XY" def __init__(self, controllers): self.nodes = controllers @@ -54,20 +55,21 @@ class Mesh_XY(SimpleTopology): # default values for link latency and router latency. 
# Can be over-ridden on a per link/router basis - link_latency = options.link_latency # used by simple and garnet - router_latency = options.router_latency # only used by garnet - + link_latency = options.link_latency # used by simple and garnet + router_latency = options.router_latency # only used by garnet # There must be an evenly divisible number of cntrls to routers # Also, obviously the number or rows must be <= the number of routers cntrls_per_router, remainder = divmod(len(nodes), num_routers) - assert(num_rows > 0 and num_rows <= num_routers) + assert num_rows > 0 and num_rows <= num_routers num_columns = int(num_routers / num_rows) - assert(num_columns * num_rows == num_routers) + assert num_columns * num_rows == num_routers # Create the routers in the mesh - routers = [Router(router_id=i, latency = router_latency) \ - for i in range(num_routers)] + routers = [ + Router(router_id=i, latency=router_latency) + for i in range(num_routers) + ] network.routers = routers # link counter to set unique link ids @@ -87,20 +89,30 @@ class Mesh_XY(SimpleTopology): ext_links = [] for (i, n) in enumerate(network_nodes): cntrl_level, router_id = divmod(i, num_routers) - assert(cntrl_level < cntrls_per_router) - ext_links.append(ExtLink(link_id=link_count, ext_node=n, - int_node=routers[router_id], - latency = link_latency)) + assert cntrl_level < cntrls_per_router + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=n, + int_node=routers[router_id], + latency=link_latency, + ) + ) link_count += 1 # Connect the remainding nodes to router 0. These should only be # DMA nodes. 
for (i, node) in enumerate(remainder_nodes): - assert(node.type == 'DMA_Controller') - assert(i < remainder) - ext_links.append(ExtLink(link_id=link_count, ext_node=node, - int_node=routers[0], - latency = link_latency)) + assert node.type == "DMA_Controller" + assert i < remainder + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=node, + int_node=routers[0], + latency=link_latency, + ) + ) link_count += 1 network.ext_links = ext_links @@ -111,68 +123,84 @@ class Mesh_XY(SimpleTopology): # East output to West input links (weight = 1) for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_out = col + (row * num_columns) west_in = (col + 1) + (row * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[east_out], - dst_node=routers[west_in], - src_outport="East", - dst_inport="West", - latency = link_latency, - weight=1)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[east_out], + dst_node=routers[west_in], + src_outport="East", + dst_inport="West", + latency=link_latency, + weight=1, + ) + ) link_count += 1 # West output to East input links (weight = 1) for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_in = col + (row * num_columns) west_out = (col + 1) + (row * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[west_out], - dst_node=routers[east_in], - src_outport="West", - dst_inport="East", - latency = link_latency, - weight=1)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[west_out], + dst_node=routers[east_in], + src_outport="West", + dst_inport="East", + latency=link_latency, + weight=1, + ) + ) link_count += 1 # North output to South input links (weight = 2) for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_out = col + (row * num_columns) 
south_in = col + ((row + 1) * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[north_out], - dst_node=routers[south_in], - src_outport="North", - dst_inport="South", - latency = link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[north_out], + dst_node=routers[south_in], + src_outport="North", + dst_inport="South", + latency=link_latency, + weight=2, + ) + ) link_count += 1 # South output to North input links (weight = 2) for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_in = col + (row * num_columns) south_out = col + ((row + 1) * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[south_out], - dst_node=routers[north_in], - src_outport="South", - dst_inport="North", - latency = link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[south_out], + dst_node=routers[north_in], + src_outport="South", + dst_inport="North", + latency=link_latency, + weight=2, + ) + ) link_count += 1 - network.int_links = int_links # Register nodes with filesystem def registerTopology(self, options): for i in range(options.num_cpus): - FileSystemConfig.register_node([i], - MemorySize(options.mem_size) // options.num_cpus, i) + FileSystemConfig.register_node( + [i], MemorySize(options.mem_size) // options.num_cpus, i + ) diff --git a/configs/topologies/Mesh_westfirst.py b/configs/topologies/Mesh_westfirst.py index 9b73c05bc9..663c31e2cf 100644 --- a/configs/topologies/Mesh_westfirst.py +++ b/configs/topologies/Mesh_westfirst.py @@ -40,8 +40,9 @@ from topologies.BaseTopology import SimpleTopology # is always chosen based on which appears first inside the # routing table. 
+ class Mesh_westfirst(SimpleTopology): - description='Mesh_westfirst' + description = "Mesh_westfirst" def __init__(self, controllers): self.nodes = controllers @@ -57,19 +58,21 @@ class Mesh_westfirst(SimpleTopology): # default values for link latency and router latency. # Can be over-ridden on a per link/router basis - link_latency = options.link_latency # used by simple and garnet - router_latency = options.router_latency # only used by garnet + link_latency = options.link_latency # used by simple and garnet + router_latency = options.router_latency # only used by garnet # There must be an evenly divisible number of cntrls to routers # Also, obviously the number or rows must be <= the number of routers cntrls_per_router, remainder = divmod(len(nodes), num_routers) - assert(num_rows > 0 and num_rows <= num_routers) + assert num_rows > 0 and num_rows <= num_routers num_columns = int(num_routers / num_rows) - assert(num_columns * num_rows == num_routers) + assert num_columns * num_rows == num_routers # Create the routers in the mesh - routers = [Router(router_id=i, latency=router_latency) \ - for i in range(num_routers)] + routers = [ + Router(router_id=i, latency=router_latency) + for i in range(num_routers) + ] network.routers = routers # link counter to set unique link ids @@ -89,20 +92,30 @@ class Mesh_westfirst(SimpleTopology): ext_links = [] for (i, n) in enumerate(network_nodes): cntrl_level, router_id = divmod(i, num_routers) - assert(cntrl_level < cntrls_per_router) - ext_links.append(ExtLink(link_id=link_count, ext_node=n, - int_node=routers[router_id], - latency = link_latency)) + assert cntrl_level < cntrls_per_router + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=n, + int_node=routers[router_id], + latency=link_latency, + ) + ) link_count += 1 # Connect the remainding nodes to router 0. These should only be # DMA nodes. 
for (i, node) in enumerate(remainder_nodes): - assert(node.type == 'DMA_Controller') - assert(i < remainder) - ext_links.append(ExtLink(link_id=link_count, ext_node=node, - int_node=routers[0], - latency = link_latency)) + assert node.type == "DMA_Controller" + assert i < remainder + ext_links.append( + ExtLink( + link_id=link_count, + ext_node=node, + int_node=routers[0], + latency=link_latency, + ) + ) link_count += 1 network.ext_links = ext_links @@ -113,55 +126,69 @@ class Mesh_westfirst(SimpleTopology): # East output to West input links (weight = 2) for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_out = col + (row * num_columns) west_in = (col + 1) + (row * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[east_out], - dst_node=routers[west_in], - latency = link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[east_out], + dst_node=routers[west_in], + latency=link_latency, + weight=2, + ) + ) link_count += 1 # West output to East input links (weight = 1) for row in range(num_rows): for col in range(num_columns): - if (col + 1 < num_columns): + if col + 1 < num_columns: east_in = col + (row * num_columns) west_out = (col + 1) + (row * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[west_out], - dst_node=routers[east_in], - latency = link_latency, - weight=1)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[west_out], + dst_node=routers[east_in], + latency=link_latency, + weight=1, + ) + ) link_count += 1 - # North output to South input links (weight = 2) for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_out = col + (row * num_columns) south_in = col + ((row + 1) * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[north_out], - dst_node=routers[south_in], - latency = 
link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[north_out], + dst_node=routers[south_in], + latency=link_latency, + weight=2, + ) + ) link_count += 1 # South output to North input links (weight = 2) for col in range(num_columns): for row in range(num_rows): - if (row + 1 < num_rows): + if row + 1 < num_rows: north_in = col + (row * num_columns) south_out = col + ((row + 1) * num_columns) - int_links.append(IntLink(link_id=link_count, - src_node=routers[south_out], - dst_node=routers[north_in], - latency = link_latency, - weight=2)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[south_out], + dst_node=routers[north_in], + latency=link_latency, + weight=2, + ) + ) link_count += 1 - network.int_links = int_links diff --git a/configs/topologies/Pt2Pt.py b/configs/topologies/Pt2Pt.py index fb7554918e..8d85b31f1e 100644 --- a/configs/topologies/Pt2Pt.py +++ b/configs/topologies/Pt2Pt.py @@ -30,8 +30,9 @@ from m5.objects import * from topologies.BaseTopology import SimpleTopology + class Pt2Pt(SimpleTopology): - description='Pt2Pt' + description = "Pt2Pt" def __init__(self, controllers): self.nodes = controllers @@ -41,32 +42,44 @@ class Pt2Pt(SimpleTopology): # default values for link latency and router latency. # Can be over-ridden on a per link/router basis - link_latency = options.link_latency # used by simple and garnet - router_latency = options.router_latency # only used by garnet + link_latency = options.link_latency # used by simple and garnet + router_latency = options.router_latency # only used by garnet # Create an individual router for each controller, # and connect all to all. 
# Since this is a high-radix router, router_latency should # accordingly be set to a higher value than the default # (which is 1 for mesh routers) - routers = [Router(router_id=i, latency = router_latency) \ - for i in range(len(nodes))] + routers = [ + Router(router_id=i, latency=router_latency) + for i in range(len(nodes)) + ] network.routers = routers - ext_links = [ExtLink(link_id=i, ext_node=n, int_node=routers[i], - latency = link_latency) - for (i, n) in enumerate(nodes)] + ext_links = [ + ExtLink( + link_id=i, + ext_node=n, + int_node=routers[i], + latency=link_latency, + ) + for (i, n) in enumerate(nodes) + ] network.ext_links = ext_links link_count = len(nodes) int_links = [] for i in range(len(nodes)): for j in range(len(nodes)): - if (i != j): + if i != j: link_count += 1 - int_links.append(IntLink(link_id=link_count, - src_node=routers[i], - dst_node=routers[j], - latency = link_latency)) + int_links.append( + IntLink( + link_id=link_count, + src_node=routers[i], + dst_node=routers[j], + latency=link_latency, + ) + ) network.int_links = int_links diff --git a/ext/googletest/BUILD.bazel b/ext/googletest/BUILD.bazel index 3c9a228f42..ac62251e10 100644 --- a/ext/googletest/BUILD.bazel +++ b/ext/googletest/BUILD.bazel @@ -30,8 +30,6 @@ # # Bazel Build for Google C++ Testing Framework(Google Test) -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") - package(default_visibility = ["//visibility:public"]) licenses(["notice"]) @@ -48,6 +46,16 @@ config_setting( constraint_values = ["@platforms//os:windows"], ) +config_setting( + name = "freebsd", + constraint_values = ["@platforms//os:freebsd"], +) + +config_setting( + name = "openbsd", + constraint_values = ["@platforms//os:openbsd"], +) + config_setting( name = "msvc_compiler", flag_values = { @@ -110,8 +118,16 @@ cc_library( "googletest/include", ], linkopts = select({ - ":qnx": [], + ":qnx": ["-lregex"], ":windows": [], + ":freebsd": [ + "-lm", + "-pthread", + ], + ":openbsd": [ + "-lm", + "-pthread", 
+ ], "//conditions:default": ["-pthread"], }), deps = select({ @@ -119,10 +135,15 @@ cc_library( "@com_google_absl//absl/debugging:failure_signal_handler", "@com_google_absl//absl/debugging:stacktrace", "@com_google_absl//absl/debugging:symbolize", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/flags:parse", + "@com_google_absl//absl/flags:reflection", + "@com_google_absl//absl/flags:usage", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:any", "@com_google_absl//absl/types:optional", "@com_google_absl//absl/types:variant", + "@com_googlesource_code_re2//:re2", ], "//conditions:default": [], }), diff --git a/ext/googletest/CMakeLists.txt b/ext/googletest/CMakeLists.txt index ea81ab1292..4daf35b546 100644 --- a/ext/googletest/CMakeLists.txt +++ b/ext/googletest/CMakeLists.txt @@ -1,19 +1,21 @@ # Note: CMake support is community-based. The maintainers do not use CMake # internally. -cmake_minimum_required(VERSION 2.8.12) +cmake_minimum_required(VERSION 3.5) if (POLICY CMP0048) cmake_policy(SET CMP0048 NEW) endif (POLICY CMP0048) +if (POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif (POLICY CMP0077) + project(googletest-distribution) set(GOOGLETEST_VERSION 1.11.0) -if (CMAKE_VERSION VERSION_GREATER "3.0.2") - if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX) - set(CMAKE_CXX_EXTENSIONS OFF) - endif() +if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX) + set(CMAKE_CXX_EXTENSIONS OFF) endif() enable_testing() diff --git a/ext/googletest/CONTRIBUTING.md b/ext/googletest/CONTRIBUTING.md index da45e4450c..b3f50436e5 100644 --- a/ext/googletest/CONTRIBUTING.md +++ b/ext/googletest/CONTRIBUTING.md @@ -21,8 +21,8 @@ accept your pull requests. ## Are you a Googler? -If you are a Googler, please make an attempt to submit an internal change rather -than a GitHub Pull Request. 
If you are not able to submit an internal change a +If you are a Googler, please make an attempt to submit an internal contribution +rather than a GitHub Pull Request. If you are not able to submit internally, a PR is acceptable as an alternative. ## Contributing A Patch @@ -36,7 +36,8 @@ PR is acceptable as an alternative. This ensures that work isn't being duplicated and communicating your plan early also generally leads to better patches. 4. If your proposed change is accepted, and you haven't already done so, sign a - Contributor License Agreement (see details above). + Contributor License Agreement + ([see details above](#contributor-license-agreements)). 5. Fork the desired repo, develop and test your code changes. 6. Ensure that your code adheres to the existing style in the sample to which you are contributing. diff --git a/ext/googletest/CONTRIBUTORS b/ext/googletest/CONTRIBUTORS index d9bc587b1b..77397a5b53 100644 --- a/ext/googletest/CONTRIBUTORS +++ b/ext/googletest/CONTRIBUTORS @@ -56,6 +56,7 @@ Russ Rufer Sean Mcafee Sigurður Ásgeirsson Sverre Sundsdal +Szymon Sobik Takeshi Yoshino Tracy Bialik Vadim Berman diff --git a/ext/googletest/README.md b/ext/googletest/README.md index e207d38975..30edaecf31 100644 --- a/ext/googletest/README.md +++ b/ext/googletest/README.md @@ -6,7 +6,8 @@ GoogleTest now follows the [Abseil Live at Head philosophy](https://abseil.io/about/philosophy#upgrade-support). -We recommend using the latest commit in the `master` branch in your projects. +We recommend +[updating to the latest commit in the `main` branch as often as possible](https://github.com/abseil/abseil-cpp/blob/master/FAQ.md#what-is-live-at-head-and-how-do-i-do-it). #### Documentation Updates @@ -121,11 +122,11 @@ result output. If your test runner understands TAP, you may find it useful. runs tests from your binary in parallel to provide significant speed-up. 
[GoogleTest Adapter](https://marketplace.visualstudio.com/items?itemName=DavidSchuldenfrei.gtest-adapter) -is a VS Code extension allowing to view GoogleTest in a tree view, and run/debug +is a VS Code extension allowing to view GoogleTest in a tree view and run/debug your tests. [C++ TestMate](https://github.com/matepek/vscode-catch2-test-adapter) is a VS -Code extension allowing to view GoogleTest in a tree view, and run/debug your +Code extension allowing to view GoogleTest in a tree view and run/debug your tests. [Cornichon](https://pypi.org/project/cornichon/) is a small Gherkin DSL parser diff --git a/ext/googletest/SConscript b/ext/googletest/SConscript index 21830f0308..3bd5ab8d10 100644 --- a/ext/googletest/SConscript +++ b/ext/googletest/SConscript @@ -52,6 +52,6 @@ gmock_all = genv.Object(gmock_src.File('gmock-all.cc')) gtest_main = genv.StaticLibrary(target='libgtest', source=[ gtest_all, gmock_all, gtest_src.File('gtest_main.cc')]) -env['GTEST_LIBS'] = ['libgtest', 'pthread'] +env['GTEST_LIBS'] = [gtest_main[0], 'pthread'] env['GTEST_CPPFLAGS'] = [ '-pthread', '-DUSE_GMOCK', '-Wno-undef', '-isystem', gtest_include.abspath] diff --git a/ext/googletest/WORKSPACE b/ext/googletest/WORKSPACE index 614f55778e..4d7b3988a2 100644 --- a/ext/googletest/WORKSPACE +++ b/ext/googletest/WORKSPACE @@ -4,21 +4,36 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "com_google_absl", - urls = ["https://github.com/abseil/abseil-cpp/archive/7971fb358ae376e016d2d4fc9327aad95659b25e.zip"], # 2021-05-20T02:59:16Z - strip_prefix = "abseil-cpp-7971fb358ae376e016d2d4fc9327aad95659b25e", - sha256 = "aeba534f7307e36fe084b452299e49b97420667a8d28102cf9a0daeed340b859", + sha256 = "1a1745b5ee81392f5ea4371a4ca41e55d446eeaee122903b2eaffbd8a3b67a2b", + strip_prefix = "abseil-cpp-01cc6567cff77738e416a7ddc17de2d435a780ce", + urls = ["https://github.com/abseil/abseil-cpp/archive/01cc6567cff77738e416a7ddc17de2d435a780ce.zip"], # 2022-06-21T19:28:27Z 
+) + +# Note this must use a commit from the `abseil` branch of the RE2 project. +# https://github.com/google/re2/tree/abseil +http_archive( + name = "com_googlesource_code_re2", + sha256 = "0a890c2aa0bb05b2ce906a15efb520d0f5ad4c7d37b8db959c43772802991887", + strip_prefix = "re2-a427f10b9fb4622dd6d8643032600aa1b50fbd12", + urls = ["https://github.com/google/re2/archive/a427f10b9fb4622dd6d8643032600aa1b50fbd12.zip"], # 2022-06-09 ) http_archive( - name = "rules_cc", - urls = ["https://github.com/bazelbuild/rules_cc/archive/68cb652a71e7e7e2858c50593e5a9e3b94e5b9a9.zip"], # 2021-05-14T14:51:14Z - strip_prefix = "rules_cc-68cb652a71e7e7e2858c50593e5a9e3b94e5b9a9", - sha256 = "1e19e9a3bc3d4ee91d7fcad00653485ee6c798efbbf9588d40b34cbfbded143d", + name = "rules_python", + sha256 = "0b460f17771258341528753b1679335b629d1d25e3af28eda47d009c103a6e15", + strip_prefix = "rules_python-aef17ad72919d184e5edb7abf61509eb78e57eda", + urls = ["https://github.com/bazelbuild/rules_python/archive/aef17ad72919d184e5edb7abf61509eb78e57eda.zip"], # 2022-06-21T23:44:47Z ) http_archive( - name = "rules_python", - urls = ["https://github.com/bazelbuild/rules_python/archive/ed6cc8f2c3692a6a7f013ff8bc185ba77eb9b4d2.zip"], # 2021-05-17T00:24:16Z - strip_prefix = "rules_python-ed6cc8f2c3692a6a7f013ff8bc185ba77eb9b4d2", - sha256 = "98b3c592faea9636ac8444bfd9de7f3fb4c60590932d6e6ac5946e3f8dbd5ff6", + name = "bazel_skylib", + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz"], + sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728", +) + +http_archive( + name = "platforms", + sha256 = "a879ea428c6d56ab0ec18224f976515948822451473a80d06c2e50af0bbe5121", + strip_prefix = "platforms-da5541f26b7de1dc8e04c075c99df5351742a4a2", + urls = ["https://github.com/bazelbuild/platforms/archive/da5541f26b7de1dc8e04c075c99df5351742a4a2.zip"], # 2022-05-27 ) diff --git a/ext/googletest/ci/linux-presubmit.sh b/ext/googletest/ci/linux-presubmit.sh 
index 6bea1cde26..0ee5670417 100644 --- a/ext/googletest/ci/linux-presubmit.sh +++ b/ext/googletest/ci/linux-presubmit.sh @@ -31,8 +31,8 @@ set -euox pipefail -readonly LINUX_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20210525" -readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20201015" +readonly LINUX_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217" +readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20220621" if [[ -z ${GTEST_ROOT:-} ]]; then GTEST_ROOT="$(realpath $(dirname ${0})/..)" @@ -76,7 +76,9 @@ time docker run \ /usr/local/bin/bazel test ... \ --copt="-Wall" \ --copt="-Werror" \ + --copt="-Wuninitialized" \ --copt="-Wno-error=pragmas" \ + --distdir="/bazel-distdir" \ --keep_going \ --show_timestamps \ --test_output=errors @@ -94,6 +96,7 @@ for std in ${STD}; do /usr/local/bin/bazel test ... \ --copt="-Wall" \ --copt="-Werror" \ + --copt="-Wuninitialized" \ --define="absl=${absl}" \ --distdir="/bazel-distdir" \ --keep_going \ @@ -116,6 +119,7 @@ for std in ${STD}; do --copt="--gcc-toolchain=/usr/local" \ --copt="-Wall" \ --copt="-Werror" \ + --copt="-Wuninitialized" \ --define="absl=${absl}" \ --distdir="/bazel-distdir" \ --keep_going \ diff --git a/ext/googletest/docs/advanced.md b/ext/googletest/docs/advanced.md index 8dff5ba150..9a752b922a 100644 --- a/ext/googletest/docs/advanced.md +++ b/ext/googletest/docs/advanced.md @@ -157,8 +157,11 @@ that can be used in the predicate assertion macro example: ```c++ -EXPECT_PRED_FORMAT2(testing::FloatLE, val1, val2); -EXPECT_PRED_FORMAT2(testing::DoubleLE, val1, val2); +using ::testing::FloatLE; +using ::testing::DoubleLE; +... +EXPECT_PRED_FORMAT2(FloatLE, val1, val2); +EXPECT_PRED_FORMAT2(DoubleLE, val1, val2); ``` The above code verifies that `val1` is less than, or approximately equal to, @@ -202,10 +205,9 @@ You can call the function to assert that types `T1` and `T2` are the same. 
The function does nothing if the assertion is satisfied. If the types are different, the function call will -fail to compile, the compiler error message will say that -`T1 and T2 are not the same type` and most likely (depending on the compiler) -show you the actual values of `T1` and `T2`. This is mainly useful inside -template code. +fail to compile, the compiler error message will say that `T1 and T2 are not the +same type` and most likely (depending on the compiler) show you the actual +values of `T1` and `T2`. This is mainly useful inside template code. **Caveat**: When used inside a member function of a class template or a function template, `StaticAssertTypeEq()` is effective only if the function is @@ -383,10 +385,10 @@ EXPECT_TRUE(IsCorrectBarIntVector(bar_ints)) ## Death Tests In many applications, there are assertions that can cause application failure if -a condition is not met. These sanity checks, which ensure that the program is in -a known good state, are there to fail at the earliest possible time after some -program state is corrupted. If the assertion checks the wrong condition, then -the program may proceed in an erroneous state, which could lead to memory +a condition is not met. These consistency checks, which ensure that the program +is in a known good state, are there to fail at the earliest possible time after +some program state is corrupted. If the assertion checks the wrong condition, +then the program may proceed in an erroneous state, which could lead to memory corruption, security holes, or worse. Hence it is vitally important to test that such assertion statements work as expected. @@ -480,9 +482,11 @@ TEST_F(FooDeathTest, DoesThat) { ### Regular Expression Syntax -On POSIX systems (e.g. Linux, Cygwin, and Mac), googletest uses the +When built with Bazel and using Abseil, googletest uses the +[RE2](https://github.com/google/re2/wiki/Syntax) syntax. 
Otherwise, for POSIX +systems (Linux, Cygwin, Mac), googletest uses the [POSIX extended regular expression](http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap09.html#tag_09_04) -syntax. To learn about this syntax, you may want to read this +syntax. To learn about POSIX syntax, you may want to read this [Wikipedia entry](http://en.wikipedia.org/wiki/Regular_expression#POSIX_Extended_Regular_Expressions). On Windows, googletest uses its own simple regular expression implementation. It @@ -558,7 +562,7 @@ The automated testing framework does not set the style flag. You can choose a particular style of death tests by setting the flag programmatically: ```c++ -testing::FLAGS_gtest_death_test_style="threadsafe" +GTEST_FLAG_SET(death_test_style, "threadsafe") ``` You can do this in `main()` to set the style for all death tests in the binary, @@ -568,12 +572,12 @@ restored afterwards, so you need not do that yourself. For example: ```c++ int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); - testing::FLAGS_gtest_death_test_style = "fast"; + GTEST_FLAG_SET(death_test_style, "fast"); return RUN_ALL_TESTS(); } TEST(MyDeathTest, TestOne) { - testing::FLAGS_gtest_death_test_style = "threadsafe"; + GTEST_FLAG_SET(death_test_style, "threadsafe"); // This test is run in the "threadsafe" style: ASSERT_DEATH(ThisShouldDie(), ""); } @@ -610,15 +614,14 @@ Despite the improved thread safety afforded by the "threadsafe" style of death test, thread problems such as deadlock are still possible in the presence of handlers registered with `pthread_atfork(3)`. - ## Using Assertions in Sub-routines {: .callout .note} Note: If you want to put a series of test assertions in a subroutine to check for a complex condition, consider using -[a custom GMock matcher](gmock_cook_book.md#NewMatchers) -instead. This lets you provide a more readable error message in case of failure -and avoid all of the issues described below. 
+[a custom GMock matcher](gmock_cook_book.md#NewMatchers) instead. This lets you +provide a more readable error message in case of failure and avoid all of the +issues described below. ### Adding Traces to Assertions @@ -631,6 +634,7 @@ the `SCOPED_TRACE` macro or the `ScopedTrace` utility: ```c++ SCOPED_TRACE(message); ``` + ```c++ ScopedTrace trace("file_path", line_number, message); ``` @@ -837,7 +841,7 @@ will output XML like this: ```xml ... - + ... ``` @@ -888,6 +892,12 @@ preceding or following another. Also, the tests must either not modify the state of any shared resource, or, if they do modify the state, they must restore the state to its original value before passing control to the next test. +Note that `SetUpTestSuite()` may be called multiple times for a test fixture +class that has derived classes, so you should not expect code in the function +body to be run only once. Also, derived classes still have access to shared +resources defined as static members, so careful consideration is needed when +managing shared resources to avoid memory leaks. + Here's an example of per-test-suite set-up and tear-down: ```c++ @@ -897,7 +907,10 @@ class FooTest : public testing::Test { // Called before the first test in this test suite. // Can be omitted if not needed. static void SetUpTestSuite() { - shared_resource_ = new ...; + // Avoid reallocating static objects if called in subclasses of FooTest. + if (shared_resource_ == nullptr) { + shared_resource_ = new ...; + } } // Per-test-suite tear-down. @@ -1302,6 +1315,7 @@ First, define a fixture class template, as we did with typed tests: ```c++ template class FooTest : public testing::Test { + void DoSomethingInteresting(); ... }; ``` @@ -1319,6 +1333,9 @@ this as many times as you want: TYPED_TEST_P(FooTest, DoesBlah) { // Inside a test, refer to TypeParam to get the type parameter. TypeParam n = 0; + + // You will need to use `this` explicitly to refer to fixture members. + this->DoSomethingInteresting() ... 
} @@ -1481,8 +1498,8 @@ In frameworks that report a failure by throwing an exception, you could catch the exception and assert on it. But googletest doesn't use exceptions, so how do we test that a piece of code generates an expected failure? -`"gtest/gtest-spi.h"` contains some constructs to do this. After #including this header, -you can use +`"gtest/gtest-spi.h"` contains some constructs to do this. +After #including this header, you can use ```c++ EXPECT_FATAL_FAILURE(statement, substring); @@ -1586,12 +1603,14 @@ void RegisterMyTests(const std::vector& values) { } ... int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); std::vector values_to_test = LoadValuesFromConfig(); RegisterMyTests(values_to_test); ... return RUN_ALL_TESTS(); } ``` + ## Getting the Current Test's Name Sometimes a function may need to know the name of the currently running test. @@ -1816,8 +1835,7 @@ By default, a googletest program runs all tests the user has defined. In some cases (e.g. iterative test development & execution) it may be desirable stop test execution upon first failure (trading improved latency for completeness). If `GTEST_FAIL_FAST` environment variable or `--gtest_fail_fast` flag is set, -the test runner will stop execution as soon as the first test failure is -found. +the test runner will stop execution as soon as the first test failure is found. #### Temporarily Disabling Tests @@ -1911,6 +1929,58 @@ time. If you combine this with `--gtest_repeat=N`, googletest will pick a different random seed and re-shuffle the tests in each iteration. +### Distributing Test Functions to Multiple Machines + +If you have more than one machine you can use to run a test program, you might +want to run the test functions in parallel and get the result faster. We call +this technique *sharding*, where each machine is called a *shard*. + +GoogleTest is compatible with test sharding. 
To take advantage of this feature, +your test runner (not part of GoogleTest) needs to do the following: + +1. Allocate a number of machines (shards) to run the tests. +1. On each shard, set the `GTEST_TOTAL_SHARDS` environment variable to the total + number of shards. It must be the same for all shards. +1. On each shard, set the `GTEST_SHARD_INDEX` environment variable to the index + of the shard. Different shards must be assigned different indices, which + must be in the range `[0, GTEST_TOTAL_SHARDS - 1]`. +1. Run the same test program on all shards. When GoogleTest sees the above two + environment variables, it will select a subset of the test functions to run. + Across all shards, each test function in the program will be run exactly + once. +1. Wait for all shards to finish, then collect and report the results. + +Your project may have tests that were written without GoogleTest and thus don't +understand this protocol. In order for your test runner to figure out which test +supports sharding, it can set the environment variable `GTEST_SHARD_STATUS_FILE` +to a non-existent file path. If a test program supports sharding, it will create +this file to acknowledge that fact; otherwise it will not create it. The actual +contents of the file are not important at this time, although we may put some +useful information in it in the future. + +Here's an example to make it clear. Suppose you have a test program `foo_test` +that contains the following 5 test functions: + +``` +TEST(A, V) +TEST(A, W) +TEST(B, X) +TEST(B, Y) +TEST(B, Z) +``` + +Suppose you have 3 machines at your disposal. To run the test functions in +parallel, you would set `GTEST_TOTAL_SHARDS` to 3 on all machines, and set +`GTEST_SHARD_INDEX` to 0, 1, and 2 on the machines respectively. Then you would +run the same `foo_test` on each machine. 
+ +GoogleTest reserves the right to change how the work is distributed across the +shards, but here's one possible scenario: + +* Machine #0 runs `A.V` and `B.X`. +* Machine #1 runs `A.W` and `B.Y`. +* Machine #2 runs `B.Z`. + ### Controlling Test Output #### Colored Terminal Output @@ -1965,8 +2035,6 @@ text because, for example, you don't have an UTF-8 compatible output medium, run the test program with `--gtest_print_utf8=0` or set the `GTEST_PRINT_UTF8` environment variable to `0`. - - #### Generating an XML Report googletest can emit a detailed XML report to a file in addition to its normal @@ -2020,15 +2088,15 @@ could generate this report: - + ... ... - + - + @@ -2046,6 +2114,9 @@ Things to note: * The `timestamp` attribute records the local date and time of the test execution. +* The `file` and `line` attributes record the source file location, where the + test was defined. + * Each `` element corresponds to a single failed googletest assertion. @@ -2085,6 +2156,8 @@ The report format conforms to the following JSON Schema: "type": "object", "properties": { "name": { "type": "string" }, + "file": { "type": "string" }, + "line": { "type": "integer" }, "status": { "type": "string", "enum": ["RUN", "NOTRUN"] @@ -2162,6 +2235,8 @@ message TestCase { message TestInfo { string name = 1; + string file = 6; + int32 line = 7; enum Status { RUN = 0; NOTRUN = 1; @@ -2205,6 +2280,8 @@ could generate this report: "testsuite": [ { "name": "Addition", + "file": "test.cpp", + "line": 1, "status": "RUN", "time": "0.007s", "classname": "", @@ -2221,6 +2298,8 @@ could generate this report: }, { "name": "Subtraction", + "file": "test.cpp", + "line": 2, "status": "RUN", "time": "0.005s", "classname": "" @@ -2236,6 +2315,8 @@ could generate this report: "testsuite": [ { "name": "NonContradiction", + "file": "test.cpp", + "line": 3, "status": "RUN", "time": "0.005s", "classname": "" @@ -2253,12 +2334,11 @@ IMPORTANT: The exact format of the JSON document is subject to change. 
#### Detecting Test Premature Exit -Google Test implements the _premature-exit-file_ protocol for test runners -to catch any kind of unexpected exits of test programs. Upon start, -Google Test creates the file which will be automatically deleted after -all work has been finished. Then, the test runner can check if this file -exists. In case the file remains undeleted, the inspected test has exited -prematurely. +Google Test implements the _premature-exit-file_ protocol for test runners to +catch any kind of unexpected exits of test programs. Upon start, Google Test +creates the file which will be automatically deleted after all work has been +finished. Then, the test runner can check if this file exists. In case the file +remains undeleted, the inspected test has exited prematurely. This feature is enabled only if the `TEST_PREMATURE_EXIT_FILE` environment variable has been set. diff --git a/ext/googletest/docs/faq.md b/ext/googletest/docs/faq.md index 9042da1efb..c849aff923 100644 --- a/ext/googletest/docs/faq.md +++ b/ext/googletest/docs/faq.md @@ -1,9 +1,9 @@ -# Googletest FAQ +# GoogleTest FAQ ## Why should test suite names and test names not contain underscore? {: .callout .note} -Note: Googletest reserves underscore (`_`) for special purpose keywords, such as +Note: GoogleTest reserves underscore (`_`) for special purpose keywords, such as [the `DISABLED_` prefix](advanced.md#temporarily-disabling-tests), in addition to the following rationale. @@ -50,15 +50,15 @@ Now, the two `TEST`s will both generate the same class So for simplicity, we just ask the users to avoid `_` in `TestSuiteName` and `TestName`. The rule is more constraining than necessary, but it's simple and -easy to remember. It also gives googletest some wiggle room in case its +easy to remember. It also gives GoogleTest some wiggle room in case its implementation needs to change in the future. 
If you violate the rule, there may not be immediate consequences, but your test may (just may) break with a new compiler (or a new version of the compiler you -are using) or with a new version of googletest. Therefore it's best to follow +are using) or with a new version of GoogleTest. Therefore it's best to follow the rule. -## Why does googletest support `EXPECT_EQ(NULL, ptr)` and `ASSERT_EQ(NULL, ptr)` but not `EXPECT_NE(NULL, ptr)` and `ASSERT_NE(NULL, ptr)`? +## Why does GoogleTest support `EXPECT_EQ(NULL, ptr)` and `ASSERT_EQ(NULL, ptr)` but not `EXPECT_NE(NULL, ptr)` and `ASSERT_NE(NULL, ptr)`? First of all, you can use `nullptr` with each of these macros, e.g. `EXPECT_EQ(ptr, nullptr)`, `EXPECT_NE(ptr, nullptr)`, `ASSERT_EQ(ptr, nullptr)`, @@ -68,7 +68,7 @@ because `nullptr` does not have the type problems that `NULL` does. Due to some peculiarity of C++, it requires some non-trivial template meta programming tricks to support using `NULL` as an argument of the `EXPECT_XX()` and `ASSERT_XX()` macros. Therefore we only do it where it's most needed -(otherwise we make the implementation of googletest harder to maintain and more +(otherwise we make the implementation of GoogleTest harder to maintain and more error-prone than necessary). Historically, the `EXPECT_EQ()` macro took the *expected* value as its first @@ -162,7 +162,7 @@ methods, the parent process will think the calls have never occurred. Therefore, you may want to move your `EXPECT_CALL` statements inside the `EXPECT_DEATH` macro. -## EXPECT_EQ(htonl(blah), blah_blah) generates weird compiler errors in opt mode. Is this a googletest bug? +## EXPECT_EQ(htonl(blah), blah_blah) generates weird compiler errors in opt mode. Is this a GoogleTest bug? Actually, the bug is in `htonl()`. @@ -199,7 +199,7 @@ const int Foo::kBar; // No initializer here. ``` Otherwise your code is **invalid C++**, and may break in unexpected ways. 
In -particular, using it in googletest comparison assertions (`EXPECT_EQ`, etc) will +particular, using it in GoogleTest comparison assertions (`EXPECT_EQ`, etc) will generate an "undefined reference" linker error. The fact that "it used to work" doesn't mean it's valid. It just means that you were lucky. :-) @@ -225,7 +225,7 @@ cases may want to use the same or slightly different fixtures. For example, you may want to make sure that all of a GUI library's test suites don't leak important system resources like fonts and brushes. -In googletest, you share a fixture among test suites by putting the shared logic +In GoogleTest, you share a fixture among test suites by putting the shared logic in a base test fixture, then deriving from that base a separate fixture for each test suite that wants to use this common logic. You then use `TEST_F()` to write tests using each derived fixture. @@ -264,7 +264,7 @@ TEST_F(FooTest, Baz) { ... } ``` If necessary, you can continue to derive test fixtures from a derived fixture. -googletest has no limit on how deep the hierarchy can be. +GoogleTest has no limit on how deep the hierarchy can be. For a complete example using derived test fixtures, see [sample5_unittest.cc](https://github.com/google/googletest/blob/master/googletest/samples/sample5_unittest.cc). @@ -278,7 +278,7 @@ disabled by our build system. Please see more details ## My death test hangs (or seg-faults). How do I fix it? -In googletest, death tests are run in a child process and the way they work is +In GoogleTest, death tests are run in a child process and the way they work is delicate. To write death tests you really need to understand how they work—see the details at [Death Assertions](reference/assertions.md#death) in the Assertions Reference. @@ -305,8 +305,8 @@ bullet - sorry! ## Should I use the constructor/destructor of the test fixture or SetUp()/TearDown()? 
{#CtorVsSetUp} -The first thing to remember is that googletest does **not** reuse the same test -fixture object across multiple tests. For each `TEST_F`, googletest will create +The first thing to remember is that GoogleTest does **not** reuse the same test +fixture object across multiple tests. For each `TEST_F`, GoogleTest will create a **fresh** test fixture object, immediately call `SetUp()`, run the test body, call `TearDown()`, and then delete the test fixture object. @@ -328,7 +328,7 @@ You may still want to use `SetUp()/TearDown()` in the following cases: * C++ does not allow virtual function calls in constructors and destructors. You can call a method declared as virtual, but it will not use dynamic - dispatch, it will use the definition from the class the constructor of which + dispatch. It will use the definition from the class the constructor of which is currently executing. This is because calling a virtual method before the derived class constructor has a chance to run is very dangerous - the virtual method might operate on uninitialized data. Therefore, if you need @@ -345,11 +345,11 @@ You may still want to use `SetUp()/TearDown()` in the following cases: that many standard libraries (like STL) may throw when exceptions are enabled in the compiler. Therefore you should prefer `TearDown()` if you want to write portable tests that work with or without exceptions. -* The googletest team is considering making the assertion macros throw on +* The GoogleTest team is considering making the assertion macros throw on platforms where exceptions are enabled (e.g. Windows, Mac OS, and Linux client-side), which will eliminate the need for the user to propagate failures from a subroutine to its caller. Therefore, you shouldn't use - googletest assertions in a destructor if your code could run on such a + GoogleTest assertions in a destructor if your code could run on such a platform. 
## The compiler complains "no matching function to call" when I use ASSERT_PRED*. How do I fix it? @@ -375,7 +375,7 @@ they write This is **wrong and dangerous**. The testing services needs to see the return value of `RUN_ALL_TESTS()` in order to determine if a test has passed. If your `main()` function ignores it, your test will be considered successful even if it -has a googletest assertion failure. Very bad. +has a GoogleTest assertion failure. Very bad. We have decided to fix this (thanks to Michael Chastain for the idea). Now, your code will no longer be able to ignore `RUN_ALL_TESTS()` when compiled with @@ -410,7 +410,6 @@ C++ is case-sensitive. Did you spell it as `Setup()`? Similarly, sometimes people spell `SetUpTestSuite()` as `SetupTestSuite()` and wonder why it's never called. - ## I have several test suites which share the same test fixture logic, do I have to define a new test fixture class for each of them? This seems pretty tedious. You don't have to. Instead of @@ -441,14 +440,14 @@ TEST_F(BarTest, Abc) { ... } TEST_F(BarTest, Def) { ... } ``` -## googletest output is buried in a whole bunch of LOG messages. What do I do? +## GoogleTest output is buried in a whole bunch of LOG messages. What do I do? -The googletest output is meant to be a concise and human-friendly report. If -your test generates textual output itself, it will mix with the googletest +The GoogleTest output is meant to be a concise and human-friendly report. If +your test generates textual output itself, it will mix with the GoogleTest output, making it hard to read. However, there is an easy solution to this problem. -Since `LOG` messages go to stderr, we decided to let googletest output go to +Since `LOG` messages go to stderr, we decided to let GoogleTest output go to stdout. This way, you can easily separate the two using redirection. 
For example: @@ -521,7 +520,7 @@ TEST(MyDeathTest, CompoundStatement) { ## I have a fixture class `FooTest`, but `TEST_F(FooTest, Bar)` gives me error ``"no matching function for call to `FooTest::FooTest()'"``. Why? -Googletest needs to be able to create objects of your test fixture class, so it +GoogleTest needs to be able to create objects of your test fixture class, so it must have a default constructor. Normally the compiler will define one for you. However, there are cases where you have to define your own: @@ -546,11 +545,11 @@ The new NPTL thread library doesn't suffer from this problem, as it doesn't create a manager thread. However, if you don't control which machine your test runs on, you shouldn't depend on this. -## Why does googletest require the entire test suite, instead of individual tests, to be named *DeathTest when it uses ASSERT_DEATH? +## Why does GoogleTest require the entire test suite, instead of individual tests, to be named *DeathTest when it uses ASSERT_DEATH? -googletest does not interleave tests from different test suites. That is, it +GoogleTest does not interleave tests from different test suites. That is, it runs all tests in one test suite first, and then runs all tests in the next test -suite, and so on. googletest does this because it needs to set up a test suite +suite, and so on. GoogleTest does this because it needs to set up a test suite before the first test in it is run, and tear it down afterwards. Splitting up the test case would require multiple set-up and tear-down processes, which is inefficient and makes the semantics unclean. @@ -589,11 +588,11 @@ TEST_F(FooDeathTest, Uvw) { ... EXPECT_DEATH(...) ... } TEST_F(FooDeathTest, Xyz) { ... ASSERT_DEATH(...) ... } ``` -## googletest prints the LOG messages in a death test's child process only when the test fails. How can I see the LOG messages when the death test succeeds? +## GoogleTest prints the LOG messages in a death test's child process only when the test fails. 
How can I see the LOG messages when the death test succeeds? Printing the LOG messages generated by the statement inside `EXPECT_DEATH()` makes it harder to search for real problems in the parent's log. Therefore, -googletest only prints them when the death test has failed. +GoogleTest only prints them when the death test has failed. If you really need to see such LOG messages, a workaround is to temporarily break the death test (e.g. by changing the regex pattern it is expected to @@ -612,7 +611,7 @@ needs to be defined in the *same* name space. See ## How do I suppress the memory leak messages on Windows? -Since the statically initialized googletest singleton requires allocations on +Since the statically initialized GoogleTest singleton requires allocations on the heap, the Visual C++ memory leak detector will report memory leaks at the end of the program run. The easiest way to avoid this is to use the `_CrtMemCheckpoint` and `_CrtMemDumpAllObjectsSince` calls to not report any @@ -626,7 +625,7 @@ things accordingly, you are leaking test-only logic into production code and there is no easy way to ensure that the test-only code paths aren't run by mistake in production. Such cleverness also leads to [Heisenbugs](https://en.wikipedia.org/wiki/Heisenbug). Therefore we strongly -advise against the practice, and googletest doesn't provide a way to do it. +advise against the practice, and GoogleTest doesn't provide a way to do it. In general, the recommended way to cause the code to behave differently under test is [Dependency Injection](http://en.wikipedia.org/wiki/Dependency_injection). You can inject @@ -673,7 +672,7 @@ TEST(CoolTest, DoSomething) { ``` However, the following code is **not allowed** and will produce a runtime error -from googletest because the test methods are using different test fixture +from GoogleTest because the test methods are using different test fixture classes with the same test suite name. 
```c++ diff --git a/ext/googletest/docs/gmock_cheat_sheet.md b/ext/googletest/docs/gmock_cheat_sheet.md index 3d164ad629..67d075dd9e 100644 --- a/ext/googletest/docs/gmock_cheat_sheet.md +++ b/ext/googletest/docs/gmock_cheat_sheet.md @@ -8,7 +8,7 @@ Given ```cpp class Foo { - ... + public: virtual ~Foo(); virtual int GetSize() const = 0; virtual string Describe(const char* name) = 0; @@ -23,7 +23,7 @@ class Foo { #include "gmock/gmock.h" class MockFoo : public Foo { - ... + public: MOCK_METHOD(int, GetSize, (), (const, override)); MOCK_METHOD(string, Describe, (const char* name), (override)); MOCK_METHOD(string, Describe, (int type), (override)); @@ -58,7 +58,7 @@ To mock ```cpp template class StackInterface { - ... + public: virtual ~StackInterface(); virtual int GetSize() const = 0; virtual void Push(const Elem& x) = 0; @@ -71,7 +71,7 @@ class StackInterface { ```cpp template class MockStack : public StackInterface { - ... + public: MOCK_METHOD(int, GetSize, (), (const, override)); MOCK_METHOD(void, Push, (const Elem& x), (override)); }; diff --git a/ext/googletest/docs/gmock_cook_book.md b/ext/googletest/docs/gmock_cook_book.md index c08958eb16..8a11d864f2 100644 --- a/ext/googletest/docs/gmock_cook_book.md +++ b/ext/googletest/docs/gmock_cook_book.md @@ -392,8 +392,7 @@ Old macros and their new equivalents: If a mock method has no `EXPECT_CALL` spec but is called, we say that it's an "uninteresting call", and the default action (which can be specified using `ON_CALL()`) of the method will be taken. Currently, an uninteresting call will -also by default cause gMock to print a warning. (In the future, we might remove -this warning by default.) +also by default cause gMock to print a warning. However, sometimes you may want to ignore these uninteresting calls, and sometimes you may want to treat them as errors. 
gMock lets you make the decision @@ -1084,7 +1083,7 @@ using ::testing::Lt; ``` says that `Blah` will be called with arguments `x`, `y`, and `z` where `x < y < -z`. Note that in this example, it wasn't necessary specify the positional +z`. Note that in this example, it wasn't necessary to specify the positional matchers. As a convenience and example, gMock provides some matchers for 2-tuples, @@ -1300,23 +1299,27 @@ What if you have a pointer to pointer? You guessed it - you can use nested `Pointee(Pointee(Lt(3)))` matches a pointer that points to a pointer that points to a number less than 3 (what a mouthful...). -### Testing a Certain Property of an Object +### Defining a Custom Matcher Class {#CustomMatcherClass} -Sometimes you want to specify that an object argument has a certain property, -but there is no existing matcher that does this. If you want good error -messages, you should [define a matcher](#NewMatchers). If you want to do it -quick and dirty, you could get away with writing an ordinary function. +Most matchers can be simply defined using [the MATCHER* macros](#NewMatchers), +which are terse and flexible, and produce good error messages. However, these +macros are not very explicit about the interfaces they create and are not always +suitable, especially for matchers that will be widely reused. -Let's say you have a mock function that takes an object of type `Foo`, which has -an `int bar()` method and an `int baz()` method, and you want to constrain that -the argument's `bar()` value plus its `baz()` value is a given number. Here's -how you can define a matcher to do it: +For more advanced cases, you may need to define your own matcher class. A custom +matcher allows you to test a specific invariant property of that object. Let's +take a look at how to do so. + +Imagine you have a mock function that takes an object of type `Foo`, which has +an `int bar()` method and an `int baz()` method. 
You want to constrain that the +argument's `bar()` value plus its `baz()` value is a given number. (This is an +invariant.) Here's how we can write and use a matcher class to do so: ```cpp -using ::testing::Matcher; - class BarPlusBazEqMatcher { public: + using is_gtest_matcher = void; + explicit BarPlusBazEqMatcher(int expected_sum) : expected_sum_(expected_sum) {} @@ -1325,23 +1328,24 @@ class BarPlusBazEqMatcher { return (foo.bar() + foo.baz()) == expected_sum_; } - void DescribeTo(std::ostream& os) const { - os << "bar() + baz() equals " << expected_sum_; + void DescribeTo(std::ostream* os) const { + *os << "bar() + baz() equals " << expected_sum_; } - void DescribeNegationTo(std::ostream& os) const { - os << "bar() + baz() does not equal " << expected_sum_; + void DescribeNegationTo(std::ostream* os) const { + *os << "bar() + baz() does not equal " << expected_sum_; } private: const int expected_sum_; }; -Matcher BarPlusBazEq(int expected_sum) { +::testing::Matcher BarPlusBazEq(int expected_sum) { return BarPlusBazEqMatcher(expected_sum); } ... - EXPECT_CALL(..., DoThis(BarPlusBazEq(5)))...; + Foo foo; + EXPECT_CALL(foo, BarPlusBazEq(5))...; ``` ### Matching Containers @@ -1452,7 +1456,7 @@ the pointer is copied. When the last matcher that references the implementation object dies, the implementation object will be deleted. Therefore, if you have some complex matcher that you want to use again and -again, there is no need to build it everytime. Just assign it to a matcher +again, there is no need to build it every time. Just assign it to a matcher variable and use that variable repeatedly! For example, ```cpp @@ -1754,7 +1758,7 @@ specifies the following DAG (where `s1` is `A -> B`, and `s2` is `A -> C -> D`): | A ---| | - +---> C ---> D + +---> C ---> D ``` This means that A must occur before B and C, and C must occur before D. 
There's @@ -1980,6 +1984,7 @@ If the mock method also needs to return a value as well, you can chain ```cpp using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; @@ -2033,10 +2038,7 @@ class MockRolodex : public Rolodex { } ... MockRolodex rolodex; - vector names; - names.push_back("George"); - names.push_back("John"); - names.push_back("Thomas"); + vector names = {"George", "John", "Thomas"}; EXPECT_CALL(rolodex, GetNames(_)) .WillOnce(SetArrayArgument<0>(names.begin(), names.end())); ``` @@ -2604,7 +2606,7 @@ efficient. When the last action that references the implementation object dies, the implementation object will be deleted. If you have some complex action that you want to use again and again, you may -not have to build it from scratch everytime. If the action doesn't have an +not have to build it from scratch every time. If the action doesn't have an internal state (i.e. if it always does the same thing no matter how many times it has been called), you can assign it to an action variable and use that variable repeatedly. For example: @@ -3809,22 +3811,19 @@ Cardinality EvenNumber() { .Times(EvenNumber()); ``` -### Writing New Actions Quickly {#QuickNewActions} +### Writing New Actions {#QuickNewActions} If the built-in actions don't work for you, you can easily define your own one. -Just define a functor class with a (possibly templated) call operator, matching -the signature of your action. +All you need is a call operator with a signature compatible with the mocked +function. 
So you can use a lambda: -```cpp -struct Increment { - template - T operator()(T* arg) { - return ++(*arg); - } -} +``` +MockFunction mock; +EXPECT_CALL(mock, Call).WillOnce([](const int input) { return input * 7; }); +EXPECT_EQ(14, mock.AsStdFunction()(2)); ``` -The same approach works with stateful functors (or any callable, really): +Or a struct with a call operator (even a templated one): ``` struct MultiplyBy { @@ -3832,12 +3831,54 @@ struct MultiplyBy { T operator()(T arg) { return arg * multiplier; } int multiplier; -} +}; // Then use: // EXPECT_CALL(...).WillOnce(MultiplyBy{7}); ``` +It's also fine for the callable to take no arguments, ignoring the arguments +supplied to the mock function: + +``` +MockFunction mock; +EXPECT_CALL(mock, Call).WillOnce([] { return 17; }); +EXPECT_EQ(17, mock.AsStdFunction()(0)); +``` + +When used with `WillOnce`, the callable can assume it will be called at most +once and is allowed to be a move-only type: + +``` +// An action that contains move-only types and has an &&-qualified operator, +// demanding in the type system that it be called at most once. This can be +// used with WillOnce, but the compiler will reject it if handed to +// WillRepeatedly. +struct MoveOnlyAction { + std::unique_ptr move_only_state; + std::unique_ptr operator()() && { return std::move(move_only_state); } +}; + +MockFunction()> mock; +EXPECT_CALL(mock, Call).WillOnce(MoveOnlyAction{std::make_unique(17)}); +EXPECT_THAT(mock.AsStdFunction()(), Pointee(Eq(17))); +``` + +More generally, to use with a mock function whose signature is `R(Args...)` the +object can be anything convertible to `OnceAction` or +`Action. The difference between the two is that `OnceAction` has +weaker requirements (`Action` requires a copy-constructible input that can be +called repeatedly whereas `OnceAction` requires only move-constructible and +supports `&&`-qualified call operators), but can be used only with `WillOnce`. 
+`OnceAction` is typically relevant only when supporting move-only types or +actions that want a type-system guarantee that they will be called at most once. + +Typically the `OnceAction` and `Action` templates need not be referenced +directly in your actions: a struct or class with a call operator is sufficient, +as in the examples above. But fancier polymorphic actions that need to know the +specific return type of the mock function can define templated conversion +operators to make that possible. See `gmock-actions.h` for examples. + #### Legacy macro-based Actions Before C++11, the functor-based actions were not supported; the old way of @@ -4191,7 +4232,7 @@ This implementation class does *not* need to inherit from any particular class. What matters is that it must have a `Perform()` method template. This method template takes the mock function's arguments as a tuple in a **single** argument, and returns the result of the action. It can be either `const` or not, -but must be invokable with exactly one template argument, which is the result +but must be invocable with exactly one template argument, which is the result type. In other words, you must be able to call `Perform(args)` where `R` is the mock function's return type and `args` is its arguments in a tuple. diff --git a/ext/googletest/docs/gmock_faq.md b/ext/googletest/docs/gmock_faq.md index 2cd9b3f315..8f220bf7a8 100644 --- a/ext/googletest/docs/gmock_faq.md +++ b/ext/googletest/docs/gmock_faq.md @@ -369,8 +369,8 @@ Usually, if your action is for a particular function type, defining it using different types (e.g. if you are defining `Return(*value*)`), `MakePolymorphicAction()` is easiest. Sometimes you want precise control on what types of functions the action can be used in, and implementing `ActionInterface` -is the way to go here. See the implementation of `Return()` in -`testing/base/public/gmock-actions.h` for an example. +is the way to go here. 
See the implementation of `Return()` in `gmock-actions.h` +for an example. ### I use SetArgPointee() in WillOnce(), but gcc complains about "conflicting return type specified". What does it mean? diff --git a/ext/googletest/docs/gmock_for_dummies.md b/ext/googletest/docs/gmock_for_dummies.md index 0392b5de3d..b7264d3587 100644 --- a/ext/googletest/docs/gmock_for_dummies.md +++ b/ext/googletest/docs/gmock_for_dummies.md @@ -190,10 +190,10 @@ Some people put it in a `_test.cc`. This is fine when the interface being mocked `Foo` changes it, your test could break. (You can't really expect `Foo`'s maintainer to fix every test that uses `Foo`, can you?) -So, the rule of thumb is: if you need to mock `Foo` and it's owned by others, -define the mock class in `Foo`'s package (better, in a `testing` sub-package -such that you can clearly separate production code and testing utilities), put -it in a `.h` and a `cc_library`. Then everyone can reference them from their +Generally, you should not mock classes you don't own. If you must mock such a +class owned by others, define the mock class in `Foo`'s Bazel package (usually +the same directory or a `testing` sub-directory), and put it in a `.h` and a +`cc_library` with `testonly=True`. Then everyone can reference them from their tests. If `Foo` ever changes, there is only one copy of `MockFoo` to change, and only tests that depend on the changed methods need to be fixed. diff --git a/ext/googletest/docs/primer.md b/ext/googletest/docs/primer.md index 6d8fdf4439..aecc368b08 100644 --- a/ext/googletest/docs/primer.md +++ b/ext/googletest/docs/primer.md @@ -162,9 +162,9 @@ TEST(TestSuiteName, TestName) { `TEST()` arguments go from general to specific. The *first* argument is the name of the test suite, and the *second* argument is the test's name within the test -suite. Both names must be valid C++ identifiers, and they should not contain -any underscores (`_`). 
A test's *full name* consists of its containing test suite and -its individual name. Tests from different test suites can have the same +suite. Both names must be valid C++ identifiers, and they should not contain any +underscores (`_`). A test's *full name* consists of its containing test suite +and its individual name. Tests from different test suites can have the same individual name. For example, let's take a simple integer function: @@ -245,8 +245,8 @@ Also, you must first define a test fixture class before using it in a declaration`". For each test defined with `TEST_F()`, googletest will create a *fresh* test -fixture at runtime, immediately initialize it via `SetUp()`, run the test, -clean up by calling `TearDown()`, and then delete the test fixture. Note that +fixture at runtime, immediately initialize it via `SetUp()`, run the test, clean +up by calling `TearDown()`, and then delete the test fixture. Note that different tests in the same test suite have different test fixture objects, and googletest always deletes a test fixture before it creates the next one. googletest does **not** reuse the same test fixture for multiple tests. Any @@ -342,8 +342,8 @@ your defined tests in order to run them. After defining your tests, you can run them with `RUN_ALL_TESTS()`, which returns `0` if all the tests are successful, or `1` otherwise. Note that -`RUN_ALL_TESTS()` runs *all tests* in your link unit--they can be from -different test suites, or even different source files. +`RUN_ALL_TESTS()` runs *all tests* in your link unit--they can be from different +test suites, or even different source files. When invoked, the `RUN_ALL_TESTS()` macro: @@ -456,8 +456,8 @@ int main(int argc, char **argv) { The `::testing::InitGoogleTest()` function parses the command line for googletest flags, and removes all recognized flags. This allows the user to -control a test program's behavior via various flags, which we'll cover in -the [AdvancedGuide](advanced.md). 
You **must** call this function before calling +control a test program's behavior via various flags, which we'll cover in the +[AdvancedGuide](advanced.md). You **must** call this function before calling `RUN_ALL_TESTS()`, or the flags won't be properly initialized. On Windows, `InitGoogleTest()` also works with wide strings, so it can be used diff --git a/ext/googletest/docs/quickstart-bazel.md b/ext/googletest/docs/quickstart-bazel.md index 362ee6d050..5d6e9c68ad 100644 --- a/ext/googletest/docs/quickstart-bazel.md +++ b/ext/googletest/docs/quickstart-bazel.md @@ -17,7 +17,7 @@ See [Supported Platforms](platforms.md) for more information about platforms compatible with GoogleTest. If you don't already have Bazel installed, see the -[Bazel installation guide](https://docs.bazel.build/versions/master/install.html). +[Bazel installation guide](https://docs.bazel.build/versions/main/install.html). {: .callout .note} Note: The terminal commands in this tutorial show a Unix shell prompt, but the @@ -26,7 +26,7 @@ commands work on the Windows command line as well. ## Set up a Bazel workspace A -[Bazel workspace](https://docs.bazel.build/versions/master/build-ref.html#workspace) +[Bazel workspace](https://docs.bazel.build/versions/main/build-ref.html#workspace) is a directory on your filesystem that you use to manage source files for the software you want to build. Each workspace directory has a text file named `WORKSPACE` which may be empty, or may contain references to external @@ -40,9 +40,9 @@ $ mkdir my_workspace && cd my_workspace Next, you’ll create the `WORKSPACE` file to specify dependencies. A common and recommended way to depend on GoogleTest is to use a -[Bazel external dependency](https://docs.bazel.build/versions/master/external.html) +[Bazel external dependency](https://docs.bazel.build/versions/main/external.html) via the -[`http_archive` rule](https://docs.bazel.build/versions/master/repo/http.html#http_archive). 
+[`http_archive` rule](https://docs.bazel.build/versions/main/repo/http.html#http_archive). To do this, in the root directory of your workspace (`my_workspace/`), create a file named `WORKSPACE` with the following contents: @@ -62,18 +62,6 @@ as a ZIP archive from GitHub. In the above example, GoogleTest version to use; we recommend updating the hash often to point to the latest version. -Bazel also needs a dependency on the -[`rules_cc` repository](https://github.com/bazelbuild/rules_cc) to build C++ -code, so add the following to the `WORKSPACE` file: - -``` -http_archive( - name = "rules_cc", - urls = ["https://github.com/bazelbuild/rules_cc/archive/40548a2974f1aea06215272d9c2b47a14a24e556.zip"], - strip_prefix = "rules_cc-40548a2974f1aea06215272d9c2b47a14a24e556", -) -``` - Now you're ready to build C++ code that uses GoogleTest. ## Create and run a binary @@ -104,8 +92,6 @@ To build the code, create a file named `BUILD` in the same directory with the following contents: ``` -load("@rules_cc//cc:defs.bzl", "cc_test") - cc_test( name = "hello_test", size = "small", @@ -118,7 +104,7 @@ This `cc_test` rule declares the C++ test binary you want to build, and links to GoogleTest (`//:gtest_main`) using the prefix you specified in the `WORKSPACE` file (`@com_google_googletest`). For more information about Bazel `BUILD` files, see the -[Bazel C++ Tutorial](https://docs.bazel.build/versions/master/tutorial/cpp.html). +[Bazel C++ Tutorial](https://docs.bazel.build/versions/main/tutorial/cpp.html). Now you can build and run your test: diff --git a/ext/googletest/docs/reference/matchers.md b/ext/googletest/docs/reference/matchers.md index 1a60b4c0dc..9fb1592751 100644 --- a/ext/googletest/docs/reference/matchers.md +++ b/ext/googletest/docs/reference/matchers.md @@ -8,9 +8,13 @@ A **matcher** matches a *single* argument. You can use it inside `ON_CALL()` or | `EXPECT_THAT(actual_value, matcher)` | Asserts that `actual_value` matches `matcher`. 
| | `ASSERT_THAT(actual_value, matcher)` | The same as `EXPECT_THAT(actual_value, matcher)`, except that it generates a **fatal** failure. | -{: .callout .note} -**Note:** Although equality matching via `EXPECT_THAT(actual_value, -expected_value)` is supported, prefer to make the comparison explicit via +{: .callout .warning} +**WARNING:** Equality matching via `EXPECT_THAT(actual_value, expected_value)` +is supported, however note that implicit conversions can cause surprising +results. For example, `EXPECT_THAT(some_bool, "some string")` will compile and +may pass unintentionally. + +**BEST PRACTICE:** Prefer to make the comparison explicit via `EXPECT_THAT(actual_value, Eq(expected_value))` or `EXPECT_EQ(actual_value, expected_value)`. @@ -88,16 +92,17 @@ The `argument` can be either a C string or a C++ string object: | Matcher | Description | | :---------------------- | :------------------------------------------------- | -| `ContainsRegex(string)` | `argument` matches the given regular expression. | -| `EndsWith(suffix)` | `argument` ends with string `suffix`. | -| `HasSubstr(string)` | `argument` contains `string` as a sub-string. | -| `IsEmpty()` | `argument` is an empty string. | -| `MatchesRegex(string)` | `argument` matches the given regular expression with the match starting at the first character and ending at the last character. | -| `StartsWith(prefix)` | `argument` starts with string `prefix`. | -| `StrCaseEq(string)` | `argument` is equal to `string`, ignoring case. | -| `StrCaseNe(string)` | `argument` is not equal to `string`, ignoring case. | -| `StrEq(string)` | `argument` is equal to `string`. | -| `StrNe(string)` | `argument` is not equal to `string`. | +| `ContainsRegex(string)` | `argument` matches the given regular expression. | +| `EndsWith(suffix)` | `argument` ends with string `suffix`. | +| `HasSubstr(string)` | `argument` contains `string` as a sub-string. | +| `IsEmpty()` | `argument` is an empty string. 
| +| `MatchesRegex(string)` | `argument` matches the given regular expression with the match starting at the first character and ending at the last character. | +| `StartsWith(prefix)` | `argument` starts with string `prefix`. | +| `StrCaseEq(string)` | `argument` is equal to `string`, ignoring case. | +| `StrCaseNe(string)` | `argument` is not equal to `string`, ignoring case. | +| `StrEq(string)` | `argument` is equal to `string`. | +| `StrNe(string)` | `argument` is not equal to `string`. | +| `WhenBase64Unescaped(m)` | `argument` is a base-64 escaped string whose unescaped string matches `m`. | `ContainsRegex()` and `MatchesRegex()` take ownership of the `RE` object. They use the regular expression syntax defined @@ -147,7 +152,6 @@ messages, you can use: one might write: ```cpp - using ::std::get; MATCHER(FooEq, "") { return std::get<0>(arg).Equals(std::get<1>(arg)); } @@ -194,6 +198,7 @@ messages, you can use: | Matcher | Description | | :--------------- | :------------------------------------------------ | | `ResultOf(f, m)` | `f(argument)` matches matcher `m`, where `f` is a function or functor. | +| `ResultOf(result_description, f, m)` | The same as the two-parameter version, but provides a better error message. ## Pointer Matchers @@ -238,7 +243,7 @@ You can make a matcher from one or more other matchers: | `AnyOf(m1, m2, ..., mn)` | `argument` matches at least one of the matchers `m1` to `mn`. | | `AnyOfArray({m0, m1, ..., mn})`, `AnyOfArray(a_container)`, `AnyOfArray(begin, end)`, `AnyOfArray(array)`, or `AnyOfArray(array, count)` | The same as `AnyOf()` except that the matchers come from an initializer list, STL-style container, iterator range, or C-style array. | | `Not(m)` | `argument` doesn't match matcher `m`. 
| -| `Conditional(cond, m1, m2)` | Matches matcher `m1` if `cond` evalutes to true, else matches `m2`.| +| `Conditional(cond, m1, m2)` | Matches matcher `m1` if `cond` evaluates to true, else matches `m2`.| ## Adapters for Matchers diff --git a/ext/googletest/docs/reference/mocking.md b/ext/googletest/docs/reference/mocking.md index c29f71603f..e414ffbd0d 100644 --- a/ext/googletest/docs/reference/mocking.md +++ b/ext/googletest/docs/reference/mocking.md @@ -248,7 +248,9 @@ EXPECT_CALL(my_mock, GetNumber()) .WillOnce(Return(3)); ``` -The `WillOnce` clause can be used any number of times on an expectation. +The `WillOnce` clause can be used any number of times on an expectation. Unlike +`WillRepeatedly`, the action fed to each `WillOnce` call will be called at most +once, so may be a move-only type and/or have an `&&`-qualified call operator. #### WillRepeatedly {#EXPECT_CALL.WillRepeatedly} diff --git a/ext/googletest/docs/reference/testing.md b/ext/googletest/docs/reference/testing.md index 554d6c9584..dc47942399 100644 --- a/ext/googletest/docs/reference/testing.md +++ b/ext/googletest/docs/reference/testing.md @@ -518,8 +518,8 @@ Logs a property for the current test, test suite, or entire invocation of the test program. Only the last value for a given key is logged. The key must be a valid XML attribute name, and cannot conflict with the ones -already used by GoogleTest (`name`, `status`, `time`, `classname`, `type_param`, -and `value_param`). +already used by GoogleTest (`name`, `file`, `line`, `status`, `time`, +`classname`, `type_param`, and `value_param`). `RecordProperty` is `public static` so it can be called from utility functions that are not members of the test fixture. 
diff --git a/ext/googletest/googlemock/CMakeLists.txt b/ext/googletest/googlemock/CMakeLists.txt index e7df8ec53d..5c1f0dafea 100644 --- a/ext/googletest/googlemock/CMakeLists.txt +++ b/ext/googletest/googlemock/CMakeLists.txt @@ -36,13 +36,9 @@ endif() # as ${gmock_SOURCE_DIR} and to the root binary directory as # ${gmock_BINARY_DIR}. # Language "C" is required for find_package(Threads). -if (CMAKE_VERSION VERSION_LESS 3.0) - project(gmock CXX C) -else() - cmake_policy(SET CMP0048 NEW) - project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) -endif() -cmake_minimum_required(VERSION 2.8.12) +cmake_minimum_required(VERSION 3.5) +cmake_policy(SET CMP0048 NEW) +project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) if (COMMAND set_up_hermetic_build) set_up_hermetic_build() @@ -109,11 +105,12 @@ endif() # to the targets for when we are part of a parent build (ie being pulled # in via add_subdirectory() rather than being a standalone build). if (DEFINED CMAKE_VERSION AND NOT "${CMAKE_VERSION}" VERSION_LESS "2.8.11") + string(REPLACE ";" "$" dirs "${gmock_build_include_dirs}") target_include_directories(gmock SYSTEM INTERFACE - "$" + "$" "$/${CMAKE_INSTALL_INCLUDEDIR}>") target_include_directories(gmock_main SYSTEM INTERFACE - "$" + "$" "$/${CMAKE_INSTALL_INCLUDEDIR}>") endif() @@ -154,7 +151,10 @@ if (gmock_build_tests) cxx_test(gmock_ex_test gmock_main) cxx_test(gmock-function-mocker_test gmock_main) cxx_test(gmock-internal-utils_test gmock_main) - cxx_test(gmock-matchers_test gmock_main) + cxx_test(gmock-matchers-arithmetic_test gmock_main) + cxx_test(gmock-matchers-comparisons_test gmock_main) + cxx_test(gmock-matchers-containers_test gmock_main) + cxx_test(gmock-matchers-misc_test gmock_main) cxx_test(gmock-more-actions_test gmock_main) cxx_test(gmock-nice-strict_test gmock_main) cxx_test(gmock-port_test gmock_main) diff --git a/ext/googletest/googlemock/README.md b/ext/googletest/googlemock/README.md index ead688325d..7da60655db 100644 --- 
a/ext/googletest/googlemock/README.md +++ b/ext/googletest/googlemock/README.md @@ -35,10 +35,6 @@ Details and examples can be found here: * [gMock Cookbook](https://google.github.io/googletest/gmock_cook_book.html) * [gMock Cheat Sheet](https://google.github.io/googletest/gmock_cheat_sheet.html) -Please note that code under scripts/generator/ is from the -[cppclean project](http://code.google.com/p/cppclean/) and under the Apache -License, which is different from GoogleMock's license. - GoogleMock is a part of [GoogleTest C++ testing framework](http://github.com/google/googletest/) and a subject to the same requirements. diff --git a/ext/googletest/googlemock/include/gmock/gmock-actions.h b/ext/googletest/googlemock/include/gmock/gmock-actions.h index f2393bd3af..c785ad8abb 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-actions.h +++ b/ext/googletest/googlemock/include/gmock/gmock-actions.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. 
// // The ACTION* family of macros can be used in a namespace scope to @@ -125,13 +124,14 @@ // To learn more about using these macros, please search for 'ACTION' on // https://github.com/google/googletest/blob/master/docs/gmock_cook_book.md -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ #ifndef _WIN32_WCE -# include +#include #endif #include @@ -147,8 +147,8 @@ #include "gmock/internal/gmock-pp.h" #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #endif namespace testing { @@ -196,9 +196,7 @@ class BuiltInDefaultValue { public: // This function returns true if and only if type T has a built-in default // value. - static bool Exists() { - return ::std::is_default_constructible::value; - } + static bool Exists() { return ::std::is_default_constructible::value; } static T Get() { return BuiltInDefaultValueGetter< @@ -227,11 +225,11 @@ class BuiltInDefaultValue { // The following specializations define the default values for // specific types we care about. 
#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \ - template <> \ - class BuiltInDefaultValue { \ - public: \ - static bool Exists() { return true; } \ - static type Get() { return value; } \ + template <> \ + class BuiltInDefaultValue { \ + public: \ + static bool Exists() { return true; } \ + static type Get() { return value; } \ } GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, ); // NOLINT @@ -255,21 +253,309 @@ GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U); GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0); -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long long, 0); // NOLINT -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long long, 0); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long long, 0); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0); GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0); #undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_ -// Simple two-arg form of std::disjunction. -template -using disjunction = typename ::std::conditional::type; +// Partial implementations of metaprogramming types from the standard library +// not available in C++11. + +template +struct negation + // NOLINTNEXTLINE + : std::integral_constant {}; + +// Base case: with zero predicates the answer is always true. +template +struct conjunction : std::true_type {}; + +// With a single predicate, the answer is that predicate. 
+template +struct conjunction : P1 {}; + +// With multiple predicates the answer is the first predicate if that is false, +// and we recurse otherwise. +template +struct conjunction + : std::conditional, P1>::type {}; + +template +struct disjunction : std::false_type {}; + +template +struct disjunction : P1 {}; + +template +struct disjunction + // NOLINTNEXTLINE + : std::conditional, P1>::type {}; + +template +using void_t = void; + +// Detects whether an expression of type `From` can be implicitly converted to +// `To` according to [conv]. In C++17, [conv]/3 defines this as follows: +// +// An expression e can be implicitly converted to a type T if and only if +// the declaration T t=e; is well-formed, for some invented temporary +// variable t ([dcl.init]). +// +// [conv]/2 implies we can use function argument passing to detect whether this +// initialization is valid. +// +// Note that this is distinct from is_convertible, which requires this be valid: +// +// To test() { +// return declval(); +// } +// +// In particular, is_convertible doesn't give the correct answer when `To` and +// `From` are the same non-moveable type since `declval` will be an rvalue +// reference, defeating the guaranteed copy elision that would otherwise make +// this function work. +// +// REQUIRES: `From` is not cv void. +template +struct is_implicitly_convertible { + private: + // A function that accepts a parameter of type T. This can be called with type + // U successfully only if U is implicitly convertible to T. + template + static void Accept(T); + + // A function that creates a value of type T. + template + static T Make(); + + // An overload be selected when implicit conversion from T to To is possible. + template (Make()))> + static std::true_type TestImplicitConversion(int); + + // A fallback overload selected in all other cases. 
+ template + static std::false_type TestImplicitConversion(...); + + public: + using type = decltype(TestImplicitConversion(0)); + static constexpr bool value = type::value; +}; + +// Like std::invoke_result_t from C++17, but works only for objects with call +// operators (not e.g. member function pointers, which we don't need specific +// support for in OnceAction because std::function deals with them). +template +using call_result_t = decltype(std::declval()(std::declval()...)); + +template +struct is_callable_r_impl : std::false_type {}; + +// Specialize the struct for those template arguments where call_result_t is +// well-formed. When it's not, the generic template above is chosen, resulting +// in std::false_type. +template +struct is_callable_r_impl>, R, F, Args...> + : std::conditional< + std::is_void::value, // + std::true_type, // + is_implicitly_convertible, R>>::type {}; + +// Like std::is_invocable_r from C++17, but works only for objects with call +// operators. See the note on call_result_t. +template +using is_callable_r = is_callable_r_impl; + +// Like std::as_const from C++17. +template +typename std::add_const::type& as_const(T& t) { + return t; +} } // namespace internal +// Specialized for function types below. +template +class OnceAction; + +// An action that can only be used once. +// +// This is accepted by WillOnce, which doesn't require the underlying action to +// be copy-constructible (only move-constructible), and promises to invoke it as +// an rvalue reference. This allows the action to work with move-only types like +// std::move_only_function in a type-safe manner. +// +// For example: +// +// // Assume we have some API that needs to accept a unique pointer to some +// // non-copyable object Foo. +// void AcceptUniquePointer(std::unique_ptr foo); +// +// // We can define an action that provides a Foo to that API. 
Because It +// // has to give away its unique pointer, it must not be called more than +// // once, so its call operator is &&-qualified. +// struct ProvideFoo { +// std::unique_ptr foo; +// +// void operator()() && { +// AcceptUniquePointer(std::move(Foo)); +// } +// }; +// +// // This action can be used with WillOnce. +// EXPECT_CALL(mock, Call) +// .WillOnce(ProvideFoo{std::make_unique(...)}); +// +// // But a call to WillRepeatedly will fail to compile. This is correct, +// // since the action cannot correctly be used repeatedly. +// EXPECT_CALL(mock, Call) +// .WillRepeatedly(ProvideFoo{std::make_unique(...)}); +// +// A less-contrived example would be an action that returns an arbitrary type, +// whose &&-qualified call operator is capable of dealing with move-only types. +template +class OnceAction final { + private: + // True iff we can use the given callable type (or lvalue reference) directly + // via StdFunctionAdaptor. + template + using IsDirectlyCompatible = internal::conjunction< + // It must be possible to capture the callable in StdFunctionAdaptor. + std::is_constructible::type, Callable>, + // The callable must be compatible with our signature. + internal::is_callable_r::type, + Args...>>; + + // True iff we can use the given callable type via StdFunctionAdaptor once we + // ignore incoming arguments. + template + using IsCompatibleAfterIgnoringArguments = internal::conjunction< + // It must be possible to capture the callable in a lambda. + std::is_constructible::type, Callable>, + // The callable must be invocable with zero arguments, returning something + // convertible to Result. + internal::is_callable_r::type>>; + + public: + // Construct from a callable that is directly compatible with our mocked + // signature: it accepts our function type's arguments and returns something + // convertible to our result type. 
+ template ::type>>, + IsDirectlyCompatible> // + ::value, + int>::type = 0> + OnceAction(Callable&& callable) // NOLINT + : function_(StdFunctionAdaptor::type>( + {}, std::forward(callable))) {} + + // As above, but for a callable that ignores the mocked function's arguments. + template ::type>>, + // Exclude callables for which the overload above works. + // We'd rather provide the arguments if possible. + internal::negation>, + IsCompatibleAfterIgnoringArguments>::value, + int>::type = 0> + OnceAction(Callable&& callable) // NOLINT + // Call the constructor above with a callable + // that ignores the input arguments. + : OnceAction(IgnoreIncomingArguments::type>{ + std::forward(callable)}) {} + + // We are naturally copyable because we store only an std::function, but + // semantically we should not be copyable. + OnceAction(const OnceAction&) = delete; + OnceAction& operator=(const OnceAction&) = delete; + OnceAction(OnceAction&&) = default; + + // Invoke the underlying action callable with which we were constructed, + // handing it the supplied arguments. + Result Call(Args... args) && { + return function_(std::forward(args)...); + } + + private: + // An adaptor that wraps a callable that is compatible with our signature and + // being invoked as an rvalue reference so that it can be used as an + // StdFunctionAdaptor. This throws away type safety, but that's fine because + // this is only used by WillOnce, which we know calls at most once. + // + // Once we have something like std::move_only_function from C++23, we can do + // away with this. + template + class StdFunctionAdaptor final { + public: + // A tag indicating that the (otherwise universal) constructor is accepting + // the callable itself, instead of e.g. stealing calls for the move + // constructor. 
+ struct CallableTag final {}; + + template + explicit StdFunctionAdaptor(CallableTag, F&& callable) + : callable_(std::make_shared(std::forward(callable))) {} + + // Rather than explicitly returning Result, we return whatever the wrapped + // callable returns. This allows for compatibility with existing uses like + // the following, when the mocked function returns void: + // + // EXPECT_CALL(mock_fn_, Call) + // .WillOnce([&] { + // [...] + // return 0; + // }); + // + // Such a callable can be turned into std::function. If we use an + // explicit return type of Result here then it *doesn't* work with + // std::function, because we'll get a "void function should not return a + // value" error. + // + // We need not worry about incompatible result types because the SFINAE on + // OnceAction already checks this for us. std::is_invocable_r_v itself makes + // the same allowance for void result types. + template + internal::call_result_t operator()( + ArgRefs&&... args) const { + return std::move(*callable_)(std::forward(args)...); + } + + private: + // We must put the callable on the heap so that we are copyable, which + // std::function needs. + std::shared_ptr callable_; + }; + + // An adaptor that makes a callable that accepts zero arguments callable with + // our mocked arguments. + template + struct IgnoreIncomingArguments { + internal::call_result_t operator()(Args&&...) 
{ + return std::move(callable)(); + } + + Callable callable; + }; + + std::function function_; +}; + // When an unexpected function call is encountered, Google Mock will // let it return a default value if the user has specified one for its // return type, or if the return type has a built-in default value; @@ -339,7 +625,8 @@ class DefaultValue { private: const T value_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(FixedValueProducer); + FixedValueProducer(const FixedValueProducer&) = delete; + FixedValueProducer& operator=(const FixedValueProducer&) = delete; }; class FactoryValueProducer : public ValueProducer { @@ -350,7 +637,8 @@ class DefaultValue { private: const FactoryFunction factory_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(FactoryValueProducer); + FactoryValueProducer(const FactoryValueProducer&) = delete; + FactoryValueProducer& operator=(const FactoryValueProducer&) = delete; }; static ValueProducer* producer_; @@ -424,28 +712,34 @@ class ActionInterface { virtual Result Perform(const ArgumentTuple& args) = 0; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionInterface); + ActionInterface(const ActionInterface&) = delete; + ActionInterface& operator=(const ActionInterface&) = delete; }; -// An Action is a copyable and IMMUTABLE (except by assignment) -// object that represents an action to be taken when a mock function -// of type F is called. The implementation of Action is just a -// std::shared_ptr to const ActionInterface. Don't inherit from Action! -// You can view an object implementing ActionInterface as a -// concrete action (including its current state), and an Action -// object as a handle to it. template -class Action { +class Action; + +// An Action is a copyable and IMMUTABLE (except by assignment) +// object that represents an action to be taken when a mock function of type +// R(Args...) is called. The implementation of Action is just a +// std::shared_ptr to const ActionInterface. Don't inherit from Action! 
You +// can view an object implementing ActionInterface as a concrete action +// (including its current state), and an Action object as a handle to it. +template +class Action { + private: + using F = R(Args...); + // Adapter class to allow constructing Action from a legacy ActionInterface. // New code should create Actions from functors instead. struct ActionAdapter { // Adapter must be copyable to satisfy std::function requirements. ::std::shared_ptr> impl_; - template - typename internal::Function::Result operator()(Args&&... args) { + template + typename internal::Function::Result operator()(InArgs&&... args) { return impl_->Perform( - ::std::forward_as_tuple(::std::forward(args)...)); + ::std::forward_as_tuple(::std::forward(args)...)); } }; @@ -480,7 +774,8 @@ class Action { // Action, as long as F's arguments can be implicitly converted // to Func's and Func's return type can be implicitly converted to F's. template - explicit Action(const Action& action) : fun_(action.fun_) {} + Action(const Action& action) // NOLINT + : fun_(action.fun_) {} // Returns true if and only if this is the DoDefault() action. bool IsDoDefault() const { return fun_ == nullptr; } @@ -498,6 +793,24 @@ class Action { return internal::Apply(fun_, ::std::move(args)); } + // An action can be used as a OnceAction, since it's obviously safe to call it + // once. + operator OnceAction() const { // NOLINT + // Return a OnceAction-compatible callable that calls Perform with the + // arguments it is provided. We could instead just return fun_, but then + // we'd need to handle the IsDoDefault() case separately. + struct OA { + Action action; + + R operator()(Args... args) && { + return action.Perform( + std::forward_as_tuple(std::forward(args)...)); + } + }; + + return OA{*this}; + } + private: template friend class Action; @@ -514,8 +827,8 @@ class Action { template struct IgnoreArgs { - template - Result operator()(const Args&...) const { + template + Result operator()(const InArgs&...) 
const { return function_impl(); } @@ -606,118 +919,198 @@ struct ByMoveWrapper { T payload; }; -// Implements the polymorphic Return(x) action, which can be used in -// any function that returns the type of x, regardless of the argument -// types. -// -// Note: The value passed into Return must be converted into -// Function::Result when this action is cast to Action rather than -// when that action is performed. This is important in scenarios like -// -// MOCK_METHOD1(Method, T(U)); -// ... -// { -// Foo foo; -// X x(&foo); -// EXPECT_CALL(mock, Method(_)).WillOnce(Return(x)); -// } -// -// In the example above the variable x holds reference to foo which leaves -// scope and gets destroyed. If copying X just copies a reference to foo, -// that copy will be left with a hanging reference. If conversion to T -// makes a copy of foo, the above code is safe. To support that scenario, we -// need to make sure that the type conversion happens inside the EXPECT_CALL -// statement, and conversion of the result of Return to Action is a -// good place for that. -// -// The real life example of the above scenario happens when an invocation -// of gtl::Container() is passed into Return. -// +// The general implementation of Return(R). Specializations follow below. template -class ReturnAction { +class ReturnAction final { public: - // Constructs a ReturnAction object from the value to be returned. - // 'value' is passed by value instead of by const reference in order - // to allow Return("string literal") to compile. - explicit ReturnAction(R value) : value_(new R(std::move(value))) {} + explicit ReturnAction(R value) : value_(std::move(value)) {} - // This template type conversion operator allows Return(x) to be - // used in ANY function that returns x's type. - template - operator Action() const { // NOLINT - // Assert statement belongs here because this is the best place to verify - // conditions on F. It produces the clearest error messages - // in most compilers. 
- // Impl really belongs in this scope as a local class but can't - // because MSVC produces duplicate symbols in different translation units - // in this case. Until MS fixes that bug we put Impl into the class scope - // and put the typedef both here (for use in assert statement) and - // in the Impl class. But both definitions must be the same. - typedef typename Function::Result Result; - GTEST_COMPILE_ASSERT_( - !std::is_reference::value, - use_ReturnRef_instead_of_Return_to_return_a_reference); - static_assert(!std::is_void::value, - "Can't use Return() on an action expected to return `void`."); - return Action(new Impl(value_)); + template >, // + negation>, // + std::is_convertible, // + std::is_move_constructible>::value>::type> + operator OnceAction() && { // NOLINT + return Impl(std::move(value_)); + } + + template >, // + negation>, // + std::is_convertible, // + std::is_copy_constructible>::value>::type> + operator Action() const { // NOLINT + return Impl(value_); } private: - // Implements the Return(x) action for a particular function type F. - template - class Impl : public ActionInterface { + // Implements the Return(x) action for a mock function that returns type U. + template + class Impl final { public: - typedef typename Function::Result Result; - typedef typename Function::ArgumentTuple ArgumentTuple; + // The constructor used when the return value is allowed to move from the + // input value (i.e. we are converting to OnceAction). + explicit Impl(R&& input_value) + : state_(new State(std::move(input_value))) {} - // The implicit cast is necessary when Result has more than one - // single-argument constructor (e.g. Result is std::vector) and R - // has a type conversion operator template. In that case, value_(value) - // won't compile as the compiler doesn't known which constructor of - // Result to call. ImplicitCast_ forces the compiler to convert R to - // Result without considering explicit constructors, thus resolving the - // ambiguity. 
value_ is then initialized using its copy constructor. - explicit Impl(const std::shared_ptr& value) - : value_before_cast_(*value), - value_(ImplicitCast_(value_before_cast_)) {} + // The constructor used when the return value is not allowed to move from + // the input value (i.e. we are converting to Action). + explicit Impl(const R& input_value) : state_(new State(input_value)) {} - Result Perform(const ArgumentTuple&) override { return value_; } + U operator()() && { return std::move(state_->value); } + U operator()() const& { return state_->value; } private: - GTEST_COMPILE_ASSERT_(!std::is_reference::value, - Result_cannot_be_a_reference_type); - // We save the value before casting just in case it is being cast to a - // wrapper type. - R value_before_cast_; - Result value_; + // We put our state on the heap so that the compiler-generated copy/move + // constructors work correctly even when U is a reference-like type. This is + // necessary only because we eagerly create State::value (see the note on + // that symbol for details). If we instead had only the input value as a + // member then the default constructors would work fine. + // + // For example, when R is std::string and U is std::string_view, value is a + // reference to the string backed by input_value. The copy constructor would + // copy both, so that we wind up with a new input_value object (with the + // same contents) and a reference to the *old* input_value object rather + // than the new one. + struct State { + explicit State(const R& input_value_in) + : input_value(input_value_in), + // Make an implicit conversion to Result before initializing the U + // object we store, avoiding calling any explicit constructor of U + // from R. + // + // This simulates the language rules: a function with return type U + // that does `return R()` requires R to be implicitly convertible to + // U, and uses that path for the conversion, even U Result has an + // explicit constructor from R. 
+ value(ImplicitCast_(internal::as_const(input_value))) {} - GTEST_DISALLOW_COPY_AND_ASSIGN_(Impl); + // As above, but for the case where we're moving from the ReturnAction + // object because it's being used as a OnceAction. + explicit State(R&& input_value_in) + : input_value(std::move(input_value_in)), + // For the same reason as above we make an implicit conversion to U + // before initializing the value. + // + // Unlike above we provide the input value as an rvalue to the + // implicit conversion because this is a OnceAction: it's fine if it + // wants to consume the input value. + value(ImplicitCast_(std::move(input_value))) {} + + // A copy of the value originally provided by the user. We retain this in + // addition to the value of the mock function's result type below in case + // the latter is a reference-like type. See the std::string_view example + // in the documentation on Return. + R input_value; + + // The value we actually return, as the type returned by the mock function + // itself. + // + // We eagerly initialize this here, rather than lazily doing the implicit + // conversion automatically each time Perform is called, for historical + // reasons: in 2009-11, commit a070cbd91c (Google changelist 13540126) + // made the Action conversion operator eagerly convert the R value to + // U, but without keeping the R alive. This broke the use case discussed + // in the documentation for Return, making reference-like types such as + // std::string_view not safe to use as U where the input type R is a + // value-like type such as std::string. + // + // The example the commit gave was not very clear, nor was the issue + // thread (https://github.com/google/googlemock/issues/86), but it seems + // the worry was about reference-like input types R that flatten to a + // value-like type U when being implicitly converted. 
An example of this + // is std::vector::reference, which is often a proxy type with an + // reference to the underlying vector: + // + // // Helper method: have the mock function return bools according + // // to the supplied script. + // void SetActions(MockFunction& mock, + // const std::vector& script) { + // for (size_t i = 0; i < script.size(); ++i) { + // EXPECT_CALL(mock, Call(i)).WillOnce(Return(script[i])); + // } + // } + // + // TEST(Foo, Bar) { + // // Set actions using a temporary vector, whose operator[] + // // returns proxy objects that references that will be + // // dangling once the call to SetActions finishes and the + // // vector is destroyed. + // MockFunction mock; + // SetActions(mock, {false, true}); + // + // EXPECT_FALSE(mock.AsStdFunction()(0)); + // EXPECT_TRUE(mock.AsStdFunction()(1)); + // } + // + // This eager conversion helps with a simple case like this, but doesn't + // fully make these types work in general. For example the following still + // uses a dangling reference: + // + // TEST(Foo, Baz) { + // MockFunction()> mock; + // + // // Return the same vector twice, and then the empty vector + // // thereafter. + // auto action = Return(std::initializer_list{ + // "taco", "burrito", + // }); + // + // EXPECT_CALL(mock, Call) + // .WillOnce(action) + // .WillOnce(action) + // .WillRepeatedly(Return(std::vector{})); + // + // EXPECT_THAT(mock.AsStdFunction()(), + // ElementsAre("taco", "burrito")); + // EXPECT_THAT(mock.AsStdFunction()(), + // ElementsAre("taco", "burrito")); + // EXPECT_THAT(mock.AsStdFunction()(), IsEmpty()); + // } + // + U value; + }; + + const std::shared_ptr state_; }; - // Partially specialize for ByMoveWrapper. This version of ReturnAction will - // move its contents instead. 
- template - class Impl, F> : public ActionInterface { - public: - typedef typename Function::Result Result; - typedef typename Function::ArgumentTuple ArgumentTuple; + R value_; +}; - explicit Impl(const std::shared_ptr& wrapper) - : performed_(false), wrapper_(wrapper) {} +// A specialization of ReturnAction when R is ByMoveWrapper for some T. +// +// This version applies the type system-defeating hack of moving from T even in +// the const call operator, checking at runtime that it isn't called more than +// once, since the user has declared their intent to do so by using ByMove. +template +class ReturnAction> final { + public: + explicit ReturnAction(ByMoveWrapper wrapper) + : state_(new State(std::move(wrapper.payload))) {} - Result Perform(const ArgumentTuple&) override { - GTEST_CHECK_(!performed_) - << "A ByMove() action should only be performed once."; - performed_ = true; - return std::move(wrapper_->payload); - } + T operator()() const { + GTEST_CHECK_(!state_->called) + << "A ByMove() action must be performed at most once."; - private: - bool performed_; - const std::shared_ptr wrapper_; + state_->called = true; + return std::move(state_->value); + } + + private: + // We store our state on the heap so that we are copyable as required by + // Action, despite the fact that we are stateful and T may not be copyable. + struct State { + explicit State(T&& value_in) : value(std::move(value_in)) {} + + T value; + bool called = false; }; - const std::shared_ptr value_; + const std::shared_ptr state_; }; // Implements the ReturnNull() action. @@ -759,8 +1152,8 @@ class ReturnRefAction { // Asserts that the function return type is a reference. This // catches the user error of using ReturnRef(x) when Return(x) // should be used, and generates some helpful error message. 
- GTEST_COMPILE_ASSERT_(std::is_reference::value, - use_Return_instead_of_ReturnRef_to_return_a_value); + static_assert(std::is_reference::value, + "use Return instead of ReturnRef to return a value"); return Action(new Impl(ref_)); } @@ -801,9 +1194,8 @@ class ReturnRefOfCopyAction { // Asserts that the function return type is a reference. This // catches the user error of using ReturnRefOfCopy(x) when Return(x) // should be used, and generates some helpful error message. - GTEST_COMPILE_ASSERT_( - std::is_reference::value, - use_Return_instead_of_ReturnRefOfCopy_to_return_a_value); + static_assert(std::is_reference::value, + "use Return instead of ReturnRefOfCopy to return a value"); return Action(new Impl(value_)); } @@ -839,7 +1231,7 @@ class ReturnRoundRobinAction { template T operator()(Args&&...) const { - return state_->Next(); + return state_->Next(); } private: @@ -862,7 +1254,9 @@ class DoDefaultAction { // This template type conversion operator allows DoDefault() to be // used in any function. template - operator Action() const { return Action(); } // NOLINT + operator Action() const { + return Action(); + } // NOLINT }; // Implements the Assign action to set a given pointer referent to a @@ -890,8 +1284,7 @@ template class SetErrnoAndReturnAction { public: SetErrnoAndReturnAction(int errno_value, T result) - : errno_(errno_value), - result_(result) {} + : errno_(errno_value), result_(result) {} template Result Perform(const ArgumentTuple& /* args */) const { errno = errno_; @@ -1002,8 +1395,8 @@ class IgnoreResultAction { private: // Type OriginalFunction is the same as F except that its return // type is IgnoredValue. 
- typedef typename internal::Function::MakeResultIgnoredValue - OriginalFunction; + typedef + typename internal::Function::MakeResultIgnoredValue OriginalFunction; const Action action_; }; @@ -1013,55 +1406,239 @@ class IgnoreResultAction { template struct WithArgsAction { - InnerAction action; + InnerAction inner_action; - // The inner action could be anything convertible to Action. - // We use the conversion operator to detect the signature of the inner Action. + // The signature of the function as seen by the inner action, given an out + // action with the given result and argument types. template - operator Action() const { // NOLINT - using TupleType = std::tuple; - Action::type...)> - converted(action); + using InnerSignature = + R(typename std::tuple_element>::type...); - return [converted](Args... args) -> R { + // Rather than a call operator, we must define conversion operators to + // particular action types. This is necessary for embedded actions like + // DoDefault(), which rely on an action conversion operators rather than + // providing a call operator because even with a particular set of arguments + // they don't have a fixed return type. + + template >::type...)>>::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + struct OA { + OnceAction> inner_action; + + R operator()(Args&&... args) && { + return std::move(inner_action) + .Call(std::get( + std::forward_as_tuple(std::forward(args)...))...); + } + }; + + return OA{std::move(inner_action)}; + } + + template >::type...)>>::value, + int>::type = 0> + operator Action() const { // NOLINT + Action> converted(inner_action); + + return [converted](Args&&... args) -> R { return converted.Perform(std::forward_as_tuple( - std::get(std::forward_as_tuple(std::forward(args)...))...)); + std::get(std::forward_as_tuple(std::forward(args)...))...)); }; } }; template -struct DoAllAction { - private: +class DoAllAction; + +// Base case: only a single action. 
+template +class DoAllAction { + public: + struct UserConstructorTag {}; + template - using NonFinalType = + explicit DoAllAction(UserConstructorTag, T&& action) + : final_action_(std::forward(action)) {} + + // Rather than a call operator, we must define conversion operators to + // particular action types. This is necessary for embedded actions like + // DoDefault(), which rely on an action conversion operators rather than + // providing a call operator because even with a particular set of arguments + // they don't have a fixed return type. + + template >::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + return std::move(final_action_); + } + + template < + typename R, typename... Args, + typename std::enable_if< + std::is_convertible>::value, + int>::type = 0> + operator Action() const { // NOLINT + return final_action_; + } + + private: + FinalAction final_action_; +}; + +// Recursive case: support N actions by calling the initial action and then +// calling through to the base class containing N-1 actions. +template +class DoAllAction + : private DoAllAction { + private: + using Base = DoAllAction; + + // The type of reference that should be provided to an initial action for a + // mocked function parameter of type T. + // + // There are two quirks here: + // + // * Unlike most forwarding functions, we pass scalars through by value. + // This isn't strictly necessary because an lvalue reference would work + // fine too and be consistent with other non-reference types, but it's + // perhaps less surprising. + // + // For example if the mocked function has signature void(int), then it + // might seem surprising for the user's initial action to need to be + // convertible to Action. This is perhaps less + // surprising for a non-scalar type where there may be a performance + // impact, or it might even be impossible, to pass by value. + // + // * More surprisingly, `const T&` is often not a const reference type. 
+ // By the reference collapsing rules in C++17 [dcl.ref]/6, if T refers to + // U& or U&& for some non-scalar type U, then InitialActionArgType is + // U&. In other words, we may hand over a non-const reference. + // + // So for example, given some non-scalar type Obj we have the following + // mappings: + // + // T InitialActionArgType + // ------- ----------------------- + // Obj const Obj& + // Obj& Obj& + // Obj&& Obj& + // const Obj const Obj& + // const Obj& const Obj& + // const Obj&& const Obj& + // + // In other words, the initial actions get a mutable view of an non-scalar + // argument if and only if the mock function itself accepts a non-const + // reference type. They are never given an rvalue reference to an + // non-scalar type. + // + // This situation makes sense if you imagine use with a matcher that is + // designed to write through a reference. For example, if the caller wants + // to fill in a reference argument and then return a canned value: + // + // EXPECT_CALL(mock, Call) + // .WillOnce(DoAll(SetArgReferee<0>(17), Return(19))); + // + template + using InitialActionArgType = typename std::conditional::value, T, const T&>::type; - template - std::vector Convert(IndexSequence) const { - return {ActionT(std::get(actions))...}; - } - public: - std::tuple actions; + struct UserConstructorTag {}; - template - operator Action() const { // NOLINT - struct Op { - std::vector...)>> converted; - Action last; - R operator()(Args... args) const { - auto tuple_args = std::forward_as_tuple(std::forward(args)...); - for (auto& a : converted) { - a.Perform(tuple_args); - } - return last.Perform(std::move(tuple_args)); + template + explicit DoAllAction(UserConstructorTag, T&& initial_action, + U&&... 
other_actions) + : Base({}, std::forward(other_actions)...), + initial_action_(std::forward(initial_action)) {} + + template ...)>>, + std::is_convertible>>::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + // Return an action that first calls the initial action with arguments + // filtered through InitialActionArgType, then forwards arguments directly + // to the base class to deal with the remaining actions. + struct OA { + OnceAction...)> initial_action; + OnceAction remaining_actions; + + R operator()(Args... args) && { + std::move(initial_action) + .Call(static_cast>(args)...); + + return std::move(remaining_actions).Call(std::forward(args)...); } }; - return Op{Convert...)>>( - MakeIndexSequence()), - std::get(actions)}; + + return OA{ + std::move(initial_action_), + std::move(static_cast(*this)), + }; } + + template < + typename R, typename... Args, + typename std::enable_if< + conjunction< + // Both the initial action and the rest must support conversion to + // Action. + std::is_convertible...)>>, + std::is_convertible>>::value, + int>::type = 0> + operator Action() const { // NOLINT + // Return an action that first calls the initial action with arguments + // filtered through InitialActionArgType, then forwards arguments directly + // to the base class to deal with the remaining actions. + struct OA { + Action...)> initial_action; + Action remaining_actions; + + R operator()(Args... args) const { + initial_action.Perform(std::forward_as_tuple( + static_cast>(args)...)); + + return remaining_actions.Perform( + std::forward_as_tuple(std::forward(args)...)); + } + }; + + return OA{ + initial_action_, + static_cast(*this), + }; + } + + private: + InitialAction initial_action_; }; template @@ -1078,10 +1655,11 @@ struct ReturnNewAction { template struct ReturnArgAction { - template - auto operator()(const Args&... 
args) const -> - typename std::tuple_element>::type { - return std::get(std::tie(args...)); + template ::type> + auto operator()(Args&&... args) const -> decltype(std::get( + std::forward_as_tuple(std::forward(args)...))) { + return std::get(std::forward_as_tuple(std::forward(args)...)); } }; @@ -1203,7 +1781,8 @@ typedef internal::IgnoredValue Unused; template internal::DoAllAction::type...> DoAll( Action&&... action) { - return {std::forward_as_tuple(std::forward(action)...)}; + return internal::DoAllAction::type...>( + {}, std::forward(action)...); } // WithArg(an_action) creates an action that passes the k-th @@ -1212,8 +1791,8 @@ internal::DoAllAction::type...> DoAll( // multiple arguments. For convenience, we also provide // WithArgs(an_action) (defined below) as a synonym. template -internal::WithArgsAction::type, k> -WithArg(InnerAction&& action) { +internal::WithArgsAction::type, k> WithArg( + InnerAction&& action) { return {std::forward(action)}; } @@ -1232,14 +1811,35 @@ WithArgs(InnerAction&& action) { // argument. In other words, it adapts an action accepting no // argument to one that accepts (and ignores) arguments. template -internal::WithArgsAction::type> -WithoutArgs(InnerAction&& action) { +internal::WithArgsAction::type> WithoutArgs( + InnerAction&& action) { return {std::forward(action)}; } -// Creates an action that returns 'value'. 'value' is passed by value -// instead of const reference - otherwise Return("string literal") -// will trigger a compiler error about using array as initializer. +// Creates an action that returns a value. +// +// The returned type can be used with a mock function returning a non-void, +// non-reference type U as follows: +// +// * If R is convertible to U and U is move-constructible, then the action can +// be used with WillOnce. +// +// * If const R& is convertible to U and U is copy-constructible, then the +// action can be used with both WillOnce and WillRepeatedly. 
+// +// The mock expectation contains the R value from which the U return value is +// constructed (a move/copy of the argument to Return). This means that the R +// value will survive at least until the mock object's expectations are cleared +// or the mock object is destroyed, meaning that U can safely be a +// reference-like type such as std::string_view: +// +// // The mock function returns a view of a copy of the string fed to +// // Return. The view is valid even after the action is performed. +// MockFunction mock; +// EXPECT_CALL(mock, Call).WillOnce(Return(std::string("taco"))); +// const std::string_view result = mock.AsStdFunction()(); +// EXPECT_EQ("taco", result); +// template internal::ReturnAction Return(R value) { return internal::ReturnAction(std::move(value)); @@ -1273,6 +1873,8 @@ inline internal::ReturnRefOfCopyAction ReturnRefOfCopy(const R& x) { return internal::ReturnRefOfCopyAction(x); } +// DEPRECATED: use Return(x) directly with WillOnce. +// // Modifies the parent action (a Return() action) to perform a move of the // argument instead of a copy. // Return(ByMove()) actions can only be executed once and will assert this @@ -1319,7 +1921,7 @@ internal::SetArgumentPointeeAction SetArgumentPointee(T value) { // Creates an action that sets a pointer referent to a given value. template -PolymorphicAction > Assign(T1* ptr, T2 val) { +PolymorphicAction> Assign(T1* ptr, T2 val) { return MakePolymorphicAction(internal::AssignAction(ptr, val)); } @@ -1327,8 +1929,8 @@ PolymorphicAction > Assign(T1* ptr, T2 val) { // Creates an action that sets errno and returns the appropriate error. 
template -PolymorphicAction > -SetErrnoAndReturn(int errval, T result) { +PolymorphicAction> SetErrnoAndReturn( + int errval, T result) { return MakePolymorphicAction( internal::SetErrnoAndReturnAction(errval, result)); } @@ -1482,7 +2084,8 @@ struct ExcessiveArg {}; // Builds an implementation of an Action<> for some particular signature, using // a class defined by an ACTION* macro. -template struct ActionImpl; +template +struct ActionImpl; template struct ImplBase { @@ -1502,7 +2105,7 @@ struct ActionImpl : ImplBase::type { using args_type = std::tuple; ActionImpl() = default; // Only defined if appropriate for Base. - explicit ActionImpl(std::shared_ptr impl) : Base{std::move(impl)} { } + explicit ActionImpl(std::shared_ptr impl) : Base{std::move(impl)} {} R operator()(Args&&... arg) const { static constexpr size_t kMaxArgs = @@ -1521,12 +2124,14 @@ struct ActionImpl : ImplBase::type { // args_type get passed, followed by a dummy of unspecified type for the // remainder up to 10 explicit args. 
static constexpr ExcessiveArg kExcessArg{}; - return static_cast(*this).template gmock_PerformImpl< - /*function_type=*/function_type, /*return_type=*/R, - /*args_type=*/args_type, - /*argN_type=*/typename std::tuple_element::type...>( - /*args=*/args, std::get(args)..., - ((void)excess_id, kExcessArg)...); + return static_cast(*this) + .template gmock_PerformImpl< + /*function_type=*/function_type, /*return_type=*/R, + /*args_type=*/args_type, + /*argN_type=*/ + typename std::tuple_element::type...>( + /*args=*/args, std::get(args)..., + ((void)excess_id, kExcessArg)...); } }; @@ -1545,7 +2150,7 @@ template #define GMOCK_INTERNAL_ARG_UNUSED(i, data, el) \ , const arg##i##_type& arg##i GTEST_ATTRIBUTE_UNUSED_ -#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_ \ +#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_ \ const args_type& args GTEST_ATTRIBUTE_UNUSED_ GMOCK_PP_REPEAT( \ GMOCK_INTERNAL_ARG_UNUSED, , 10) @@ -1584,42 +2189,47 @@ template #define GMOCK_ACTION_FIELD_PARAMS_(params) \ GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_FIELD_PARAM, , params) -#define GMOCK_INTERNAL_ACTION(name, full_name, params) \ - template \ - class full_name { \ - public: \ - explicit full_name(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ - : impl_(std::make_shared( \ - GMOCK_ACTION_GVALUE_PARAMS_(params))) { } \ - full_name(const full_name&) = default; \ - full_name(full_name&&) noexcept = default; \ - template \ - operator ::testing::Action() const { \ - return ::testing::internal::MakeAction(impl_); \ - } \ - private: \ - class gmock_Impl { \ - public: \ - explicit gmock_Impl(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ - : GMOCK_ACTION_INIT_PARAMS_(params) {} \ - template \ - return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ - GMOCK_ACTION_FIELD_PARAMS_(params) \ - }; \ - std::shared_ptr impl_; \ - }; \ - template \ - inline full_name name( \ - GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) { \ - return full_name( \ - GMOCK_ACTION_GVALUE_PARAMS_(params)); \ - } \ - template \ 
- template \ - return_type full_name::gmock_Impl:: \ - gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const +#define GMOCK_INTERNAL_ACTION(name, full_name, params) \ + template \ + class full_name { \ + public: \ + explicit full_name(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ + : impl_(std::make_shared( \ + GMOCK_ACTION_GVALUE_PARAMS_(params))) {} \ + full_name(const full_name&) = default; \ + full_name(full_name&&) noexcept = default; \ + template \ + operator ::testing::Action() const { \ + return ::testing::internal::MakeAction(impl_); \ + } \ + \ + private: \ + class gmock_Impl { \ + public: \ + explicit gmock_Impl(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ + : GMOCK_ACTION_INIT_PARAMS_(params) {} \ + template \ + return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ + GMOCK_ACTION_FIELD_PARAMS_(params) \ + }; \ + std::shared_ptr impl_; \ + }; \ + template \ + inline full_name name( \ + GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) GTEST_MUST_USE_RESULT_; \ + template \ + inline full_name name( \ + GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) { \ + return full_name( \ + GMOCK_ACTION_GVALUE_PARAMS_(params)); \ + } \ + template \ + template \ + return_type \ + full_name::gmock_Impl::gmock_PerformImpl( \ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const } // namespace internal @@ -1627,12 +2237,13 @@ template #define ACTION(name) \ class name##Action { \ public: \ - explicit name##Action() noexcept {} \ - name##Action(const name##Action&) noexcept {} \ + explicit name##Action() noexcept {} \ + name##Action(const name##Action&) noexcept {} \ template \ operator ::testing::Action() const { \ return ::testing::internal::MakeAction(); \ } \ + \ private: \ class gmock_Impl { \ public: \ @@ -1681,7 +2292,7 @@ template } // namespace testing #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif #endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ diff --git a/ext/googletest/googlemock/include/gmock/gmock-cardinalities.h 
b/ext/googletest/googlemock/include/gmock/gmock-cardinalities.h index fc7f803a7a..b6ab648e50 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-cardinalities.h +++ b/ext/googletest/googlemock/include/gmock/gmock-cardinalities.h @@ -27,21 +27,23 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements some commonly used cardinalities. More // cardinalities can be defined by the user implementing the // CardinalityInterface interface if necessary. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ #include + #include #include // NOLINT + #include "gmock/internal/gmock-port.h" #include "gtest/gtest.h" @@ -116,7 +118,7 @@ class GTEST_API_ Cardinality { // cardinality, i.e. exceed the maximum number of allowed calls. bool IsOverSaturatedByCallCount(int call_count) const { return impl_->IsSaturatedByCallCount(call_count) && - !impl_->IsSatisfiedByCallCount(call_count); + !impl_->IsSatisfiedByCallCount(call_count); } // Describes self to an ostream diff --git a/ext/googletest/googlemock/include/gmock/gmock-function-mocker.h b/ext/googletest/googlemock/include/gmock/gmock-function-mocker.h index 0fc6f6f3f1..f565d980c5 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-function-mocker.h +++ b/ext/googletest/googlemock/include/gmock/gmock-function-mocker.h @@ -31,7 +31,8 @@ // // This file implements MOCK_METHOD. 
-// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ // NOLINT #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ // NOLINT @@ -64,6 +65,39 @@ struct ThisRefAdjuster { } }; +constexpr bool PrefixOf(const char* a, const char* b) { + return *a == 0 || (*a == *b && internal::PrefixOf(a + 1, b + 1)); +} + +template +constexpr bool StartsWith(const char (&prefix)[N], const char (&str)[M]) { + return N <= M && internal::PrefixOf(prefix, str); +} + +template +constexpr bool EndsWith(const char (&suffix)[N], const char (&str)[M]) { + return N <= M && internal::PrefixOf(suffix, str + M - N); +} + +template +constexpr bool Equals(const char (&a)[N], const char (&b)[M]) { + return N == M && internal::PrefixOf(a, b); +} + +template +constexpr bool ValidateSpec(const char (&spec)[N]) { + return internal::Equals("const", spec) || + internal::Equals("override", spec) || + internal::Equals("final", spec) || + internal::Equals("noexcept", spec) || + (internal::StartsWith("noexcept(", spec) && + internal::EndsWith(")", spec)) || + internal::Equals("ref(&)", spec) || + internal::Equals("ref(&&)", spec) || + (internal::StartsWith("Calltype(", spec) && + internal::EndsWith(")", spec)); +} + } // namespace internal // The style guide prohibits "using" statements in a namespace scope @@ -86,17 +120,18 @@ using internal::FunctionMocker; #define GMOCK_INTERNAL_MOCK_METHOD_ARG_3(_Ret, _MethodName, _Args) \ GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, ()) -#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec) \ - GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args); \ - GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec); \ - GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ - GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)); \ - GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ - GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ - GMOCK_PP_NARG0 _Args, 
_MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec), \ - GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec), \ - GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec), \ - GMOCK_INTERNAL_GET_CALLTYPE(_Spec), GMOCK_INTERNAL_GET_REF_SPEC(_Spec), \ +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec) \ + GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args); \ + GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec); \ + GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ + GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)); \ + GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ + GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ + GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec), \ + GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec), \ + GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec), \ + GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Spec), \ + GMOCK_INTERNAL_GET_REF_SPEC(_Spec), \ (GMOCK_INTERNAL_SIGNATURE(_Ret, _Args))) #define GMOCK_INTERNAL_MOCK_METHOD_ARG_5(...) \ @@ -166,11 +201,11 @@ using internal::FunctionMocker; GMOCK_INTERNAL_A_MATCHER_ARGUMENT, _Signature, _N)); \ } \ mutable ::testing::FunctionMocker \ - GMOCK_MOCKER_(_N, _Constness, _MethodName) + GMOCK_MOCKER_(_N, _Constness, _MethodName) #define GMOCK_INTERNAL_EXPAND(...) __VA_ARGS__ -// Five Valid modifiers. +// Valid modifiers. 
#define GMOCK_INTERNAL_HAS_CONST(_Tuple) \ GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_CONST, ~, _Tuple)) @@ -189,6 +224,14 @@ using internal::FunctionMocker; GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)), \ _elem, ) +#define GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE, ~, _Tuple) + +#define GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_IF( \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem)), \ + GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) + #define GMOCK_INTERNAL_GET_REF_SPEC(_Tuple) \ GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_REF_SPEC_IF_REF, ~, _Tuple) @@ -196,19 +239,25 @@ using internal::FunctionMocker; GMOCK_PP_IF(GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)), \ GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) -#define GMOCK_INTERNAL_GET_CALLTYPE(_Tuple) \ - GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_CALLTYPE_IMPL, ~, _Tuple) - -#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ - static_assert( \ - (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \ - GMOCK_INTERNAL_IS_CALLTYPE(_elem)) == 1, \ - GMOCK_PP_STRINGIZE( \ +#ifdef GMOCK_INTERNAL_STRICT_SPEC_ASSERT +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + ::testing::internal::ValidateSpec(GMOCK_PP_STRINGIZE(_elem)), \ + "Token \'" GMOCK_PP_STRINGIZE( \ + _elem) "\' cannot be recognized as a valid specification " \ + "modifier. 
Is a ',' missing?"); +#else +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem))) == 1, \ + GMOCK_PP_STRINGIZE( \ _elem) " cannot be recognized as a valid specification modifier."); +#endif // GMOCK_INTERNAL_STRICT_SPEC_ASSERT // Modifiers implementation. #define GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem) \ @@ -238,26 +287,12 @@ using internal::FunctionMocker; #define GMOCK_INTERNAL_UNPACK_ref(x) x -#define GMOCK_INTERNAL_GET_CALLTYPE_IMPL(_i, _, _elem) \ - GMOCK_PP_IF(GMOCK_INTERNAL_IS_CALLTYPE(_elem), \ - GMOCK_INTERNAL_GET_VALUE_CALLTYPE, GMOCK_PP_EMPTY) \ - (_elem) +#define GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CALLTYPE_I_, _elem) -// TODO(iserna): GMOCK_INTERNAL_IS_CALLTYPE and -// GMOCK_INTERNAL_GET_VALUE_CALLTYPE needed more expansions to work on windows -// maybe they can be simplified somehow. -#define GMOCK_INTERNAL_IS_CALLTYPE(_arg) \ - GMOCK_INTERNAL_IS_CALLTYPE_I( \ - GMOCK_PP_CAT(GMOCK_INTERNAL_IS_CALLTYPE_HELPER_, _arg)) -#define GMOCK_INTERNAL_IS_CALLTYPE_I(_arg) GMOCK_PP_IS_ENCLOSED_PARENS(_arg) +#define GMOCK_INTERNAL_DETECT_CALLTYPE_I_Calltype , -#define GMOCK_INTERNAL_GET_VALUE_CALLTYPE(_arg) \ - GMOCK_INTERNAL_GET_VALUE_CALLTYPE_I( \ - GMOCK_PP_CAT(GMOCK_INTERNAL_IS_CALLTYPE_HELPER_, _arg)) -#define GMOCK_INTERNAL_GET_VALUE_CALLTYPE_I(_arg) \ - GMOCK_PP_IDENTITY _arg - -#define GMOCK_INTERNAL_IS_CALLTYPE_HELPER_Calltype +#define GMOCK_INTERNAL_UNPACK_Calltype(...) 
__VA_ARGS__ // Note: The use of `identity_t` here allows _Ret to represent return types that // would normally need to be specified in a different way. For example, a method diff --git a/ext/googletest/googlemock/include/gmock/gmock-matchers.h b/ext/googletest/googlemock/include/gmock/gmock-matchers.h index f1bb22caa9..6282901145 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-matchers.h +++ b/ext/googletest/googlemock/include/gmock/gmock-matchers.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // The MATCHER* family of macros can be used in a namespace scope to @@ -250,7 +249,8 @@ // See googletest/include/gtest/gtest-matchers.h for the definition of class // Matcher, class MatcherInterface, and others. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ @@ -313,7 +313,9 @@ class StringMatchResultListener : public MatchResultListener { private: ::std::stringstream ss_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(StringMatchResultListener); + StringMatchResultListener(const StringMatchResultListener&) = delete; + StringMatchResultListener& operator=(const StringMatchResultListener&) = + delete; }; // Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION @@ -396,7 +398,7 @@ class MatcherCastImpl { // is already a Matcher. This only compiles when type T can be // statically converted to type U. template -class MatcherCastImpl > { +class MatcherCastImpl> { public: static Matcher Cast(const Matcher& source_matcher) { return Matcher(new Impl(source_matcher)); @@ -450,7 +452,7 @@ class MatcherCastImpl > { // This even more specialized version is used for efficiently casting // a matcher to its own type. 
template -class MatcherCastImpl > { +class MatcherCastImpl> { public: static Matcher Cast(const Matcher& matcher) { return matcher; } }; @@ -533,19 +535,18 @@ inline Matcher SafeMatcherCast(const Matcher& matcher) { "T must be implicitly convertible to U"); // Enforce that we are not converting a non-reference type T to a reference // type U. - GTEST_COMPILE_ASSERT_( - std::is_reference::value || !std::is_reference::value, - cannot_convert_non_reference_arg_to_reference); + static_assert(std::is_reference::value || !std::is_reference::value, + "cannot convert non reference arg to reference"); // In case both T and U are arithmetic types, enforce that the // conversion is not lossy. typedef GTEST_REMOVE_REFERENCE_AND_CONST_(T) RawT; typedef GTEST_REMOVE_REFERENCE_AND_CONST_(U) RawU; constexpr bool kTIsOther = GMOCK_KIND_OF_(RawT) == internal::kOther; constexpr bool kUIsOther = GMOCK_KIND_OF_(RawU) == internal::kOther; - GTEST_COMPILE_ASSERT_( + static_assert( kTIsOther || kUIsOther || - (internal::LosslessArithmeticConvertible::value), - conversion_of_arithmetic_types_must_be_lossless); + (internal::LosslessArithmeticConvertible::value), + "conversion of arithmetic types must be lossless"); return MatcherCast(matcher); } @@ -678,9 +679,9 @@ bool TupleMatches(const MatcherTuple& matcher_tuple, const ValueTuple& value_tuple) { // Makes sure that matcher_tuple and value_tuple have the same // number of fields. - GTEST_COMPILE_ASSERT_(std::tuple_size::value == - std::tuple_size::value, - matcher_and_value_have_different_numbers_of_fields); + static_assert(std::tuple_size::value == + std::tuple_size::value, + "matcher and value have different numbers of fields"); return TuplePrefix::value>::Matches(matcher_tuple, value_tuple); } @@ -689,8 +690,7 @@ bool TupleMatches(const MatcherTuple& matcher_tuple, // is no failure, nothing will be streamed to os. 
template void ExplainMatchFailureTupleTo(const MatcherTuple& matchers, - const ValueTuple& values, - ::std::ostream* os) { + const ValueTuple& values, ::std::ostream* os) { TuplePrefix::value>::ExplainMatchFailuresTo( matchers, values, os); } @@ -714,14 +714,14 @@ class TransformTupleValuesHelper { private: template struct IterateOverTuple { - OutIter operator() (Func f, const Tup& t, OutIter out) const { + OutIter operator()(Func f, const Tup& t, OutIter out) const { *out++ = f(::std::get(t)); return IterateOverTuple()(f, t, out); } }; template struct IterateOverTuple { - OutIter operator() (Func /* f */, const Tup& /* t */, OutIter out) const { + OutIter operator()(Func /* f */, const Tup& /* t */, OutIter out) const { return out; } }; @@ -767,9 +767,7 @@ class IsNullMatcher { } void DescribeTo(::std::ostream* os) const { *os << "is NULL"; } - void DescribeNegationTo(::std::ostream* os) const { - *os << "isn't NULL"; - } + void DescribeNegationTo(::std::ostream* os) const { *os << "isn't NULL"; } }; // Implements the polymorphic NotNull() matcher, which matches any raw or smart @@ -783,9 +781,7 @@ class NotNullMatcher { } void DescribeTo(::std::ostream* os) const { *os << "isn't NULL"; } - void DescribeNegationTo(::std::ostream* os) const { - *os << "is NULL"; - } + void DescribeNegationTo(::std::ostream* os) const { *os << "is NULL"; } }; // Ref(variable) matches any argument that is a reference to @@ -871,8 +867,7 @@ inline bool CaseInsensitiveCStringEquals(const wchar_t* lhs, // String comparison for narrow or wide strings that can have embedded NUL // characters. template -bool CaseInsensitiveStringEquals(const StringType& s1, - const StringType& s2) { +bool CaseInsensitiveStringEquals(const StringType& s1, const StringType& s2) { // Are the heads equal? 
if (!CaseInsensitiveCStringEquals(s1.c_str(), s2.c_str())) { return false; @@ -933,8 +928,8 @@ class StrEqualityMatcher { bool MatchAndExplain(const MatcheeStringType& s, MatchResultListener* /* listener */) const { const StringType s2(s); - const bool eq = case_sensitive_ ? s2 == string_ : - CaseInsensitiveStringEquals(s2, string_); + const bool eq = case_sensitive_ ? s2 == string_ + : CaseInsensitiveStringEquals(s2, string_); return expect_eq_ == eq; } @@ -1021,8 +1016,7 @@ class HasSubstrMatcher { template class StartsWithMatcher { public: - explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) { - } + explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) {} #if GTEST_INTERNAL_HAS_STRING_VIEW bool MatchAndExplain(const internal::StringView& s, @@ -1053,7 +1047,7 @@ class StartsWithMatcher { MatchResultListener* /* listener */) const { const StringType& s2(s); return s2.length() >= prefix_.length() && - s2.substr(0, prefix_.length()) == prefix_; + s2.substr(0, prefix_.length()) == prefix_; } void DescribeTo(::std::ostream* os) const { @@ -1107,7 +1101,7 @@ class EndsWithMatcher { MatchResultListener* /* listener */) const { const StringType& s2(s); return s2.length() >= suffix_.length() && - s2.substr(s2.length() - suffix_.length()) == suffix_; + s2.substr(s2.length() - suffix_.length()) == suffix_; } void DescribeTo(::std::ostream* os) const { @@ -1124,6 +1118,45 @@ class EndsWithMatcher { const StringType suffix_; }; +// Implements the polymorphic WhenBase64Unescaped(matcher) matcher, which can be +// used as a Matcher as long as T can be converted to a string. +class WhenBase64UnescapedMatcher { + public: + using is_gtest_matcher = void; + + explicit WhenBase64UnescapedMatcher( + const Matcher& internal_matcher) + : internal_matcher_(internal_matcher) {} + + // Matches anything that can convert to std::string. 
+ template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* listener) const { + const std::string s2(s); // NOLINT (needed for working with string_view). + std::string unescaped; + if (!internal::Base64Unescape(s2, &unescaped)) { + if (listener != nullptr) { + *listener << "is not a valid base64 escaped string"; + } + return false; + } + return MatchPrintAndExplain(unescaped, internal_matcher_, listener); + } + + void DescribeTo(::std::ostream* os) const { + *os << "matches after Base64Unescape "; + internal_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "does not match after Base64Unescape "; + internal_matcher_.DescribeTo(os); + } + + private: + const Matcher internal_matcher_; +}; + // Implements a matcher that compares the two fields of a 2-tuple // using one of the ==, <=, <, etc, operators. The two fields being // compared don't have to have the same type. @@ -1197,8 +1230,7 @@ class Ge2Matcher : public PairMatchBase { template class NotMatcherImpl : public MatcherInterface { public: - explicit NotMatcherImpl(const Matcher& matcher) - : matcher_(matcher) {} + explicit NotMatcherImpl(const Matcher& matcher) : matcher_(matcher) {} bool MatchAndExplain(const T& x, MatchResultListener* listener) const override { @@ -1242,7 +1274,7 @@ class NotMatcher { template class AllOfMatcherImpl : public MatcherInterface { public: - explicit AllOfMatcherImpl(std::vector > matchers) + explicit AllOfMatcherImpl(std::vector> matchers) : matchers_(std::move(matchers)) {} void DescribeTo(::std::ostream* os) const override { @@ -1293,7 +1325,7 @@ class AllOfMatcherImpl : public MatcherInterface { } private: - const std::vector > matchers_; + const std::vector> matchers_; }; // VariadicMatcher is used for the variadic implementation of @@ -1316,14 +1348,14 @@ class VariadicMatcher { // all of the provided matchers (Matcher1, Matcher2, ...) can match. 
template operator Matcher() const { - std::vector > values; + std::vector> values; CreateVariadicMatcher(&values, std::integral_constant()); return Matcher(new CombiningMatcher(std::move(values))); } private: template - void CreateVariadicMatcher(std::vector >* values, + void CreateVariadicMatcher(std::vector>* values, std::integral_constant) const { values->push_back(SafeMatcherCast(std::get(matchers_))); CreateVariadicMatcher(values, std::integral_constant()); @@ -1331,7 +1363,7 @@ class VariadicMatcher { template void CreateVariadicMatcher( - std::vector >*, + std::vector>*, std::integral_constant) const {} std::tuple matchers_; @@ -1347,7 +1379,7 @@ using AllOfMatcher = VariadicMatcher; template class AnyOfMatcherImpl : public MatcherInterface { public: - explicit AnyOfMatcherImpl(std::vector > matchers) + explicit AnyOfMatcherImpl(std::vector> matchers) : matchers_(std::move(matchers)) {} void DescribeTo(::std::ostream* os) const override { @@ -1398,7 +1430,7 @@ class AnyOfMatcherImpl : public MatcherInterface { } private: - const std::vector > matchers_; + const std::vector> matchers_; }; // AnyOfMatcher is used for the variadic implementation of AnyOf(m_1, m_2, ...). @@ -1425,8 +1457,6 @@ class ConditionalMatcher { bool condition_; MatcherTrue matcher_true_; MatcherFalse matcher_false_; - - GTEST_DISALLOW_ASSIGN_(ConditionalMatcher); }; // Wrapper for implementation of Any/AllOfArray(). @@ -1478,8 +1508,7 @@ class TrulyMatcher { // We cannot write 'return !!predicate_(x);' as that doesn't work // when predicate_(x) returns a class convertible to bool but // having no operator!(). - if (predicate_(x)) - return true; + if (predicate_(x)) return true; *listener << "didn't satisfy the given predicate"; return false; } @@ -1587,8 +1616,8 @@ class PredicateFormatterFromMatcher { // used for implementing ASSERT_THAT() and EXPECT_THAT(). // Implementation detail: 'matcher' is received by-value to force decaying. 
template -inline PredicateFormatterFromMatcher -MakePredicateFormatterFromMatcher(M matcher) { +inline PredicateFormatterFromMatcher MakePredicateFormatterFromMatcher( + M matcher) { return PredicateFormatterFromMatcher(std::move(matcher)); } @@ -1603,9 +1632,7 @@ class IsNanMatcher { } void DescribeTo(::std::ostream* os) const { *os << "is NaN"; } - void DescribeNegationTo(::std::ostream* os) const { - *os << "isn't NaN"; - } + void DescribeNegationTo(::std::ostream* os) const { *os << "isn't NaN"; } }; // Implements the polymorphic floating point equality matcher, which matches @@ -1621,9 +1648,8 @@ class FloatingEqMatcher { // equality comparisons between NANs will always return false. We specify a // negative max_abs_error_ term to indicate that ULP-based approximation will // be used for comparison. - FloatingEqMatcher(FloatType expected, bool nan_eq_nan) : - expected_(expected), nan_eq_nan_(nan_eq_nan), max_abs_error_(-1) { - } + FloatingEqMatcher(FloatType expected, bool nan_eq_nan) + : expected_(expected), nan_eq_nan_(nan_eq_nan), max_abs_error_(-1) {} // Constructor that supports a user-specified max_abs_error that will be used // for comparison instead of ULP-based approximation. The max absolute @@ -1685,8 +1711,8 @@ class FloatingEqMatcher { // os->precision() returns the previously set precision, which we // store to restore the ostream to its original configuration // after outputting. - const ::std::streamsize old_precision = os->precision( - ::std::numeric_limits::digits10 + 2); + const ::std::streamsize old_precision = + os->precision(::std::numeric_limits::digits10 + 2); if (FloatingPoint(expected_).is_nan()) { if (nan_eq_nan_) { *os << "is NaN"; @@ -1704,8 +1730,8 @@ class FloatingEqMatcher { void DescribeNegationTo(::std::ostream* os) const override { // As before, get original precision. 
- const ::std::streamsize old_precision = os->precision( - ::std::numeric_limits::digits10 + 2); + const ::std::streamsize old_precision = + os->precision(::std::numeric_limits::digits10 + 2); if (FloatingPoint(expected_).is_nan()) { if (nan_eq_nan_) { *os << "isn't NaN"; @@ -1723,9 +1749,7 @@ class FloatingEqMatcher { } private: - bool HasMaxAbsError() const { - return max_abs_error_ >= 0; - } + bool HasMaxAbsError() const { return max_abs_error_ >= 0; } const FloatType expected_; const bool nan_eq_nan_; @@ -1797,9 +1821,8 @@ class FloatingEq2Matcher { template class Impl : public MatcherInterface { public: - Impl(FloatType max_abs_error, bool nan_eq_nan) : - max_abs_error_(max_abs_error), - nan_eq_nan_(nan_eq_nan) {} + Impl(FloatType max_abs_error, bool nan_eq_nan) + : max_abs_error_(max_abs_error), nan_eq_nan_(nan_eq_nan) {} bool MatchAndExplain(Tuple args, MatchResultListener* listener) const override { @@ -1975,9 +1998,7 @@ class WhenDynamicCastToMatcherBase { protected: const Matcher matcher_; - static std::string GetToName() { - return GetTypeName(); - } + static std::string GetToName() { return GetTypeName(); } private: static void GetCastTypeDescription(::std::ostream* os) { @@ -2114,7 +2135,7 @@ class PropertyMatcher { } template - bool MatchAndExplain(const T&value, MatchResultListener* listener) const { + bool MatchAndExplain(const T& value, MatchResultListener* listener) const { return MatchAndExplainImpl( typename std::is_pointer::type>::type(), value, listener); @@ -2166,16 +2187,16 @@ struct CallableTraits { // Specialization for function pointers. 
template -struct CallableTraits { +struct CallableTraits { typedef ResType ResultType; - typedef ResType(*StorageType)(ArgType); + typedef ResType (*StorageType)(ArgType); - static void CheckIsValid(ResType(*f)(ArgType)) { + static void CheckIsValid(ResType (*f)(ArgType)) { GTEST_CHECK_(f != nullptr) << "NULL function pointer is passed into ResultOf()."; } template - static ResType Invoke(ResType(*f)(ArgType), T arg) { + static ResType Invoke(ResType (*f)(ArgType), T arg) { return (*f)(arg); } }; @@ -2186,13 +2207,21 @@ template class ResultOfMatcher { public: ResultOfMatcher(Callable callable, InnerMatcher matcher) - : callable_(std::move(callable)), matcher_(std::move(matcher)) { + : ResultOfMatcher(/*result_description=*/"", std::move(callable), + std::move(matcher)) {} + + ResultOfMatcher(const std::string& result_description, Callable callable, + InnerMatcher matcher) + : result_description_(result_description), + callable_(std::move(callable)), + matcher_(std::move(matcher)) { CallableTraits::CheckIsValid(callable_); } template operator Matcher() const { - return Matcher(new Impl(callable_, matcher_)); + return Matcher( + new Impl(result_description_, callable_, matcher_)); } private: @@ -2205,21 +2234,36 @@ class ResultOfMatcher { public: template - Impl(const CallableStorageType& callable, const M& matcher) - : callable_(callable), matcher_(MatcherCast(matcher)) {} + Impl(const std::string& result_description, + const CallableStorageType& callable, const M& matcher) + : result_description_(result_description), + callable_(callable), + matcher_(MatcherCast(matcher)) {} void DescribeTo(::std::ostream* os) const override { - *os << "is mapped by the given callable to a value that "; + if (result_description_.empty()) { + *os << "is mapped by the given callable to a value that "; + } else { + *os << "whose " << result_description_ << " "; + } matcher_.DescribeTo(os); } void DescribeNegationTo(::std::ostream* os) const override { - *os << "is mapped by the given 
callable to a value that "; + if (result_description_.empty()) { + *os << "is mapped by the given callable to a value that "; + } else { + *os << "whose " << result_description_ << " "; + } matcher_.DescribeNegationTo(os); } bool MatchAndExplain(T obj, MatchResultListener* listener) const override { - *listener << "which is mapped by the given callable to "; + if (result_description_.empty()) { + *listener << "which is mapped by the given callable to "; + } else { + *listener << "whose " << result_description_ << " is "; + } // Cannot pass the return value directly to MatchPrintAndExplain, which // takes a non-const reference as argument. // Also, specifying template argument explicitly is needed because T could @@ -2230,6 +2274,7 @@ class ResultOfMatcher { } private: + const std::string result_description_; // Functors often define operator() as non-const method even though // they are actually stateless. But we need to use them even when // 'this' is a const pointer. It's the user's responsibility not to @@ -2239,6 +2284,7 @@ class ResultOfMatcher { const Matcher matcher_; }; // class Impl + const std::string result_description_; const CallableStorageType callable_; const InnerMatcher matcher_; }; @@ -2248,8 +2294,7 @@ template class SizeIsMatcher { public: explicit SizeIsMatcher(const SizeMatcher& size_matcher) - : size_matcher_(size_matcher) { - } + : size_matcher_(size_matcher) {} template operator Matcher() const { @@ -2277,8 +2322,8 @@ class SizeIsMatcher { SizeType size = container.size(); StringMatchResultListener size_listener; const bool result = size_matcher_.MatchAndExplain(size, &size_listener); - *listener - << "whose size " << size << (result ? " matches" : " doesn't match"); + *listener << "whose size " << size + << (result ? 
" matches" : " doesn't match"); PrintIfNotEmpty(size_listener.str(), listener->stream()); return result; } @@ -2307,8 +2352,9 @@ class BeginEndDistanceIsMatcher { template class Impl : public MatcherInterface { public: - typedef internal::StlContainerView< - GTEST_REMOVE_REFERENCE_AND_CONST_(Container)> ContainerView; + typedef internal::StlContainerView + ContainerView; typedef typename std::iterator_traits< typename ContainerView::type::const_iterator>::difference_type DistanceType; @@ -2388,18 +2434,15 @@ class ContainerEqMatcher { typedef internal::StlContainerView< typename std::remove_const::type> LhsView; - typedef typename LhsView::type LhsStlContainer; StlContainerReference lhs_stl_container = LhsView::ConstReference(lhs); - if (lhs_stl_container == expected_) - return true; + if (lhs_stl_container == expected_) return true; ::std::ostream* const os = listener->stream(); if (os != nullptr) { // Something is different. Check for extra values first. bool printed_header = false; - for (typename LhsStlContainer::const_iterator it = - lhs_stl_container.begin(); - it != lhs_stl_container.end(); ++it) { + for (auto it = lhs_stl_container.begin(); it != lhs_stl_container.end(); + ++it) { if (internal::ArrayAwareFind(expected_.begin(), expected_.end(), *it) == expected_.end()) { if (printed_header) { @@ -2414,11 +2457,10 @@ class ContainerEqMatcher { // Now check for missing values. 
bool printed_header2 = false; - for (typename StlContainer::const_iterator it = expected_.begin(); - it != expected_.end(); ++it) { - if (internal::ArrayAwareFind( - lhs_stl_container.begin(), lhs_stl_container.end(), *it) == - lhs_stl_container.end()) { + for (auto it = expected_.begin(); it != expected_.end(); ++it) { + if (internal::ArrayAwareFind(lhs_stl_container.begin(), + lhs_stl_container.end(), + *it) == lhs_stl_container.end()) { if (printed_header2) { *os << ", "; } else { @@ -2441,7 +2483,9 @@ class ContainerEqMatcher { // A comparator functor that uses the < operator to compare two values. struct LessComparator { template - bool operator()(const T& lhs, const U& rhs) const { return lhs < rhs; } + bool operator()(const T& lhs, const U& rhs) const { + return lhs < rhs; + } }; // Implements WhenSortedBy(comparator, container_matcher). @@ -2460,14 +2504,16 @@ class WhenSortedByMatcher { template class Impl : public MatcherInterface { public: - typedef internal::StlContainerView< - GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView; + typedef internal::StlContainerView + LhsView; typedef typename LhsView::type LhsStlContainer; typedef typename LhsView::const_reference LhsStlContainerReference; // Transforms std::pair into std::pair // so that we can match associative containers. 
- typedef typename RemoveConstFromKey< - typename LhsStlContainer::value_type>::type LhsValue; + typedef + typename RemoveConstFromKey::type + LhsValue; Impl(const Comparator& comparator, const ContainerMatcher& matcher) : comparator_(comparator), matcher_(matcher) {} @@ -2487,8 +2533,8 @@ class WhenSortedByMatcher { LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs); ::std::vector sorted_container(lhs_stl_container.begin(), lhs_stl_container.end()); - ::std::sort( - sorted_container.begin(), sorted_container.end(), comparator_); + ::std::sort(sorted_container.begin(), sorted_container.end(), + comparator_); if (!listener->IsInterested()) { // If the listener is not interested, we do not need to @@ -2501,8 +2547,8 @@ class WhenSortedByMatcher { *listener << " when sorted"; StringMatchResultListener inner_listener; - const bool match = matcher_.MatchAndExplain(sorted_container, - &inner_listener); + const bool match = + matcher_.MatchAndExplain(sorted_container, &inner_listener); PrintIfNotEmpty(inner_listener.str(), listener->stream()); return match; } @@ -2511,7 +2557,8 @@ class WhenSortedByMatcher { const Comparator comparator_; const Matcher&> matcher_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(Impl); + Impl(const Impl&) = delete; + Impl& operator=(const Impl&) = delete; }; private: @@ -2525,9 +2572,9 @@ class WhenSortedByMatcher { // container and the RHS container respectively. 
template class PointwiseMatcher { - GTEST_COMPILE_ASSERT_( + static_assert( !IsHashTable::value, - use_UnorderedPointwise_with_hash_tables); + "use UnorderedPointwise with hash tables"); public: typedef internal::StlContainerView RhsView; @@ -2546,9 +2593,9 @@ class PointwiseMatcher { template operator Matcher() const { - GTEST_COMPILE_ASSERT_( + static_assert( !IsHashTable::value, - use_UnorderedPointwise_with_hash_tables); + "use UnorderedPointwise with hash tables"); return Matcher( new Impl(tuple_matcher_, rhs_)); @@ -2557,8 +2604,9 @@ class PointwiseMatcher { template class Impl : public MatcherInterface { public: - typedef internal::StlContainerView< - GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView; + typedef internal::StlContainerView + LhsView; typedef typename LhsView::type LhsStlContainer; typedef typename LhsView::const_reference LhsStlContainerReference; typedef typename LhsStlContainer::value_type LhsValue; @@ -2598,8 +2646,8 @@ class PointwiseMatcher { return false; } - typename LhsStlContainer::const_iterator left = lhs_stl_container.begin(); - typename RhsStlContainer::const_iterator right = rhs_.begin(); + auto left = lhs_stl_container.begin(); + auto right = rhs_.begin(); for (size_t i = 0; i != actual_size; ++i, ++left, ++right) { if (listener->IsInterested()) { StringMatchResultListener inner_listener; @@ -2652,18 +2700,17 @@ class QuantifierMatcherImpl : public MatcherInterface { template explicit QuantifierMatcherImpl(InnerMatcher inner_matcher) : inner_matcher_( - testing::SafeMatcherCast(inner_matcher)) {} + testing::SafeMatcherCast(inner_matcher)) {} // Checks whether: // * All elements in the container match, if all_elements_should_match. // * Any element in the container matches, if !all_elements_should_match. 
- bool MatchAndExplainImpl(bool all_elements_should_match, - Container container, + bool MatchAndExplainImpl(bool all_elements_should_match, Container container, MatchResultListener* listener) const { StlContainerReference stl_container = View::ConstReference(container); size_t i = 0; - for (typename StlContainer::const_iterator it = stl_container.begin(); - it != stl_container.end(); ++it, ++i) { + for (auto it = stl_container.begin(); it != stl_container.end(); + ++it, ++i) { StringMatchResultListener inner_listener; const bool matches = inner_matcher_.MatchAndExplain(*it, &inner_listener); @@ -2906,8 +2953,7 @@ class KeyMatcherImpl : public MatcherInterface { template explicit KeyMatcherImpl(InnerMatcher inner_matcher) : inner_matcher_( - testing::SafeMatcherCast(inner_matcher)) { - } + testing::SafeMatcherCast(inner_matcher)) {} // Returns true if and only if 'key_value.first' (the key) matches the inner // matcher. @@ -3012,8 +3058,7 @@ class PairMatcherImpl : public MatcherInterface { : first_matcher_( testing::SafeMatcherCast(first_matcher)), second_matcher_( - testing::SafeMatcherCast(second_matcher)) { - } + testing::SafeMatcherCast(second_matcher)) {} // Describes what this matcher does. void DescribeTo(::std::ostream* os) const override { @@ -3091,7 +3136,7 @@ class PairMatcher { : first_matcher_(first_matcher), second_matcher_(second_matcher) {} template - operator Matcher () const { + operator Matcher() const { return Matcher( new PairMatcherImpl(first_matcher_, second_matcher_)); } @@ -3363,7 +3408,7 @@ class ElementsAreMatcherImpl : public MatcherInterface { // explanations[i] is the explanation of the element at index i. ::std::vector explanations(count()); StlContainerReference stl_container = View::ConstReference(container); - typename StlContainer::const_iterator it = stl_container.begin(); + auto it = stl_container.begin(); size_t exam_pos = 0; bool mismatch_found = false; // Have we found a mismatched element yet? 
@@ -3440,7 +3485,7 @@ class ElementsAreMatcherImpl : public MatcherInterface { size_t count() const { return matchers_.size(); } - ::std::vector > matchers_; + ::std::vector> matchers_; }; // Connectivity matrix of (elements X matchers), in element-major order. @@ -3452,8 +3497,7 @@ class GTEST_API_ MatchMatrix { MatchMatrix(size_t num_elements, size_t num_matchers) : num_elements_(num_elements), num_matchers_(num_matchers), - matched_(num_elements_* num_matchers_, 0) { - } + matched_(num_elements_ * num_matchers_, 0) {} size_t LhsSize() const { return num_elements_; } size_t RhsSize() const { return num_matchers_; } @@ -3492,8 +3536,7 @@ typedef ::std::vector ElementMatcherPairs; // Returns a maximum bipartite matching for the specified graph 'g'. // The matching is represented as a vector of {element, matcher} pairs. -GTEST_API_ ElementMatcherPairs -FindMaxBipartiteMatching(const MatchMatrix& g); +GTEST_API_ ElementMatcherPairs FindMaxBipartiteMatching(const MatchMatrix& g); struct UnorderedMatcherRequire { enum Flags { @@ -3530,9 +3573,7 @@ class GTEST_API_ UnorderedElementsAreMatcherImplBase { bool FindPairing(const MatchMatrix& matrix, MatchResultListener* listener) const; - MatcherDescriberVec& matcher_describers() { - return matcher_describers_; - } + MatcherDescriberVec& matcher_describers() { return matcher_describers_; } static Message Elements(size_t n) { return Message() << n << " element" << (n == 1 ? "" : "s"); @@ -3556,7 +3597,6 @@ class UnorderedElementsAreMatcherImpl typedef internal::StlContainerView View; typedef typename View::type StlContainer; typedef typename View::const_reference StlContainerReference; - typedef typename StlContainer::const_iterator StlContainerConstIterator; typedef typename StlContainer::value_type Element; template @@ -3639,7 +3679,7 @@ class UnorderedElementsAreMatcherImpl return matrix; } - ::std::vector > matchers_; + ::std::vector> matchers_; }; // Functor for use in TransformTuple. 
@@ -3664,7 +3704,7 @@ class UnorderedElementsAreMatcher { typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer; typedef typename internal::StlContainerView::type View; typedef typename View::value_type Element; - typedef ::std::vector > MatcherVec; + typedef ::std::vector> MatcherVec; MatcherVec matchers; matchers.reserve(::std::tuple_size::value); TransformTupleValues(CastAndAppendTransform(), matchers_, @@ -3687,15 +3727,15 @@ class ElementsAreMatcher { template operator Matcher() const { - GTEST_COMPILE_ASSERT_( + static_assert( !IsHashTable::value || ::std::tuple_size::value < 2, - use_UnorderedElementsAre_with_hash_tables); + "use UnorderedElementsAre with hash tables"); typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer; typedef typename internal::StlContainerView::type View; typedef typename View::value_type Element; - typedef ::std::vector > MatcherVec; + typedef ::std::vector> MatcherVec; MatcherVec matchers; matchers.reserve(::std::tuple_size::value); TransformTupleValues(CastAndAppendTransform(), matchers_, @@ -3738,9 +3778,9 @@ class ElementsAreArrayMatcher { template operator Matcher() const { - GTEST_COMPILE_ASSERT_( + static_assert( !IsHashTable::value, - use_UnorderedElementsAreArray_with_hash_tables); + "use UnorderedElementsAreArray with hash tables"); return Matcher(new ElementsAreMatcherImpl( matchers_.begin(), matchers_.end())); @@ -3830,9 +3870,9 @@ BoundSecondMatcher MatcherBindSecond( // 'negation' is false; otherwise returns the description of the // negation of the matcher. 'param_values' contains a list of strings // that are the print-out of the matcher's parameters. 
-GTEST_API_ std::string FormatMatcherDescription(bool negation, - const char* matcher_name, - const Strings& param_values); +GTEST_API_ std::string FormatMatcherDescription( + bool negation, const char* matcher_name, + const std::vector& param_names, const Strings& param_values); // Implements a matcher that checks the value of a optional<> type variable. template @@ -4155,14 +4195,14 @@ UnorderedElementsAreArray(Iter first, Iter last) { } template -inline internal::UnorderedElementsAreArrayMatcher -UnorderedElementsAreArray(const T* pointer, size_t count) { +inline internal::UnorderedElementsAreArrayMatcher UnorderedElementsAreArray( + const T* pointer, size_t count) { return UnorderedElementsAreArray(pointer, pointer + count); } template -inline internal::UnorderedElementsAreArrayMatcher -UnorderedElementsAreArray(const T (&array)[N]) { +inline internal::UnorderedElementsAreArrayMatcher UnorderedElementsAreArray( + const T (&array)[N]) { return UnorderedElementsAreArray(array, N); } @@ -4174,8 +4214,8 @@ UnorderedElementsAreArray(const Container& container) { } template -inline internal::UnorderedElementsAreArrayMatcher -UnorderedElementsAreArray(::std::initializer_list xs) { +inline internal::UnorderedElementsAreArrayMatcher UnorderedElementsAreArray( + ::std::initializer_list xs) { return UnorderedElementsAreArray(xs.begin(), xs.end()); } @@ -4209,14 +4249,14 @@ Matcher internal::MatcherCastImpl::CastImpl( } // Creates a polymorphic matcher that matches any NULL pointer. -inline PolymorphicMatcher IsNull() { +inline PolymorphicMatcher IsNull() { return MakePolymorphicMatcher(internal::IsNullMatcher()); } // Creates a polymorphic matcher that matches any non-NULL pointer. // This is convenient as Not(NULL) doesn't compile (the compiler // thinks that that expression is comparing a pointer with an integer). 
-inline PolymorphicMatcher NotNull() { +inline PolymorphicMatcher NotNull() { return MakePolymorphicMatcher(internal::NotNullMatcher()); } @@ -4247,8 +4287,8 @@ inline internal::FloatingEqMatcher NanSensitiveDoubleEq(double rhs) { // Creates a matcher that matches any double argument approximately equal to // rhs, up to the specified max absolute error bound, where two NANs are // considered unequal. The max absolute error bound must be non-negative. -inline internal::FloatingEqMatcher DoubleNear( - double rhs, double max_abs_error) { +inline internal::FloatingEqMatcher DoubleNear(double rhs, + double max_abs_error) { return internal::FloatingEqMatcher(rhs, false, max_abs_error); } @@ -4275,8 +4315,8 @@ inline internal::FloatingEqMatcher NanSensitiveFloatEq(float rhs) { // Creates a matcher that matches any float argument approximately equal to // rhs, up to the specified max absolute error bound, where two NANs are // considered unequal. The max absolute error bound must be non-negative. -inline internal::FloatingEqMatcher FloatNear( - float rhs, float max_abs_error) { +inline internal::FloatingEqMatcher FloatNear(float rhs, + float max_abs_error) { return internal::FloatingEqMatcher(rhs, false, max_abs_error); } @@ -4304,7 +4344,7 @@ inline internal::PointeeMatcher Pointee( // If To is a reference and the cast fails, this matcher returns false // immediately. template -inline PolymorphicMatcher > +inline PolymorphicMatcher> WhenDynamicCastTo(const Matcher& inner_matcher) { return MakePolymorphicMatcher( internal::WhenDynamicCastToMatcher(inner_matcher)); @@ -4316,12 +4356,10 @@ WhenDynamicCastTo(const Matcher& inner_matcher) { // Field(&Foo::number, Ge(5)) // matches a Foo object x if and only if x.number >= 5. 
template -inline PolymorphicMatcher< - internal::FieldMatcher > Field( +inline PolymorphicMatcher> Field( FieldType Class::*field, const FieldMatcher& matcher) { - return MakePolymorphicMatcher( - internal::FieldMatcher( - field, MatcherCast(matcher))); + return MakePolymorphicMatcher(internal::FieldMatcher( + field, MatcherCast(matcher))); // The call to MatcherCast() is required for supporting inner // matchers of compatible types. For example, it allows // Field(&Foo::bar, m) @@ -4331,7 +4369,7 @@ inline PolymorphicMatcher< // Same as Field() but also takes the name of the field to provide better error // messages. template -inline PolymorphicMatcher > Field( +inline PolymorphicMatcher> Field( const std::string& field_name, FieldType Class::*field, const FieldMatcher& matcher) { return MakePolymorphicMatcher(internal::FieldMatcher( @@ -4344,7 +4382,7 @@ inline PolymorphicMatcher > Field( // matches a Foo object x if and only if x.str() starts with "hi". template inline PolymorphicMatcher > + Class, PropertyType, PropertyType (Class::*)() const>> Property(PropertyType (Class::*property)() const, const PropertyMatcher& matcher) { return MakePolymorphicMatcher( @@ -4361,7 +4399,7 @@ Property(PropertyType (Class::*property)() const, // better error messages. template inline PolymorphicMatcher > + Class, PropertyType, PropertyType (Class::*)() const>> Property(const std::string& property_name, PropertyType (Class::*property)() const, const PropertyMatcher& matcher) { @@ -4374,8 +4412,8 @@ Property(const std::string& property_name, // The same as above but for reference-qualified member functions. 
template inline PolymorphicMatcher > -Property(PropertyType (Class::*property)() const &, + Class, PropertyType, PropertyType (Class::*)() const&>> +Property(PropertyType (Class::*property)() const&, const PropertyMatcher& matcher) { return MakePolymorphicMatcher( internal::PropertyMatcher inline PolymorphicMatcher > + Class, PropertyType, PropertyType (Class::*)() const&>> Property(const std::string& property_name, - PropertyType (Class::*property)() const &, + PropertyType (Class::*property)() const&, const PropertyMatcher& matcher) { return MakePolymorphicMatcher( internal::PropertyMatcher internal::ResultOfMatcher ResultOf( Callable callable, InnerMatcher matcher) { + return internal::ResultOfMatcher(std::move(callable), + std::move(matcher)); +} + +// Same as ResultOf() above, but also takes a description of the `callable` +// result to provide better error messages. +template +internal::ResultOfMatcher ResultOf( + const std::string& result_description, Callable callable, + InnerMatcher matcher) { return internal::ResultOfMatcher( - std::move(callable), std::move(matcher)); + result_description, std::move(callable), std::move(matcher)); } // String matchers. // Matches a string equal to str. template -PolymorphicMatcher > StrEq( +PolymorphicMatcher> StrEq( const internal::StringLike& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(std::string(str), true, true)); @@ -4423,7 +4471,7 @@ PolymorphicMatcher > StrEq( // Matches a string not equal to str. template -PolymorphicMatcher > StrNe( +PolymorphicMatcher> StrNe( const internal::StringLike& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(std::string(str), false, true)); @@ -4431,7 +4479,7 @@ PolymorphicMatcher > StrNe( // Matches a string equal to str, ignoring case. 
template -PolymorphicMatcher > StrCaseEq( +PolymorphicMatcher> StrCaseEq( const internal::StringLike& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(std::string(str), true, false)); @@ -4439,7 +4487,7 @@ PolymorphicMatcher > StrCaseEq( // Matches a string not equal to str, ignoring case. template -PolymorphicMatcher > StrCaseNe( +PolymorphicMatcher> StrCaseNe( const internal::StringLike& str) { return MakePolymorphicMatcher(internal::StrEqualityMatcher( std::string(str), false, false)); @@ -4448,7 +4496,7 @@ PolymorphicMatcher > StrCaseNe( // Creates a matcher that matches any string, std::string, or C string // that contains the given substring. template -PolymorphicMatcher > HasSubstr( +PolymorphicMatcher> HasSubstr( const internal::StringLike& substring) { return MakePolymorphicMatcher( internal::HasSubstrMatcher(std::string(substring))); @@ -4456,7 +4504,7 @@ PolymorphicMatcher > HasSubstr( // Matches a string that starts with 'prefix' (case-sensitive). template -PolymorphicMatcher > StartsWith( +PolymorphicMatcher> StartsWith( const internal::StringLike& prefix) { return MakePolymorphicMatcher( internal::StartsWithMatcher(std::string(prefix))); @@ -4464,7 +4512,7 @@ PolymorphicMatcher > StartsWith( // Matches a string that ends with 'suffix' (case-sensitive). template -PolymorphicMatcher > EndsWith( +PolymorphicMatcher> EndsWith( const internal::StringLike& suffix) { return MakePolymorphicMatcher( internal::EndsWithMatcher(std::string(suffix))); @@ -4474,50 +4522,50 @@ PolymorphicMatcher > EndsWith( // Wide string matchers. // Matches a string equal to str. -inline PolymorphicMatcher > StrEq( +inline PolymorphicMatcher> StrEq( const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, true, true)); } // Matches a string not equal to str. 
-inline PolymorphicMatcher > StrNe( +inline PolymorphicMatcher> StrNe( const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, false, true)); } // Matches a string equal to str, ignoring case. -inline PolymorphicMatcher > -StrCaseEq(const std::wstring& str) { +inline PolymorphicMatcher> StrCaseEq( + const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, true, false)); } // Matches a string not equal to str, ignoring case. -inline PolymorphicMatcher > -StrCaseNe(const std::wstring& str) { +inline PolymorphicMatcher> StrCaseNe( + const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, false, false)); } // Creates a matcher that matches any ::wstring, std::wstring, or C wide string // that contains the given substring. -inline PolymorphicMatcher > HasSubstr( +inline PolymorphicMatcher> HasSubstr( const std::wstring& substring) { return MakePolymorphicMatcher( internal::HasSubstrMatcher(substring)); } // Matches a string that starts with 'prefix' (case-sensitive). -inline PolymorphicMatcher > -StartsWith(const std::wstring& prefix) { +inline PolymorphicMatcher> StartsWith( + const std::wstring& prefix) { return MakePolymorphicMatcher( internal::StartsWithMatcher(prefix)); } // Matches a string that ends with 'suffix' (case-sensitive). -inline PolymorphicMatcher > EndsWith( +inline PolymorphicMatcher> EndsWith( const std::wstring& suffix) { return MakePolymorphicMatcher( internal::EndsWithMatcher(suffix)); @@ -4612,8 +4660,8 @@ inline internal::NotMatcher Not(InnerMatcher m) { // predicate. The predicate can be any unary function or functor // whose return type can be implicitly converted to bool. 
template -inline PolymorphicMatcher > -Truly(Predicate pred) { +inline PolymorphicMatcher> Truly( + Predicate pred) { return MakePolymorphicMatcher(internal::TrulyMatcher(pred)); } @@ -4624,8 +4672,8 @@ Truly(Predicate pred) { // EXPECT_THAT(container, SizeIs(2)); // Checks container has 2 elements. // EXPECT_THAT(container, SizeIs(Le(2)); // Checks container has at most 2. template -inline internal::SizeIsMatcher -SizeIs(const SizeMatcher& size_matcher) { +inline internal::SizeIsMatcher SizeIs( + const SizeMatcher& size_matcher) { return internal::SizeIsMatcher(size_matcher); } @@ -4635,8 +4683,8 @@ SizeIs(const SizeMatcher& size_matcher) { // do not implement size(). The container must provide const_iterator (with // valid iterator_traits), begin() and end(). template -inline internal::BeginEndDistanceIsMatcher -BeginEndDistanceIs(const DistanceMatcher& distance_matcher) { +inline internal::BeginEndDistanceIsMatcher BeginEndDistanceIs( + const DistanceMatcher& distance_matcher) { return internal::BeginEndDistanceIsMatcher(distance_matcher); } @@ -4645,8 +4693,8 @@ BeginEndDistanceIs(const DistanceMatcher& distance_matcher) { // values that are included in one container but not the other. (Duplicate // values and order differences are not explained.) template -inline PolymorphicMatcher::type>> +inline PolymorphicMatcher< + internal::ContainerEqMatcher::type>> ContainerEq(const Container& rhs) { return MakePolymorphicMatcher(internal::ContainerEqMatcher(rhs)); } @@ -4654,9 +4702,8 @@ ContainerEq(const Container& rhs) { // Returns a matcher that matches a container that, when sorted using // the given comparator, matches container_matcher. 
template -inline internal::WhenSortedByMatcher -WhenSortedBy(const Comparator& comparator, - const ContainerMatcher& container_matcher) { +inline internal::WhenSortedByMatcher WhenSortedBy( + const Comparator& comparator, const ContainerMatcher& container_matcher) { return internal::WhenSortedByMatcher( comparator, container_matcher); } @@ -4666,9 +4713,9 @@ WhenSortedBy(const Comparator& comparator, template inline internal::WhenSortedByMatcher WhenSorted(const ContainerMatcher& container_matcher) { - return - internal::WhenSortedByMatcher( - internal::LessComparator(), container_matcher); + return internal::WhenSortedByMatcher( + internal::LessComparator(), container_matcher); } // Matches an STL-style container or a native array that contains the @@ -4685,15 +4732,13 @@ Pointwise(const TupleMatcher& tuple_matcher, const Container& rhs) { rhs); } - // Supports the Pointwise(m, {a, b, c}) syntax. template -inline internal::PointwiseMatcher > Pointwise( +inline internal::PointwiseMatcher> Pointwise( const TupleMatcher& tuple_matcher, std::initializer_list rhs) { return Pointwise(tuple_matcher, std::vector(rhs)); } - // UnorderedPointwise(pair_matcher, rhs) matches an STL-style // container or a native array that contains the same number of // elements as in rhs, where in some permutation of the container, its @@ -4722,22 +4767,20 @@ UnorderedPointwise(const Tuple2Matcher& tuple2_matcher, RhsView::ConstReference(rhs_container); // Create a matcher for each element in rhs_container. - ::std::vector > matchers; - for (typename RhsStlContainer::const_iterator it = rhs_stl_container.begin(); - it != rhs_stl_container.end(); ++it) { - matchers.push_back( - internal::MatcherBindSecond(tuple2_matcher, *it)); + ::std::vector> matchers; + for (auto it = rhs_stl_container.begin(); it != rhs_stl_container.end(); + ++it) { + matchers.push_back(internal::MatcherBindSecond(tuple2_matcher, *it)); } // Delegate the work to UnorderedElementsAreArray(). 
return UnorderedElementsAreArray(matchers); } - // Supports the UnorderedPointwise(m, {a, b, c}) syntax. template inline internal::UnorderedElementsAreArrayMatcher< - typename internal::BoundSecondMatcher > + typename internal::BoundSecondMatcher> UnorderedPointwise(const Tuple2Matcher& tuple2_matcher, std::initializer_list rhs) { return UnorderedPointwise(tuple2_matcher, std::vector(rhs)); @@ -4943,16 +4986,16 @@ inline internal::KeyMatcher Key(M inner_matcher) { // to match a std::map that contains exactly one element whose key // is >= 5 and whose value equals "foo". template -inline internal::PairMatcher -Pair(FirstMatcher first_matcher, SecondMatcher second_matcher) { - return internal::PairMatcher( - first_matcher, second_matcher); +inline internal::PairMatcher Pair( + FirstMatcher first_matcher, SecondMatcher second_matcher) { + return internal::PairMatcher(first_matcher, + second_matcher); } namespace no_adl { // Conditional() creates a matcher that conditionally uses either the first or // second matcher provided. For example, we could create an `equal if, and only -// if' matcher using the Conditonal wrapper as follows: +// if' matcher using the Conditional wrapper as follows: // // EXPECT_THAT(result, Conditional(condition, Eq(expected), Ne(expected))); template @@ -4988,6 +5031,14 @@ inline internal::AddressMatcher Address( const InnerMatcher& inner_matcher) { return internal::AddressMatcher(inner_matcher); } + +// Matches a base64 escaped string, when the unescaped string matches the +// internal matcher. +template +internal::WhenBase64UnescapedMatcher WhenBase64Unescaped( + const MatcherType& internal_matcher) { + return internal::WhenBase64UnescapedMatcher(internal_matcher); +} } // namespace no_adl // Returns a predicate that is satisfied by anything that matches the @@ -5006,8 +5057,8 @@ inline bool Value(const T& value, M matcher) { // Matches the value against the given matcher and explains the match // result to listener. 
template -inline bool ExplainMatchResult( - M matcher, const T& value, MatchResultListener* listener) { +inline bool ExplainMatchResult(M matcher, const T& value, + MatchResultListener* listener) { return SafeMatcherCast(matcher).MatchAndExplain(value, listener); } @@ -5017,7 +5068,8 @@ inline bool ExplainMatchResult( // // MATCHER_P(XAndYThat, matcher, // "X that " + DescribeMatcher(matcher, negation) + -// " and Y that " + DescribeMatcher(matcher, negation)) { +// (negation ? " or" : " and") + " Y that " + +// DescribeMatcher(matcher, negation)) { // return ExplainMatchResult(matcher, arg.x(), result_listener) && // ExplainMatchResult(matcher, arg.y(), result_listener); // } @@ -5166,7 +5218,9 @@ internal::ArgsMatcher::type, k...> Args( // // EXPECT_CALL(foo, Bar(_, _)).With(Eq()); template -inline InnerMatcher AllArgs(const InnerMatcher& matcher) { return matcher; } +inline InnerMatcher AllArgs(const InnerMatcher& matcher) { + return matcher; +} // Returns a matcher that matches the value of an optional<> type variable. // The matcher implementation only uses '!arg' and requires that the optional<> @@ -5184,7 +5238,7 @@ inline internal::OptionalMatcher Optional( // Returns a matcher that matches the value of a absl::any type variable. template -PolymorphicMatcher > AnyWith( +PolymorphicMatcher> AnyWith( const Matcher& matcher) { return MakePolymorphicMatcher( internal::any_cast_matcher::AnyCastMatcher(matcher)); @@ -5195,7 +5249,7 @@ PolymorphicMatcher > AnyWith( // functions. // It is compatible with std::variant. 
template -PolymorphicMatcher > VariantWith( +PolymorphicMatcher> VariantWith( const Matcher& matcher) { return MakePolymorphicMatcher( internal::variant_matcher::VariantMatcher(matcher)); @@ -5224,7 +5278,8 @@ class WithWhatMatcherImpl { template bool MatchAndExplain(const Err& err, MatchResultListener* listener) const { - *listener << "which contains .what() that "; + *listener << "which contains .what() (of value = " << err.what() + << ") that "; return matcher_.MatchAndExplain(err.what(), listener); } @@ -5374,12 +5429,14 @@ PolymorphicMatcher> ThrowsMessage( // tests. ASSERT_THAT(value, matcher) and EXPECT_THAT(value, matcher) // succeed if and only if the value matches the matcher. If the assertion // fails, the value and the description of the matcher will be printed. -#define ASSERT_THAT(value, matcher) ASSERT_PRED_FORMAT1(\ - ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) -#define EXPECT_THAT(value, matcher) EXPECT_PRED_FORMAT1(\ - ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) +#define ASSERT_THAT(value, matcher) \ + ASSERT_PRED_FORMAT1( \ + ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) +#define EXPECT_THAT(value, matcher) \ + EXPECT_PRED_FORMAT1( \ + ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) -// MATCHER* macroses itself are listed below. +// MATCHER* macros itself are listed below. 
#define MATCHER(name, description) \ class name##Matcher \ : public ::testing::internal::MatcherBaseImpl { \ @@ -5400,12 +5457,13 @@ PolymorphicMatcher> ThrowsMessage( \ private: \ ::std::string FormatDescription(bool negation) const { \ + /* NOLINTNEXTLINE readability-redundant-string-init */ \ ::std::string gmock_description = (description); \ if (!gmock_description.empty()) { \ return gmock_description; \ } \ return ::testing::internal::FormatMatcherDescription(negation, #name, \ - {}); \ + {}, {}); \ } \ }; \ }; \ @@ -5417,33 +5475,41 @@ PolymorphicMatcher> ThrowsMessage( const #define MATCHER_P(name, p0, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP, description, (p0)) -#define MATCHER_P2(name, p0, p1, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP2, description, (p0, p1)) -#define MATCHER_P3(name, p0, p1, p2, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP3, description, (p0, p1, p2)) -#define MATCHER_P4(name, p0, p1, p2, p3, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP4, description, (p0, p1, p2, p3)) + GMOCK_INTERNAL_MATCHER(name, name##MatcherP, description, (#p0), (p0)) +#define MATCHER_P2(name, p0, p1, description) \ + GMOCK_INTERNAL_MATCHER(name, name##MatcherP2, description, (#p0, #p1), \ + (p0, p1)) +#define MATCHER_P3(name, p0, p1, p2, description) \ + GMOCK_INTERNAL_MATCHER(name, name##MatcherP3, description, (#p0, #p1, #p2), \ + (p0, p1, p2)) +#define MATCHER_P4(name, p0, p1, p2, p3, description) \ + GMOCK_INTERNAL_MATCHER(name, name##MatcherP4, description, \ + (#p0, #p1, #p2, #p3), (p0, p1, p2, p3)) #define MATCHER_P5(name, p0, p1, p2, p3, p4, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP5, description, \ - (p0, p1, p2, p3, p4)) + (#p0, #p1, #p2, #p3, #p4), (p0, p1, p2, p3, p4)) #define MATCHER_P6(name, p0, p1, p2, p3, p4, p5, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP6, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5), \ (p0, p1, p2, p3, p4, p5)) #define 
MATCHER_P7(name, p0, p1, p2, p3, p4, p5, p6, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP7, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6), \ (p0, p1, p2, p3, p4, p5, p6)) #define MATCHER_P8(name, p0, p1, p2, p3, p4, p5, p6, p7, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP8, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6, #p7), \ (p0, p1, p2, p3, p4, p5, p6, p7)) #define MATCHER_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP9, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6, #p7, #p8), \ (p0, p1, p2, p3, p4, p5, p6, p7, p8)) #define MATCHER_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP10, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6, #p7, #p8, #p9), \ (p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) -#define GMOCK_INTERNAL_MATCHER(name, full_name, description, args) \ +#define GMOCK_INTERNAL_MATCHER(name, full_name, description, arg_names, args) \ template \ class full_name : public ::testing::internal::MatcherBaseImpl< \ full_name> { \ @@ -5472,7 +5538,7 @@ PolymorphicMatcher> ThrowsMessage( return gmock_description; \ } \ return ::testing::internal::FormatMatcherDescription( \ - negation, #name, \ + negation, #name, {GMOCK_PP_REMOVE_PARENS(arg_names)}, \ ::testing::internal::UniversalTersePrintTupleFieldsToStrings( \ ::std::tuple( \ GMOCK_INTERNAL_MATCHER_MEMBERS_USAGE(args)))); \ diff --git a/ext/googletest/googlemock/include/gmock/gmock-more-actions.h b/ext/googletest/googlemock/include/gmock/gmock-more-actions.h index fd293358a2..148ac01721 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-more-actions.h +++ b/ext/googletest/googlemock/include/gmock/gmock-more-actions.h @@ -27,12 +27,12 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. 
// // This file implements some commonly used variadic actions. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_ @@ -129,170 +129,207 @@ // Declares the template parameters. #define GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0 -#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1) kind0 name0, kind1 name1 +#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, name1) \ + kind0 name0, kind1 name1 #define GMOCK_INTERNAL_DECL_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2) kind0 name0, kind1 name1, kind2 name2 + kind2, name2) \ + kind0 name0, kind1 name1, kind2 name2 #define GMOCK_INTERNAL_DECL_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3) kind0 name0, kind1 name1, kind2 name2, \ - kind3 name3 -#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4) kind0 name0, kind1 name1, \ - kind2 name2, kind3 name3, kind4 name4 + kind2, name2, kind3, name3) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3 +#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4 #define GMOCK_INTERNAL_DECL_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5) kind0 name0, \ - kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5 -#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6) kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ - kind5 name5, kind6 name6 -#define 
GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7) kind0 name0, kind1 name1, kind2 name2, kind3 name3, \ - kind4 name4, kind5 name5, kind6 name6, kind7 name7 -#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7, kind8, name8) kind0 name0, kind1 name1, kind2 name2, \ - kind3 name3, kind4 name4, kind5 name5, kind6 name6, kind7 name7, \ - kind8 name8 -#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6, kind7, name7, kind8, name8, kind9, name9) kind0 name0, \ - kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5, \ - kind6 name6, kind7 name7, kind8 name8, kind9 name9 + kind2, name2, kind3, name3, \ + kind4, name4, kind5, name5) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5 +#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ + kind5 name5, kind6 name6 +#define GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6, kind7, name7) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ + kind5 name5, kind6 name6, kind7 name7 +#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6, kind7, name7, kind8, name8) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ + kind5 name5, kind6 name6, kind7 name7, kind8 name8 +#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, 
name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6, kind7, name7, kind8, name8, kind9, name9) \ + kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ + kind5 name5, kind6 name6, kind7 name7, kind8 name8, kind9 name9 // Lists the template parameters. #define GMOCK_INTERNAL_LIST_HAS_1_TEMPLATE_PARAMS(kind0, name0) name0 -#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1) name0, name1 +#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, name1) \ + name0, name1 #define GMOCK_INTERNAL_LIST_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2) name0, name1, name2 + kind2, name2) \ + name0, name1, name2 #define GMOCK_INTERNAL_LIST_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3) name0, name1, name2, name3 -#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4) name0, name1, name2, name3, \ - name4 + kind2, name2, kind3, name3) \ + name0, name1, name2, name3 +#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4) \ + name0, name1, name2, name3, name4 #define GMOCK_INTERNAL_LIST_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5) name0, name1, \ - name2, name3, name4, name5 -#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6) name0, name1, name2, name3, name4, name5, name6 -#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7) name0, name1, name2, name3, name4, name5, name6, name7 -#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - 
kind7, name7, kind8, name8) name0, name1, name2, name3, name4, name5, \ - name6, name7, name8 -#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6, kind7, name7, kind8, name8, kind9, name9) name0, name1, name2, \ - name3, name4, name5, name6, name7, name8, name9 + kind2, name2, kind3, name3, \ + kind4, name4, kind5, name5) \ + name0, name1, name2, name3, name4, name5 +#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6) \ + name0, name1, name2, name3, name4, name5, name6 +#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6, kind7, name7) \ + name0, name1, name2, name3, name4, name5, name6, name7 +#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6, kind7, name7, kind8, name8) \ + name0, name1, name2, name3, name4, name5, name6, name7, name8 +#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS( \ + kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \ + kind5, name5, kind6, name6, kind7, name7, kind8, name8, kind9, name9) \ + name0, name1, name2, name3, name4, name5, name6, name7, name8, name9 // Declares the types of value parameters. 
#define GMOCK_INTERNAL_DECL_TYPE_AND_0_VALUE_PARAMS() #define GMOCK_INTERNAL_DECL_TYPE_AND_1_VALUE_PARAMS(p0) , typename p0##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) , \ - typename p0##_type, typename p1##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , \ - typename p0##_type, typename p1##_type, typename p2##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \ - typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \ - typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \ - typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) \ + , typename p0##_type, typename p1##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) \ + , typename p0##_type, typename p1##_type, typename p2##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \ + , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \ + , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \ + , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type #define GMOCK_INTERNAL_DECL_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) , typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type, \ - typename p6##_type + p6) \ + , typename p0##_type, typename p1##_type, typename p2##_type, 
\ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type #define GMOCK_INTERNAL_DECL_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7) , typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type, \ - typename p6##_type, typename p7##_type + p6, p7) \ + , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type, typename p7##_type #define GMOCK_INTERNAL_DECL_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8) , typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type, \ - typename p6##_type, typename p7##_type, typename p8##_type + p6, p7, p8) \ + , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type, typename p7##_type, typename p8##_type #define GMOCK_INTERNAL_DECL_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8, p9) , typename p0##_type, typename p1##_type, \ - typename p2##_type, typename p3##_type, typename p4##_type, \ - typename p5##_type, typename p6##_type, typename p7##_type, \ - typename p8##_type, typename p9##_type + p6, p7, p8, p9) \ + , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type, typename p7##_type, typename p8##_type, \ + typename p9##_type // Initializes the value parameters. 
-#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS()\ - () -#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0)\ - (p0##_type gmock_p0) : p0(::std::move(gmock_p0)) -#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1)\ - (p0##_type gmock_p0, p1##_type gmock_p1) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)) -#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2)\ - (p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)) -#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ +#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS() () +#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0) \ + (p0##_type gmock_p0) : p0(::std::move(gmock_p0)) +#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1) \ + (p0##_type gmock_p0, p1##_type gmock_p1) \ + : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)) +#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)) +#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ p3(::std::move(gmock_p3)) -#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)) -#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)\ - (p0##_type 
gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ +#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ + p3(::std::move(gmock_p3)), \ + p4(::std::move(gmock_p4)) +#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ + p3(::std::move(gmock_p3)), \ + p4(::std::move(gmock_p4)), \ p5(::std::move(gmock_p5)) -#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)) -#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ +#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \ + (p0##_type gmock_p0, p1##_type gmock_p1, 
p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ + p3(::std::move(gmock_p3)), \ + p4(::std::move(gmock_p4)), \ + p5(::std::move(gmock_p5)), \ + p6(::std::move(gmock_p6)) +#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ + p3(::std::move(gmock_p3)), \ + p4(::std::move(gmock_p4)), \ + p5(::std::move(gmock_p5)), \ + p6(::std::move(gmock_p6)), \ p7(::std::move(gmock_p7)) -#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, \ - p8##_type gmock_p8) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)), p8(::std::move(gmock_p8)) +#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \ + p8) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ + p3(::std::move(gmock_p3)), \ + p4(::std::move(gmock_p4)), \ + p5(::std::move(gmock_p5)), \ + p6(::std::move(gmock_p6)), \ + p7(::std::move(gmock_p7)), \ + p8(::std::move(gmock_p8)) #define GMOCK_INTERNAL_INIT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, 
p5, p6, \ - p7, p8, p9)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ - p9##_type gmock_p9) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)), p8(::std::move(gmock_p8)), \ + p7, p8, p9) \ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ + p9##_type gmock_p9) \ + : p0(::std::move(gmock_p0)), \ + p1(::std::move(gmock_p1)), \ + p2(::std::move(gmock_p2)), \ + p3(::std::move(gmock_p3)), \ + p4(::std::move(gmock_p4)), \ + p5(::std::move(gmock_p5)), \ + p6(::std::move(gmock_p6)), \ + p7(::std::move(gmock_p7)), \ + p8(::std::move(gmock_p8)), \ p9(::std::move(gmock_p9)) // Defines the copy constructor #define GMOCK_INTERNAL_DEFN_COPY_AND_0_VALUE_PARAMS() \ - {} // Avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82134 + {} // Avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82134 #define GMOCK_INTERNAL_DEFN_COPY_AND_1_VALUE_PARAMS(...) = default; #define GMOCK_INTERNAL_DEFN_COPY_AND_2_VALUE_PARAMS(...) = default; #define GMOCK_INTERNAL_DEFN_COPY_AND_3_VALUE_PARAMS(...) = default; @@ -307,30 +344,71 @@ // Declares the fields for storing the value parameters. 
#define GMOCK_INTERNAL_DEFN_AND_0_VALUE_PARAMS() #define GMOCK_INTERNAL_DEFN_AND_1_VALUE_PARAMS(p0) p0##_type p0; -#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0; \ - p1##_type p1; -#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0; \ - p1##_type p1; p2##_type p2; -#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0; \ - p1##_type p1; p2##_type p2; p3##_type p3; -#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \ - p4) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; -#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \ - p5) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ - p5##_type p5; -#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ - p5##_type p5; p6##_type p6; -#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ - p5##_type p5; p6##_type p6; p7##_type p7; -#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \ - p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; +#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) \ + p0##_type p0; \ + p1##_type p1; +#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; +#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; +#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; \ + p4##_type p4; +#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; \ + p4##_type p4; 
\ + p5##_type p5; +#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; \ + p4##_type p4; \ + p5##_type p5; \ + p6##_type p6; +#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; \ + p4##_type p4; \ + p5##_type p5; \ + p6##_type p6; \ + p7##_type p7; +#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \ + p8) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; \ + p4##_type p4; \ + p5##_type p5; \ + p6##_type p6; \ + p7##_type p7; \ + p8##_type p8; #define GMOCK_INTERNAL_DEFN_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \ - p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; \ - p9##_type p9; + p7, p8, p9) \ + p0##_type p0; \ + p1##_type p1; \ + p2##_type p2; \ + p3##_type p3; \ + p4##_type p4; \ + p5##_type p5; \ + p6##_type p6; \ + p7##_type p7; \ + p8##_type p8; \ + p9##_type p9; // Lists the value parameters. 
#define GMOCK_INTERNAL_LIST_AND_0_VALUE_PARAMS() @@ -338,72 +416,78 @@ #define GMOCK_INTERNAL_LIST_AND_2_VALUE_PARAMS(p0, p1) p0, p1 #define GMOCK_INTERNAL_LIST_AND_3_VALUE_PARAMS(p0, p1, p2) p0, p1, p2 #define GMOCK_INTERNAL_LIST_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0, p1, p2, p3 -#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) p0, p1, \ - p2, p3, p4 -#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) p0, \ - p1, p2, p3, p4, p5 -#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) p0, p1, p2, p3, p4, p5, p6 -#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) p0, p1, p2, p3, p4, p5, p6, p7 -#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) p0, p1, p2, p3, p4, p5, p6, p7, p8 +#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \ + p0, p1, p2, p3, p4 +#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \ + p0, p1, p2, p3, p4, p5 +#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \ + p0, p1, p2, p3, p4, p5, p6 +#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \ + p0, p1, p2, p3, p4, p5, p6, p7 +#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \ + p8) \ + p0, p1, p2, p3, p4, p5, p6, p7, p8 #define GMOCK_INTERNAL_LIST_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) p0, p1, p2, p3, p4, p5, p6, p7, p8, p9 + p7, p8, p9) \ + p0, p1, p2, p3, p4, p5, p6, p7, p8, p9 // Lists the value parameter types. 
#define GMOCK_INTERNAL_LIST_TYPE_AND_0_VALUE_PARAMS() #define GMOCK_INTERNAL_LIST_TYPE_AND_1_VALUE_PARAMS(p0) , p0##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) , p0##_type, \ - p1##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , p0##_type, \ - p1##_type, p2##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \ - p0##_type, p1##_type, p2##_type, p3##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \ - p0##_type, p1##_type, p2##_type, p3##_type, p4##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \ - p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) \ + , p0##_type, p1##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) \ + , p0##_type, p1##_type, p2##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \ + , p0##_type, p1##_type, p2##_type, p3##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \ + , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \ + , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type #define GMOCK_INTERNAL_LIST_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \ - p6##_type + p6) \ + , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, p6##_type #define GMOCK_INTERNAL_LIST_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ - p5##_type, p6##_type, p7##_type + p6, p7) \ + , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \ + p6##_type, p7##_type #define GMOCK_INTERNAL_LIST_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ - 
p5##_type, p6##_type, p7##_type, p8##_type + p6, p7, p8) \ + , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \ + p6##_type, p7##_type, p8##_type #define GMOCK_INTERNAL_LIST_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8, p9) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ - p5##_type, p6##_type, p7##_type, p8##_type, p9##_type + p6, p7, p8, p9) \ + , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \ + p6##_type, p7##_type, p8##_type, p9##_type // Declares the value parameters. #define GMOCK_INTERNAL_DECL_AND_0_VALUE_PARAMS() #define GMOCK_INTERNAL_DECL_AND_1_VALUE_PARAMS(p0) p0##_type p0 -#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0, \ - p1##_type p1 -#define GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0, \ - p1##_type p1, p2##_type p2 -#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0, \ - p1##_type p1, p2##_type p2, p3##_type p3 -#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \ - p4) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4 -#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \ - p5) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ - p5##_type p5 -#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ - p5##_type p5, p6##_type p6 -#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ - p5##_type p5, p6##_type p6, p7##_type p7 -#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8 +#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) \ + p0##_type p0, p1##_type p1 +#define 
GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) \ + p0##_type p0, p1##_type p1, p2##_type p2 +#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3 +#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4 +#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5 +#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5, p6##_type p6 +#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5, p6##_type p6, p7##_type p7 +#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \ + p8) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8 #define GMOCK_INTERNAL_DECL_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ - p9##_type p9 + p7, p8, p9) \ + p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, p9##_type p9 // The suffix of the class template implementing the action template. 
#define GMOCK_INTERNAL_COUNT_AND_0_VALUE_PARAMS() @@ -415,40 +499,43 @@ #define GMOCK_INTERNAL_COUNT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) P6 #define GMOCK_INTERNAL_COUNT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) P7 #define GMOCK_INTERNAL_COUNT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) P8 + p7) \ + P8 #define GMOCK_INTERNAL_COUNT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) P9 + p7, p8) \ + P9 #define GMOCK_INTERNAL_COUNT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) P10 + p7, p8, p9) \ + P10 // The name of the class template implementing the action template. -#define GMOCK_ACTION_CLASS_(name, value_params)\ - GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) +#define GMOCK_ACTION_CLASS_(name, value_params) \ + GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) #define ACTION_TEMPLATE(name, template_params, value_params) \ template \ + GMOCK_INTERNAL_DECL_TYPE_##value_params> \ class GMOCK_ACTION_CLASS_(name, value_params) { \ public: \ explicit GMOCK_ACTION_CLASS_(name, value_params)( \ GMOCK_INTERNAL_DECL_##value_params) \ GMOCK_PP_IF(GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), \ - = default; , \ + = default; \ + , \ : impl_(std::make_shared( \ - GMOCK_INTERNAL_LIST_##value_params)) { }) \ - GMOCK_ACTION_CLASS_(name, value_params)( \ - const GMOCK_ACTION_CLASS_(name, value_params)&) noexcept \ - GMOCK_INTERNAL_DEFN_COPY_##value_params \ - GMOCK_ACTION_CLASS_(name, value_params)( \ - GMOCK_ACTION_CLASS_(name, value_params)&&) noexcept \ - GMOCK_INTERNAL_DEFN_COPY_##value_params \ - template \ - operator ::testing::Action() const { \ + GMOCK_INTERNAL_LIST_##value_params)){}) \ + GMOCK_ACTION_CLASS_(name, value_params)(const GMOCK_ACTION_CLASS_( \ + name, value_params) &) noexcept GMOCK_INTERNAL_DEFN_COPY_ \ + ##value_params GMOCK_ACTION_CLASS_(name, value_params)( \ + GMOCK_ACTION_CLASS_(name, value_params) &&) noexcept \ + 
GMOCK_INTERNAL_DEFN_COPY_##value_params template \ + operator ::testing::Action() const { \ return GMOCK_PP_IF( \ GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), \ - (::testing::internal::MakeAction()), \ - (::testing::internal::MakeAction(impl_))); \ + (::testing::internal::MakeAction()), \ + (::testing::internal::MakeAction(impl_))); \ } \ + \ private: \ class gmock_Impl { \ public: \ @@ -458,34 +545,35 @@ return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ GMOCK_INTERNAL_DEFN_##value_params \ }; \ - GMOCK_PP_IF(GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), \ - , std::shared_ptr impl_;) \ + GMOCK_PP_IF(GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), , \ + std::shared_ptr impl_;) \ }; \ template \ - GMOCK_ACTION_CLASS_(name, value_params)< \ - GMOCK_INTERNAL_LIST_##template_params \ - GMOCK_INTERNAL_LIST_TYPE_##value_params> name( \ - GMOCK_INTERNAL_DECL_##value_params) GTEST_MUST_USE_RESULT_; \ + GMOCK_INTERNAL_DECL_TYPE_##value_params> \ + GMOCK_ACTION_CLASS_( \ + name, value_params) \ + name(GMOCK_INTERNAL_DECL_##value_params) GTEST_MUST_USE_RESULT_; \ template \ - inline GMOCK_ACTION_CLASS_(name, value_params)< \ - GMOCK_INTERNAL_LIST_##template_params \ - GMOCK_INTERNAL_LIST_TYPE_##value_params> name( \ - GMOCK_INTERNAL_DECL_##value_params) { \ - return GMOCK_ACTION_CLASS_(name, value_params)< \ - GMOCK_INTERNAL_LIST_##template_params \ - GMOCK_INTERNAL_LIST_TYPE_##value_params>( \ - GMOCK_INTERNAL_LIST_##value_params); \ + GMOCK_INTERNAL_DECL_TYPE_##value_params> \ + inline GMOCK_ACTION_CLASS_( \ + name, value_params) \ + name(GMOCK_INTERNAL_DECL_##value_params) { \ + return GMOCK_ACTION_CLASS_( \ + name, value_params)( \ + GMOCK_INTERNAL_LIST_##value_params); \ } \ template \ + GMOCK_INTERNAL_DECL_TYPE_##value_params> \ template \ - return_type GMOCK_ACTION_CLASS_(name, value_params)< \ - GMOCK_INTERNAL_LIST_##template_params \ - GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl::gmock_PerformImpl( \ - 
GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + return_type GMOCK_ACTION_CLASS_( \ + name, value_params):: \ + gmock_Impl::gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) \ + const namespace testing { @@ -495,8 +583,8 @@ namespace testing { // is expanded and macro expansion cannot contain #pragma. Therefore // we suppress them here. #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #endif namespace internal { @@ -512,7 +600,8 @@ auto InvokeArgument(F f, Args... args) -> decltype(f(args...)) { template struct InvokeArgumentAction { - template + template ::type> auto operator()(Args&&... args) const -> decltype(internal::InvokeArgument( std::get(std::forward_as_tuple(std::forward(args)...)), std::declval()...)) { @@ -565,7 +654,7 @@ InvokeArgument(Params&&... params) { } #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif } // namespace testing diff --git a/ext/googletest/googlemock/include/gmock/gmock-more-matchers.h b/ext/googletest/googlemock/include/gmock/gmock-more-matchers.h index dfc77e359c..47aaf98461 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-more-matchers.h +++ b/ext/googletest/googlemock/include/gmock/gmock-more-matchers.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements some matchers that depend on gmock-matchers.h. @@ -35,7 +34,8 @@ // Note that tests are implemented in gmock-matchers_test.cc rather than // gmock-more-matchers-test.cc. 
-// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_MATCHERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_MATCHERS_H_ @@ -47,13 +47,13 @@ namespace testing { // Silence C4100 (unreferenced formal // parameter) for MSVC #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #if (_MSC_VER == 1900) // and silence C4800 (C4800: 'int *const ': forcing value // to bool 'true' or 'false') for MSVC 14 -# pragma warning(disable:4800) - #endif +#pragma warning(disable : 4800) +#endif #endif // Defines a matcher that matches an empty container. The container must @@ -83,10 +83,9 @@ MATCHER(IsFalse, negation ? "is true" : "is false") { } #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif - } // namespace testing #endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_MATCHERS_H_ diff --git a/ext/googletest/googlemock/include/gmock/gmock-nice-strict.h b/ext/googletest/googlemock/include/gmock/gmock-nice-strict.h index b03b770c75..4f0eb35db7 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-nice-strict.h +++ b/ext/googletest/googlemock/include/gmock/gmock-nice-strict.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Implements class templates NiceMock, NaggyMock, and StrictMock. // // Given a mock class MockFoo that is created using Google Mock, @@ -58,11 +57,13 @@ // In particular, nesting NiceMock, NaggyMock, and StrictMock is NOT // supported. 
-// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_NICE_STRICT_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_NICE_STRICT_H_ +#include #include #include "gmock/gmock-spec-builders.h" @@ -109,25 +110,37 @@ constexpr bool HasStrictnessModifier() { template class NiceMockImpl { public: - NiceMockImpl() { ::testing::Mock::AllowUninterestingCalls(this); } + NiceMockImpl() { + ::testing::Mock::AllowUninterestingCalls(reinterpret_cast(this)); + } - ~NiceMockImpl() { ::testing::Mock::UnregisterCallReaction(this); } + ~NiceMockImpl() { + ::testing::Mock::UnregisterCallReaction(reinterpret_cast(this)); + } }; template class NaggyMockImpl { public: - NaggyMockImpl() { ::testing::Mock::WarnUninterestingCalls(this); } + NaggyMockImpl() { + ::testing::Mock::WarnUninterestingCalls(reinterpret_cast(this)); + } - ~NaggyMockImpl() { ::testing::Mock::UnregisterCallReaction(this); } + ~NaggyMockImpl() { + ::testing::Mock::UnregisterCallReaction(reinterpret_cast(this)); + } }; template class StrictMockImpl { public: - StrictMockImpl() { ::testing::Mock::FailUninterestingCalls(this); } + StrictMockImpl() { + ::testing::Mock::FailUninterestingCalls(reinterpret_cast(this)); + } - ~StrictMockImpl() { ::testing::Mock::UnregisterCallReaction(this); } + ~StrictMockImpl() { + ::testing::Mock::UnregisterCallReaction(reinterpret_cast(this)); + } }; } // namespace internal @@ -169,7 +182,8 @@ class GTEST_INTERNAL_EMPTY_BASE_CLASS NiceMock } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(NiceMock); + NiceMock(const NiceMock&) = delete; + NiceMock& operator=(const NiceMock&) = delete; }; template @@ -210,7 +224,8 @@ class GTEST_INTERNAL_EMPTY_BASE_CLASS NaggyMock } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(NaggyMock); + NaggyMock(const NaggyMock&) = delete; + NaggyMock& operator=(const NaggyMock&) = delete; }; template @@ -251,7 +266,8 @@ class GTEST_INTERNAL_EMPTY_BASE_CLASS StrictMock } private: - 
GTEST_DISALLOW_COPY_AND_ASSIGN_(StrictMock); + StrictMock(const StrictMock&) = delete; + StrictMock& operator=(const StrictMock&) = delete; }; #undef GTEST_INTERNAL_EMPTY_BASE_CLASS diff --git a/ext/googletest/googlemock/include/gmock/gmock-spec-builders.h b/ext/googletest/googlemock/include/gmock/gmock-spec-builders.h index 41323c1cc0..45cc605183 100644 --- a/ext/googletest/googlemock/include/gmock/gmock-spec-builders.h +++ b/ext/googletest/googlemock/include/gmock/gmock-spec-builders.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements the ON_CALL() and EXPECT_CALL() macros. @@ -56,11 +55,13 @@ // where all clauses are optional, and .InSequence()/.After()/ // .WillOnce() can appear any number of times. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_ +#include #include #include #include @@ -70,6 +71,7 @@ #include #include #include + #include "gmock/gmock-actions.h" #include "gmock/gmock-cardinalities.h" #include "gmock/gmock-matchers.h" @@ -78,7 +80,7 @@ #include "gtest/gtest.h" #if GTEST_HAS_EXCEPTIONS -# include // NOLINT +#include // NOLINT #endif GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ @@ -97,13 +99,15 @@ class ExpectationSet; namespace internal { // Implements a mock function. -template class FunctionMocker; +template +class FunctionMocker; // Base class for expectations. class ExpectationBase; // Implements an expectation. -template class TypedExpectation; +template +class TypedExpectation; // Helper class for testing the Expectation class template. class ExpectationTester; @@ -129,9 +133,6 @@ class NaggyMockImpl; // calls to ensure the integrity of the mock objects' states. 
GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_gmock_mutex); -// Untyped base class for ActionResultHolder. -class UntypedActionResultHolderBase; - // Abstract base class of FunctionMocker. This is the // type-agnostic part of the function mocker interface. Its pure // virtual methods are implemented by FunctionMocker. @@ -154,27 +155,12 @@ class GTEST_API_ UntypedFunctionMockerBase { // responsibility to guarantee the correctness of the arguments' // types. - // Performs the default action with the given arguments and returns - // the action's result. The call description string will be used in - // the error message to describe the call in the case the default - // action fails. - // L = * - virtual UntypedActionResultHolderBase* UntypedPerformDefaultAction( - void* untyped_args, const std::string& call_description) const = 0; - - // Performs the given action with the given arguments and returns - // the action's result. - // L = * - virtual UntypedActionResultHolderBase* UntypedPerformAction( - const void* untyped_action, void* untyped_args) const = 0; - // Writes a message that the call is uninteresting (i.e. neither // explicitly expected nor explicitly unexpected) to the given // ostream. - virtual void UntypedDescribeUninterestingCall( - const void* untyped_args, - ::std::ostream* os) const - GTEST_LOCK_EXCLUDED_(g_gmock_mutex) = 0; + virtual void UntypedDescribeUninterestingCall(const void* untyped_args, + ::std::ostream* os) const + GTEST_LOCK_EXCLUDED_(g_gmock_mutex) = 0; // Returns the expectation that matches the given function arguments // (or NULL is there's no match); when a match is found, @@ -183,10 +169,9 @@ class GTEST_API_ UntypedFunctionMockerBase { // is_excessive is modified to indicate whether the call exceeds the // expected number. 
virtual const ExpectationBase* UntypedFindMatchingExpectation( - const void* untyped_args, - const void** untyped_action, bool* is_excessive, + const void* untyped_args, const void** untyped_action, bool* is_excessive, ::std::ostream* what, ::std::ostream* why) - GTEST_LOCK_EXCLUDED_(g_gmock_mutex) = 0; + GTEST_LOCK_EXCLUDED_(g_gmock_mutex) = 0; // Prints the given function arguments to the ostream. virtual void UntypedPrintArgs(const void* untyped_args, @@ -196,8 +181,7 @@ class GTEST_API_ UntypedFunctionMockerBase { // this information in the global mock registry. Will be called // whenever an EXPECT_CALL() or ON_CALL() is executed on this mock // method. - void RegisterOwner(const void* mock_obj) - GTEST_LOCK_EXCLUDED_(g_gmock_mutex); + void RegisterOwner(const void* mock_obj) GTEST_LOCK_EXCLUDED_(g_gmock_mutex); // Sets the mock object this mock method belongs to, and sets the // name of the mock function. Will be called upon each invocation @@ -208,20 +192,11 @@ class GTEST_API_ UntypedFunctionMockerBase { // Returns the mock object this mock method belongs to. Must be // called after RegisterOwner() or SetOwnerAndName() has been // called. - const void* MockObject() const - GTEST_LOCK_EXCLUDED_(g_gmock_mutex); + const void* MockObject() const GTEST_LOCK_EXCLUDED_(g_gmock_mutex); // Returns the name of this mock method. Must be called after // SetOwnerAndName() has been called. - const char* Name() const - GTEST_LOCK_EXCLUDED_(g_gmock_mutex); - - // Returns the result of invoking this mock function with the given - // arguments. This function can be safely called from multiple - // threads concurrently. The caller is responsible for deleting the - // result. 
- UntypedActionResultHolderBase* UntypedInvokeWith(void* untyped_args) - GTEST_LOCK_EXCLUDED_(g_gmock_mutex); + const char* Name() const GTEST_LOCK_EXCLUDED_(g_gmock_mutex); protected: typedef std::vector UntypedOnCallSpecs; @@ -430,29 +405,28 @@ class GTEST_API_ Mock { // Tells Google Mock to allow uninteresting calls on the given mock // object. - static void AllowUninterestingCalls(const void* mock_obj) + static void AllowUninterestingCalls(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Tells Google Mock to warn the user about uninteresting calls on // the given mock object. - static void WarnUninterestingCalls(const void* mock_obj) + static void WarnUninterestingCalls(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Tells Google Mock to fail uninteresting calls on the given mock // object. - static void FailUninterestingCalls(const void* mock_obj) + static void FailUninterestingCalls(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Tells Google Mock the given mock object is being destroyed and // its entry in the call-reaction table should be removed. - static void UnregisterCallReaction(const void* mock_obj) + static void UnregisterCallReaction(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Returns the reaction Google Mock will have on uninteresting calls // made on the given mock object. static internal::CallReaction GetReactionOnUninterestingCalls( - const void* mock_obj) - GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); + const void* mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Verifies that all expectations on the given mock object have been // satisfied. Reports one or more Google Test non-fatal failures @@ -465,17 +439,16 @@ class GTEST_API_ Mock { GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex); // Registers a mock object and a mock method it owns. 
- static void Register( - const void* mock_obj, - internal::UntypedFunctionMockerBase* mocker) - GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); + static void Register(const void* mock_obj, + internal::UntypedFunctionMockerBase* mocker) + GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Tells Google Mock where in the source code mock_obj is used in an // ON_CALL or EXPECT_CALL. In case mock_obj is leaked, this // information helps the user identify which object it is. - static void RegisterUseByOnCallOrExpectCall( - const void* mock_obj, const char* file, int line) - GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); + static void RegisterUseByOnCallOrExpectCall(const void* mock_obj, + const char* file, int line) + GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex); // Unregisters a mock method; removes the owning mock object from // the registry when the last mock method associated with it has @@ -632,7 +605,6 @@ class ExpectationSet { Expectation::Set expectations_; }; - // Sequence objects are used by a user to specify the relative order // in which the expectations should match. They are copyable (we rely // on the compiler-defined copy constructor and assignment operator). @@ -678,10 +650,12 @@ class GTEST_API_ InSequence { public: InSequence(); ~InSequence(); + private: bool sequence_created_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(InSequence); // NOLINT + InSequence(const InSequence&) = delete; + InSequence& operator=(const InSequence&) = delete; } GTEST_ATTRIBUTE_UNUSED_; namespace internal { @@ -784,40 +758,34 @@ class GTEST_API_ ExpectationBase { // the current thread. // Retires all pre-requisites of this expectation. - void RetireAllPreRequisites() - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex); + void RetireAllPreRequisites() GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex); // Returns true if and only if this expectation is retired. 
- bool is_retired() const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + bool is_retired() const GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); return retired_; } // Retires this expectation. - void Retire() - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + void Retire() GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); retired_ = true; } // Returns true if and only if this expectation is satisfied. - bool IsSatisfied() const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + bool IsSatisfied() const GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); return cardinality().IsSatisfiedByCallCount(call_count_); } // Returns true if and only if this expectation is saturated. - bool IsSaturated() const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + bool IsSaturated() const GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); return cardinality().IsSaturatedByCallCount(call_count_); } // Returns true if and only if this expectation is over-saturated. - bool IsOverSaturated() const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + bool IsOverSaturated() const GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); return cardinality().IsOverSaturatedByCallCount(call_count_); } @@ -832,15 +800,13 @@ class GTEST_API_ ExpectationBase { GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex); // Returns the number this expectation has been invoked. - int call_count() const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + int call_count() const GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); return call_count_; } // Increments the number this expectation has been invoked. 
- void IncrementCallCount() - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + void IncrementCallCount() GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); call_count_++; } @@ -849,8 +815,7 @@ class GTEST_API_ ExpectationBase { // WillRepeatedly() clauses) against the cardinality if this hasn't // been done before. Prints a warning if there are too many or too // few actions. - void CheckActionCountIfNotDone() const - GTEST_LOCK_EXCLUDED_(mutex_); + void CheckActionCountIfNotDone() const GTEST_LOCK_EXCLUDED_(mutex_); friend class ::testing::Sequence; friend class ::testing::internal::ExpectationTester; @@ -863,12 +828,12 @@ class GTEST_API_ ExpectationBase { // This group of fields are part of the spec and won't change after // an EXPECT_CALL() statement finishes. - const char* file_; // The file that contains the expectation. - int line_; // The line number of the expectation. + const char* file_; // The file that contains the expectation. + int line_; // The line number of the expectation. const std::string source_text_; // The EXPECT_CALL(...) source text. // True if and only if the cardinality is specified explicitly. bool cardinality_specified_; - Cardinality cardinality_; // The cardinality of the expectation. + Cardinality cardinality_; // The cardinality of the expectation. // The immediate pre-requisites (i.e. expectations that must be // satisfied before this expectation can be matched) of this // expectation. We use std::shared_ptr in the set because we want an @@ -887,12 +852,18 @@ class GTEST_API_ ExpectationBase { bool retires_on_saturation_; Clause last_clause_; mutable bool action_count_checked_; // Under mutex_. - mutable Mutex mutex_; // Protects action_count_checked_. -}; // class ExpectationBase + mutable Mutex mutex_; // Protects action_count_checked_. +}; // class ExpectationBase -// Impements an expectation for the given function type. 
template -class TypedExpectation : public ExpectationBase { +class TypedExpectation; + +// Implements an expectation for the given function type. +template +class TypedExpectation : public ExpectationBase { + private: + using F = R(Args...); + public: typedef typename Function::ArgumentTuple ArgumentTuple; typedef typename Function::ArgumentMatcherTuple ArgumentMatcherTuple; @@ -945,9 +916,7 @@ class TypedExpectation : public ExpectationBase { } // Implements the .Times() clause. - TypedExpectation& Times(int n) { - return Times(Exactly(n)); - } + TypedExpectation& Times(int n) { return Times(Exactly(n)); } // Implements the .InSequence() clause. TypedExpectation& InSequence(const Sequence& s) { @@ -1007,14 +976,31 @@ class TypedExpectation : public ExpectationBase { return After(s1, s2, s3, s4).After(s5); } - // Implements the .WillOnce() clause. - TypedExpectation& WillOnce(const Action& action) { + // Preferred, type-safe overload: consume anything that can be directly + // converted to a OnceAction, except for Action objects themselves. + TypedExpectation& WillOnce(OnceAction once_action) { + // Call the overload below, smuggling the OnceAction as a copyable callable. + // We know this is safe because a WillOnce action will not be called more + // than once. + return WillOnce(Action(ActionAdaptor{ + std::make_shared>(std::move(once_action)), + })); + } + + // Fallback overload: accept Action objects and those actions that define + // `operator Action` but not `operator OnceAction`. + // + // This is templated in order to cause the overload above to be preferred + // when the input is convertible to either type. 
+ template + TypedExpectation& WillOnce(Action action) { ExpectSpecProperty(last_clause_ <= kWillOnce, ".WillOnce() cannot appear after " ".WillRepeatedly() or .RetiresOnSaturation()."); last_clause_ = kWillOnce; - untyped_actions_.push_back(new Action(action)); + untyped_actions_.push_back(new Action(std::move(action))); + if (!cardinality_specified()) { set_cardinality(Exactly(static_cast(untyped_actions_.size()))); } @@ -1062,9 +1048,7 @@ class TypedExpectation : public ExpectationBase { // Returns the matchers for the arguments as specified inside the // EXPECT_CALL() macro. - const ArgumentMatcherTuple& matchers() const { - return matchers_; - } + const ArgumentMatcherTuple& matchers() const { return matchers_; } // Returns the matcher specified by the .With() clause. const Matcher& extra_matcher() const { @@ -1088,6 +1072,16 @@ class TypedExpectation : public ExpectationBase { template friend class FunctionMocker; + // An adaptor that turns a OneAction into something compatible with + // Action. Must be called at most once. + struct ActionAdaptor { + std::shared_ptr> once_action; + + R operator()(Args&&... args) const { + return std::move(*once_action).Call(std::forward(args)...); + } + }; + // Returns an Expectation object that references and co-owns this // expectation. Expectation GetHandle() override { return owner_->GetHandleOf(this); } @@ -1119,10 +1113,8 @@ class TypedExpectation : public ExpectationBase { // Describes the result of matching the arguments against this // expectation to the given ostream. 
- void ExplainMatchResultTo( - const ArgumentTuple& args, - ::std::ostream* os) const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + void ExplainMatchResultTo(const ArgumentTuple& args, ::std::ostream* os) const + GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); if (is_retired()) { @@ -1181,9 +1173,9 @@ class TypedExpectation : public ExpectationBase { ::std::stringstream ss; DescribeLocationTo(&ss); ss << "Actions ran out in " << source_text() << "...\n" - << "Called " << count << " times, but only " - << action_count << " WillOnce()" - << (action_count == 1 ? " is" : "s are") << " specified - "; + << "Called " << count << " times, but only " << action_count + << " WillOnce()" << (action_count == 1 ? " is" : "s are") + << " specified - "; mocker->DescribeDefaultActionTo(args, &ss); Log(kWarning, ss.str(), 1); } @@ -1225,7 +1217,7 @@ class TypedExpectation : public ExpectationBase { } // Must be done after IncrementCount()! - *what << "Mock function call matches " << source_text() <<"...\n"; + *what << "Mock function call matches " << source_text() << "...\n"; return &(GetCurrentAction(mocker, args)); } @@ -1236,7 +1228,8 @@ class TypedExpectation : public ExpectationBase { Matcher extra_matcher_; Action repeated_action_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(TypedExpectation); + TypedExpectation(const TypedExpectation&) = delete; + TypedExpectation& operator=(const TypedExpectation&) = delete; }; // class TypedExpectation // A MockSpec object is used by ON_CALL() or EXPECT_CALL() for @@ -1258,8 +1251,8 @@ template class MockSpec { public: typedef typename internal::Function::ArgumentTuple ArgumentTuple; - typedef typename internal::Function::ArgumentMatcherTuple - ArgumentMatcherTuple; + typedef + typename internal::Function::ArgumentMatcherTuple ArgumentMatcherTuple; // Constructs a MockSpec object, given the function mocker object // that the spec is associated with. 
@@ -1269,8 +1262,9 @@ class MockSpec { // Adds a new default action spec to the function mocker and returns // the newly created spec. - internal::OnCallSpec& InternalDefaultActionSetAt( - const char* file, int line, const char* obj, const char* call) { + internal::OnCallSpec& InternalDefaultActionSetAt(const char* file, + int line, const char* obj, + const char* call) { LogWithLocation(internal::kInfo, file, line, std::string("ON_CALL(") + obj + ", " + call + ") invoked"); return function_mocker_->AddNewOnCallSpec(file, line, matchers_); @@ -1278,13 +1272,14 @@ class MockSpec { // Adds a new expectation spec to the function mocker and returns // the newly created spec. - internal::TypedExpectation& InternalExpectedAt( - const char* file, int line, const char* obj, const char* call) { + internal::TypedExpectation& InternalExpectedAt(const char* file, int line, + const char* obj, + const char* call) { const std::string source_text(std::string("EXPECT_CALL(") + obj + ", " + call + ")"); LogWithLocation(internal::kInfo, file, line, source_text + " invoked"); - return function_mocker_->AddNewExpectation( - file, line, source_text, matchers_); + return function_mocker_->AddNewExpectation(file, line, source_text, + matchers_); } // This operator overload is used to swallow the superfluous parameter list @@ -1317,9 +1312,7 @@ template class ReferenceOrValueWrapper { public: // Constructs a wrapper from the given value/reference. - explicit ReferenceOrValueWrapper(T value) - : value_(std::move(value)) { - } + explicit ReferenceOrValueWrapper(T value) : value_(std::move(value)) {} // Unwraps and returns the underlying value/reference, exactly as // originally passed. The behavior of calling this more than once on @@ -1330,9 +1323,7 @@ class ReferenceOrValueWrapper { // Always returns a const reference (more precisely, // const std::add_lvalue_reference::type). The behavior of calling this // after calling Unwrap on the same object is unspecified. 
- const T& Peek() const { - return value_; - } + const T& Peek() const { return value_; } private: T value_; @@ -1346,8 +1337,7 @@ class ReferenceOrValueWrapper { // Workaround for debatable pass-by-reference lint warning (c-library-team // policy precludes NOLINT in this context) typedef T& reference; - explicit ReferenceOrValueWrapper(reference ref) - : value_ptr_(&ref) {} + explicit ReferenceOrValueWrapper(reference ref) : value_ptr_(&ref) {} T& Unwrap() { return *value_ptr_; } const T& Peek() const { return *value_ptr_; } @@ -1355,102 +1345,27 @@ class ReferenceOrValueWrapper { T* value_ptr_; }; -// C++ treats the void type specially. For example, you cannot define -// a void-typed variable or pass a void value to a function. -// ActionResultHolder holds a value of type T, where T must be a -// copyable type or void (T doesn't need to be default-constructable). -// It hides the syntactic difference between void and other types, and -// is used to unify the code for invoking both void-returning and -// non-void-returning mock functions. - -// Untyped base class for ActionResultHolder. -class UntypedActionResultHolderBase { - public: - virtual ~UntypedActionResultHolderBase() {} - - // Prints the held value as an action's result to os. - virtual void PrintAsActionResult(::std::ostream* os) const = 0; -}; - -// This generic definition is used when T is not void. +// Prints the held value as an action's result to os. template -class ActionResultHolder : public UntypedActionResultHolderBase { +void PrintAsActionResult(const T& result, std::ostream& os) { + os << "\n Returns: "; + // T may be a reference type, so we don't use UniversalPrint(). + UniversalPrinter::Print(result, &os); +} + +// Reports an uninteresting call (whose description is in msg) in the +// manner specified by 'reaction'. +GTEST_API_ void ReportUninterestingCall(CallReaction reaction, + const std::string& msg); + +// A generic RAII type that runs a user-provided function in its destructor. 
+class Cleanup final { public: - // Returns the held value. Must not be called more than once. - T Unwrap() { - return result_.Unwrap(); - } - - // Prints the held value as an action's result to os. - void PrintAsActionResult(::std::ostream* os) const override { - *os << "\n Returns: "; - // T may be a reference type, so we don't use UniversalPrint(). - UniversalPrinter::Print(result_.Peek(), os); - } - - // Performs the given mock function's default action and returns the - // result in a new-ed ActionResultHolder. - template - static ActionResultHolder* PerformDefaultAction( - const FunctionMocker* func_mocker, - typename Function::ArgumentTuple&& args, - const std::string& call_description) { - return new ActionResultHolder(Wrapper(func_mocker->PerformDefaultAction( - std::move(args), call_description))); - } - - // Performs the given action and returns the result in a new-ed - // ActionResultHolder. - template - static ActionResultHolder* PerformAction( - const Action& action, typename Function::ArgumentTuple&& args) { - return new ActionResultHolder( - Wrapper(action.Perform(std::move(args)))); - } + explicit Cleanup(std::function f) : f_(std::move(f)) {} + ~Cleanup() { f_(); } private: - typedef ReferenceOrValueWrapper Wrapper; - - explicit ActionResultHolder(Wrapper result) - : result_(std::move(result)) { - } - - Wrapper result_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionResultHolder); -}; - -// Specialization for T = void. -template <> -class ActionResultHolder : public UntypedActionResultHolderBase { - public: - void Unwrap() { } - - void PrintAsActionResult(::std::ostream* /* os */) const override {} - - // Performs the given mock function's default action and returns ownership - // of an empty ActionResultHolder*. 
- template - static ActionResultHolder* PerformDefaultAction( - const FunctionMocker* func_mocker, - typename Function::ArgumentTuple&& args, - const std::string& call_description) { - func_mocker->PerformDefaultAction(std::move(args), call_description); - return new ActionResultHolder; - } - - // Performs the given action and returns ownership of an empty - // ActionResultHolder*. - template - static ActionResultHolder* PerformAction( - const Action& action, typename Function::ArgumentTuple&& args) { - action.Perform(std::move(args)); - return new ActionResultHolder; - } - - private: - ActionResultHolder() {} - GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionResultHolder); + std::function f_; }; template @@ -1495,14 +1410,12 @@ class FunctionMocker final : public UntypedFunctionMockerBase { // Returns the ON_CALL spec that matches this mock function with the // given arguments; returns NULL if no matching ON_CALL is found. // L = * - const OnCallSpec* FindOnCallSpec( - const ArgumentTuple& args) const { - for (UntypedOnCallSpecs::const_reverse_iterator it - = untyped_on_call_specs_.rbegin(); + const OnCallSpec* FindOnCallSpec(const ArgumentTuple& args) const { + for (UntypedOnCallSpecs::const_reverse_iterator it = + untyped_on_call_specs_.rbegin(); it != untyped_on_call_specs_.rend(); ++it) { const OnCallSpec* spec = static_cast*>(*it); - if (spec->Matches(args)) - return spec; + if (spec->Matches(args)) return spec; } return nullptr; @@ -1510,15 +1423,14 @@ class FunctionMocker final : public UntypedFunctionMockerBase { // Performs the default action of this mock function on the given // arguments and returns the result. Asserts (or throws if - // exceptions are enabled) with a helpful call descrption if there + // exceptions are enabled) with a helpful call description if there // is no valid return value. This method doesn't depend on the // mutable state of this object, and thus can be called concurrently // without locking. 
// L = * Result PerformDefaultAction(ArgumentTuple&& args, const std::string& call_description) const { - const OnCallSpec* const spec = - this->FindOnCallSpec(args); + const OnCallSpec* const spec = this->FindOnCallSpec(args); if (spec != nullptr) { return spec->GetAction().Perform(std::move(args)); } @@ -1536,32 +1448,6 @@ class FunctionMocker final : public UntypedFunctionMockerBase { return DefaultValue::Get(); } - // Performs the default action with the given arguments and returns - // the action's result. The call description string will be used in - // the error message to describe the call in the case the default - // action fails. The caller is responsible for deleting the result. - // L = * - UntypedActionResultHolderBase* UntypedPerformDefaultAction( - void* untyped_args, // must point to an ArgumentTuple - const std::string& call_description) const override { - ArgumentTuple* args = static_cast(untyped_args); - return ResultHolder::PerformDefaultAction(this, std::move(*args), - call_description); - } - - // Performs the given action with the given arguments and returns - // the action's result. The caller is responsible for deleting the - // result. - // L = * - UntypedActionResultHolderBase* UntypedPerformAction( - const void* untyped_action, void* untyped_args) const override { - // Make a copy of the action before performing it, in case the - // action deletes the mock object (and thus deletes itself). - const Action action = *static_cast*>(untyped_action); - ArgumentTuple* args = static_cast(untyped_args); - return ResultHolder::PerformAction(action, std::move(*args)); - } - // Implements UntypedFunctionMockerBase::ClearDefaultActionsLocked(): // clears the ON_CALL()s set on this mock function. 
void ClearDefaultActionsLocked() override @@ -1579,8 +1465,7 @@ class FunctionMocker final : public UntypedFunctionMockerBase { untyped_on_call_specs_.swap(specs_to_delete); g_gmock_mutex.Unlock(); - for (UntypedOnCallSpecs::const_iterator it = - specs_to_delete.begin(); + for (UntypedOnCallSpecs::const_iterator it = specs_to_delete.begin(); it != specs_to_delete.end(); ++it) { delete static_cast*>(*it); } @@ -1594,10 +1479,7 @@ class FunctionMocker final : public UntypedFunctionMockerBase { // arguments. This function can be safely called from multiple // threads concurrently. Result Invoke(Args... args) GTEST_LOCK_EXCLUDED_(g_gmock_mutex) { - ArgumentTuple tuple(std::forward(args)...); - std::unique_ptr holder(DownCast_( - this->UntypedInvokeWith(static_cast(&tuple)))); - return holder->Unwrap(); + return InvokeWith(ArgumentTuple(std::forward(args)...)); } MockSpec With(Matcher... m) { @@ -1608,13 +1490,10 @@ class FunctionMocker final : public UntypedFunctionMockerBase { template friend class MockSpec; - typedef ActionResultHolder ResultHolder; - // Adds and returns a default action spec for this mock function. - OnCallSpec& AddNewOnCallSpec( - const char* file, int line, - const ArgumentMatcherTuple& m) - GTEST_LOCK_EXCLUDED_(g_gmock_mutex) { + OnCallSpec& AddNewOnCallSpec(const char* file, int line, + const ArgumentMatcherTuple& m) + GTEST_LOCK_EXCLUDED_(g_gmock_mutex) { Mock::RegisterUseByOnCallOrExpectCall(MockObject(), file, line); OnCallSpec* const on_call_spec = new OnCallSpec(file, line, m); untyped_on_call_specs_.push_back(on_call_spec); @@ -1644,7 +1523,8 @@ class FunctionMocker final : public UntypedFunctionMockerBase { } private: - template friend class TypedExpectation; + template + friend class TypedExpectation; // Some utilities needed for implementing UntypedInvokeWith(). 
@@ -1728,9 +1608,8 @@ class FunctionMocker final : public UntypedFunctionMockerBase { // Returns the expectation that matches the arguments, or NULL if no // expectation matches them. - TypedExpectation* FindMatchingExpectationLocked( - const ArgumentTuple& args) const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + TypedExpectation* FindMatchingExpectationLocked(const ArgumentTuple& args) + const GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); // See the definition of untyped_expectations_ for why access to // it is unprotected here. @@ -1747,11 +1626,10 @@ class FunctionMocker final : public UntypedFunctionMockerBase { } // Returns a message that the arguments don't match any expectation. - void FormatUnexpectedCallMessageLocked( - const ArgumentTuple& args, - ::std::ostream* os, - ::std::ostream* why) const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + void FormatUnexpectedCallMessageLocked(const ArgumentTuple& args, + ::std::ostream* os, + ::std::ostream* why) const + GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); *os << "\nUnexpected mock function call - "; DescribeDefaultActionTo(args, os); @@ -1760,15 +1638,14 @@ class FunctionMocker final : public UntypedFunctionMockerBase { // Prints a list of expectations that have been tried against the // current mock function call. - void PrintTriedExpectationsLocked( - const ArgumentTuple& args, - ::std::ostream* why) const - GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { + void PrintTriedExpectationsLocked(const ArgumentTuple& args, + ::std::ostream* why) const + GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); const size_t count = untyped_expectations_.size(); *why << "Google Mock tried the following " << count << " " - << (count == 1 ? "expectation, but it didn't match" : - "expectations, but none matched") + << (count == 1 ? 
"expectation, but it didn't match" + : "expectations, but none matched") << ":\n"; for (size_t i = 0; i < count; i++) { TypedExpectation* const expectation = @@ -1783,11 +1660,177 @@ class FunctionMocker final : public UntypedFunctionMockerBase { expectation->DescribeCallCountTo(why); } } + + // Performs the given action (or the default if it's null) with the given + // arguments and returns the action's result. + // L = * + R PerformAction(const void* untyped_action, ArgumentTuple&& args, + const std::string& call_description) const { + if (untyped_action == nullptr) { + return PerformDefaultAction(std::move(args), call_description); + } + + // Make a copy of the action before performing it, in case the + // action deletes the mock object (and thus deletes itself). + const Action action = *static_cast*>(untyped_action); + return action.Perform(std::move(args)); + } + + // Is it possible to store an object of the supplied type in a local variable + // for the sake of printing it, then return it on to the caller? + template + using can_print_result = internal::conjunction< + // void can't be stored as an object (and we also don't need to print it). + internal::negation>, + // Non-moveable types can't be returned on to the user, so there's no way + // for us to intercept and print them. + std::is_move_constructible>; + + // Perform the supplied action, printing the result to os. + template ::value, int>::type = 0> + R PerformActionAndPrintResult(const void* const untyped_action, + ArgumentTuple&& args, + const std::string& call_description, + std::ostream& os) { + R result = PerformAction(untyped_action, std::move(args), call_description); + + PrintAsActionResult(result, os); + return std::forward(result); + } + + // An overload for when it's not possible to print the result. In this case we + // simply perform the action. 
+ template >::value, int>::type = 0> + R PerformActionAndPrintResult(const void* const untyped_action, + ArgumentTuple&& args, + const std::string& call_description, + std::ostream&) { + return PerformAction(untyped_action, std::move(args), call_description); + } + + // Returns the result of invoking this mock function with the given + // arguments. This function can be safely called from multiple + // threads concurrently. + R InvokeWith(ArgumentTuple&& args) GTEST_LOCK_EXCLUDED_(g_gmock_mutex); }; // class FunctionMocker -// Reports an uninteresting call (whose description is in msg) in the -// manner specified by 'reaction'. -void ReportUninterestingCall(CallReaction reaction, const std::string& msg); +// Calculates the result of invoking this mock function with the given +// arguments, prints it, and returns it. +template +R FunctionMocker::InvokeWith(ArgumentTuple&& args) + GTEST_LOCK_EXCLUDED_(g_gmock_mutex) { + // See the definition of untyped_expectations_ for why access to it + // is unprotected here. + if (untyped_expectations_.size() == 0) { + // No expectation is set on this mock method - we have an + // uninteresting call. + + // We must get Google Mock's reaction on uninteresting calls + // made on this mock object BEFORE performing the action, + // because the action may DELETE the mock object and make the + // following expression meaningless. + const CallReaction reaction = + Mock::GetReactionOnUninterestingCalls(MockObject()); + + // True if and only if we need to print this call's arguments and return + // value. This definition must be kept in sync with + // the behavior of ReportUninterestingCall(). + const bool need_to_report_uninteresting_call = + // If the user allows this uninteresting call, we print it + // only when they want informational messages. + reaction == kAllow ? LogIsVisible(kInfo) : + // If the user wants this to be a warning, we print + // it only when they want to see warnings. + reaction == kWarn + ? 
LogIsVisible(kWarning) + : + // Otherwise, the user wants this to be an error, and we + // should always print detailed information in the error. + true; + + if (!need_to_report_uninteresting_call) { + // Perform the action without printing the call information. + return this->PerformDefaultAction( + std::move(args), "Function call: " + std::string(Name())); + } + + // Warns about the uninteresting call. + ::std::stringstream ss; + this->UntypedDescribeUninterestingCall(&args, &ss); + + // Perform the action, print the result, and then report the uninteresting + // call. + // + // We use RAII to do the latter in case R is void or a non-moveable type. In + // either case we can't assign it to a local variable. + const Cleanup report_uninteresting_call( + [&] { ReportUninterestingCall(reaction, ss.str()); }); + + return PerformActionAndPrintResult(nullptr, std::move(args), ss.str(), ss); + } + + bool is_excessive = false; + ::std::stringstream ss; + ::std::stringstream why; + ::std::stringstream loc; + const void* untyped_action = nullptr; + + // The UntypedFindMatchingExpectation() function acquires and + // releases g_gmock_mutex. + + const ExpectationBase* const untyped_expectation = + this->UntypedFindMatchingExpectation(&args, &untyped_action, + &is_excessive, &ss, &why); + const bool found = untyped_expectation != nullptr; + + // True if and only if we need to print the call's arguments + // and return value. + // This definition must be kept in sync with the uses of Expect() + // and Log() in this function. + const bool need_to_report_call = + !found || is_excessive || LogIsVisible(kInfo); + if (!need_to_report_call) { + // Perform the action without printing the call information. + return PerformAction(untyped_action, std::move(args), ""); + } + + ss << " Function call: " << Name(); + this->UntypedPrintArgs(&args, &ss); + + // In case the action deletes a piece of the expectation, we + // generate the message beforehand. 
+ if (found && !is_excessive) { + untyped_expectation->DescribeLocationTo(&loc); + } + + // Perform the action, print the result, and then fail or log in whatever way + // is appropriate. + // + // We use RAII to do the latter in case R is void or a non-moveable type. In + // either case we can't assign it to a local variable. + const Cleanup handle_failures([&] { + ss << "\n" << why.str(); + + if (!found) { + // No expectation matches this call - reports a failure. + Expect(false, nullptr, -1, ss.str()); + } else if (is_excessive) { + // We had an upper-bound violation and the failure message is in ss. + Expect(false, untyped_expectation->file(), untyped_expectation->line(), + ss.str()); + } else { + // We had an expected call and the matching expectation is + // described in ss. + Log(kInfo, loc.str() + ss.str(), 2); + } + }); + + return PerformActionAndPrintResult(untyped_action, std::move(args), ss.str(), + ss); +} } // namespace internal @@ -1952,7 +1995,9 @@ using internal::MockSpec; // // Expects a call to const MockFoo::Bar(). // EXPECT_CALL(Const(foo), Bar()); template -inline const T& Const(const T& x) { return x; } +inline const T& Const(const T& x) { + return x; +} // Constructs an Expectation object that references and co-owns exp. inline Expectation::Expectation(internal::ExpectationBase& exp) // NOLINT diff --git a/ext/googletest/googlemock/include/gmock/gmock.h b/ext/googletest/googlemock/include/gmock/gmock.h index 12469bc466..568c8c71d7 100644 --- a/ext/googletest/googlemock/include/gmock/gmock.h +++ b/ext/googletest/googlemock/include/gmock/gmock.h @@ -27,13 +27,10 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This is the main header file a user should include. 
-// GOOGLETEST_CM0002 DO NOT DELETE - #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_H_ @@ -64,14 +61,15 @@ #include "gmock/gmock-more-matchers.h" #include "gmock/gmock-nice-strict.h" #include "gmock/internal/gmock-internal-utils.h" - -namespace testing { +#include "gmock/internal/gmock-port.h" // Declares Google Mock flags that we want a user to use programmatically. GMOCK_DECLARE_bool_(catch_leaked_mocks); GMOCK_DECLARE_string_(verbose); GMOCK_DECLARE_int32_(default_mock_behavior); +namespace testing { + // Initializes Google Mock. This must be called before running the // tests. In particular, it parses the command line for the flags // that Google Mock recognizes. Whenever a Google Mock flag is seen, diff --git a/ext/googletest/googlemock/include/gmock/internal/custom/README.md b/ext/googletest/googlemock/include/gmock/internal/custom/README.md index f6c93f616d..9c4874fd0c 100644 --- a/ext/googletest/googlemock/include/gmock/internal/custom/README.md +++ b/ext/googletest/googlemock/include/gmock/internal/custom/README.md @@ -14,3 +14,5 @@ The following macros can be defined: * `GMOCK_DEFINE_bool_(name, default_val, doc)` * `GMOCK_DEFINE_int32_(name, default_val, doc)` * `GMOCK_DEFINE_string_(name, default_val, doc)` +* `GMOCK_FLAG_GET(flag_name)` +* `GMOCK_FLAG_SET(flag_name, value)` diff --git a/ext/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h b/ext/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h index 63f899962e..bbcad31c76 100644 --- a/ext/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h +++ b/ext/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h @@ -1,4 +1,5 @@ -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_ #define 
GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_ diff --git a/ext/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h b/ext/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h index 638429488e..bb7dcbaa4c 100644 --- a/ext/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h +++ b/ext/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h @@ -26,10 +26,11 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Injection point for custom user configurations. See README for details -// -// GOOGLETEST_CM0002 DO NOT DELETE + +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_MATCHERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_MATCHERS_H_ diff --git a/ext/googletest/googlemock/include/gmock/internal/custom/gmock-port.h b/ext/googletest/googlemock/include/gmock/internal/custom/gmock-port.h index 14378692ae..f055f7506b 100644 --- a/ext/googletest/googlemock/include/gmock/internal/custom/gmock-port.h +++ b/ext/googletest/googlemock/include/gmock/internal/custom/gmock-port.h @@ -26,12 +26,13 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Injection point for custom user configurations. 
See README for details // // ** Custom implementation starts here ** -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ diff --git a/ext/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h b/ext/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h index 317544a7da..b1343fdc82 100644 --- a/ext/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h +++ b/ext/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h @@ -27,22 +27,25 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file defines some utilities useful for implementing Google // Mock. They are subject to change without notice, so please DO NOT // USE THEM IN USER CODE. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_ #include + #include // NOLINT #include #include +#include + #include "gmock/internal/gmock-port.h" #include "gtest/gtest.h" @@ -56,14 +59,15 @@ namespace internal { // Silence MSVC C4100 (unreferenced formal parameter) and // C4805('==': unsafe mix of type 'const int' and type 'const bool') #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) -# pragma warning(disable:4805) +#pragma warning(push) +#pragma warning(disable : 4100) +#pragma warning(disable : 4805) #endif // Joins a vector of strings as if they are fields of a tuple; returns // the joined string. 
-GTEST_API_ std::string JoinAsTuple(const Strings& fields); +GTEST_API_ std::string JoinAsKeyValueTuple( + const std::vector& names, const Strings& values); // Converts an identifier name to a space-separated list of lower-case // words. Each maximum substring of the form [A-Za-z][a-z]*|\d+ is @@ -78,9 +82,18 @@ template inline const typename Pointer::element_type* GetRawPointer(const Pointer& p) { return p.get(); } +// This overload version is for std::reference_wrapper, which does not work with +// the overload above, as it does not have an `element_type`. +template +inline const Element* GetRawPointer(const std::reference_wrapper& r) { + return &r.get(); +} + // This overloaded version is for the raw pointer case. template -inline Element* GetRawPointer(Element* p) { return p; } +inline Element* GetRawPointer(Element* p) { + return p; +} // MSVC treats wchar_t as a native type usually, but treats it as the // same as unsigned short when the compiler option /Zc:wchar_t- is @@ -89,7 +102,7 @@ inline Element* GetRawPointer(Element* p) { return p; } #if defined(_MSC_VER) && !defined(_NATIVE_WCHAR_T_DEFINED) // wchar_t is a typedef. #else -# define GMOCK_WCHAR_T_IS_NATIVE_ 1 +#define GMOCK_WCHAR_T_IS_NATIVE_ 1 #endif // In what follows, we use the term "kind" to indicate whether a type @@ -97,18 +110,20 @@ inline Element* GetRawPointer(Element* p) { return p; } // or none of them. This categorization is useful for determining // when a matcher argument type can be safely converted to another // type in the implementation of SafeMatcherCast. -enum TypeKind { - kBool, kInteger, kFloatingPoint, kOther -}; +enum TypeKind { kBool, kInteger, kFloatingPoint, kOther }; // KindOf::value is the kind of type T. -template struct KindOf { +template +struct KindOf { enum { value = kOther }; // The default kind. }; // This macro declares that the kind of 'type' is 'kind'. 
#define GMOCK_DECLARE_KIND_(type, kind) \ - template <> struct KindOf { enum { value = kind }; } + template <> \ + struct KindOf { \ + enum { value = kind }; \ + } GMOCK_DECLARE_KIND_(bool, kBool); @@ -116,13 +131,13 @@ GMOCK_DECLARE_KIND_(bool, kBool); GMOCK_DECLARE_KIND_(char, kInteger); GMOCK_DECLARE_KIND_(signed char, kInteger); GMOCK_DECLARE_KIND_(unsigned char, kInteger); -GMOCK_DECLARE_KIND_(short, kInteger); // NOLINT +GMOCK_DECLARE_KIND_(short, kInteger); // NOLINT GMOCK_DECLARE_KIND_(unsigned short, kInteger); // NOLINT GMOCK_DECLARE_KIND_(int, kInteger); GMOCK_DECLARE_KIND_(unsigned int, kInteger); -GMOCK_DECLARE_KIND_(long, kInteger); // NOLINT -GMOCK_DECLARE_KIND_(unsigned long, kInteger); // NOLINT -GMOCK_DECLARE_KIND_(long long, kInteger); // NOLINT +GMOCK_DECLARE_KIND_(long, kInteger); // NOLINT +GMOCK_DECLARE_KIND_(unsigned long, kInteger); // NOLINT +GMOCK_DECLARE_KIND_(long long, kInteger); // NOLINT GMOCK_DECLARE_KIND_(unsigned long long, kInteger); // NOLINT #if GMOCK_WCHAR_T_IS_NATIVE_ @@ -137,7 +152,7 @@ GMOCK_DECLARE_KIND_(long double, kFloatingPoint); #undef GMOCK_DECLARE_KIND_ // Evaluates to the kind of 'type'. -#define GMOCK_KIND_OF_(type) \ +#define GMOCK_KIND_OF_(type) \ static_cast< ::testing::internal::TypeKind>( \ ::testing::internal::KindOf::value) @@ -193,9 +208,7 @@ using LosslessArithmeticConvertible = class FailureReporterInterface { public: // The type of a failure (either non-fatal or fatal). 
- enum FailureType { - kNonfatal, kFatal - }; + enum FailureType { kNonfatal, kFatal }; virtual ~FailureReporterInterface() {} @@ -215,8 +228,8 @@ GTEST_API_ FailureReporterInterface* GetFailureReporter(); inline void Assert(bool condition, const char* file, int line, const std::string& msg) { if (!condition) { - GetFailureReporter()->ReportFailure(FailureReporterInterface::kFatal, - file, line, msg); + GetFailureReporter()->ReportFailure(FailureReporterInterface::kFatal, file, + line, msg); } } inline void Assert(bool condition, const char* file, int line) { @@ -237,10 +250,7 @@ inline void Expect(bool condition, const char* file, int line) { } // Severity level of a log. -enum LogSeverity { - kInfo = 0, - kWarning = 1 -}; +enum LogSeverity { kInfo = 0, kWarning = 1 }; // Valid values for the --gmock_verbose flag. @@ -281,10 +291,10 @@ class WithoutMatchers { GTEST_API_ WithoutMatchers GetWithoutMatchers(); // Disable MSVC warnings for infinite recursion, since in this case the -// the recursion is unreachable. +// recursion is unreachable. #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4717) +#pragma warning(push) +#pragma warning(disable : 4717) #endif // Invalid() is usable as an expression of type T, but will terminate @@ -295,14 +305,17 @@ GTEST_API_ WithoutMatchers GetWithoutMatchers(); template inline T Invalid() { Assert(false, "", -1, "Internal error: attempt to return invalid value"); - // This statement is unreachable, and would never terminate even if it - // could be reached. It is provided only to placate compiler warnings - // about missing return statements. +#if defined(__GNUC__) || defined(__clang__) + __builtin_unreachable(); +#elif defined(_MSC_VER) + __assume(0); +#else return Invalid(); +#endif } #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif // Given a raw type (i.e. 
having no top-level reference or const @@ -381,7 +394,8 @@ class StlContainerView< ::std::tuple > { // The following specialization prevents the user from instantiating // StlContainer with a reference type. -template class StlContainerView; +template +class StlContainerView; // A type transform to remove constness from the first part of a pair. // Pairs like that are used as the value_type of associative containers, @@ -402,17 +416,18 @@ struct RemoveConstFromKey > { GTEST_API_ void IllegalDoDefault(const char* file, int line); template -auto ApplyImpl(F&& f, Tuple&& args, IndexSequence) -> decltype( - std::forward(f)(std::get(std::forward(args))...)) { +auto ApplyImpl(F&& f, Tuple&& args, IndexSequence) + -> decltype(std::forward(f)( + std::get(std::forward(args))...)) { return std::forward(f)(std::get(std::forward(args))...); } // Apply the function to a tuple of arguments. template -auto Apply(F&& f, Tuple&& args) -> decltype( - ApplyImpl(std::forward(f), std::forward(args), - MakeIndexSequence::type>::value>())) { +auto Apply(F&& f, Tuple&& args) -> decltype(ApplyImpl( + std::forward(f), std::forward(args), + MakeIndexSequence::type>::value>())) { return ApplyImpl(std::forward(f), std::forward(args), MakeIndexSequence::type>::value>()); @@ -449,8 +464,10 @@ struct Function { template constexpr size_t Function::ArgumentCount; +bool Base64Unescape(const std::string& encoded, std::string* decoded); + #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif } // namespace internal diff --git a/ext/googletest/googlemock/include/gmock/internal/gmock-port.h b/ext/googletest/googlemock/include/gmock/internal/gmock-port.h index 367a44d366..bc18a25f34 100644 --- a/ext/googletest/googlemock/include/gmock/internal/gmock-port.h +++ b/ext/googletest/googlemock/include/gmock/internal/gmock-port.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// // Low-level types and utilities for porting Google Mock to various // platforms. All macros ending with _ and symbols defined in an // internal namespace are subject to change without notice. Code @@ -35,7 +34,8 @@ // end with _ are part of Google Mock's public API and can be used by // code outside Google Mock. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_ @@ -53,35 +53,87 @@ // here, as Google Mock depends on Google Test. Only add a utility // here if it's truly specific to Google Mock. -#include "gtest/internal/gtest-port.h" #include "gmock/internal/custom/gmock-port.h" +#include "gtest/internal/gtest-port.h" + +#if GTEST_HAS_ABSL +#include "absl/flags/declare.h" +#include "absl/flags/flag.h" +#endif // For MS Visual C++, check the compiler version. At least VS 2015 is // required to compile Google Mock. #if defined(_MSC_VER) && _MSC_VER < 1900 -# error "At least Visual C++ 2015 (14.0) is required to compile Google Mock." +#error "At least Visual C++ 2015 (14.0) is required to compile Google Mock." #endif // Macro for referencing flags. This is public as we want the user to // use this syntax to reference Google Mock flags. +#define GMOCK_FLAG_NAME_(name) gmock_##name #define GMOCK_FLAG(name) FLAGS_gmock_##name -#if !defined(GMOCK_DECLARE_bool_) - -// Macros for declaring flags. -# define GMOCK_DECLARE_bool_(name) extern GTEST_API_ bool GMOCK_FLAG(name) -# define GMOCK_DECLARE_int32_(name) extern GTEST_API_ int32_t GMOCK_FLAG(name) -# define GMOCK_DECLARE_string_(name) \ - extern GTEST_API_ ::std::string GMOCK_FLAG(name) +// Pick a command line flags implementation. +#if GTEST_HAS_ABSL // Macros for defining flags. 
-# define GMOCK_DEFINE_bool_(name, default_val, doc) \ - GTEST_API_ bool GMOCK_FLAG(name) = (default_val) -# define GMOCK_DEFINE_int32_(name, default_val, doc) \ - GTEST_API_ int32_t GMOCK_FLAG(name) = (default_val) -# define GMOCK_DEFINE_string_(name, default_val, doc) \ - GTEST_API_ ::std::string GMOCK_FLAG(name) = (default_val) +#define GMOCK_DEFINE_bool_(name, default_val, doc) \ + ABSL_FLAG(bool, GMOCK_FLAG_NAME_(name), default_val, doc) +#define GMOCK_DEFINE_int32_(name, default_val, doc) \ + ABSL_FLAG(int32_t, GMOCK_FLAG_NAME_(name), default_val, doc) +#define GMOCK_DEFINE_string_(name, default_val, doc) \ + ABSL_FLAG(std::string, GMOCK_FLAG_NAME_(name), default_val, doc) -#endif // !defined(GMOCK_DECLARE_bool_) +// Macros for declaring flags. +#define GMOCK_DECLARE_bool_(name) \ + ABSL_DECLARE_FLAG(bool, GMOCK_FLAG_NAME_(name)) +#define GMOCK_DECLARE_int32_(name) \ + ABSL_DECLARE_FLAG(int32_t, GMOCK_FLAG_NAME_(name)) +#define GMOCK_DECLARE_string_(name) \ + ABSL_DECLARE_FLAG(std::string, GMOCK_FLAG_NAME_(name)) + +#define GMOCK_FLAG_GET(name) ::absl::GetFlag(GMOCK_FLAG(name)) +#define GMOCK_FLAG_SET(name, value) \ + (void)(::absl::SetFlag(&GMOCK_FLAG(name), value)) + +#else // GTEST_HAS_ABSL + +// Macros for defining flags. +#define GMOCK_DEFINE_bool_(name, default_val, doc) \ + namespace testing { \ + GTEST_API_ bool GMOCK_FLAG(name) = (default_val); \ + } \ + static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DEFINE_int32_(name, default_val, doc) \ + namespace testing { \ + GTEST_API_ int32_t GMOCK_FLAG(name) = (default_val); \ + } \ + static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DEFINE_string_(name, default_val, doc) \ + namespace testing { \ + GTEST_API_ ::std::string GMOCK_FLAG(name) = (default_val); \ + } \ + static_assert(true, "no-op to require trailing semicolon") + +// Macros for declaring flags. 
+#define GMOCK_DECLARE_bool_(name) \ + namespace testing { \ + GTEST_API_ extern bool GMOCK_FLAG(name); \ + } \ + static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DECLARE_int32_(name) \ + namespace testing { \ + GTEST_API_ extern int32_t GMOCK_FLAG(name); \ + } \ + static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DECLARE_string_(name) \ + namespace testing { \ + GTEST_API_ extern ::std::string GMOCK_FLAG(name); \ + } \ + static_assert(true, "no-op to require trailing semicolon") + +#define GMOCK_FLAG_GET(name) ::testing::GMOCK_FLAG(name) +#define GMOCK_FLAG_SET(name, value) (void)(::testing::GMOCK_FLAG(name) = value) + +#endif // GTEST_HAS_ABSL #endif // GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_ diff --git a/ext/googletest/googlemock/scripts/README.md b/ext/googletest/googlemock/scripts/README.md deleted file mode 100644 index a3301e5bf6..0000000000 --- a/ext/googletest/googlemock/scripts/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Please Note: - -Files in this directory are no longer supported by the maintainers. They -represent mostly historical artifacts and supported by the community only. There -is no guarantee whatsoever that these scripts still work. diff --git a/ext/googletest/googlemock/scripts/fuse_gmock_files.py b/ext/googletest/googlemock/scripts/fuse_gmock_files.py deleted file mode 100755 index 7fa9b3a5e0..0000000000 --- a/ext/googletest/googlemock/scripts/fuse_gmock_files.py +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""fuse_gmock_files.py v0.1.0. - -Fuses Google Mock and Google Test source code into two .h files and a .cc file. - -SYNOPSIS - fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR - - Scans GMOCK_ROOT_DIR for Google Mock and Google Test source - code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest - directory, and generates three files: - OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and - OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests - by adding OUTPUT_DIR to the include search path and linking - with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain - everything you need to use Google Mock. Hence you can - "install" Google Mock by copying them to wherever you want. 
- - GMOCK_ROOT_DIR can be omitted and defaults to the parent - directory of the directory holding this script. - -EXAMPLES - ./fuse_gmock_files.py fused_gmock - ./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock - -This tool is experimental. In particular, it assumes that there is no -conditional inclusion of Google Mock or Google Test headers. Please -report any problems to googlemock@googlegroups.com. You can read -https://github.com/google/googletest/blob/master/docs/gmock_cook_book.md -for more -information. -""" - -from __future__ import print_function - -import os -import re -import sys - -__author__ = 'wan@google.com (Zhanyong Wan)' - -# We assume that this file is in the scripts/ directory in the Google -# Mock root directory. -DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') - -# We need to call into googletest/scripts/fuse_gtest_files.py. -sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts')) -import fuse_gtest_files as gtest # pylint:disable=g-import-not-at-top - -# Regex for matching -# '#include "gmock/..."'. -INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"') - -# Where to find the source seed files. -GMOCK_H_SEED = 'include/gmock/gmock.h' -GMOCK_ALL_CC_SEED = 'src/gmock-all.cc' - -# Where to put the generated files. -GTEST_H_OUTPUT = 'gtest/gtest.h' -GMOCK_H_OUTPUT = 'gmock/gmock.h' -GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc' - - -def GetGTestRootDir(gmock_root): - """Returns the root directory of Google Test.""" - - return os.path.join(gmock_root, '../googletest') - - -def ValidateGMockRootDir(gmock_root): - """Makes sure gmock_root points to a valid gmock root directory. - - The function aborts the program on failure. - - Args: - gmock_root: A string with the mock root directory. 
- """ - - gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root)) - gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED) - gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED) - - -def ValidateOutputDir(output_dir): - """Makes sure output_dir points to a valid output directory. - - The function aborts the program on failure. - - Args: - output_dir: A string representing the output directory. - """ - - gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT) - gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT) - gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT) - - -def FuseGMockH(gmock_root, output_dir): - """Scans folder gmock_root to generate gmock/gmock.h in output_dir.""" - - output_file = open(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w') - processed_files = set() # Holds all gmock headers we've processed. - - def ProcessFile(gmock_header_path): - """Processes the given gmock header file.""" - - # We don't process the same header twice. - if gmock_header_path in processed_files: - return - - processed_files.add(gmock_header_path) - - # Reads each line in the given gmock header. - - with open(os.path.join(gmock_root, gmock_header_path), 'r') as fh: - for line in fh: - m = INCLUDE_GMOCK_FILE_REGEX.match(line) - if m: - # '#include "gmock/..."' - # - let's process it recursively. - ProcessFile('include/' + m.group(1)) - else: - m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - # '#include "gtest/foo.h"' - # We translate it to "gtest/gtest.h", regardless of what foo is, - # since all gtest headers are fused into gtest/gtest.h. - - # There is no need to #include gtest.h twice. - if gtest.GTEST_H_SEED not in processed_files: - processed_files.add(gtest.GTEST_H_SEED) - output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,)) - else: - # Otherwise we copy the line unchanged to the output file. 
- output_file.write(line) - - ProcessFile(GMOCK_H_SEED) - output_file.close() - - -def FuseGMockAllCcToFile(gmock_root, output_file): - """Scans folder gmock_root to fuse gmock-all.cc into output_file.""" - - processed_files = set() - - def ProcessFile(gmock_source_file): - """Processes the given gmock source file.""" - - # We don't process the same #included file twice. - if gmock_source_file in processed_files: - return - - processed_files.add(gmock_source_file) - - # Reads each line in the given gmock source file. - - with open(os.path.join(gmock_root, gmock_source_file), 'r') as fh: - for line in fh: - m = INCLUDE_GMOCK_FILE_REGEX.match(line) - if m: - # '#include "gmock/foo.h"' - # We treat it as '#include "gmock/gmock.h"', as all other gmock - # headers are being fused into gmock.h and cannot be - # included directly. No need to - # #include "gmock/gmock.h" - # more than once. - - if GMOCK_H_SEED not in processed_files: - processed_files.add(GMOCK_H_SEED) - output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,)) - else: - m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - # '#include "gtest/..."' - # There is no need to #include gtest.h as it has been - # #included by gtest-all.cc. - - pass - else: - m = gtest.INCLUDE_SRC_FILE_REGEX.match(line) - if m: - # It's '#include "src/foo"' - let's process it recursively. - ProcessFile(m.group(1)) - else: - # Otherwise we copy the line unchanged to the output file. - output_file.write(line) - - ProcessFile(GMOCK_ALL_CC_SEED) - - -def FuseGMockGTestAllCc(gmock_root, output_dir): - """Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir.""" - - with open(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), - 'w') as output_file: - # First, fuse gtest-all.cc into gmock-gtest-all.cc. - gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file) - # Next, append fused gmock-all.cc to gmock-gtest-all.cc. 
- FuseGMockAllCcToFile(gmock_root, output_file) - - -def FuseGMock(gmock_root, output_dir): - """Fuses gtest.h, gmock.h, and gmock-gtest-all.h.""" - - ValidateGMockRootDir(gmock_root) - ValidateOutputDir(output_dir) - - gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir) - FuseGMockH(gmock_root, output_dir) - FuseGMockGTestAllCc(gmock_root, output_dir) - - -def main(): - argc = len(sys.argv) - if argc == 2: - # fuse_gmock_files.py OUTPUT_DIR - FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1]) - elif argc == 3: - # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR - FuseGMock(sys.argv[1], sys.argv[2]) - else: - print(__doc__) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/ext/googletest/googlemock/scripts/generator/LICENSE b/ext/googletest/googlemock/scripts/generator/LICENSE deleted file mode 100644 index 87ea063651..0000000000 --- a/ext/googletest/googlemock/scripts/generator/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [2007] Neal Norwitz - Portions Copyright [2007] Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ext/googletest/googlemock/scripts/generator/README b/ext/googletest/googlemock/scripts/generator/README deleted file mode 100644 index 01fd463dda..0000000000 --- a/ext/googletest/googlemock/scripts/generator/README +++ /dev/null @@ -1,34 +0,0 @@ - -The Google Mock class generator is an application that is part of cppclean. -For more information about cppclean, visit http://code.google.com/p/cppclean/ - -The mock generator requires Python 2.3.5 or later. If you don't have Python -installed on your system, you will also need to install it. You can download -Python from: http://www.python.org/download/releases/ - -To use the Google Mock class generator, you need to call it -on the command line passing the header file and class for which you want -to generate a Google Mock class. - -Make sure to install the scripts somewhere in your path. Then you can -run the program. - - gmock_gen.py header-file.h [ClassName]... - -If no ClassNames are specified, all classes in the file are emitted. - -To change the indentation from the default of 2, set INDENT in -the environment. For example to use an indent of 4 spaces: - -INDENT=4 gmock_gen.py header-file.h ClassName - -This version was made from SVN revision 281 in the cppclean repository. - -Known Limitations ------------------ -Not all code will be generated properly. 
For example, when mocking templated -classes, the template information is lost. You will need to add the template -information manually. - -Not all permutations of using multiple pointers/references will be rendered -properly. These will also have to be fixed manually. diff --git a/ext/googletest/googlemock/scripts/generator/README.cppclean b/ext/googletest/googlemock/scripts/generator/README.cppclean deleted file mode 100644 index 65431b6175..0000000000 --- a/ext/googletest/googlemock/scripts/generator/README.cppclean +++ /dev/null @@ -1,115 +0,0 @@ -Goal: ------ - CppClean attempts to find problems in C++ source that slow development - in large code bases, for example various forms of unused code. - Unused code can be unused functions, methods, data members, types, etc - to unnecessary #include directives. Unnecessary #includes can cause - considerable extra compiles increasing the edit-compile-run cycle. - - The project home page is: http://code.google.com/p/cppclean/ - - -Features: ---------- - * Find and print C++ language constructs: classes, methods, functions, etc. - * Find classes with virtual methods, no virtual destructor, and no bases - * Find global/static data that are potential problems when using threads - * Unnecessary forward class declarations - * Unnecessary function declarations - * Undeclared function definitions - * (planned) Find unnecessary header files #included - - No direct reference to anything in the header - - Header is unnecessary if classes were forward declared instead - * (planned) Source files that reference headers not directly #included, - ie, files that rely on a transitive #include from another header - * (planned) Unused members (private, protected, & public) methods and data - * (planned) Store AST in a SQL database so relationships can be queried - -AST is Abstract Syntax Tree, a representation of parsed source code. 
-http://en.wikipedia.org/wiki/Abstract_syntax_tree - - -System Requirements: --------------------- - * Python 2.4 or later (2.3 probably works too) - * Works on Windows (untested), Mac OS X, and Unix - - -How to Run: ------------ - For all examples, it is assumed that cppclean resides in a directory called - /cppclean. - - To print warnings for classes with virtual methods, no virtual destructor and - no base classes: - - /cppclean/run.sh nonvirtual_dtors.py file1.h file2.h file3.cc ... - - To print all the functions defined in header file(s): - - /cppclean/run.sh functions.py file1.h file2.h ... - - All the commands take multiple files on the command line. Other programs - include: find_warnings, headers, methods, and types. Some other programs - are available, but used primarily for debugging. - - run.sh is a simple wrapper that sets PYTHONPATH to /cppclean and then - runs the program in /cppclean/cpp/PROGRAM.py. There is currently - no equivalent for Windows. Contributions for a run.bat file - would be greatly appreciated. - - -How to Configure: ------------------ - You can add a siteheaders.py file in /cppclean/cpp to configure where - to look for other headers (typically -I options passed to a compiler). - Currently two values are supported: _TRANSITIVE and GetIncludeDirs. - _TRANSITIVE should be set to a boolean value (True or False) indicating - whether to transitively process all header files. The default is False. - - GetIncludeDirs is a function that takes a single argument and returns - a sequence of directories to include. This can be a generator or - return a static list. - - def GetIncludeDirs(filename): - return ['/some/path/with/other/headers'] - - # Here is a more complicated example. - def GetIncludeDirs(filename): - yield '/path1' - yield os.path.join('/path2', os.path.dirname(filename)) - yield '/path3' - - -How to Test: ------------- - For all examples, it is assumed that cppclean resides in a directory called - /cppclean. 
The tests require - - cd /cppclean - make test - # To generate expected results after a change: - make expected - - -Current Status: ---------------- - The parser works pretty well for header files, parsing about 99% of Google's - header files. Anything which inspects structure of C++ source files should - work reasonably well. Function bodies are not transformed to an AST, - but left as tokens. Much work is still needed on finding unused header files - and storing an AST in a database. - - -Non-goals: ----------- - * Parsing all valid C++ source - * Handling invalid C++ source gracefully - * Compiling to machine code (or anything beyond an AST) - - -Contact: --------- - If you used cppclean, I would love to hear about your experiences - cppclean@googlegroups.com. Even if you don't use cppclean, I'd like to - hear from you. :-) (You can contact me directly at: nnorwitz@gmail.com) diff --git a/ext/googletest/googlemock/scripts/generator/cpp/ast.py b/ext/googletest/googlemock/scripts/generator/cpp/ast.py deleted file mode 100755 index 0e770163bf..0000000000 --- a/ext/googletest/googlemock/scripts/generator/cpp/ast.py +++ /dev/null @@ -1,1773 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generate an Abstract Syntax Tree (AST) for C++.""" - -# FIXME: -# * Tokens should never be exported, need to convert to Nodes -# (return types, parameters, etc.) 
-# * Handle static class data for templatized classes -# * Handle casts (both C++ and C-style) -# * Handle conditions and loops (if/else, switch, for, while/do) -# -# TODO much, much later: -# * Handle #define -# * exceptions - - -try: - # Python 3.x - import builtins -except ImportError: - # Python 2.x - import __builtin__ as builtins - -import collections -import sys -import traceback - -from cpp import keywords -from cpp import tokenize -from cpp import utils - - -if not hasattr(builtins, 'reversed'): - # Support Python 2.3 and earlier. - def reversed(seq): - for i in range(len(seq)-1, -1, -1): - yield seq[i] - -if not hasattr(builtins, 'next'): - # Support Python 2.5 and earlier. - def next(obj): - return obj.next() - - -VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3) - -FUNCTION_NONE = 0x00 -FUNCTION_CONST = 0x01 -FUNCTION_VIRTUAL = 0x02 -FUNCTION_PURE_VIRTUAL = 0x04 -FUNCTION_CTOR = 0x08 -FUNCTION_DTOR = 0x10 -FUNCTION_ATTRIBUTE = 0x20 -FUNCTION_UNKNOWN_ANNOTATION = 0x40 -FUNCTION_THROW = 0x80 -FUNCTION_OVERRIDE = 0x100 - -""" -These are currently unused. Should really handle these properly at some point. - -TYPE_MODIFIER_INLINE = 0x010000 -TYPE_MODIFIER_EXTERN = 0x020000 -TYPE_MODIFIER_STATIC = 0x040000 -TYPE_MODIFIER_CONST = 0x080000 -TYPE_MODIFIER_REGISTER = 0x100000 -TYPE_MODIFIER_VOLATILE = 0x200000 -TYPE_MODIFIER_MUTABLE = 0x400000 - -TYPE_MODIFIER_MAP = { - 'inline': TYPE_MODIFIER_INLINE, - 'extern': TYPE_MODIFIER_EXTERN, - 'static': TYPE_MODIFIER_STATIC, - 'const': TYPE_MODIFIER_CONST, - 'register': TYPE_MODIFIER_REGISTER, - 'volatile': TYPE_MODIFIER_VOLATILE, - 'mutable': TYPE_MODIFIER_MUTABLE, - } -""" - -_INTERNAL_TOKEN = 'internal' -_NAMESPACE_POP = 'ns-pop' - - -# TODO(nnorwitz): use this as a singleton for templated_types, etc -# where we don't want to create a new empty dict each time. It is also const. 
-class _NullDict(object): - __contains__ = lambda self: False - keys = values = items = iterkeys = itervalues = iteritems = lambda self: () - - -# TODO(nnorwitz): move AST nodes into a separate module. -class Node(object): - """Base AST node.""" - - def __init__(self, start, end): - self.start = start - self.end = end - - def IsDeclaration(self): - """Returns bool if this node is a declaration.""" - return False - - def IsDefinition(self): - """Returns bool if this node is a definition.""" - return False - - def IsExportable(self): - """Returns bool if this node exportable from a header file.""" - return False - - def Requires(self, node): - """Does this AST node require the definition of the node passed in?""" - return False - - def XXX__str__(self): - return self._StringHelper(self.__class__.__name__, '') - - def _StringHelper(self, name, suffix): - if not utils.DEBUG: - return '%s(%s)' % (name, suffix) - return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix) - - def __repr__(self): - return str(self) - - -class Define(Node): - def __init__(self, start, end, name, definition): - Node.__init__(self, start, end) - self.name = name - self.definition = definition - - def __str__(self): - value = '%s %s' % (self.name, self.definition) - return self._StringHelper(self.__class__.__name__, value) - - -class Include(Node): - def __init__(self, start, end, filename, system): - Node.__init__(self, start, end) - self.filename = filename - self.system = system - - def __str__(self): - fmt = '"%s"' - if self.system: - fmt = '<%s>' - return self._StringHelper(self.__class__.__name__, fmt % self.filename) - - -class Goto(Node): - def __init__(self, start, end, label): - Node.__init__(self, start, end) - self.label = label - - def __str__(self): - return self._StringHelper(self.__class__.__name__, str(self.label)) - - -class Expr(Node): - def __init__(self, start, end, expr): - Node.__init__(self, start, end) - self.expr = expr - - def Requires(self, node): - # 
TODO(nnorwitz): impl. - return False - - def __str__(self): - return self._StringHelper(self.__class__.__name__, str(self.expr)) - - -class Return(Expr): - pass - - -class Delete(Expr): - pass - - -class Friend(Expr): - def __init__(self, start, end, expr, namespace): - Expr.__init__(self, start, end, expr) - self.namespace = namespace[:] - - -class Using(Node): - def __init__(self, start, end, names): - Node.__init__(self, start, end) - self.names = names - - def __str__(self): - return self._StringHelper(self.__class__.__name__, str(self.names)) - - -class Parameter(Node): - def __init__(self, start, end, name, parameter_type, default): - Node.__init__(self, start, end) - self.name = name - self.type = parameter_type - self.default = default - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. - return self.type.name == node.name - - def __str__(self): - name = str(self.type) - suffix = '%s %s' % (name, self.name) - if self.default: - suffix += ' = ' + ''.join([d.name for d in self.default]) - return self._StringHelper(self.__class__.__name__, suffix) - - -class _GenericDeclaration(Node): - def __init__(self, start, end, name, namespace): - Node.__init__(self, start, end) - self.name = name - self.namespace = namespace[:] - - def FullName(self): - prefix = '' - if self.namespace and self.namespace[-1]: - prefix = '::'.join(self.namespace) + '::' - return prefix + self.name - - def _TypeStringHelper(self, suffix): - if self.namespace: - names = [n or '' for n in self.namespace] - suffix += ' in ' + '::'.join(names) - return self._StringHelper(self.__class__.__name__, suffix) - - -# TODO(nnorwitz): merge with Parameter in some way? 
-class VariableDeclaration(_GenericDeclaration): - def __init__(self, start, end, name, var_type, initial_value, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.type = var_type - self.initial_value = initial_value - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. - return self.type.name == node.name - - def ToString(self): - """Return a string that tries to reconstitute the variable decl.""" - suffix = '%s %s' % (self.type, self.name) - if self.initial_value: - suffix += ' = ' + self.initial_value - return suffix - - def __str__(self): - return self._StringHelper(self.__class__.__name__, self.ToString()) - - -class Typedef(_GenericDeclaration): - def __init__(self, start, end, name, alias, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.alias = alias - - def IsDefinition(self): - return True - - def IsExportable(self): - return True - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. 
- name = node.name - for token in self.alias: - if token is not None and name == token.name: - return True - return False - - def __str__(self): - suffix = '%s, %s' % (self.name, self.alias) - return self._TypeStringHelper(suffix) - - -class _NestedType(_GenericDeclaration): - def __init__(self, start, end, name, fields, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.fields = fields - - def IsDefinition(self): - return True - - def IsExportable(self): - return True - - def __str__(self): - suffix = '%s, {%s}' % (self.name, self.fields) - return self._TypeStringHelper(suffix) - - -class Union(_NestedType): - pass - - -class Enum(_NestedType): - pass - - -class Class(_GenericDeclaration): - def __init__(self, start, end, name, bases, templated_types, body, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.bases = bases - self.body = body - self.templated_types = templated_types - - def IsDeclaration(self): - return self.bases is None and self.body is None - - def IsDefinition(self): - return not self.IsDeclaration() - - def IsExportable(self): - return not self.IsDeclaration() - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. - if self.bases: - for token_list in self.bases: - # TODO(nnorwitz): bases are tokens, do name comparison. - for token in token_list: - if token.name == node.name: - return True - # TODO(nnorwitz): search in body too. 
- return False - - def __str__(self): - name = self.name - if self.templated_types: - name += '<%s>' % self.templated_types - suffix = '%s, %s, %s' % (name, self.bases, self.body) - return self._TypeStringHelper(suffix) - - -class Struct(Class): - pass - - -class Function(_GenericDeclaration): - def __init__(self, start, end, name, return_type, parameters, - modifiers, templated_types, body, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - converter = TypeConverter(namespace) - self.return_type = converter.CreateReturnType(return_type) - self.parameters = converter.ToParameters(parameters) - self.modifiers = modifiers - self.body = body - self.templated_types = templated_types - - def IsDeclaration(self): - return self.body is None - - def IsDefinition(self): - return self.body is not None - - def IsExportable(self): - if self.return_type and 'static' in self.return_type.modifiers: - return False - return None not in self.namespace - - def Requires(self, node): - if self.parameters: - # TODO(nnorwitz): parameters are tokens, do name comparison. - for p in self.parameters: - if p.name == node.name: - return True - # TODO(nnorwitz): search in body too. - return False - - def __str__(self): - # TODO(nnorwitz): add templated_types. - suffix = ('%s %s(%s), 0x%02x, %s' % - (self.return_type, self.name, self.parameters, - self.modifiers, self.body)) - return self._TypeStringHelper(suffix) - - -class Method(Function): - def __init__(self, start, end, name, in_class, return_type, parameters, - modifiers, templated_types, body, namespace): - Function.__init__(self, start, end, name, return_type, parameters, - modifiers, templated_types, body, namespace) - # TODO(nnorwitz): in_class could also be a namespace which can - # mess up finding functions properly. 
- self.in_class = in_class - - -class Type(_GenericDeclaration): - """Type used for any variable (eg class, primitive, struct, etc).""" - - def __init__(self, start, end, name, templated_types, modifiers, - reference, pointer, array): - """ - Args: - name: str name of main type - templated_types: [Class (Type?)] template type info between <> - modifiers: [str] type modifiers (keywords) eg, const, mutable, etc. - reference, pointer, array: bools - """ - _GenericDeclaration.__init__(self, start, end, name, []) - self.templated_types = templated_types - if not name and modifiers: - self.name = modifiers.pop() - self.modifiers = modifiers - self.reference = reference - self.pointer = pointer - self.array = array - - def __str__(self): - prefix = '' - if self.modifiers: - prefix = ' '.join(self.modifiers) + ' ' - name = str(self.name) - if self.templated_types: - name += '<%s>' % self.templated_types - suffix = prefix + name - if self.reference: - suffix += '&' - if self.pointer: - suffix += '*' - if self.array: - suffix += '[]' - return self._TypeStringHelper(suffix) - - # By definition, Is* are always False. A Type can only exist in - # some sort of variable declaration, parameter, or return value. - def IsDeclaration(self): - return False - - def IsDefinition(self): - return False - - def IsExportable(self): - return False - - -class TypeConverter(object): - - def __init__(self, namespace_stack): - self.namespace_stack = namespace_stack - - def _GetTemplateEnd(self, tokens, start): - count = 1 - end = start - while 1: - token = tokens[end] - end += 1 - if token.name == '<': - count += 1 - elif token.name == '>': - count -= 1 - if count == 0: - break - return tokens[start:end-1], end - - def ToType(self, tokens): - """Convert [Token,...] to [Class(...), ] useful for base classes. - For example, code like class Foo : public Bar { ... }; - the "Bar" portion gets converted to an AST. - - Returns: - [Class(...), ...] 
- """ - result = [] - name_tokens = [] - reference = pointer = array = False - - def AddType(templated_types): - # Partition tokens into name and modifier tokens. - names = [] - modifiers = [] - for t in name_tokens: - if keywords.IsKeyword(t.name): - modifiers.append(t.name) - else: - names.append(t.name) - name = ''.join(names) - if name_tokens: - result.append(Type(name_tokens[0].start, name_tokens[-1].end, - name, templated_types, modifiers, - reference, pointer, array)) - del name_tokens[:] - - i = 0 - end = len(tokens) - while i < end: - token = tokens[i] - if token.name == '<': - new_tokens, new_end = self._GetTemplateEnd(tokens, i+1) - AddType(self.ToType(new_tokens)) - # If there is a comma after the template, we need to consume - # that here otherwise it becomes part of the name. - i = new_end - reference = pointer = array = False - elif token.name == ',': - AddType([]) - reference = pointer = array = False - elif token.name == '*': - pointer = True - elif token.name == '&': - reference = True - elif token.name == '[': - pointer = True - elif token.name == ']': - pass - else: - name_tokens.append(token) - i += 1 - - if name_tokens: - # No '<' in the tokens, just a simple name and no template. - AddType([]) - return result - - def DeclarationToParts(self, parts, needs_name_removed): - name = None - default = [] - if needs_name_removed: - # Handle default (initial) values properly. - for i, t in enumerate(parts): - if t.name == '=': - default = parts[i+1:] - name = parts[i-1].name - if name == ']' and parts[i-2].name == '[': - name = parts[i-3].name - i -= 1 - parts = parts[:i-1] - break - else: - if parts[-1].token_type == tokenize.NAME: - name = parts.pop().name - else: - # TODO(nnorwitz): this is a hack that happens for code like - # Register(Foo); where it thinks this is a function call - # but it's actually a declaration. - name = '???' 
- modifiers = [] - type_name = [] - other_tokens = [] - templated_types = [] - i = 0 - end = len(parts) - while i < end: - p = parts[i] - if keywords.IsKeyword(p.name): - modifiers.append(p.name) - elif p.name == '<': - templated_tokens, new_end = self._GetTemplateEnd(parts, i+1) - templated_types = self.ToType(templated_tokens) - i = new_end - 1 - # Don't add a spurious :: to data members being initialized. - next_index = i + 1 - if next_index < end and parts[next_index].name == '::': - i += 1 - elif p.name in ('[', ']', '='): - # These are handled elsewhere. - other_tokens.append(p) - elif p.name not in ('*', '&', '>'): - # Ensure that names have a space between them. - if (type_name and type_name[-1].token_type == tokenize.NAME and - p.token_type == tokenize.NAME): - type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0)) - type_name.append(p) - else: - other_tokens.append(p) - i += 1 - type_name = ''.join([t.name for t in type_name]) - return name, type_name, templated_types, modifiers, default, other_tokens - - def ToParameters(self, tokens): - if not tokens: - return [] - - result = [] - name = type_name = '' - type_modifiers = [] - pointer = reference = array = False - first_token = None - default = [] - - def AddParameter(end): - if default: - del default[0] # Remove flag. - parts = self.DeclarationToParts(type_modifiers, True) - (name, type_name, templated_types, modifiers, - unused_default, unused_other_tokens) = parts - parameter_type = Type(first_token.start, first_token.end, - type_name, templated_types, modifiers, - reference, pointer, array) - p = Parameter(first_token.start, end, name, - parameter_type, default) - result.append(p) - - template_count = 0 - brace_count = 0 - for s in tokens: - if not first_token: - first_token = s - - # Check for braces before templates, as we can have unmatched '<>' - # inside default arguments. 
- if s.name == '{': - brace_count += 1 - elif s.name == '}': - brace_count -= 1 - if brace_count > 0: - type_modifiers.append(s) - continue - - if s.name == '<': - template_count += 1 - elif s.name == '>': - template_count -= 1 - if template_count > 0: - type_modifiers.append(s) - continue - - if s.name == ',': - AddParameter(s.start) - name = type_name = '' - type_modifiers = [] - pointer = reference = array = False - first_token = None - default = [] - elif s.name == '*': - pointer = True - elif s.name == '&': - reference = True - elif s.name == '[': - array = True - elif s.name == ']': - pass # Just don't add to type_modifiers. - elif s.name == '=': - # Got a default value. Add any value (None) as a flag. - default.append(None) - elif default: - default.append(s) - else: - type_modifiers.append(s) - AddParameter(tokens[-1].end) - return result - - def CreateReturnType(self, return_type_seq): - if not return_type_seq: - return None - start = return_type_seq[0].start - end = return_type_seq[-1].end - _, name, templated_types, modifiers, default, other_tokens = \ - self.DeclarationToParts(return_type_seq, False) - names = [n.name for n in other_tokens] - reference = '&' in names - pointer = '*' in names - array = '[' in names - return Type(start, end, name, templated_types, modifiers, - reference, pointer, array) - - def GetTemplateIndices(self, names): - # names is a list of strings. - start = names.index('<') - end = len(names) - 1 - while end > 0: - if names[end] == '>': - break - end -= 1 - return start, end+1 - -class AstBuilder(object): - def __init__(self, token_stream, filename, in_class='', visibility=None, - namespace_stack=[]): - self.tokens = token_stream - self.filename = filename - # TODO(nnorwitz): use a better data structure (deque) for the queue. - # Switching directions of the "queue" improved perf by about 25%. - # Using a deque should be even better since we access from both sides. 
- self.token_queue = [] - self.namespace_stack = namespace_stack[:] - self.in_class = in_class - if in_class is None: - self.in_class_name_only = None - else: - self.in_class_name_only = in_class.split('::')[-1] - self.visibility = visibility - self.in_function = False - self.current_token = None - # Keep the state whether we are currently handling a typedef or not. - self._handling_typedef = False - - self.converter = TypeConverter(self.namespace_stack) - - def HandleError(self, msg, token): - printable_queue = list(reversed(self.token_queue[-20:])) - sys.stderr.write('Got %s in %s @ %s %s\n' % - (msg, self.filename, token, printable_queue)) - - def Generate(self): - while 1: - token = self._GetNextToken() - if not token: - break - - # Get the next token. - self.current_token = token - - # Dispatch on the next token type. - if token.token_type == _INTERNAL_TOKEN: - if token.name == _NAMESPACE_POP: - self.namespace_stack.pop() - continue - - try: - result = self._GenerateOne(token) - if result is not None: - yield result - except: - self.HandleError('exception', token) - raise - - def _CreateVariable(self, pos_token, name, type_name, type_modifiers, - ref_pointer_name_seq, templated_types, value=None): - reference = '&' in ref_pointer_name_seq - pointer = '*' in ref_pointer_name_seq - array = '[' in ref_pointer_name_seq - var_type = Type(pos_token.start, pos_token.end, type_name, - templated_types, type_modifiers, - reference, pointer, array) - return VariableDeclaration(pos_token.start, pos_token.end, - name, var_type, value, self.namespace_stack) - - def _GenerateOne(self, token): - if token.token_type == tokenize.NAME: - if (keywords.IsKeyword(token.name) and - not keywords.IsBuiltinType(token.name)): - if token.name == 'enum': - # Pop the next token and only put it back if it's not - # 'class'. This allows us to support the two-token - # 'enum class' keyword as if it were simply 'enum'. 
- next = self._GetNextToken() - if next.name != 'class': - self._AddBackToken(next) - - method = getattr(self, 'handle_' + token.name) - return method() - elif token.name == self.in_class_name_only: - # The token name is the same as the class, must be a ctor if - # there is a paren. Otherwise, it's the return type. - # Peek ahead to get the next token to figure out which. - next = self._GetNextToken() - self._AddBackToken(next) - if next.token_type == tokenize.SYNTAX and next.name == '(': - return self._GetMethod([token], FUNCTION_CTOR, None, True) - # Fall through--handle like any other method. - - # Handle data or function declaration/definition. - syntax = tokenize.SYNTAX - temp_tokens, last_token = \ - self._GetVarTokensUpToIgnoringTemplates(syntax, - '(', ';', '{', '[') - temp_tokens.insert(0, token) - if last_token.name == '(': - # If there is an assignment before the paren, - # this is an expression, not a method. - expr = bool([e for e in temp_tokens if e.name == '=']) - if expr: - new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';') - temp_tokens.append(last_token) - temp_tokens.extend(new_temp) - last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0) - - if last_token.name == '[': - # Handle array, this isn't a method, unless it's an operator. - # TODO(nnorwitz): keep the size somewhere. - # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']') - temp_tokens.append(last_token) - if temp_tokens[-2].name == 'operator': - temp_tokens.append(self._GetNextToken()) - else: - temp_tokens2, last_token = \ - self._GetVarTokensUpTo(tokenize.SYNTAX, ';') - temp_tokens.extend(temp_tokens2) - - if last_token.name == ';': - # Handle data, this isn't a method. 
- parts = self.converter.DeclarationToParts(temp_tokens, True) - (name, type_name, templated_types, modifiers, default, - unused_other_tokens) = parts - - t0 = temp_tokens[0] - names = [t.name for t in temp_tokens] - if templated_types: - start, end = self.converter.GetTemplateIndices(names) - names = names[:start] + names[end:] - default = ''.join([t.name for t in default]) - return self._CreateVariable(t0, name, type_name, modifiers, - names, templated_types, default) - if last_token.name == '{': - self._AddBackTokens(temp_tokens[1:]) - self._AddBackToken(last_token) - method_name = temp_tokens[0].name - method = getattr(self, 'handle_' + method_name, None) - if not method: - # Must be declaring a variable. - # TODO(nnorwitz): handle the declaration. - return None - return method() - return self._GetMethod(temp_tokens, 0, None, False) - elif token.token_type == tokenize.SYNTAX: - if token.name == '~' and self.in_class: - # Must be a dtor (probably not in method body). - token = self._GetNextToken() - # self.in_class can contain A::Name, but the dtor will only - # be Name. Make sure to compare against the right value. - if (token.token_type == tokenize.NAME and - token.name == self.in_class_name_only): - return self._GetMethod([token], FUNCTION_DTOR, None, True) - # TODO(nnorwitz): handle a lot more syntax. - elif token.token_type == tokenize.PREPROCESSOR: - # TODO(nnorwitz): handle more preprocessor directives. - # token starts with a #, so remove it and strip whitespace. - name = token.name[1:].lstrip() - if name.startswith('include'): - # Remove "include". - name = name[7:].strip() - assert name - # Handle #include \ "header-on-second-line.h". - if name.startswith('\\'): - name = name[1:].strip() - assert name[0] in '<"', token - assert name[-1] in '>"', token - system = name[0] == '<' - filename = name[1:-1] - return Include(token.start, token.end, filename, system) - if name.startswith('define'): - # Remove "define". 
- name = name[6:].strip() - assert name - value = '' - for i, c in enumerate(name): - if c.isspace(): - value = name[i:].lstrip() - name = name[:i] - break - return Define(token.start, token.end, name, value) - if name.startswith('if') and name[2:3].isspace(): - condition = name[3:].strip() - if condition.startswith('0') or condition.startswith('(0)'): - self._SkipIf0Blocks() - return None - - def _GetTokensUpTo(self, expected_token_type, expected_token): - return self._GetVarTokensUpTo(expected_token_type, expected_token)[0] - - def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens): - last_token = self._GetNextToken() - tokens = [] - while (last_token.token_type != expected_token_type or - last_token.name not in expected_tokens): - tokens.append(last_token) - last_token = self._GetNextToken() - return tokens, last_token - - # Same as _GetVarTokensUpTo, but skips over '<...>' which could contain an - # expected token. - def _GetVarTokensUpToIgnoringTemplates(self, expected_token_type, - *expected_tokens): - last_token = self._GetNextToken() - tokens = [] - nesting = 0 - while (nesting > 0 or - last_token.token_type != expected_token_type or - last_token.name not in expected_tokens): - tokens.append(last_token) - last_token = self._GetNextToken() - if last_token.name == '<': - nesting += 1 - elif last_token.name == '>': - nesting -= 1 - return tokens, last_token - - # TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary. 
- def _IgnoreUpTo(self, token_type, token): - unused_tokens = self._GetTokensUpTo(token_type, token) - - def _SkipIf0Blocks(self): - count = 1 - while 1: - token = self._GetNextToken() - if token.token_type != tokenize.PREPROCESSOR: - continue - - name = token.name[1:].lstrip() - if name.startswith('endif'): - count -= 1 - if count == 0: - break - elif name.startswith('if'): - count += 1 - - def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None): - if GetNextToken is None: - GetNextToken = self._GetNextToken - # Assumes the current token is open_paren and we will consume - # and return up to the close_paren. - count = 1 - token = GetNextToken() - while 1: - if token.token_type == tokenize.SYNTAX: - if token.name == open_paren: - count += 1 - elif token.name == close_paren: - count -= 1 - if count == 0: - break - yield token - token = GetNextToken() - yield token - - def _GetParameters(self): - return self._GetMatchingChar('(', ')') - - def GetScope(self): - return self._GetMatchingChar('{', '}') - - def _GetNextToken(self): - if self.token_queue: - return self.token_queue.pop() - try: - return next(self.tokens) - except StopIteration: - return - - def _AddBackToken(self, token): - if token.whence == tokenize.WHENCE_STREAM: - token.whence = tokenize.WHENCE_QUEUE - self.token_queue.insert(0, token) - else: - assert token.whence == tokenize.WHENCE_QUEUE, token - self.token_queue.append(token) - - def _AddBackTokens(self, tokens): - if tokens: - if tokens[-1].whence == tokenize.WHENCE_STREAM: - for token in tokens: - token.whence = tokenize.WHENCE_QUEUE - self.token_queue[:0] = reversed(tokens) - else: - assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens - self.token_queue.extend(reversed(tokens)) - - def GetName(self, seq=None): - """Returns ([tokens], next_token_info).""" - GetNextToken = self._GetNextToken - if seq is not None: - it = iter(seq) - GetNextToken = lambda: next(it) - next_token = GetNextToken() - tokens = [] - last_token_was_name 
= False - while (next_token.token_type == tokenize.NAME or - (next_token.token_type == tokenize.SYNTAX and - next_token.name in ('::', '<'))): - # Two NAMEs in a row means the identifier should terminate. - # It's probably some sort of variable declaration. - if last_token_was_name and next_token.token_type == tokenize.NAME: - break - last_token_was_name = next_token.token_type == tokenize.NAME - tokens.append(next_token) - # Handle templated names. - if next_token.name == '<': - tokens.extend(self._GetMatchingChar('<', '>', GetNextToken)) - last_token_was_name = True - next_token = GetNextToken() - return tokens, next_token - - def GetMethod(self, modifiers, templated_types): - return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') - assert len(return_type_and_name) >= 1 - return self._GetMethod(return_type_and_name, modifiers, templated_types, - False) - - def _GetMethod(self, return_type_and_name, modifiers, templated_types, - get_paren): - template_portion = None - if get_paren: - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - if token.name == '<': - # Handle templatized dtors. - template_portion = [token] - template_portion.extend(self._GetMatchingChar('<', '>')) - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - assert token.name == '(', token - - name = return_type_and_name.pop() - # Handle templatized ctors. 
- if name.name == '>': - index = 1 - while return_type_and_name[index].name != '<': - index += 1 - template_portion = return_type_and_name[index:] + [name] - del return_type_and_name[index:] - name = return_type_and_name.pop() - elif name.name == ']': - rt = return_type_and_name - assert rt[-1].name == '[', return_type_and_name - assert rt[-2].name == 'operator', return_type_and_name - name_seq = return_type_and_name[-2:] - del return_type_and_name[-2:] - name = tokenize.Token(tokenize.NAME, 'operator[]', - name_seq[0].start, name.end) - # Get the open paren so _GetParameters() below works. - unused_open_paren = self._GetNextToken() - - # TODO(nnorwitz): store template_portion. - return_type = return_type_and_name - indices = name - if return_type: - indices = return_type[0] - - # Force ctor for templatized ctors. - if name.name == self.in_class and not modifiers: - modifiers |= FUNCTION_CTOR - parameters = list(self._GetParameters()) - del parameters[-1] # Remove trailing ')'. - - # Handling operator() is especially weird. - if name.name == 'operator' and not parameters: - token = self._GetNextToken() - assert token.name == '(', token - parameters = list(self._GetParameters()) - del parameters[-1] # Remove trailing ')'. - - token = self._GetNextToken() - while token.token_type == tokenize.NAME: - modifier_token = token - token = self._GetNextToken() - if modifier_token.name == 'const': - modifiers |= FUNCTION_CONST - elif modifier_token.name == '__attribute__': - # TODO(nnorwitz): handle more __attribute__ details. - modifiers |= FUNCTION_ATTRIBUTE - assert token.name == '(', token - # Consume everything between the (parens). - unused_tokens = list(self._GetMatchingChar('(', ')')) - token = self._GetNextToken() - elif modifier_token.name == 'throw': - modifiers |= FUNCTION_THROW - assert token.name == '(', token - # Consume everything between the (parens). 
- unused_tokens = list(self._GetMatchingChar('(', ')')) - token = self._GetNextToken() - elif modifier_token.name == 'override': - modifiers |= FUNCTION_OVERRIDE - elif modifier_token.name == modifier_token.name.upper(): - # HACK(nnorwitz): assume that all upper-case names - # are some macro we aren't expanding. - modifiers |= FUNCTION_UNKNOWN_ANNOTATION - else: - self.HandleError('unexpected token', modifier_token) - - assert token.token_type == tokenize.SYNTAX, token - # Handle ctor initializers. - if token.name == ':': - # TODO(nnorwitz): anything else to handle for initializer list? - while token.name != ';' and token.name != '{': - token = self._GetNextToken() - - # Handle pointer to functions that are really data but look - # like method declarations. - if token.name == '(': - if parameters[0].name == '*': - # name contains the return type. - name = parameters.pop() - # parameters contains the name of the data. - modifiers = [p.name for p in parameters] - # Already at the ( to open the parameter list. - function_parameters = list(self._GetMatchingChar('(', ')')) - del function_parameters[-1] # Remove trailing ')'. - # TODO(nnorwitz): store the function_parameters. - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - assert token.name == ';', token - return self._CreateVariable(indices, name.name, indices.name, - modifiers, '', None) - # At this point, we got something like: - # return_type (type::*name_)(params); - # This is a data member called name_ that is a function pointer. - # With this code: void (sq_type::*field_)(string&); - # We get: name=void return_type=[] parameters=sq_type ... field_ - # TODO(nnorwitz): is return_type always empty? - # TODO(nnorwitz): this isn't even close to being correct. - # Just put in something so we don't crash and can move on. - real_name = parameters[-1] - modifiers = [p.name for p in self._GetParameters()] - del modifiers[-1] # Remove trailing ')'. 
- return self._CreateVariable(indices, real_name.name, indices.name, - modifiers, '', None) - - if token.name == '{': - body = list(self.GetScope()) - del body[-1] # Remove trailing '}'. - else: - body = None - if token.name == '=': - token = self._GetNextToken() - - if token.name == 'default' or token.name == 'delete': - # Ignore explicitly defaulted and deleted special members - # in C++11. - token = self._GetNextToken() - else: - # Handle pure-virtual declarations. - assert token.token_type == tokenize.CONSTANT, token - assert token.name == '0', token - modifiers |= FUNCTION_PURE_VIRTUAL - token = self._GetNextToken() - - if token.name == '[': - # TODO(nnorwitz): store tokens and improve parsing. - # template char (&ASH(T (&seq)[N]))[N]; - tokens = list(self._GetMatchingChar('[', ']')) - token = self._GetNextToken() - - assert token.name == ';', (token, return_type_and_name, parameters) - - # Looks like we got a method, not a function. - if len(return_type) > 2 and return_type[-1].name == '::': - return_type, in_class = \ - self._GetReturnTypeAndClassName(return_type) - return Method(indices.start, indices.end, name.name, in_class, - return_type, parameters, modifiers, templated_types, - body, self.namespace_stack) - return Function(indices.start, indices.end, name.name, return_type, - parameters, modifiers, templated_types, body, - self.namespace_stack) - - def _GetReturnTypeAndClassName(self, token_seq): - # Splitting the return type from the class name in a method - # can be tricky. For example, Return::Type::Is::Hard::To::Find(). - # Where is the return type and where is the class name? - # The heuristic used is to pull the last name as the class name. - # This includes all the templated type info. - # TODO(nnorwitz): if there is only One name like in the - # example above, punt and assume the last bit is the class name. - - # Ignore a :: prefix, if exists so we can find the first real name. 
- i = 0 - if token_seq[0].name == '::': - i = 1 - # Ignore a :: suffix, if exists. - end = len(token_seq) - 1 - if token_seq[end-1].name == '::': - end -= 1 - - # Make a copy of the sequence so we can append a sentinel - # value. This is required for GetName will has to have some - # terminating condition beyond the last name. - seq_copy = token_seq[i:end] - seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0)) - names = [] - while i < end: - # Iterate through the sequence parsing out each name. - new_name, next = self.GetName(seq_copy[i:]) - assert new_name, 'Got empty new_name, next=%s' % next - # We got a pointer or ref. Add it to the name. - if next and next.token_type == tokenize.SYNTAX: - new_name.append(next) - names.append(new_name) - i += len(new_name) - - # Now that we have the names, it's time to undo what we did. - - # Remove the sentinel value. - names[-1].pop() - # Flatten the token sequence for the return type. - return_type = [e for seq in names[:-1] for e in seq] - # The class name is the last name. - class_name = names[-1] - return return_type, class_name - - def handle_bool(self): - pass - - def handle_char(self): - pass - - def handle_int(self): - pass - - def handle_long(self): - pass - - def handle_short(self): - pass - - def handle_double(self): - pass - - def handle_float(self): - pass - - def handle_void(self): - pass - - def handle_wchar_t(self): - pass - - def handle_unsigned(self): - pass - - def handle_signed(self): - pass - - def _GetNestedType(self, ctor): - name = None - name_tokens, token = self.GetName() - if name_tokens: - name = ''.join([t.name for t in name_tokens]) - - # Handle forward declarations. 
- if token.token_type == tokenize.SYNTAX and token.name == ';': - return ctor(token.start, token.end, name, None, - self.namespace_stack) - - if token.token_type == tokenize.NAME and self._handling_typedef: - self._AddBackToken(token) - return ctor(token.start, token.end, name, None, - self.namespace_stack) - - # Must be the type declaration. - fields = list(self._GetMatchingChar('{', '}')) - del fields[-1] # Remove trailing '}'. - if token.token_type == tokenize.SYNTAX and token.name == '{': - next = self._GetNextToken() - new_type = ctor(token.start, token.end, name, fields, - self.namespace_stack) - # A name means this is an anonymous type and the name - # is the variable declaration. - if next.token_type != tokenize.NAME: - return new_type - name = new_type - token = next - - # Must be variable declaration using the type prefixed with keyword. - assert token.token_type == tokenize.NAME, token - return self._CreateVariable(token, token.name, name, [], '', None) - - def handle_struct(self): - # Special case the handling typedef/aliasing of structs here. - # It would be a pain to handle in the class code. - name_tokens, var_token = self.GetName() - if name_tokens: - next_token = self._GetNextToken() - is_syntax = (var_token.token_type == tokenize.SYNTAX and - var_token.name[0] in '*&') - is_variable = (var_token.token_type == tokenize.NAME and - next_token.name == ';') - variable = var_token - if is_syntax and not is_variable: - variable = next_token - temp = self._GetNextToken() - if temp.token_type == tokenize.SYNTAX and temp.name == '(': - # Handle methods declared to return a struct. 
- t0 = name_tokens[0] - struct = tokenize.Token(tokenize.NAME, 'struct', - t0.start-7, t0.start-2) - type_and_name = [struct] - type_and_name.extend(name_tokens) - type_and_name.extend((var_token, next_token)) - return self._GetMethod(type_and_name, 0, None, False) - assert temp.name == ';', (temp, name_tokens, var_token) - if is_syntax or (is_variable and not self._handling_typedef): - modifiers = ['struct'] - type_name = ''.join([t.name for t in name_tokens]) - position = name_tokens[0] - return self._CreateVariable(position, variable.name, type_name, - modifiers, var_token.name, None) - name_tokens.extend((var_token, next_token)) - self._AddBackTokens(name_tokens) - else: - self._AddBackToken(var_token) - return self._GetClass(Struct, VISIBILITY_PUBLIC, None) - - def handle_union(self): - return self._GetNestedType(Union) - - def handle_enum(self): - return self._GetNestedType(Enum) - - def handle_auto(self): - # TODO(nnorwitz): warn about using auto? Probably not since it - # will be reclaimed and useful for C++0x. - pass - - def handle_register(self): - pass - - def handle_const(self): - pass - - def handle_inline(self): - pass - - def handle_extern(self): - pass - - def handle_static(self): - pass - - def handle_virtual(self): - # What follows must be a method. - token = token2 = self._GetNextToken() - if token.name == 'inline': - # HACK(nnorwitz): handle inline dtors by ignoring 'inline'. 
- token2 = self._GetNextToken() - if token2.token_type == tokenize.SYNTAX and token2.name == '~': - return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None) - assert token.token_type == tokenize.NAME or token.name == '::', token - return_type_and_name, _ = self._GetVarTokensUpToIgnoringTemplates( - tokenize.SYNTAX, '(') # ) - return_type_and_name.insert(0, token) - if token2 is not token: - return_type_and_name.insert(1, token2) - return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL, - None, False) - - def handle_volatile(self): - pass - - def handle_mutable(self): - pass - - def handle_public(self): - assert self.in_class - self.visibility = VISIBILITY_PUBLIC - - def handle_protected(self): - assert self.in_class - self.visibility = VISIBILITY_PROTECTED - - def handle_private(self): - assert self.in_class - self.visibility = VISIBILITY_PRIVATE - - def handle_friend(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert tokens - t0 = tokens[0] - return Friend(t0.start, t0.end, tokens, self.namespace_stack) - - def handle_static_cast(self): - pass - - def handle_const_cast(self): - pass - - def handle_dynamic_cast(self): - pass - - def handle_reinterpret_cast(self): - pass - - def handle_new(self): - pass - - def handle_delete(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert tokens - return Delete(tokens[0].start, tokens[0].end, tokens) - - def handle_typedef(self): - token = self._GetNextToken() - if (token.token_type == tokenize.NAME and - keywords.IsKeyword(token.name)): - # Token must be struct/enum/union/class. - method = getattr(self, 'handle_' + token.name) - self._handling_typedef = True - tokens = [method()] - self._handling_typedef = False - else: - tokens = [token] - - # Get the remainder of the typedef up to the semi-colon. - tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';')) - - # TODO(nnorwitz): clean all this up. 
- assert tokens - name = tokens.pop() - indices = name - if tokens: - indices = tokens[0] - if not indices: - indices = token - if name.name == ')': - # HACK(nnorwitz): Handle pointers to functions "properly". - if (len(tokens) >= 4 and - tokens[1].name == '(' and tokens[2].name == '*'): - tokens.append(name) - name = tokens[3] - elif name.name == ']': - # HACK(nnorwitz): Handle arrays properly. - if len(tokens) >= 2: - tokens.append(name) - name = tokens[1] - new_type = tokens - if tokens and isinstance(tokens[0], tokenize.Token): - new_type = self.converter.ToType(tokens)[0] - return Typedef(indices.start, indices.end, name.name, - new_type, self.namespace_stack) - - def handle_typeid(self): - pass # Not needed yet. - - def handle_typename(self): - pass # Not needed yet. - - def _GetTemplatedTypes(self): - result = collections.OrderedDict() - tokens = list(self._GetMatchingChar('<', '>')) - len_tokens = len(tokens) - 1 # Ignore trailing '>'. - i = 0 - while i < len_tokens: - key = tokens[i].name - i += 1 - if keywords.IsKeyword(key) or key == ',': - continue - type_name = default = None - if i < len_tokens: - i += 1 - if tokens[i-1].name == '=': - assert i < len_tokens, '%s %s' % (i, tokens) - default, unused_next_token = self.GetName(tokens[i:]) - i += len(default) - else: - if tokens[i-1].name != ',': - # We got something like: Type variable. - # Re-adjust the key (variable) and type_name (Type). - key = tokens[i-1].name - type_name = tokens[i-2] - - result[key] = (type_name, default) - return result - - def handle_template(self): - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - assert token.name == '<', token - templated_types = self._GetTemplatedTypes() - # TODO(nnorwitz): for now, just ignore the template params. 
- token = self._GetNextToken() - if token.token_type == tokenize.NAME: - if token.name == 'class': - return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types) - elif token.name == 'struct': - return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types) - elif token.name == 'friend': - return self.handle_friend() - self._AddBackToken(token) - tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';') - tokens.append(last) - self._AddBackTokens(tokens) - if last.name == '(': - return self.GetMethod(FUNCTION_NONE, templated_types) - # Must be a variable definition. - return None - - def handle_true(self): - pass # Nothing to do. - - def handle_false(self): - pass # Nothing to do. - - def handle_asm(self): - pass # Not needed yet. - - def handle_class(self): - return self._GetClass(Class, VISIBILITY_PRIVATE, None) - - def _GetBases(self): - # Get base classes. - bases = [] - while 1: - token = self._GetNextToken() - assert token.token_type == tokenize.NAME, token - # TODO(nnorwitz): store kind of inheritance...maybe. - if token.name not in ('public', 'protected', 'private'): - # If inheritance type is not specified, it is private. - # Just put the token back so we can form a name. - # TODO(nnorwitz): it would be good to warn about this. - self._AddBackToken(token) - else: - # Check for virtual inheritance. - token = self._GetNextToken() - if token.name != 'virtual': - self._AddBackToken(token) - else: - # TODO(nnorwitz): store that we got virtual for this base. - pass - base, next_token = self.GetName() - bases_ast = self.converter.ToType(base) - assert len(bases_ast) == 1, bases_ast - bases.append(bases_ast[0]) - assert next_token.token_type == tokenize.SYNTAX, next_token - if next_token.name == '{': - token = next_token - break - # Support multiple inheritance. 
- assert next_token.name == ',', next_token - return bases, token - - def _GetClass(self, class_type, visibility, templated_types): - class_name = None - class_token = self._GetNextToken() - if class_token.token_type != tokenize.NAME: - assert class_token.token_type == tokenize.SYNTAX, class_token - token = class_token - else: - # Skip any macro (e.g. storage class specifiers) after the - # 'class' keyword. - next_token = self._GetNextToken() - if next_token.token_type == tokenize.NAME: - self._AddBackToken(next_token) - else: - self._AddBackTokens([class_token, next_token]) - name_tokens, token = self.GetName() - class_name = ''.join([t.name for t in name_tokens]) - bases = None - if token.token_type == tokenize.SYNTAX: - if token.name == ';': - # Forward declaration. - return class_type(class_token.start, class_token.end, - class_name, None, templated_types, None, - self.namespace_stack) - if token.name in '*&': - # Inline forward declaration. Could be method or data. - name_token = self._GetNextToken() - next_token = self._GetNextToken() - if next_token.name == ';': - # Handle data - modifiers = ['class'] - return self._CreateVariable(class_token, name_token.name, - class_name, - modifiers, token.name, None) - else: - # Assume this is a method. 
- tokens = (class_token, token, name_token, next_token) - self._AddBackTokens(tokens) - return self.GetMethod(FUNCTION_NONE, None) - if token.name == ':': - bases, token = self._GetBases() - - body = None - if token.token_type == tokenize.SYNTAX and token.name == '{': - assert token.token_type == tokenize.SYNTAX, token - assert token.name == '{', token - - ast = AstBuilder(self.GetScope(), self.filename, class_name, - visibility, self.namespace_stack) - body = list(ast.Generate()) - - if not self._handling_typedef: - token = self._GetNextToken() - if token.token_type != tokenize.NAME: - assert token.token_type == tokenize.SYNTAX, token - assert token.name == ';', token - else: - new_class = class_type(class_token.start, class_token.end, - class_name, bases, None, - body, self.namespace_stack) - - modifiers = [] - return self._CreateVariable(class_token, - token.name, new_class, - modifiers, token.name, None) - else: - if not self._handling_typedef: - self.HandleError('non-typedef token', token) - self._AddBackToken(token) - - return class_type(class_token.start, class_token.end, class_name, - bases, templated_types, body, self.namespace_stack) - - def handle_namespace(self): - # Support anonymous namespaces. - name = None - name_tokens, token = self.GetName() - if name_tokens: - name = ''.join([t.name for t in name_tokens]) - self.namespace_stack.append(name) - assert token.token_type == tokenize.SYNTAX, token - # Create an internal token that denotes when the namespace is complete. - internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP, - None, None) - internal_token.whence = token.whence - if token.name == '=': - # TODO(nnorwitz): handle aliasing namespaces. - name, next_token = self.GetName() - assert next_token.name == ';', next_token - self._AddBackToken(internal_token) - else: - assert token.name == '{', token - tokens = list(self.GetScope()) - # Replace the trailing } with the internal namespace pop token. 
- tokens[-1] = internal_token - # Handle namespace with nothing in it. - self._AddBackTokens(tokens) - return None - - def handle_using(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert tokens - return Using(tokens[0].start, tokens[0].end, tokens) - - def handle_explicit(self): - assert self.in_class - # Nothing much to do. - # TODO(nnorwitz): maybe verify the method name == class name. - # This must be a ctor. - return self.GetMethod(FUNCTION_CTOR, None) - - def handle_this(self): - pass # Nothing to do. - - def handle_operator(self): - # Pull off the next token(s?) and make that part of the method name. - pass - - def handle_sizeof(self): - pass - - def handle_case(self): - pass - - def handle_switch(self): - pass - - def handle_default(self): - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX - assert token.name == ':' - - def handle_if(self): - pass - - def handle_else(self): - pass - - def handle_return(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - if not tokens: - return Return(self.current_token.start, self.current_token.end, None) - return Return(tokens[0].start, tokens[0].end, tokens) - - def handle_goto(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert len(tokens) == 1, str(tokens) - return Goto(tokens[0].start, tokens[0].end, tokens[0].name) - - def handle_try(self): - pass # Not needed yet. - - def handle_catch(self): - pass # Not needed yet. - - def handle_throw(self): - pass # Not needed yet. - - def handle_while(self): - pass - - def handle_do(self): - pass - - def handle_for(self): - pass - - def handle_break(self): - self._IgnoreUpTo(tokenize.SYNTAX, ';') - - def handle_continue(self): - self._IgnoreUpTo(tokenize.SYNTAX, ';') - - -def BuilderFromSource(source, filename): - """Utility method that returns an AstBuilder from source code. 
- - Args: - source: 'C++ source code' - filename: 'file1' - - Returns: - AstBuilder - """ - return AstBuilder(tokenize.GetTokens(source), filename) - - -def PrintIndentifiers(filename, should_print): - """Prints all identifiers for a C++ source file. - - Args: - filename: 'file1' - should_print: predicate with signature: bool Function(token) - """ - source = utils.ReadFile(filename, False) - if source is None: - sys.stderr.write('Unable to find: %s\n' % filename) - return - - #print('Processing %s' % actual_filename) - builder = BuilderFromSource(source, filename) - try: - for node in builder.Generate(): - if should_print(node): - print(node.name) - except KeyboardInterrupt: - return - except: - pass - - -def PrintAllIndentifiers(filenames, should_print): - """Prints all identifiers for each C++ source file in filenames. - - Args: - filenames: ['file1', 'file2', ...] - should_print: predicate with signature: bool Function(token) - """ - for path in filenames: - PrintIndentifiers(path, should_print) - - -def main(argv): - for filename in argv[1:]: - source = utils.ReadFile(filename) - if source is None: - continue - - print('Processing %s' % filename) - builder = BuilderFromSource(source, filename) - try: - entire_ast = filter(None, builder.Generate()) - except KeyboardInterrupt: - return - except: - # Already printed a warning, print the traceback and continue. - traceback.print_exc() - else: - if utils.DEBUG: - for ast in entire_ast: - print(ast) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/ext/googletest/googlemock/scripts/generator/cpp/gmock_class.py b/ext/googletest/googlemock/scripts/generator/cpp/gmock_class.py deleted file mode 100755 index 3e21022bf9..0000000000 --- a/ext/googletest/googlemock/scripts/generator/cpp/gmock_class.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generate Google Mock classes from base classes. - -This program will read in a C++ source file and output the Google Mock -classes for the specified classes. If no class is specified, all -classes in the source file are emitted. - -Usage: - gmock_class.py header-file.h [ClassName]... - -Output is sent to stdout. -""" - -import os -import re -import sys - -from cpp import ast -from cpp import utils - -# Preserve compatibility with Python 2.3. -try: - _dummy = set -except NameError: - import sets - - set = sets.Set - -_VERSION = (1, 0, 1) # The version of this script. -# How many spaces to indent. Can set me with the INDENT environment variable. -_INDENT = 2 - - -def _RenderType(ast_type): - """Renders the potentially recursively templated type into a string. - - Args: - ast_type: The AST of the type. - - Returns: - Rendered string of the type. - """ - # Add modifiers like 'const'. - modifiers = '' - if ast_type.modifiers: - modifiers = ' '.join(ast_type.modifiers) + ' ' - return_type = modifiers + ast_type.name - if ast_type.templated_types: - # Collect template args. 
- template_args = [] - for arg in ast_type.templated_types: - rendered_arg = _RenderType(arg) - template_args.append(rendered_arg) - return_type += '<' + ', '.join(template_args) + '>' - if ast_type.pointer: - return_type += '*' - if ast_type.reference: - return_type += '&' - return return_type - - -def _GenerateArg(source): - """Strips out comments, default arguments, and redundant spaces from a single argument. - - Args: - source: A string for a single argument. - - Returns: - Rendered string of the argument. - """ - # Remove end of line comments before eliminating newlines. - arg = re.sub(r'//.*', '', source) - - # Remove c-style comments. - arg = re.sub(r'/\*.*\*/', '', arg) - - # Remove default arguments. - arg = re.sub(r'=.*', '', arg) - - # Collapse spaces and newlines into a single space. - arg = re.sub(r'\s+', ' ', arg) - return arg.strip() - - -def _EscapeForMacro(s): - """Escapes a string for use as an argument to a C++ macro.""" - paren_count = 0 - for c in s: - if c == '(': - paren_count += 1 - elif c == ')': - paren_count -= 1 - elif c == ',' and paren_count == 0: - return '(' + s + ')' - return s - - -def _GenerateMethods(output_lines, source, class_node): - function_type = ( - ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL | ast.FUNCTION_OVERRIDE) - ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR - indent = ' ' * _INDENT - - for node in class_node.body: - # We only care about virtual functions. - if (isinstance(node, ast.Function) and node.modifiers & function_type and - not node.modifiers & ctor_or_dtor): - # Pick out all the elements we need from the original function. 
- modifiers = 'override' - if node.modifiers & ast.FUNCTION_CONST: - modifiers = 'const, ' + modifiers - - return_type = 'void' - if node.return_type: - return_type = _EscapeForMacro(_RenderType(node.return_type)) - - args = [] - for p in node.parameters: - arg = _GenerateArg(source[p.start:p.end]) - if arg != 'void': - args.append(_EscapeForMacro(arg)) - - # Create the mock method definition. - output_lines.extend([ - '%sMOCK_METHOD(%s, %s, (%s), (%s));' % - (indent, return_type, node.name, ', '.join(args), modifiers) - ]) - - -def _GenerateMocks(filename, source, ast_list, desired_class_names): - processed_class_names = set() - lines = [] - for node in ast_list: - if (isinstance(node, ast.Class) and node.body and - # desired_class_names being None means that all classes are selected. - (not desired_class_names or node.name in desired_class_names)): - class_name = node.name - parent_name = class_name - processed_class_names.add(class_name) - class_node = node - # Add namespace before the class. - if class_node.namespace: - lines.extend(['namespace %s {' % n for n in class_node.namespace]) # } - lines.append('') - - # Add template args for templated classes. - if class_node.templated_types: - # TODO(paulchang): Handle non-type template arguments (e.g. - # template). - - # class_node.templated_types is an OrderedDict from strings to a tuples. - # The key is the name of the template, and the value is - # (type_name, default). Both type_name and default could be None. - template_args = class_node.templated_types.keys() - template_decls = ['typename ' + arg for arg in template_args] - lines.append('template <' + ', '.join(template_decls) + '>') - parent_name += '<' + ', '.join(template_args) + '>' - - # Add the class prolog. - lines.append('class Mock%s : public %s {' # } - % (class_name, parent_name)) - lines.append('%spublic:' % (' ' * (_INDENT // 2))) - - # Add all the methods. - _GenerateMethods(lines, source, class_node) - - # Close the class. 
- if lines: - # If there are no virtual methods, no need for a public label. - if len(lines) == 2: - del lines[-1] - - # Only close the class if there really is a class. - lines.append('};') - lines.append('') # Add an extra newline. - - # Close the namespace. - if class_node.namespace: - for i in range(len(class_node.namespace) - 1, -1, -1): - lines.append('} // namespace %s' % class_node.namespace[i]) - lines.append('') # Add an extra newline. - - if desired_class_names: - missing_class_name_list = list(desired_class_names - processed_class_names) - if missing_class_name_list: - missing_class_name_list.sort() - sys.stderr.write('Class(es) not found in %s: %s\n' % - (filename, ', '.join(missing_class_name_list))) - elif not processed_class_names: - sys.stderr.write('No class found in %s\n' % filename) - - return lines - - -def main(argv=sys.argv): - if len(argv) < 2: - sys.stderr.write('Google Mock Class Generator v%s\n\n' % - '.'.join(map(str, _VERSION))) - sys.stderr.write(__doc__) - return 1 - - global _INDENT - try: - _INDENT = int(os.environ['INDENT']) - except KeyError: - pass - except: - sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT')) - - filename = argv[1] - desired_class_names = None # None means all classes in the source file. - if len(argv) >= 3: - desired_class_names = set(argv[2:]) - source = utils.ReadFile(filename) - if source is None: - return 1 - - builder = ast.BuilderFromSource(source, filename) - try: - entire_ast = filter(None, builder.Generate()) - except KeyboardInterrupt: - return - except: - # An error message was already printed since we couldn't parse. 
- sys.exit(1) - else: - lines = _GenerateMocks(filename, source, entire_ast, desired_class_names) - sys.stdout.write('\n'.join(lines)) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/ext/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py b/ext/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py deleted file mode 100755 index eff475f411..0000000000 --- a/ext/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py +++ /dev/null @@ -1,570 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Neal Norwitz All Rights Reserved. -# Portions Copyright 2009 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for gmock.scripts.generator.cpp.gmock_class.""" - -import os -import sys -import unittest - -# Allow the cpp imports below to work when run as a standalone script. 
-sys.path.append(os.path.join(os.path.dirname(__file__), '..')) - -from cpp import ast -from cpp import gmock_class - - -class TestCase(unittest.TestCase): - """Helper class that adds assert methods.""" - - @staticmethod - def StripLeadingWhitespace(lines): - """Strip leading whitespace in each line in 'lines'.""" - return '\n'.join([s.lstrip() for s in lines.split('\n')]) - - def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines): - """Specialized assert that ignores the indent level.""" - self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines)) - - -class GenerateMethodsTest(TestCase): - - @staticmethod - def GenerateMethodSource(cpp_source): - """Convert C++ source to Google Mock output source lines.""" - method_source_lines = [] - # is a pseudo-filename, it is not read or written. - builder = ast.BuilderFromSource(cpp_source, '') - ast_list = list(builder.Generate()) - gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0]) - return '\n'.join(method_source_lines) - - def testSimpleMethod(self): - source = """ -class Foo { - public: - virtual int Bar(); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleConstructorsAndDestructor(self): - source = """ -class Foo { - public: - Foo(); - Foo(int x); - Foo(const Foo& f); - Foo(Foo&& f); - ~Foo(); - virtual int Bar() = 0; -}; -""" - # The constructors and destructor should be ignored. - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testVirtualDestructor(self): - source = """ -class Foo { - public: - virtual ~Foo(); - virtual int Bar() = 0; -}; -""" - # The destructor should be ignored. 
- self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testExplicitlyDefaultedConstructorsAndDestructor(self): - source = """ -class Foo { - public: - Foo() = default; - Foo(const Foo& f) = default; - Foo(Foo&& f) = default; - ~Foo() = default; - virtual int Bar() = 0; -}; -""" - # The constructors and destructor should be ignored. - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testExplicitlyDeletedConstructorsAndDestructor(self): - source = """ -class Foo { - public: - Foo() = delete; - Foo(const Foo& f) = delete; - Foo(Foo&& f) = delete; - ~Foo() = delete; - virtual int Bar() = 0; -}; -""" - # The constructors and destructor should be ignored. - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleOverrideMethod(self): - source = """ -class Foo { - public: - int Bar() override; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleConstMethod(self): - source = """ -class Foo { - public: - virtual void Bar(bool flag) const; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (bool flag), (const, override));', - self.GenerateMethodSource(source)) - - def testExplicitVoid(self): - source = """ -class Foo { - public: - virtual int Bar(void); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testStrangeNewlineInParameter(self): - source = """ -class Foo { - public: - virtual void Bar(int -a) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a), (override));', - self.GenerateMethodSource(source)) - - def testDefaultParameters(self): - source = """ -class Foo { - public: - 
virtual void Bar(int a, char c = 'x') = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, char c), (override));', - self.GenerateMethodSource(source)) - - def testMultipleDefaultParameters(self): - source = """ -class Foo { - public: - virtual void Bar( - int a = 42, - char c = 'x', - const int* const p = nullptr, - const std::string& s = "42", - char tab[] = {'4','2'}, - int const *& rp = aDefaultPointer) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, ' - '(int a, char c, const int* const p, const std::string& s, char tab[], int const *& rp), ' - '(override));', self.GenerateMethodSource(source)) - - def testMultipleSingleLineDefaultParameters(self): - source = """ -class Foo { - public: - virtual void Bar(int a = 42, int b = 43, int c = 44) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, int b, int c), (override));', - self.GenerateMethodSource(source)) - - def testConstDefaultParameter(self): - source = """ -class Test { - public: - virtual bool Bar(const int test_arg = 42) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(bool, Bar, (const int test_arg), (override));', - self.GenerateMethodSource(source)) - - def testConstRefDefaultParameter(self): - source = """ -class Test { - public: - virtual bool Bar(const std::string& test_arg = "42" ) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(bool, Bar, (const std::string& test_arg), (override));', - self.GenerateMethodSource(source)) - - def testRemovesCommentsWhenDefaultsArePresent(self): - source = """ -class Foo { - public: - virtual void Bar(int a = 42 /* a comment */, - char /* other comment */ c= 'x') = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, char c), (override));', - self.GenerateMethodSource(source)) - - def testDoubleSlashCommentsInParameterListAreRemoved(self): - source = """ -class Foo { - 
public: - virtual void Bar(int a, // inline comments should be elided. - int b // inline comments should be elided. - ) const = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, int b), (const, override));', - self.GenerateMethodSource(source)) - - def testCStyleCommentsInParameterListAreNotRemoved(self): - # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these - # comments. Also note that C style comments after the last parameter - # are still elided. - source = """ -class Foo { - public: - virtual const string& Bar(int /* keeper */, int b); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(const string&, Bar, (int, int b), (override));', - self.GenerateMethodSource(source)) - - def testArgsOfTemplateTypes(self): - source = """ -class Foo { - public: - virtual int Bar(const vector& v, map* output); -};""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (const vector& v, (map* output)), (override));', - self.GenerateMethodSource(source)) - - def testReturnTypeWithOneTemplateArg(self): - source = """ -class Foo { - public: - virtual vector* Bar(int n); -};""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(vector*, Bar, (int n), (override));', - self.GenerateMethodSource(source)) - - def testReturnTypeWithManyTemplateArgs(self): - source = """ -class Foo { - public: - virtual map Bar(); -};""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD((map), Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleMethodInTemplatedClass(self): - source = """ -template -class Foo { - public: - virtual int Bar(); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testPointerArgWithoutNames(self): - source = """ -class Foo { - virtual int Bar(C*); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (C*), (override));', - 
self.GenerateMethodSource(source)) - - def testReferenceArgWithoutNames(self): - source = """ -class Foo { - virtual int Bar(C&); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (C&), (override));', - self.GenerateMethodSource(source)) - - def testArrayArgWithoutNames(self): - source = """ -class Foo { - virtual int Bar(C[]); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (C[]), (override));', - self.GenerateMethodSource(source)) - - -class GenerateMocksTest(TestCase): - - @staticmethod - def GenerateMocks(cpp_source): - """Convert C++ source to complete Google Mock output source.""" - # is a pseudo-filename, it is not read or written. - filename = '' - builder = ast.BuilderFromSource(cpp_source, filename) - ast_list = list(builder.Generate()) - lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None) - return '\n'.join(lines) - - def testNamespaces(self): - source = """ -namespace Foo { -namespace Bar { class Forward; } -namespace Baz::Qux { - -class Test { - public: - virtual void Foo(); -}; - -} // namespace Baz::Qux -} // namespace Foo -""" - expected = """\ -namespace Foo { -namespace Baz::Qux { - -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; - -} // namespace Baz::Qux -} // namespace Foo -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testClassWithStorageSpecifierMacro(self): - source = """ -class STORAGE_SPECIFIER Test { - public: - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplatedForwardDeclaration(self): - source = """ -template class Forward; // Forward declaration should be ignored. 
-class Test { - public: - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplatedClass(self): - source = """ -template -class Test { - public: - virtual void Foo(); -}; -""" - expected = """\ -template -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplateInATemplateTypedef(self): - source = """ -class Test { - public: - typedef std::vector> FooType; - virtual void Bar(const FooType& test_arg); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Bar, (const FooType& test_arg), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplatedClassWithTemplatedArguments(self): - source = """ -template -class Test { - public: - virtual U Foo(T some_arg); -}; -""" - expected = """\ -template -class MockTest : public Test { -public: -MOCK_METHOD(U, Foo, (T some_arg), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplateInATemplateTypedefWithComma(self): - source = """ -class Test { - public: - typedef std::function>&, int> FooType; - virtual void Bar(const FooType& test_arg); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Bar, (const FooType& test_arg), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testParenthesizedCommaInArg(self): - source = """ -class Test { - public: - virtual void Bar(std::function f); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Bar, (std::function f), (override)); -}; -""" - 
self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testEnumType(self): - source = """ -class Test { - public: - enum Bar { - BAZ, QUX, QUUX, QUUUX - }; - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testEnumClassType(self): - source = """ -class Test { - public: - enum class Bar { - BAZ, QUX, QUUX, QUUUX - }; - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testStdFunction(self): - source = """ -class Test { - public: - Test(std::function foo) : foo_(foo) {} - - virtual std::function foo(); - - private: - std::function foo_; -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(std::function, foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - -if __name__ == '__main__': - unittest.main() diff --git a/ext/googletest/googlemock/scripts/generator/cpp/keywords.py b/ext/googletest/googlemock/scripts/generator/cpp/keywords.py deleted file mode 100755 index e4282714dd..0000000000 --- a/ext/googletest/googlemock/scripts/generator/cpp/keywords.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""C++ keywords and helper utilities for determining keywords.""" - -try: - # Python 3.x - import builtins -except ImportError: - # Python 2.x - import __builtin__ as builtins - - -if not hasattr(builtins, 'set'): - # Nominal support for Python 2.3. - from sets import Set as set - - -TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split()) -TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split()) -ACCESS = set('public protected private friend'.split()) - -CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split()) - -OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split()) -OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split()) - -CONTROL = set('case switch default if else return goto'.split()) -EXCEPTION = set('try catch throw'.split()) -LOOP = set('while do for break continue'.split()) - -ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP - - -def IsKeyword(token): - return token in ALL - -def IsBuiltinType(token): - if token in ('virtual', 'inline'): - # These only apply to methods, they can't be types by themselves. 
- return False - return token in TYPES or token in TYPE_MODIFIERS diff --git a/ext/googletest/googlemock/scripts/generator/cpp/tokenize.py b/ext/googletest/googlemock/scripts/generator/cpp/tokenize.py deleted file mode 100755 index a75edcb142..0000000000 --- a/ext/googletest/googlemock/scripts/generator/cpp/tokenize.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tokenize C++ source code.""" - -try: - # Python 3.x - import builtins -except ImportError: - # Python 2.x - import __builtin__ as builtins - - -import sys - -from cpp import utils - - -if not hasattr(builtins, 'set'): - # Nominal support for Python 2.3. - from sets import Set as set - - -# Add $ as a valid identifier char since so much code uses it. -_letters = 'abcdefghijklmnopqrstuvwxyz' -VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$') -HEX_DIGITS = set('0123456789abcdefABCDEF') -INT_OR_FLOAT_DIGITS = set('01234567890eE-+') - - -# C++0x string preffixes. -_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR')) - - -# Token types. -UNKNOWN = 'UNKNOWN' -SYNTAX = 'SYNTAX' -CONSTANT = 'CONSTANT' -NAME = 'NAME' -PREPROCESSOR = 'PREPROCESSOR' - -# Where the token originated from. This can be used for backtracking. -# It is always set to WHENCE_STREAM in this code. 
-WHENCE_STREAM, WHENCE_QUEUE = range(2) - - -class Token(object): - """Data container to represent a C++ token. - - Tokens can be identifiers, syntax char(s), constants, or - pre-processor directives. - - start contains the index of the first char of the token in the source - end contains the index of the last char of the token in the source - """ - - def __init__(self, token_type, name, start, end): - self.token_type = token_type - self.name = name - self.start = start - self.end = end - self.whence = WHENCE_STREAM - - def __str__(self): - if not utils.DEBUG: - return 'Token(%r)' % self.name - return 'Token(%r, %s, %s)' % (self.name, self.start, self.end) - - __repr__ = __str__ - - -def _GetString(source, start, i): - i = source.find('"', i+1) - while source[i-1] == '\\': - # Count the trailing backslashes. - backslash_count = 1 - j = i - 2 - while source[j] == '\\': - backslash_count += 1 - j -= 1 - # When trailing backslashes are even, they escape each other. - if (backslash_count % 2) == 0: - break - i = source.find('"', i+1) - return i + 1 - - -def _GetChar(source, start, i): - # NOTE(nnorwitz): may not be quite correct, should be good enough. - i = source.find("'", i+1) - while source[i-1] == '\\': - # Need to special case '\\'. - if (i - 2) > start and source[i-2] == '\\': - break - i = source.find("'", i+1) - # Try to handle unterminated single quotes (in a #if 0 block). - if i < 0: - i = start - return i + 1 - - -def GetTokens(source): - """Returns a sequence of Tokens. - - Args: - source: string of C++ source code. - - Yields: - Token that represents the next token in the source. - """ - # Cache various valid character sets for speed. - valid_identifier_chars = VALID_IDENTIFIER_CHARS - hex_digits = HEX_DIGITS - int_or_float_digits = INT_OR_FLOAT_DIGITS - int_or_float_digits2 = int_or_float_digits | set('.') - - # Only ignore errors while in a #if 0 block. 
- ignore_errors = False - count_ifs = 0 - - i = 0 - end = len(source) - while i < end: - # Skip whitespace. - while i < end and source[i].isspace(): - i += 1 - if i >= end: - return - - token_type = UNKNOWN - start = i - c = source[i] - if c.isalpha() or c == '_': # Find a string token. - token_type = NAME - while source[i] in valid_identifier_chars: - i += 1 - # String and character constants can look like a name if - # they are something like L"". - if (source[i] == "'" and (i - start) == 1 and - source[start:i] in 'uUL'): - # u, U, and L are valid C++0x character preffixes. - token_type = CONSTANT - i = _GetChar(source, start, i) - elif source[i] == "'" and source[start:i] in _STR_PREFIXES: - token_type = CONSTANT - i = _GetString(source, start, i) - elif c == '/' and source[i+1] == '/': # Find // comments. - i = source.find('\n', i) - if i == -1: # Handle EOF. - i = end - continue - elif c == '/' and source[i+1] == '*': # Find /* comments. */ - i = source.find('*/', i) + 2 - continue - elif c in ':+-<>&|*=': # : or :: (plus other chars). - token_type = SYNTAX - i += 1 - new_ch = source[i] - if new_ch == c and c != '>': # Treat ">>" as two tokens. - i += 1 - elif c == '-' and new_ch == '>': - i += 1 - elif new_ch == '=': - i += 1 - elif c in '()[]{}~!?^%;/.,': # Handle single char tokens. - token_type = SYNTAX - i += 1 - if c == '.' and source[i].isdigit(): - token_type = CONSTANT - i += 1 - while source[i] in int_or_float_digits: - i += 1 - # Handle float suffixes. - for suffix in ('l', 'f'): - if suffix == source[i:i+1].lower(): - i += 1 - break - elif c.isdigit(): # Find integer. - token_type = CONSTANT - if c == '0' and source[i+1] in 'xX': - # Handle hex digits. - i += 2 - while source[i] in hex_digits: - i += 1 - else: - while source[i] in int_or_float_digits2: - i += 1 - # Handle integer (and float) suffixes. 
- for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'): - size = len(suffix) - if suffix == source[i:i+size].lower(): - i += size - break - elif c == '"': # Find string. - token_type = CONSTANT - i = _GetString(source, start, i) - elif c == "'": # Find char. - token_type = CONSTANT - i = _GetChar(source, start, i) - elif c == '#': # Find pre-processor command. - token_type = PREPROCESSOR - got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace() - if got_if: - count_ifs += 1 - elif source[i:i+6] == '#endif': - count_ifs -= 1 - if count_ifs == 0: - ignore_errors = False - - # TODO(nnorwitz): handle preprocessor statements (\ continuations). - while 1: - i1 = source.find('\n', i) - i2 = source.find('//', i) - i3 = source.find('/*', i) - i4 = source.find('"', i) - # NOTE(nnorwitz): doesn't handle comments in #define macros. - # Get the first important symbol (newline, comment, EOF/end). - i = min([x for x in (i1, i2, i3, i4, end) if x != -1]) - - # Handle #include "dir//foo.h" properly. - if source[i] == '"': - i = source.find('"', i+1) + 1 - assert i > 0 - continue - # Keep going if end of the line and the line ends with \. - if not (i == i1 and source[i-1] == '\\'): - if got_if: - condition = source[start+4:i].lstrip() - if (condition.startswith('0') or - condition.startswith('(0)')): - ignore_errors = True - break - i += 1 - elif c == '\\': # Handle \ in code. - # This is different from the pre-processor \ handling. - i += 1 - continue - elif ignore_errors: - # The tokenizer seems to be in pretty good shape. This - # raise is conditionally disabled so that bogus code - # in an #if 0 block can be handled. Since we will ignore - # it anyways, this is probably fine. So disable the - # exception and return the bogus char. 
- i += 1 - else: - sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' % - ('?', i, c, source[i-10:i+10])) - raise RuntimeError('unexpected token') - - if i <= 0: - print('Invalid index, exiting now.') - return - yield Token(token_type, source[start:i], start, i) - - -if __name__ == '__main__': - def main(argv): - """Driver mostly for testing purposes.""" - for filename in argv[1:]: - source = utils.ReadFile(filename) - if source is None: - continue - - for token in GetTokens(source): - print('%-12s: %s' % (token.token_type, token.name)) - # print('\r%6.2f%%' % (100.0 * index / token.end),) - sys.stdout.write('\n') - - - main(sys.argv) diff --git a/ext/googletest/googlemock/scripts/generator/cpp/utils.py b/ext/googletest/googlemock/scripts/generator/cpp/utils.py deleted file mode 100755 index 6f5fc097b9..0000000000 --- a/ext/googletest/googlemock/scripts/generator/cpp/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generic utilities for C++ parsing.""" - -import sys - -# Set to True to see the start/end token indices. 
-DEBUG = True - - -def ReadFile(filename, print_error=True): - """Returns the contents of a file.""" - try: - fp = open(filename) - try: - return fp.read() - finally: - fp.close() - except IOError: - if print_error: - print('Error reading %s: %s' % (filename, sys.exc_info()[1])) - return None diff --git a/ext/googletest/googlemock/scripts/generator/gmock_gen.py b/ext/googletest/googlemock/scripts/generator/gmock_gen.py deleted file mode 100755 index 9d528a56d9..0000000000 --- a/ext/googletest/googlemock/scripts/generator/gmock_gen.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Driver for starting up Google Mock class generator.""" - - -import os -import sys - -if __name__ == '__main__': - # Add the directory of this script to the path so we can import gmock_class. - sys.path.append(os.path.dirname(__file__)) - - from cpp import gmock_class - # Fix the docstring in case they require the usage. 
- gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__) - gmock_class.main() diff --git a/ext/googletest/googlemock/src/gmock-cardinalities.cc b/ext/googletest/googlemock/src/gmock-cardinalities.cc index 7463f43832..92cde3484a 100644 --- a/ext/googletest/googlemock/src/gmock-cardinalities.cc +++ b/ext/googletest/googlemock/src/gmock-cardinalities.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements cardinalities. @@ -35,9 +34,11 @@ #include "gmock/gmock-cardinalities.h" #include + #include // NOLINT #include #include + #include "gmock/internal/gmock-internal-utils.h" #include "gtest/gtest.h" @@ -49,8 +50,7 @@ namespace { class BetweenCardinalityImpl : public CardinalityInterface { public: BetweenCardinalityImpl(int min, int max) - : min_(min >= 0 ? min : 0), - max_(max >= min_ ? max : min_) { + : min_(min >= 0 ? min : 0), max_(max >= min_ ? max : min_) { std::stringstream ss; if (min < 0) { ss << "The invocation lower bound must be >= 0, " @@ -62,8 +62,7 @@ class BetweenCardinalityImpl : public CardinalityInterface { internal::Expect(false, __FILE__, __LINE__, ss.str()); } else if (min > max) { ss << "The invocation upper bound (" << max - << ") must be >= the invocation lower bound (" << min - << ")."; + << ") must be >= the invocation lower bound (" << min << ")."; internal::Expect(false, __FILE__, __LINE__, ss.str()); } } @@ -87,7 +86,8 @@ class BetweenCardinalityImpl : public CardinalityInterface { const int min_; const int max_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(BetweenCardinalityImpl); + BetweenCardinalityImpl(const BetweenCardinalityImpl&) = delete; + BetweenCardinalityImpl& operator=(const BetweenCardinalityImpl&) = delete; }; // Formats "n times" in a human-friendly way. 
diff --git a/ext/googletest/googlemock/src/gmock-internal-utils.cc b/ext/googletest/googlemock/src/gmock-internal-utils.cc index e5b547981d..0a74841f35 100644 --- a/ext/googletest/googlemock/src/gmock-internal-utils.cc +++ b/ext/googletest/googlemock/src/gmock-internal-utils.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file defines some utilities useful for implementing Google @@ -37,8 +36,15 @@ #include "gmock/internal/gmock-internal-utils.h" #include + +#include +#include +#include +#include #include // NOLINT #include +#include + #include "gmock/gmock.h" #include "gmock/internal/gmock-port.h" #include "gtest/gtest.h" @@ -48,21 +54,22 @@ namespace internal { // Joins a vector of strings as if they are fields of a tuple; returns // the joined string. -GTEST_API_ std::string JoinAsTuple(const Strings& fields) { - switch (fields.size()) { - case 0: - return ""; - case 1: - return fields[0]; - default: - std::string result = "(" + fields[0]; - for (size_t i = 1; i < fields.size(); i++) { - result += ", "; - result += fields[i]; - } - result += ")"; - return result; +GTEST_API_ std::string JoinAsKeyValueTuple( + const std::vector& names, const Strings& values) { + GTEST_CHECK_(names.size() == values.size()); + if (values.empty()) { + return ""; } + const auto build_one = [&](const size_t i) { + return std::string(names[i]) + ": " + values[i]; + }; + std::string result = "(" + build_one(0); + for (size_t i = 1; i < values.size(); i++) { + result += ", "; + result += build_one(i); + } + result += ")"; + return result; } // Converts an identifier name to a space-separated list of lower-case @@ -76,12 +83,11 @@ GTEST_API_ std::string ConvertIdentifierNameToWords(const char* id_name) { // We don't care about the current locale as the input is // guaranteed to be a valid C++ 
identifier name. const bool starts_new_word = IsUpper(*p) || - (!IsAlpha(prev_char) && IsLower(*p)) || - (!IsDigit(prev_char) && IsDigit(*p)); + (!IsAlpha(prev_char) && IsLower(*p)) || + (!IsDigit(prev_char) && IsDigit(*p)); if (IsAlNum(*p)) { - if (starts_new_word && result != "") - result += ' '; + if (starts_new_word && result != "") result += ' '; result += ToLower(*p); } } @@ -95,12 +101,9 @@ class GoogleTestFailureReporter : public FailureReporterInterface { public: void ReportFailure(FailureType type, const char* file, int line, const std::string& message) override { - AssertHelper(type == kFatal ? - TestPartResult::kFatalFailure : - TestPartResult::kNonFatalFailure, - file, - line, - message.c_str()) = Message(); + AssertHelper(type == kFatal ? TestPartResult::kFatalFailure + : TestPartResult::kNonFatalFailure, + file, line, message.c_str()) = Message(); if (type == kFatal) { posix::Abort(); } @@ -126,10 +129,10 @@ static GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex); // Returns true if and only if a log with the given severity is visible // according to the --gmock_verbose flag. GTEST_API_ bool LogIsVisible(LogSeverity severity) { - if (GMOCK_FLAG(verbose) == kInfoVerbosity) { + if (GMOCK_FLAG_GET(verbose) == kInfoVerbosity) { // Always show the log if --gmock_verbose=info. return true; - } else if (GMOCK_FLAG(verbose) == kErrorVerbosity) { + } else if (GMOCK_FLAG_GET(verbose) == kErrorVerbosity) { // Always hide it if --gmock_verbose=error. return false; } else { @@ -148,8 +151,7 @@ GTEST_API_ bool LogIsVisible(LogSeverity severity) { // conservative. GTEST_API_ void Log(LogSeverity severity, const std::string& message, int stack_frames_to_skip) { - if (!LogIsVisible(severity)) - return; + if (!LogIsVisible(severity)) return; // Ensures that logs from different threads don't interleave. 
MutexLock l(&g_log_mutex); @@ -178,8 +180,8 @@ GTEST_API_ void Log(LogSeverity severity, const std::string& message, std::cout << "\n"; } std::cout << "Stack trace:\n" - << ::testing::internal::GetCurrentOsStackTraceExceptTop( - ::testing::UnitTest::GetInstance(), actual_to_skip); + << ::testing::internal::GetCurrentOsStackTraceExceptTop( + ::testing::UnitTest::GetInstance(), actual_to_skip); } std::cout << ::std::flush; } @@ -196,5 +198,53 @@ GTEST_API_ void IllegalDoDefault(const char* file, int line) { "the variable in various places."); } +constexpr char UnBase64Impl(char c, const char* const base64, char carry) { + return *base64 == 0 ? static_cast(65) + : *base64 == c ? carry + : UnBase64Impl(c, base64 + 1, carry + 1); +} + +template +constexpr std::array UnBase64Impl(IndexSequence, + const char* const base64) { + return {{UnBase64Impl(static_cast(I), base64, 0)...}}; +} + +constexpr std::array UnBase64(const char* const base64) { + return UnBase64Impl(MakeIndexSequence<256>{}, base64); +} + +static constexpr char kBase64[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +static constexpr std::array kUnBase64 = UnBase64(kBase64); + +bool Base64Unescape(const std::string& encoded, std::string* decoded) { + decoded->clear(); + size_t encoded_len = encoded.size(); + decoded->reserve(3 * (encoded_len / 4) + (encoded_len % 4)); + int bit_pos = 0; + char dst = 0; + for (int src : encoded) { + if (std::isspace(src) || src == '=') { + continue; + } + char src_bin = kUnBase64[static_cast(src)]; + if (src_bin >= 64) { + decoded->clear(); + return false; + } + if (bit_pos == 0) { + dst |= static_cast(src_bin << 2); + bit_pos = 6; + } else { + dst |= static_cast(src_bin >> (bit_pos - 2)); + decoded->push_back(dst); + dst = static_cast(src_bin << (10 - bit_pos)); + bit_pos = (bit_pos + 6) % 8; + } + } + return true; +} + } // namespace internal } // namespace testing diff --git a/ext/googletest/googlemock/src/gmock-matchers.cc 
b/ext/googletest/googlemock/src/gmock-matchers.cc index dded437add..a8d04a6da0 100644 --- a/ext/googletest/googlemock/src/gmock-matchers.cc +++ b/ext/googletest/googlemock/src/gmock-matchers.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements Matcher, Matcher, and @@ -36,9 +35,11 @@ #include "gmock/gmock-matchers.h" #include + #include #include #include +#include namespace testing { namespace internal { @@ -48,11 +49,13 @@ namespace internal { // 'negation' is false; otherwise returns the description of the // negation of the matcher. 'param_values' contains a list of strings // that are the print-out of the matcher's parameters. -GTEST_API_ std::string FormatMatcherDescription(bool negation, - const char* matcher_name, - const Strings& param_values) { +GTEST_API_ std::string FormatMatcherDescription( + bool negation, const char* matcher_name, + const std::vector& param_names, const Strings& param_values) { std::string result = ConvertIdentifierNameToWords(matcher_name); - if (param_values.size() >= 1) result += " " + JoinAsTuple(param_values); + if (param_values.size() >= 1) { + result += " " + JoinAsKeyValueTuple(param_names, param_values); + } return negation ? "not (" + result + ")" : result; } diff --git a/ext/googletest/googlemock/src/gmock-spec-builders.cc b/ext/googletest/googlemock/src/gmock-spec-builders.cc index c7266a3704..658ad3fa22 100644 --- a/ext/googletest/googlemock/src/gmock-spec-builders.cc +++ b/ext/googletest/googlemock/src/gmock-spec-builders.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. 
// // This file implements the spec builder syntax (ON_CALL and @@ -42,6 +41,7 @@ #include #include #include +#include #include #include "gmock/gmock.h" @@ -49,15 +49,15 @@ #include "gtest/internal/gtest-port.h" #if GTEST_OS_CYGWIN || GTEST_OS_LINUX || GTEST_OS_MAC -# include // NOLINT +#include // NOLINT #endif // Silence C4800 (C4800: 'int *const ': forcing value // to bool 'true' or 'false') for MSVC 15 #ifdef _MSC_VER #if _MSC_VER == 1900 -# pragma warning(push) -# pragma warning(disable:4800) +#pragma warning(push) +#pragma warning(disable : 4800) #endif #endif @@ -195,11 +195,12 @@ void ExpectationBase::DescribeCallCountTo(::std::ostream* os) const // Describes the state of the expectation (e.g. is it satisfied? // is it active?). - *os << " - " << (IsOverSaturated() ? "over-saturated" : - IsSaturated() ? "saturated" : - IsSatisfied() ? "satisfied" : "unsatisfied") - << " and " - << (is_retired() ? "retired" : "active"); + *os << " - " + << (IsOverSaturated() ? "over-saturated" + : IsSaturated() ? "saturated" + : IsSatisfied() ? "satisfied" + : "unsatisfied") + << " and " << (is_retired() ? "retired" : "active"); } // Checks the action count (i.e. the number of WillOnce() and @@ -242,13 +243,12 @@ void ExpectationBase::CheckActionCountIfNotDone() const ::std::stringstream ss; DescribeLocationTo(&ss); - ss << "Too " << (too_many ? "many" : "few") - << " actions specified in " << source_text() << "...\n" + ss << "Too " << (too_many ? "many" : "few") << " actions specified in " + << source_text() << "...\n" << "Expected to be "; cardinality().DescribeTo(&ss); - ss << ", but has " << (too_many ? "" : "only ") - << action_count << " WillOnce()" - << (action_count == 1 ? "" : "s"); + ss << ", but has " << (too_many ? "" : "only ") << action_count + << " WillOnce()" << (action_count == 1 ? 
"" : "s"); if (repeated_action_specified_) { ss << " and a WillRepeatedly()"; } @@ -264,10 +264,10 @@ void ExpectationBase::UntypedTimes(const Cardinality& a_cardinality) { ".Times() cannot appear " "more than once in an EXPECT_CALL()."); } else { - ExpectSpecProperty(last_clause_ < kTimes, - ".Times() cannot appear after " - ".InSequence(), .WillOnce(), .WillRepeatedly(), " - "or .RetiresOnSaturation()."); + ExpectSpecProperty( + last_clause_ < kTimes, + ".Times() may only appear *before* .InSequence(), .WillOnce(), " + ".WillRepeatedly(), or .RetiresOnSaturation(), not after."); } last_clause_ = kTimes; @@ -283,7 +283,7 @@ GTEST_API_ ThreadLocal g_gmock_implicit_sequence; void ReportUninterestingCall(CallReaction reaction, const std::string& msg) { // Include a stack trace only if --gmock_verbose=info is specified. const int stack_frames_to_skip = - GMOCK_FLAG(verbose) == kInfoVerbosity ? 3 : -1; + GMOCK_FLAG_GET(verbose) == kInfoVerbosity ? 3 : -1; switch (reaction) { case kAllow: Log(kInfo, msg, stack_frames_to_skip); @@ -370,143 +370,12 @@ const char* UntypedFunctionMockerBase::Name() const return name; } -// Calculates the result of invoking this mock function with the given -// arguments, prints it, and returns it. The caller is responsible -// for deleting the result. -UntypedActionResultHolderBase* UntypedFunctionMockerBase::UntypedInvokeWith( - void* const untyped_args) GTEST_LOCK_EXCLUDED_(g_gmock_mutex) { - // See the definition of untyped_expectations_ for why access to it - // is unprotected here. - if (untyped_expectations_.size() == 0) { - // No expectation is set on this mock method - we have an - // uninteresting call. - - // We must get Google Mock's reaction on uninteresting calls - // made on this mock object BEFORE performing the action, - // because the action may DELETE the mock object and make the - // following expression meaningless. 
- const CallReaction reaction = - Mock::GetReactionOnUninterestingCalls(MockObject()); - - // True if and only if we need to print this call's arguments and return - // value. This definition must be kept in sync with - // the behavior of ReportUninterestingCall(). - const bool need_to_report_uninteresting_call = - // If the user allows this uninteresting call, we print it - // only when they want informational messages. - reaction == kAllow ? LogIsVisible(kInfo) : - // If the user wants this to be a warning, we print - // it only when they want to see warnings. - reaction == kWarn - ? LogIsVisible(kWarning) - : - // Otherwise, the user wants this to be an error, and we - // should always print detailed information in the error. - true; - - if (!need_to_report_uninteresting_call) { - // Perform the action without printing the call information. - return this->UntypedPerformDefaultAction( - untyped_args, "Function call: " + std::string(Name())); - } - - // Warns about the uninteresting call. - ::std::stringstream ss; - this->UntypedDescribeUninterestingCall(untyped_args, &ss); - - // Calculates the function result. - UntypedActionResultHolderBase* const result = - this->UntypedPerformDefaultAction(untyped_args, ss.str()); - - // Prints the function result. - if (result != nullptr) result->PrintAsActionResult(&ss); - - ReportUninterestingCall(reaction, ss.str()); - return result; - } - - bool is_excessive = false; - ::std::stringstream ss; - ::std::stringstream why; - ::std::stringstream loc; - const void* untyped_action = nullptr; - - // The UntypedFindMatchingExpectation() function acquires and - // releases g_gmock_mutex. - - const ExpectationBase* const untyped_expectation = - this->UntypedFindMatchingExpectation(untyped_args, &untyped_action, - &is_excessive, &ss, &why); - const bool found = untyped_expectation != nullptr; - - // True if and only if we need to print the call's arguments - // and return value. 
- // This definition must be kept in sync with the uses of Expect() - // and Log() in this function. - const bool need_to_report_call = - !found || is_excessive || LogIsVisible(kInfo); - if (!need_to_report_call) { - // Perform the action without printing the call information. - return untyped_action == nullptr - ? this->UntypedPerformDefaultAction(untyped_args, "") - : this->UntypedPerformAction(untyped_action, untyped_args); - } - - ss << " Function call: " << Name(); - this->UntypedPrintArgs(untyped_args, &ss); - - // In case the action deletes a piece of the expectation, we - // generate the message beforehand. - if (found && !is_excessive) { - untyped_expectation->DescribeLocationTo(&loc); - } - - UntypedActionResultHolderBase* result = nullptr; - - auto perform_action = [&] { - return untyped_action == nullptr - ? this->UntypedPerformDefaultAction(untyped_args, ss.str()) - : this->UntypedPerformAction(untyped_action, untyped_args); - }; - auto handle_failures = [&] { - ss << "\n" << why.str(); - - if (!found) { - // No expectation matches this call - reports a failure. - Expect(false, nullptr, -1, ss.str()); - } else if (is_excessive) { - // We had an upper-bound violation and the failure message is in ss. - Expect(false, untyped_expectation->file(), untyped_expectation->line(), - ss.str()); - } else { - // We had an expected call and the matching expectation is - // described in ss. - Log(kInfo, loc.str() + ss.str(), 2); - } - }; -#if GTEST_HAS_EXCEPTIONS - try { - result = perform_action(); - } catch (...) { - handle_failures(); - throw; - } -#else - result = perform_action(); -#endif - - if (result != nullptr) result->PrintAsActionResult(&ss); - handle_failures(); - return result; -} - // Returns an Expectation object that references and co-owns exp, // which must be an expectation on this mock function. 
Expectation UntypedFunctionMockerBase::GetHandleOf(ExpectationBase* exp) { // See the definition of untyped_expectations_ for why access to it // is unprotected here. - for (UntypedExpectations::const_iterator it = - untyped_expectations_.begin(); + for (UntypedExpectations::const_iterator it = untyped_expectations_.begin(); it != untyped_expectations_.end(); ++it) { if (it->get() == exp) { return Expectation(*it); @@ -526,8 +395,7 @@ bool UntypedFunctionMockerBase::VerifyAndClearExpectationsLocked() GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) { g_gmock_mutex.AssertHeld(); bool expectations_met = true; - for (UntypedExpectations::const_iterator it = - untyped_expectations_.begin(); + for (UntypedExpectations::const_iterator it = untyped_expectations_.begin(); it != untyped_expectations_.end(); ++it) { ExpectationBase* const untyped_expectation = it->get(); if (untyped_expectation->IsOverSaturated()) { @@ -538,15 +406,15 @@ bool UntypedFunctionMockerBase::VerifyAndClearExpectationsLocked() } else if (!untyped_expectation->IsSatisfied()) { expectations_met = false; ::std::stringstream ss; - ss << "Actual function call count doesn't match " - << untyped_expectation->source_text() << "...\n"; + ss << "Actual function call count doesn't match " + << untyped_expectation->source_text() << "...\n"; // No need to show the source file location of the expectation // in the description, as the Expect() call that follows already // takes care of it. untyped_expectation->MaybeDescribeExtraMatcherTo(&ss); untyped_expectation->DescribeCallCountTo(&ss); - Expect(false, untyped_expectation->file(), - untyped_expectation->line(), ss.str()); + Expect(false, untyped_expectation->file(), untyped_expectation->line(), + ss.str()); } } @@ -613,8 +481,7 @@ class MockObjectRegistry { // object alive. Therefore we report any living object as test // failure, unless the user explicitly asked us to ignore it. 
~MockObjectRegistry() { - if (!GMOCK_FLAG(catch_leaked_mocks)) - return; + if (!GMOCK_FLAG_GET(catch_leaked_mocks)) return; int leaked_count = 0; for (StateMap::const_iterator it = states_.begin(); it != states_.end(); @@ -634,7 +501,7 @@ class MockObjectRegistry { << state.first_used_test << ")"; } std::cout << " should be deleted but never is. Its address is @" - << it->first << "."; + << it->first << "."; leaked_count++; } if (leaked_count > 0) { @@ -668,57 +535,63 @@ MockObjectRegistry g_mock_object_registry; // Maps a mock object to the reaction Google Mock should have when an // uninteresting method is called. Protected by g_gmock_mutex. -std::map g_uninteresting_call_reaction; +std::unordered_map& +UninterestingCallReactionMap() { + static auto* map = new std::unordered_map; + return *map; +} // Sets the reaction Google Mock should have when an uninteresting // method of the given mock object is called. -void SetReactionOnUninterestingCalls(const void* mock_obj, +void SetReactionOnUninterestingCalls(uintptr_t mock_obj, internal::CallReaction reaction) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { internal::MutexLock l(&internal::g_gmock_mutex); - g_uninteresting_call_reaction[mock_obj] = reaction; + UninterestingCallReactionMap()[mock_obj] = reaction; } } // namespace // Tells Google Mock to allow uninteresting calls on the given mock // object. -void Mock::AllowUninterestingCalls(const void* mock_obj) +void Mock::AllowUninterestingCalls(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { SetReactionOnUninterestingCalls(mock_obj, internal::kAllow); } // Tells Google Mock to warn the user about uninteresting calls on the // given mock object. -void Mock::WarnUninterestingCalls(const void* mock_obj) +void Mock::WarnUninterestingCalls(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { SetReactionOnUninterestingCalls(mock_obj, internal::kWarn); } // Tells Google Mock to fail uninteresting calls on the given mock // object. 
-void Mock::FailUninterestingCalls(const void* mock_obj) +void Mock::FailUninterestingCalls(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { SetReactionOnUninterestingCalls(mock_obj, internal::kFail); } // Tells Google Mock the given mock object is being destroyed and its // entry in the call-reaction table should be removed. -void Mock::UnregisterCallReaction(const void* mock_obj) +void Mock::UnregisterCallReaction(uintptr_t mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { internal::MutexLock l(&internal::g_gmock_mutex); - g_uninteresting_call_reaction.erase(mock_obj); + UninterestingCallReactionMap().erase(static_cast(mock_obj)); } // Returns the reaction Google Mock will have on uninteresting calls // made on the given mock object. internal::CallReaction Mock::GetReactionOnUninterestingCalls( - const void* mock_obj) - GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { + const void* mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { internal::MutexLock l(&internal::g_gmock_mutex); - return (g_uninteresting_call_reaction.count(mock_obj) == 0) ? - internal::intToCallReaction(GMOCK_FLAG(default_mock_behavior)) : - g_uninteresting_call_reaction[mock_obj]; + return (UninterestingCallReactionMap().count( + reinterpret_cast(mock_obj)) == 0) + ? 
internal::intToCallReaction( + GMOCK_FLAG_GET(default_mock_behavior)) + : UninterestingCallReactionMap()[reinterpret_cast( + mock_obj)]; } // Tells Google Mock to ignore mock_obj when checking for leaked mock @@ -873,8 +746,8 @@ Expectation::~Expectation() {} void Sequence::AddExpectation(const Expectation& expectation) const { if (*last_expectation_ != expectation) { if (last_expectation_->expectation_base() != nullptr) { - expectation.expectation_base()->immediate_prerequisites_ - += *last_expectation_; + expectation.expectation_base()->immediate_prerequisites_ += + *last_expectation_; } *last_expectation_ = expectation; } @@ -903,6 +776,6 @@ InSequence::~InSequence() { #ifdef _MSC_VER #if _MSC_VER == 1900 -# pragma warning(pop) +#pragma warning(pop) #endif #endif diff --git a/ext/googletest/googlemock/src/gmock.cc b/ext/googletest/googlemock/src/gmock.cc index 7bcdb0ba2d..5025656a02 100644 --- a/ext/googletest/googlemock/src/gmock.cc +++ b/ext/googletest/googlemock/src/gmock.cc @@ -27,17 +27,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gmock/gmock.h" -#include "gmock/internal/gmock-port.h" -namespace testing { +#include "gmock/internal/gmock-port.h" GMOCK_DEFINE_bool_(catch_leaked_mocks, true, "true if and only if Google Mock should report leaked " "mock objects as failures."); -GMOCK_DEFINE_string_(verbose, internal::kWarningVerbosity, +GMOCK_DEFINE_string_(verbose, testing::internal::kWarningVerbosity, "Controls how verbose Google Mock's output is." " Valid values:\n" " info - prints all messages.\n" @@ -51,6 +49,7 @@ GMOCK_DEFINE_int32_(default_mock_behavior, 1, " 1 - by default, mocks act as NaggyMocks.\n" " 2 - by default, mocks act as StrictMocks."); +namespace testing { namespace internal { // Parses a string as a command line flag. 
The string should have the @@ -59,18 +58,18 @@ namespace internal { // // Returns the value of the flag, or NULL if the parsing failed. static const char* ParseGoogleMockFlagValue(const char* str, - const char* flag, + const char* flag_name, bool def_optional) { // str and flag must not be NULL. - if (str == nullptr || flag == nullptr) return nullptr; + if (str == nullptr || flag_name == nullptr) return nullptr; // The flag must start with "--gmock_". - const std::string flag_str = std::string("--gmock_") + flag; - const size_t flag_len = flag_str.length(); - if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; + const std::string flag_name_str = std::string("--gmock_") + flag_name; + const size_t flag_name_len = flag_name_str.length(); + if (strncmp(str, flag_name_str.c_str(), flag_name_len) != 0) return nullptr; // Skips the flag name. - const char* flag_end = str + flag_len; + const char* flag_end = str + flag_name_len; // When def_optional is true, it's OK to not have a "=value" part. if (def_optional && (flag_end[0] == '\0')) { @@ -91,10 +90,10 @@ static const char* ParseGoogleMockFlagValue(const char* str, // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. -static bool ParseGoogleMockBoolFlag(const char* str, const char* flag, - bool* value) { +static bool ParseGoogleMockFlag(const char* str, const char* flag_name, + bool* value) { // Gets the value of the flag as a string. - const char* const value_str = ParseGoogleMockFlagValue(str, flag, true); + const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, true); // Aborts if the parsing failed. if (value_str == nullptr) return false; @@ -110,10 +109,10 @@ static bool ParseGoogleMockBoolFlag(const char* str, const char* flag, // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. 
template -static bool ParseGoogleMockStringFlag(const char* str, const char* flag, - String* value) { +static bool ParseGoogleMockFlag(const char* str, const char* flag_name, + String* value) { // Gets the value of the flag as a string. - const char* const value_str = ParseGoogleMockFlagValue(str, flag, false); + const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, false); // Aborts if the parsing failed. if (value_str == nullptr) return false; @@ -123,17 +122,17 @@ static bool ParseGoogleMockStringFlag(const char* str, const char* flag, return true; } -static bool ParseGoogleMockIntFlag(const char* str, const char* flag, - int32_t* value) { +static bool ParseGoogleMockFlag(const char* str, const char* flag_name, + int32_t* value) { // Gets the value of the flag as a string. - const char* const value_str = ParseGoogleMockFlagValue(str, flag, true); + const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, true); // Aborts if the parsing failed. if (value_str == nullptr) return false; // Sets *value to the value of the flag. - return ParseInt32(Message() << "The value of flag --" << flag, - value_str, value); + return ParseInt32(Message() << "The value of flag --" << flag_name, value_str, + value); } // The internal implementation of InitGoogleMock(). @@ -152,11 +151,22 @@ void InitGoogleMockImpl(int* argc, CharType** argv) { const char* const arg = arg_string.c_str(); // Do we see a Google Mock flag? 
- if (ParseGoogleMockBoolFlag(arg, "catch_leaked_mocks", - &GMOCK_FLAG(catch_leaked_mocks)) || - ParseGoogleMockStringFlag(arg, "verbose", &GMOCK_FLAG(verbose)) || - ParseGoogleMockIntFlag(arg, "default_mock_behavior", - &GMOCK_FLAG(default_mock_behavior))) { + bool found_gmock_flag = false; + +#define GMOCK_INTERNAL_PARSE_FLAG(flag_name) \ + if (!found_gmock_flag) { \ + auto value = GMOCK_FLAG_GET(flag_name); \ + if (ParseGoogleMockFlag(arg, #flag_name, &value)) { \ + GMOCK_FLAG_SET(flag_name, value); \ + found_gmock_flag = true; \ + } \ + } + + GMOCK_INTERNAL_PARSE_FLAG(catch_leaked_mocks) + GMOCK_INTERNAL_PARSE_FLAG(verbose) + GMOCK_INTERNAL_PARSE_FLAG(default_mock_behavior) + + if (found_gmock_flag) { // Yes. Shift the remainder of the argv list left by one. Note // that argv has (*argc + 1) elements, the last one always being // NULL. The following loop moves the trailing NULL element as diff --git a/ext/googletest/googlemock/src/gmock_main.cc b/ext/googletest/googlemock/src/gmock_main.cc index 18c500f663..b411c5ecb9 100644 --- a/ext/googletest/googlemock/src/gmock_main.cc +++ b/ext/googletest/googlemock/src/gmock_main.cc @@ -27,8 +27,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- #include + #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -56,7 +56,7 @@ void loop() { RUN_ALL_TESTS(); } // https://web.archive.org/web/20170912203238/connect.microsoft.com/VisualStudio/feedback/details/394464/wmain-link-error-in-the-static-library // // NOLINT #if GTEST_OS_WINDOWS_MOBILE -# include // NOLINT +#include // NOLINT GTEST_API_ int _tmain(int argc, TCHAR** argv) { #else diff --git a/ext/googletest/googlemock/test/BUILD.bazel b/ext/googletest/googlemock/test/BUILD.bazel index 6193ed4daf..d4297c80fe 100644 --- a/ext/googletest/googlemock/test/BUILD.bazel +++ b/ext/googletest/googlemock/test/BUILD.bazel @@ -30,7 +30,6 @@ # # Bazel Build for Google C++ Testing Framework(Google Test)-googlemock -load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") load("@rules_python//python:defs.bzl", "py_library", "py_test") licenses(["notice"]) @@ -39,7 +38,7 @@ licenses(["notice"]) cc_test( name = "gmock_all_test", size = "small", - srcs = glob(include = ["gmock-*.cc"]), + srcs = glob(include = ["gmock-*.cc"]) + ["gmock-matchers_test.h"], linkopts = select({ "//:qnx": [], "//:windows": [], diff --git a/ext/googletest/googlemock/test/gmock-actions_test.cc b/ext/googletest/googlemock/test/gmock-actions_test.cc index e1ca7fe2dd..215495ed2a 100644 --- a/ext/googletest/googlemock/test/gmock-actions_test.cc +++ b/ext/googletest/googlemock/test/gmock-actions_test.cc @@ -27,64 +27,230 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file tests the built-in actions. -// Silence C4100 (unreferenced formal parameter) for MSVC +// Silence C4100 (unreferenced formal parameter) and C4503 (decorated name +// length exceeded) for MSVC. 
#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) +#pragma warning(disable : 4503) #if _MSC_VER == 1900 // and silence C4800 (C4800: 'int *const ': forcing value // to bool 'true' or 'false') for MSVC 15 -# pragma warning(disable:4800) +#pragma warning(disable : 4800) #endif #endif #include "gmock/gmock-actions.h" + #include +#include #include #include #include #include +#include + #include "gmock/gmock.h" #include "gmock/internal/gmock-port.h" -#include "gtest/gtest.h" #include "gtest/gtest-spi.h" +#include "gtest/gtest.h" +namespace testing { namespace { -using ::testing::_; -using ::testing::Action; -using ::testing::ActionInterface; -using ::testing::Assign; -using ::testing::ByMove; -using ::testing::ByRef; -using ::testing::DefaultValue; -using ::testing::DoAll; -using ::testing::DoDefault; -using ::testing::IgnoreResult; -using ::testing::Invoke; -using ::testing::InvokeWithoutArgs; -using ::testing::MakePolymorphicAction; -using ::testing::PolymorphicAction; -using ::testing::Return; -using ::testing::ReturnNew; -using ::testing::ReturnNull; -using ::testing::ReturnRef; -using ::testing::ReturnRefOfCopy; -using ::testing::ReturnRoundRobin; -using ::testing::SetArgPointee; -using ::testing::SetArgumentPointee; -using ::testing::Unused; -using ::testing::WithArgs; using ::testing::internal::BuiltInDefaultValue; -#if !GTEST_OS_WINDOWS_MOBILE -using ::testing::SetErrnoAndReturn; -#endif +TEST(TypeTraits, Negation) { + // Direct use with std types. + static_assert(std::is_base_of>::value, + ""); + + static_assert(std::is_base_of>::value, + ""); + + // With other types that fit the requirement of a value member that is + // convertible to bool. 
+ static_assert(std::is_base_of< + std::true_type, + internal::negation>>::value, + ""); + + static_assert(std::is_base_of< + std::false_type, + internal::negation>>::value, + ""); + + static_assert(std::is_base_of< + std::false_type, + internal::negation>>::value, + ""); +} + +// Weird false/true types that aren't actually bool constants (but should still +// be legal according to [meta.logical] because `bool(T::value)` is valid), are +// distinct from std::false_type and std::true_type, and are distinct from other +// instantiations of the same template. +// +// These let us check finicky details mandated by the standard like +// "std::conjunction should evaluate to a type that inherits from the first +// false-y input". +template +struct MyFalse : std::integral_constant {}; + +template +struct MyTrue : std::integral_constant {}; + +TEST(TypeTraits, Conjunction) { + // Base case: always true. + static_assert(std::is_base_of>::value, + ""); + + // One predicate: inherits from that predicate, regardless of value. + static_assert( + std::is_base_of, internal::conjunction>>::value, + ""); + + static_assert( + std::is_base_of, internal::conjunction>>::value, ""); + + // Multiple predicates, with at least one false: inherits from that one. + static_assert( + std::is_base_of, internal::conjunction, MyFalse<1>, + MyTrue<2>>>::value, + ""); + + static_assert( + std::is_base_of, internal::conjunction, MyFalse<1>, + MyFalse<2>>>::value, + ""); + + // Short circuiting: in the case above, additional predicates need not even + // define a value member. + struct Empty {}; + static_assert( + std::is_base_of, internal::conjunction, MyFalse<1>, + Empty>>::value, + ""); + + // All predicates true: inherits from the last. + static_assert( + std::is_base_of, internal::conjunction, MyTrue<1>, + MyTrue<2>>>::value, + ""); +} + +TEST(TypeTraits, Disjunction) { + // Base case: always false. 
+ static_assert( + std::is_base_of>::value, ""); + + // One predicate: inherits from that predicate, regardless of value. + static_assert( + std::is_base_of, internal::disjunction>>::value, + ""); + + static_assert( + std::is_base_of, internal::disjunction>>::value, ""); + + // Multiple predicates, with at least one true: inherits from that one. + static_assert( + std::is_base_of, internal::disjunction, MyTrue<1>, + MyFalse<2>>>::value, + ""); + + static_assert( + std::is_base_of, internal::disjunction, MyTrue<1>, + MyTrue<2>>>::value, + ""); + + // Short circuiting: in the case above, additional predicates need not even + // define a value member. + struct Empty {}; + static_assert( + std::is_base_of, internal::disjunction, MyTrue<1>, + Empty>>::value, + ""); + + // All predicates false: inherits from the last. + static_assert( + std::is_base_of, internal::disjunction, MyFalse<1>, + MyFalse<2>>>::value, + ""); +} + +TEST(TypeTraits, IsInvocableRV) { + struct C { + int operator()() const { return 0; } + void operator()(int) & {} + std::string operator()(int) && { return ""; }; + }; + + // The first overload is callable for const and non-const rvalues and lvalues. + // It can be used to obtain an int, cv void, or anything int is convertible + // to. + static_assert(internal::is_callable_r::value, ""); + static_assert(internal::is_callable_r::value, ""); + static_assert(internal::is_callable_r::value, ""); + static_assert(internal::is_callable_r::value, ""); + + static_assert(internal::is_callable_r::value, ""); + static_assert(internal::is_callable_r::value, ""); + static_assert(internal::is_callable_r::value, ""); + + // It's possible to provide an int. If it's given to an lvalue, the result is + // void. Otherwise it is std::string (which is also treated as allowed for a + // void result type). 
+ static_assert(internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); + + static_assert(internal::is_callable_r::value, ""); + static_assert(internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); + + // It's not possible to provide other arguments. + static_assert(!internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); + + // In C++17 and above, where it's guaranteed that functions can return + // non-moveable objects, everything should work fine for non-moveable rsult + // types too. +#if defined(__cplusplus) && __cplusplus >= 201703L + { + struct NonMoveable { + NonMoveable() = default; + NonMoveable(NonMoveable&&) = delete; + }; + + static_assert(!std::is_move_constructible_v); + + struct Callable { + NonMoveable operator()() { return NonMoveable(); } + }; + + static_assert(internal::is_callable_r::value); + static_assert(internal::is_callable_r::value); + static_assert( + internal::is_callable_r::value); + + static_assert(!internal::is_callable_r::value); + static_assert(!internal::is_callable_r::value); + } +#endif // C++17 and above + + // Nothing should choke when we try to call other arguments besides directly + // callable objects, but they should not show up as callable. + static_assert(!internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); + static_assert(!internal::is_callable_r::value, ""); +} // Tests that BuiltInDefaultValue::Get() returns NULL. 
TEST(BuiltInDefaultValueTest, IsNullForPointerTypes) { @@ -114,17 +280,17 @@ TEST(BuiltInDefaultValueTest, IsZeroForNumericTypes) { #endif #endif EXPECT_EQ(0U, BuiltInDefaultValue::Get()); // NOLINT - EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT - EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT EXPECT_EQ(0U, BuiltInDefaultValue::Get()); EXPECT_EQ(0, BuiltInDefaultValue::Get()); EXPECT_EQ(0, BuiltInDefaultValue::Get()); - EXPECT_EQ(0U, BuiltInDefaultValue::Get()); // NOLINT - EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT - EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0U, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT EXPECT_EQ(0U, BuiltInDefaultValue::Get()); // NOLINT - EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT - EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT + EXPECT_EQ(0, BuiltInDefaultValue::Get()); // NOLINT EXPECT_EQ(0, BuiltInDefaultValue::Get()); EXPECT_EQ(0, BuiltInDefaultValue::Get()); } @@ -139,17 +305,17 @@ TEST(BuiltInDefaultValueTest, ExistsForNumericTypes) { EXPECT_TRUE(BuiltInDefaultValue::Exists()); #endif EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT EXPECT_TRUE(BuiltInDefaultValue::Exists()); EXPECT_TRUE(BuiltInDefaultValue::Exists()); EXPECT_TRUE(BuiltInDefaultValue::Exists()); - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + 
EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT - EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT + EXPECT_TRUE(BuiltInDefaultValue::Exists()); // NOLINT EXPECT_TRUE(BuiltInDefaultValue::Exists()); EXPECT_TRUE(BuiltInDefaultValue::Exists()); } @@ -167,13 +333,13 @@ TEST(BuiltInDefaultValueTest, BoolExists) { // Tests that BuiltInDefaultValue::Get() returns "" when T is a // string type. TEST(BuiltInDefaultValueTest, IsEmptyStringForString) { - EXPECT_EQ("", BuiltInDefaultValue< ::std::string>::Get()); + EXPECT_EQ("", BuiltInDefaultValue<::std::string>::Get()); } // Tests that BuiltInDefaultValue::Exists() returns true when T is a // string type. TEST(BuiltInDefaultValueTest, ExistsForString) { - EXPECT_TRUE(BuiltInDefaultValue< ::std::string>::Exists()); + EXPECT_TRUE(BuiltInDefaultValue<::std::string>::Exists()); } // Tests that BuiltInDefaultValue::Get() returns the same @@ -208,7 +374,6 @@ class MyNonDefaultConstructible { int value_; }; - TEST(BuiltInDefaultValueTest, ExistsForDefaultConstructibleType) { EXPECT_TRUE(BuiltInDefaultValue::Exists()); } @@ -217,25 +382,19 @@ TEST(BuiltInDefaultValueTest, IsDefaultConstructedForDefaultConstructibleType) { EXPECT_EQ(42, BuiltInDefaultValue::Get().value()); } - TEST(BuiltInDefaultValueTest, DoesNotExistForNonDefaultConstructibleType) { EXPECT_FALSE(BuiltInDefaultValue::Exists()); } // Tests that BuiltInDefaultValue::Get() aborts the program. 
TEST(BuiltInDefaultValueDeathTest, IsUndefinedForReferences) { - EXPECT_DEATH_IF_SUPPORTED({ - BuiltInDefaultValue::Get(); - }, ""); - EXPECT_DEATH_IF_SUPPORTED({ - BuiltInDefaultValue::Get(); - }, ""); + EXPECT_DEATH_IF_SUPPORTED({ BuiltInDefaultValue::Get(); }, ""); + EXPECT_DEATH_IF_SUPPORTED({ BuiltInDefaultValue::Get(); }, ""); } TEST(BuiltInDefaultValueDeathTest, IsUndefinedForNonDefaultConstructibleType) { - EXPECT_DEATH_IF_SUPPORTED({ - BuiltInDefaultValue::Get(); - }, ""); + EXPECT_DEATH_IF_SUPPORTED( + { BuiltInDefaultValue::Get(); }, ""); } // Tests that DefaultValue::IsSet() is false initially. @@ -281,26 +440,22 @@ TEST(DefaultValueDeathTest, GetReturnsBuiltInDefaultValueWhenUnset) { EXPECT_EQ(0, DefaultValue::Get()); - EXPECT_DEATH_IF_SUPPORTED({ - DefaultValue::Get(); - }, ""); + EXPECT_DEATH_IF_SUPPORTED({ DefaultValue::Get(); }, + ""); } TEST(DefaultValueTest, GetWorksForMoveOnlyIfSet) { EXPECT_TRUE(DefaultValue>::Exists()); EXPECT_TRUE(DefaultValue>::Get() == nullptr); - DefaultValue>::SetFactory([] { - return std::unique_ptr(new int(42)); - }); + DefaultValue>::SetFactory( + [] { return std::unique_ptr(new int(42)); }); EXPECT_TRUE(DefaultValue>::Exists()); std::unique_ptr i = DefaultValue>::Get(); EXPECT_EQ(42, *i); } // Tests that DefaultValue::Get() returns void. -TEST(DefaultValueTest, GetWorksForVoid) { - return DefaultValue::Get(); -} +TEST(DefaultValueTest, GetWorksForVoid) { return DefaultValue::Get(); } // Tests using DefaultValue with a reference type. 
@@ -348,12 +503,9 @@ TEST(DefaultValueOfReferenceDeathTest, GetReturnsBuiltInDefaultValueWhenUnset) { EXPECT_FALSE(DefaultValue::IsSet()); EXPECT_FALSE(DefaultValue::IsSet()); - EXPECT_DEATH_IF_SUPPORTED({ - DefaultValue::Get(); - }, ""); - EXPECT_DEATH_IF_SUPPORTED({ - DefaultValue::Get(); - }, ""); + EXPECT_DEATH_IF_SUPPORTED({ DefaultValue::Get(); }, ""); + EXPECT_DEATH_IF_SUPPORTED({ DefaultValue::Get(); }, + ""); } // Tests that ActionInterface can be implemented by defining the @@ -384,7 +536,7 @@ TEST(ActionInterfaceTest, MakeAction) { EXPECT_EQ(5, action.Perform(std::make_tuple(true, 5))); } -// Tests that Action can be contructed from a pointer to +// Tests that Action can be constructed from a pointer to // ActionInterface. TEST(ActionTest, CanBeConstructedFromActionInterface) { Action action(new MyActionImpl); @@ -433,7 +585,7 @@ class IsNotZero : public ActionInterface { // NOLINT }; TEST(ActionTest, CanBeConvertedToOtherActionType) { - const Action a1(new IsNotZero); // NOLINT + const Action a1(new IsNotZero); // NOLINT const Action a2 = Action(a1); // NOLINT EXPECT_EQ(1, a2.Perform(std::make_tuple('a'))); EXPECT_EQ(0, a2.Perform(std::make_tuple('\0'))); @@ -525,24 +677,134 @@ TEST(ReturnTest, AcceptsStringLiteral) { EXPECT_EQ("world", a2.Perform(std::make_tuple())); } -// Test struct which wraps a vector of integers. Used in -// 'SupportsWrapperReturnType' test. -struct IntegerVectorWrapper { - std::vector * v; - IntegerVectorWrapper(std::vector& _v) : v(&_v) {} // NOLINT -}; +// Return(x) should work fine when the mock function's return type is a +// reference-like wrapper for decltype(x), as when x is a std::string and the +// mock function returns std::string_view. +TEST(ReturnTest, SupportsReferenceLikeReturnType) { + // A reference wrapper for std::vector, implicitly convertible from it. 
+ struct Result { + const std::vector* v; + Result(const std::vector& v) : v(&v) {} // NOLINT + }; -// Tests that Return() works when return type is a wrapper type. -TEST(ReturnTest, SupportsWrapperReturnType) { - // Initialize vector of integers. - std::vector v; - for (int i = 0; i < 5; ++i) v.push_back(i); + // Set up an action for a mock function that returns the reference wrapper + // type, initializing it with an actual vector. + // + // The returned wrapper should be initialized with a copy of that vector + // that's embedded within the action itself (which should stay alive as long + // as the mock object is alive), rather than e.g. a reference to the temporary + // we feed to Return. This should work fine both for WillOnce and + // WillRepeatedly. + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(Return(std::vector{17, 19, 23})) + .WillRepeatedly(Return(std::vector{29, 31, 37})); - // Return() called with 'v' as argument. The Action will return the same data - // as 'v' (copy) but it will be wrapped in an IntegerVectorWrapper. - Action a = Return(v); - const std::vector& result = *(a.Perform(std::make_tuple()).v); - EXPECT_THAT(result, ::testing::ElementsAre(0, 1, 2, 3, 4)); + EXPECT_THAT(mock.AsStdFunction()(), + Field(&Result::v, Pointee(ElementsAre(17, 19, 23)))); + + EXPECT_THAT(mock.AsStdFunction()(), + Field(&Result::v, Pointee(ElementsAre(29, 31, 37)))); +} + +TEST(ReturnTest, PrefersConversionOperator) { + // Define types In and Out such that: + // + // * In is implicitly convertible to Out. + // * Out also has an explicit constructor from In. 
+ // + struct In; + struct Out { + int x; + + explicit Out(const int x) : x(x) {} + explicit Out(const In&) : x(0) {} + }; + + struct In { + operator Out() const { return Out{19}; } // NOLINT + }; + + // Assumption check: the C++ language rules are such that a function that + // returns Out which uses In a return statement will use the implicit + // conversion path rather than the explicit constructor. + EXPECT_THAT([]() -> Out { return In(); }(), Field(&Out::x, 19)); + + // Return should work the same way: if the mock function's return type is Out + // and we feed Return an In value, then the Out should be created through the + // implicit conversion path rather than the explicit constructor. + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(Return(In())); + EXPECT_THAT(mock.AsStdFunction()(), Field(&Out::x, 19)); +} + +// It should be possible to use Return(R) with a mock function result type U +// that is convertible from const R& but *not* R (such as +// std::reference_wrapper). This should work for both WillOnce and +// WillRepeatedly. +TEST(ReturnTest, ConversionRequiresConstLvalueReference) { + using R = int; + using U = std::reference_wrapper; + + static_assert(std::is_convertible::value, ""); + static_assert(!std::is_convertible::value, ""); + + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(Return(17)).WillRepeatedly(Return(19)); + + EXPECT_EQ(17, mock.AsStdFunction()()); + EXPECT_EQ(19, mock.AsStdFunction()()); +} + +// Return(x) should not be usable with a mock function result type that's +// implicitly convertible from decltype(x) but requires a non-const lvalue +// reference to the input. It doesn't make sense for the conversion operator to +// modify the input. +TEST(ReturnTest, ConversionRequiresMutableLvalueReference) { + // Set up a type that is implicitly convertible from std::string&, but not + // std::string&& or `const std::string&`. 
+ // + // Avoid asserting about conversion from std::string on MSVC, which seems to + // implement std::is_convertible incorrectly in this case. + struct S { + S(std::string&) {} // NOLINT + }; + + static_assert(std::is_convertible::value, ""); +#ifndef _MSC_VER + static_assert(!std::is_convertible::value, ""); +#endif + static_assert(!std::is_convertible::value, ""); + + // It shouldn't be possible to use the result of Return(std::string) in a + // context where an S is needed. + // + // Here too we disable the assertion for MSVC, since its incorrect + // implementation of is_convertible causes our SFINAE to be wrong. + using RA = decltype(Return(std::string())); + + static_assert(!std::is_convertible>::value, ""); +#ifndef _MSC_VER + static_assert(!std::is_convertible>::value, ""); +#endif +} + +TEST(ReturnTest, MoveOnlyResultType) { + // Return should support move-only result types when used with WillOnce. + { + MockFunction()> mock; + EXPECT_CALL(mock, Call) + // NOLINTNEXTLINE + .WillOnce(Return(std::unique_ptr(new int(17)))); + + EXPECT_THAT(mock.AsStdFunction()(), Pointee(17)); + } + + // The result of Return should not be convertible to Action (so it can't be + // used with WillRepeatedly). + static_assert(!std::is_convertible())), + Action()>>::value, + ""); } // Tests that Return(v) is covaraint. @@ -596,19 +858,6 @@ TEST(ReturnTest, ConvertsArgumentWhenConverted) { << "when performed."; } -class DestinationType {}; - -class SourceType { - public: - // Note: a non-const typecast operator. - operator DestinationType() { return DestinationType(); } -}; - -TEST(ReturnTest, CanConvertArgumentUsingNonConstTypeCastOperator) { - SourceType s; - Action action(Return(s)); -} - // Tests that ReturnNull() returns NULL in a pointer-returning function. 
TEST(ReturnNullTest, WorksInPointerReturningFunction) { const Action a1 = ReturnNull(); @@ -648,7 +897,9 @@ TEST(ReturnRefTest, IsCovariant) { } template ()))> -bool CanCallReturnRef(T&&) { return true; } +bool CanCallReturnRef(T&&) { + return true; +} bool CanCallReturnRef(Unused) { return false; } // Tests that ReturnRef(v) is working with non-temporaries (T&) @@ -668,7 +919,7 @@ TEST(ReturnRefTest, WorksForNonTemporary) { // Tests that ReturnRef(v) is not working with temporaries (T&&) TEST(ReturnRefTest, DoesNotWorkForTemporary) { - auto scalar_value = []() -> int { return 123; }; + auto scalar_value = []() -> int { return 123; }; EXPECT_FALSE(CanCallReturnRef(scalar_value())); auto non_scalar_value = []() -> std::string { return "ABC"; }; @@ -747,15 +998,15 @@ class MockClass { int(const std::unique_ptr&, std::unique_ptr)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockClass); + MockClass(const MockClass&) = delete; + MockClass& operator=(const MockClass&) = delete; }; // Tests that DoDefault() returns the built-in default value for the // return type by default. TEST(DoDefaultTest, ReturnsBuiltInDefaultValueByDefault) { MockClass mock; - EXPECT_CALL(mock, IntFunc(_)) - .WillOnce(DoDefault()); + EXPECT_CALL(mock, IntFunc(_)).WillOnce(DoDefault()); EXPECT_EQ(0, mock.IntFunc(true)); } @@ -763,14 +1014,11 @@ TEST(DoDefaultTest, ReturnsBuiltInDefaultValueByDefault) { // the process when there is no built-in default value for the return type. 
TEST(DoDefaultDeathTest, DiesForUnknowType) { MockClass mock; - EXPECT_CALL(mock, Foo()) - .WillRepeatedly(DoDefault()); + EXPECT_CALL(mock, Foo()).WillRepeatedly(DoDefault()); #if GTEST_HAS_EXCEPTIONS EXPECT_ANY_THROW(mock.Foo()); #else - EXPECT_DEATH_IF_SUPPORTED({ - mock.Foo(); - }, ""); + EXPECT_DEATH_IF_SUPPORTED({ mock.Foo(); }, ""); #endif } @@ -782,16 +1030,13 @@ void VoidFunc(bool /* flag */) {} TEST(DoDefaultDeathTest, DiesIfUsedInCompositeAction) { MockClass mock; EXPECT_CALL(mock, IntFunc(_)) - .WillRepeatedly(DoAll(Invoke(VoidFunc), - DoDefault())); + .WillRepeatedly(DoAll(Invoke(VoidFunc), DoDefault())); // Ideally we should verify the error message as well. Sadly, // EXPECT_DEATH() can only capture stderr, while Google Mock's // errors are printed on stdout. Therefore we have to settle for // not verifying the message. - EXPECT_DEATH_IF_SUPPORTED({ - mock.IntFunc(true); - }, ""); + EXPECT_DEATH_IF_SUPPORTED({ mock.IntFunc(true); }, ""); } // Tests that DoDefault() returns the default value set by @@ -799,8 +1044,7 @@ TEST(DoDefaultDeathTest, DiesIfUsedInCompositeAction) { TEST(DoDefaultTest, ReturnsUserSpecifiedPerTypeDefaultValueWhenThereIsOne) { DefaultValue::Set(1); MockClass mock; - EXPECT_CALL(mock, IntFunc(_)) - .WillOnce(DoDefault()); + EXPECT_CALL(mock, IntFunc(_)).WillOnce(DoDefault()); EXPECT_EQ(1, mock.IntFunc(false)); DefaultValue::Clear(); } @@ -808,20 +1052,19 @@ TEST(DoDefaultTest, ReturnsUserSpecifiedPerTypeDefaultValueWhenThereIsOne) { // Tests that DoDefault() does the action specified by ON_CALL(). TEST(DoDefaultTest, DoesWhatOnCallSpecifies) { MockClass mock; - ON_CALL(mock, IntFunc(_)) - .WillByDefault(Return(2)); - EXPECT_CALL(mock, IntFunc(_)) - .WillOnce(DoDefault()); + ON_CALL(mock, IntFunc(_)).WillByDefault(Return(2)); + EXPECT_CALL(mock, IntFunc(_)).WillOnce(DoDefault()); EXPECT_EQ(2, mock.IntFunc(false)); } // Tests that using DoDefault() in ON_CALL() leads to a run-time failure. 
TEST(DoDefaultTest, CannotBeUsedInOnCall) { MockClass mock; - EXPECT_NONFATAL_FAILURE({ // NOLINT - ON_CALL(mock, IntFunc(_)) - .WillByDefault(DoDefault()); - }, "DoDefault() cannot be used in ON_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + ON_CALL(mock, IntFunc(_)).WillByDefault(DoDefault()); + }, + "DoDefault() cannot be used in ON_CALL()"); } // Tests that SetArgPointee(v) sets the variable pointed to by @@ -868,7 +1111,7 @@ TEST(SetArgPointeeTest, AcceptsWideStringLiteral) { a.Perform(std::make_tuple(&ptr)); EXPECT_STREQ(L"world", ptr); -# if GTEST_HAS_STD_WSTRING +#if GTEST_HAS_STD_WSTRING typedef void MyStringFunction(std::wstring*); Action a2 = SetArgPointee<0>(L"world"); @@ -876,7 +1119,7 @@ TEST(SetArgPointeeTest, AcceptsWideStringLiteral) { a2.Perform(std::make_tuple(&str)); EXPECT_EQ(L"world", str); -# endif +#endif } // Tests that SetArgPointee() accepts a char pointer. @@ -907,7 +1150,7 @@ TEST(SetArgPointeeTest, AcceptsWideCharPointer) { a.Perform(std::make_tuple(true, &ptr)); EXPECT_EQ(hi, ptr); -# if GTEST_HAS_STD_WSTRING +#if GTEST_HAS_STD_WSTRING typedef void MyStringFunction(bool, std::wstring*); wchar_t world_array[] = L"world"; @@ -916,7 +1159,7 @@ TEST(SetArgPointeeTest, AcceptsWideCharPointer) { std::wstring str; a2.Perform(std::make_tuple(true, &str)); EXPECT_EQ(world_array, str); -# endif +#endif } // Tests that SetArgumentPointee(v) sets the variable pointed to by @@ -1079,6 +1322,159 @@ TEST(AssignTest, CompatibleTypes) { EXPECT_DOUBLE_EQ(5, x); } +// DoAll should support &&-qualified actions when used with WillOnce. +TEST(DoAll, SupportsRefQualifiedActions) { + struct InitialAction { + void operator()(const int arg) && { EXPECT_EQ(17, arg); } + }; + + struct FinalAction { + int operator()() && { return 19; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(DoAll(InitialAction{}, FinalAction{})); + EXPECT_EQ(19, mock.AsStdFunction()(17)); +} + +// DoAll should never provide rvalue references to the initial actions. 
If the +// mock action itself accepts an rvalue reference or a non-scalar object by +// value then the final action should receive an rvalue reference, but initial +// actions should receive only lvalue references. +TEST(DoAll, ProvidesLvalueReferencesToInitialActions) { + struct Obj {}; + + // Mock action accepts by value: the initial action should be fed a const + // lvalue reference, and the final action an rvalue reference. + { + struct InitialAction { + void operator()(Obj&) const { FAIL() << "Unexpected call"; } + void operator()(const Obj&) const {} + void operator()(Obj&&) const { FAIL() << "Unexpected call"; } + void operator()(const Obj&&) const { FAIL() << "Unexpected call"; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {})) + .WillRepeatedly(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {})); + + mock.AsStdFunction()(Obj{}); + mock.AsStdFunction()(Obj{}); + } + + // Mock action accepts by const lvalue reference: both actions should receive + // a const lvalue reference. + { + struct InitialAction { + void operator()(Obj&) const { FAIL() << "Unexpected call"; } + void operator()(const Obj&) const {} + void operator()(Obj&&) const { FAIL() << "Unexpected call"; } + void operator()(const Obj&&) const { FAIL() << "Unexpected call"; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](const Obj&) {})) + .WillRepeatedly( + DoAll(InitialAction{}, InitialAction{}, [](const Obj&) {})); + + mock.AsStdFunction()(Obj{}); + mock.AsStdFunction()(Obj{}); + } + + // Mock action accepts by non-const lvalue reference: both actions should get + // a non-const lvalue reference if they want them. 
+ { + struct InitialAction { + void operator()(Obj&) const {} + void operator()(Obj&&) const { FAIL() << "Unexpected call"; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&) {})) + .WillRepeatedly(DoAll(InitialAction{}, InitialAction{}, [](Obj&) {})); + + Obj obj; + mock.AsStdFunction()(obj); + mock.AsStdFunction()(obj); + } + + // Mock action accepts by rvalue reference: the initial actions should receive + // a non-const lvalue reference if it wants it, and the final action an rvalue + // reference. + { + struct InitialAction { + void operator()(Obj&) const {} + void operator()(Obj&&) const { FAIL() << "Unexpected call"; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {})) + .WillRepeatedly(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {})); + + mock.AsStdFunction()(Obj{}); + mock.AsStdFunction()(Obj{}); + } + + // &&-qualified initial actions should also be allowed with WillOnce. + { + struct InitialAction { + void operator()(Obj&) && {} + }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&) {})); + + Obj obj; + mock.AsStdFunction()(obj); + } + + { + struct InitialAction { + void operator()(Obj&) && {} + }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {})); + + mock.AsStdFunction()(Obj{}); + } +} + +// DoAll should support being used with type-erased Action objects, both through +// WillOnce and WillRepeatedly. +TEST(DoAll, SupportsTypeErasedActions) { + // With only type-erased actions. 
+ const Action initial_action = [] {}; + const Action final_action = [] { return 17; }; + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(initial_action, initial_action, final_action)) + .WillRepeatedly(DoAll(initial_action, initial_action, final_action)); + + EXPECT_EQ(17, mock.AsStdFunction()()); + + // With &&-qualified and move-only final action. + { + struct FinalAction { + FinalAction() = default; + FinalAction(FinalAction&&) = default; + + int operator()() && { return 17; } + }; + + EXPECT_CALL(mock, Call) + .WillOnce(DoAll(initial_action, initial_action, FinalAction{})); + + EXPECT_EQ(17, mock.AsStdFunction()()); + } +} // Tests using WithArgs and with an action that takes 1 argument. TEST(WithArgsTest, OneArg) { @@ -1175,8 +1571,29 @@ TEST(WithArgsTest, ReturnReference) { TEST(WithArgsTest, InnerActionWithConversion) { Action inner = [] { return nullptr; }; - Action a = testing::WithoutArgs(inner); - EXPECT_EQ(nullptr, a.Perform(std::make_tuple(1.1))); + + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(WithoutArgs(inner)) + .WillRepeatedly(WithoutArgs(inner)); + + EXPECT_EQ(nullptr, mock.AsStdFunction()(1.1)); + EXPECT_EQ(nullptr, mock.AsStdFunction()(1.1)); +} + +// It should be possible to use an &&-qualified inner action as long as the +// whole shebang is used as an rvalue with WillOnce. +TEST(WithArgsTest, RefQualifiedInnerAction) { + struct SomeAction { + int operator()(const int arg) && { + EXPECT_EQ(17, arg); + return 19; + } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(WithArg<1>(SomeAction{})); + EXPECT_EQ(19, mock.AsStdFunction()(0, 17)); } #if !GTEST_OS_WINDOWS_MOBILE @@ -1235,7 +1652,7 @@ TEST(ByRefTest, IsCopyable) { TEST(ByRefTest, ConstValue) { const int n = 0; // int& ref = ByRef(n); // This shouldn't compile - we have a - // negative compilation test to catch it. + // negative compilation test to catch it. 
const int& const_ref = ByRef(n); EXPECT_EQ(&n, &const_ref); } @@ -1260,7 +1677,7 @@ TEST(ByRefTest, ExplicitType) { EXPECT_EQ(&n, &r1); // ByRef(n); // This shouldn't compile - we have a negative - // compilation test to catch it. + // compilation test to catch it. Derived d; Derived& r2 = ByRef(d); @@ -1375,9 +1792,10 @@ TEST(MockMethodTest, CanReturnMoveOnlyValue_DoAllReturn) { MockClass mock; std::unique_ptr i(new int(19)); EXPECT_CALL(mock_function, Call()); - EXPECT_CALL(mock, MakeUnique()).WillOnce(DoAll( - InvokeWithoutArgs(&mock_function, &testing::MockFunction::Call), - Return(ByMove(std::move(i))))); + EXPECT_CALL(mock, MakeUnique()) + .WillOnce(DoAll(InvokeWithoutArgs(&mock_function, + &testing::MockFunction::Call), + Return(ByMove(std::move(i))))); std::unique_ptr result1 = mock.MakeUnique(); EXPECT_EQ(19, *result1); @@ -1387,9 +1805,8 @@ TEST(MockMethodTest, CanReturnMoveOnlyValue_Invoke) { MockClass mock; // Check default value - DefaultValue>::SetFactory([] { - return std::unique_ptr(new int(42)); - }); + DefaultValue>::SetFactory( + [] { return std::unique_ptr(new int(42)); }); EXPECT_EQ(42, *mock.MakeUnique()); EXPECT_CALL(mock, MakeUnique()).WillRepeatedly(Invoke(UniquePtrSource)); @@ -1449,6 +1866,178 @@ TEST(MockMethodTest, CanTakeMoveOnlyValue) { EXPECT_EQ(42, *saved); } +// It should be possible to use callables with an &&-qualified call operator +// with WillOnce, since they will be called only once. This allows actions to +// contain and manipulate move-only types. +TEST(MockMethodTest, ActionHasRvalueRefQualifiedCallOperator) { + struct Return17 { + int operator()() && { return 17; } + }; + + // Action is directly compatible with mocked function type. + { + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(Return17()); + + EXPECT_EQ(17, mock.AsStdFunction()()); + } + + // Action doesn't want mocked function arguments. 
+ { + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(Return17()); + + EXPECT_EQ(17, mock.AsStdFunction()(0)); + } +} + +// Edge case: if an action has both a const-qualified and an &&-qualified call +// operator, there should be no "ambiguous call" errors. The &&-qualified +// operator should be used by WillOnce (since it doesn't need to retain the +// action beyond one call), and the const-qualified one by WillRepeatedly. +TEST(MockMethodTest, ActionHasMultipleCallOperators) { + struct ReturnInt { + int operator()() && { return 17; } + int operator()() const& { return 19; } + }; + + // Directly compatible with mocked function type. + { + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(ReturnInt()).WillRepeatedly(ReturnInt()); + + EXPECT_EQ(17, mock.AsStdFunction()()); + EXPECT_EQ(19, mock.AsStdFunction()()); + EXPECT_EQ(19, mock.AsStdFunction()()); + } + + // Ignores function arguments. + { + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(ReturnInt()).WillRepeatedly(ReturnInt()); + + EXPECT_EQ(17, mock.AsStdFunction()(0)); + EXPECT_EQ(19, mock.AsStdFunction()(0)); + EXPECT_EQ(19, mock.AsStdFunction()(0)); + } +} + +// WillOnce should have no problem coping with a move-only action, whether it is +// &&-qualified or not. 
+TEST(MockMethodTest, MoveOnlyAction) { + // &&-qualified + { + struct Return17 { + Return17() = default; + Return17(Return17&&) = default; + + Return17(const Return17&) = delete; + Return17 operator=(const Return17&) = delete; + + int operator()() && { return 17; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(Return17()); + EXPECT_EQ(17, mock.AsStdFunction()()); + } + + // Not &&-qualified + { + struct Return17 { + Return17() = default; + Return17(Return17&&) = default; + + Return17(const Return17&) = delete; + Return17 operator=(const Return17&) = delete; + + int operator()() const { return 17; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(Return17()); + EXPECT_EQ(17, mock.AsStdFunction()()); + } +} + +// It should be possible to use an action that returns a value with a mock +// function that doesn't, both through WillOnce and WillRepeatedly. +TEST(MockMethodTest, ActionReturnsIgnoredValue) { + struct ReturnInt { + int operator()() const { return 0; } + }; + + MockFunction mock; + EXPECT_CALL(mock, Call).WillOnce(ReturnInt()).WillRepeatedly(ReturnInt()); + + mock.AsStdFunction()(); + mock.AsStdFunction()(); +} + +// Despite the fanciness around move-only actions and so on, it should still be +// possible to hand an lvalue reference to a copyable action to WillOnce. +TEST(MockMethodTest, WillOnceCanAcceptLvalueReference) { + MockFunction mock; + + const auto action = [] { return 17; }; + EXPECT_CALL(mock, Call).WillOnce(action); + + EXPECT_EQ(17, mock.AsStdFunction()()); +} + +// A callable that doesn't use SFINAE to restrict its call operator's overload +// set, but is still picky about which arguments it will accept. +struct StaticAssertSingleArgument { + template + static constexpr bool CheckArgs() { + static_assert(sizeof...(Args) == 1, ""); + return true; + } + + template ()> + int operator()(Args...) 
const { + return 17; + } +}; + +// WillOnce and WillRepeatedly should both work fine with naïve implementations +// of actions that don't use SFINAE to limit the overload set for their call +// operator. If they are compatible with the actual mocked signature, we +// shouldn't probe them with no arguments and trip a static_assert. +TEST(MockMethodTest, ActionSwallowsAllArguments) { + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(StaticAssertSingleArgument{}) + .WillRepeatedly(StaticAssertSingleArgument{}); + + EXPECT_EQ(17, mock.AsStdFunction()(0)); + EXPECT_EQ(17, mock.AsStdFunction()(0)); +} + +struct ActionWithTemplatedConversionOperators { + template + operator OnceAction() && { // NOLINT + return [] { return 17; }; + } + + template + operator Action() const { // NOLINT + return [] { return 19; }; + } +}; + +// It should be fine to hand both WillOnce and WillRepeatedly a function that +// defines templated conversion operators to OnceAction and Action. WillOnce +// should prefer the OnceAction version. +TEST(MockMethodTest, ActionHasTemplatedConversionOperators) { + MockFunction mock; + EXPECT_CALL(mock, Call) + .WillOnce(ActionWithTemplatedConversionOperators{}) + .WillRepeatedly(ActionWithTemplatedConversionOperators{}); + + EXPECT_EQ(17, mock.AsStdFunction()()); + EXPECT_EQ(19, mock.AsStdFunction()()); +} // Tests for std::function based action. @@ -1463,7 +2052,9 @@ int Deref(std::unique_ptr ptr) { return *ptr; } struct Double { template - T operator()(T t) { return 2 * t; } + T operator()(T t) { + return 2 * t; + } }; std::unique_ptr UniqueInt(int i) { @@ -1532,8 +2123,9 @@ TEST(FunctorActionTest, TypeConversion) { TEST(FunctorActionTest, UnusedArguments) { // Verify that users can ignore uninteresting arguments. 
- Action a = - [](int i, Unused, Unused) { return 2 * i; }; + Action a = [](int i, Unused, Unused) { + return 2 * i; + }; std::tuple dummy = std::make_tuple(3, 7.3, 9.44); EXPECT_EQ(6, a.Perform(dummy)); } @@ -1552,9 +2144,7 @@ TEST(MoveOnlyArgumentsTest, ReturningActions) { EXPECT_EQ(x, 3); } -ACTION(ReturnArity) { - return std::tuple_size::value; -} +ACTION(ReturnArity) { return std::tuple_size::value; } TEST(ActionMacro, LargeArity) { EXPECT_EQ( @@ -1573,11 +2163,5 @@ TEST(ActionMacro, LargeArity) { 14, 15, 16, 17, 18, 19))); } -} // Unnamed namespace - -#ifdef _MSC_VER -#if _MSC_VER == 1900 -# pragma warning(pop) -#endif -#endif - +} // namespace +} // namespace testing diff --git a/ext/googletest/googlemock/test/gmock-cardinalities_test.cc b/ext/googletest/googlemock/test/gmock-cardinalities_test.cc index ca97cae249..cdd9956353 100644 --- a/ext/googletest/googlemock/test/gmock-cardinalities_test.cc +++ b/ext/googletest/googlemock/test/gmock-cardinalities_test.cc @@ -27,14 +27,13 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file tests the built-in cardinalities. #include "gmock/gmock.h" -#include "gtest/gtest.h" #include "gtest/gtest-spi.h" +#include "gtest/gtest.h" namespace { @@ -55,13 +54,12 @@ class MockFoo { MOCK_METHOD0(Bar, int()); // NOLINT private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFoo); + MockFoo(const MockFoo&) = delete; + MockFoo& operator=(const MockFoo&) = delete; }; // Tests that Cardinality objects can be default constructed. -TEST(CardinalityTest, IsDefaultConstructable) { - Cardinality c; -} +TEST(CardinalityTest, IsDefaultConstructable) { Cardinality c; } // Tests that Cardinality objects are copyable. 
TEST(CardinalityTest, IsCopyable) { @@ -119,8 +117,7 @@ TEST(AnyNumber, Works) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "called any number of times", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called any number of times", ss.str()); } TEST(AnyNumberTest, HasCorrectBounds) { @@ -132,9 +129,11 @@ TEST(AnyNumberTest, HasCorrectBounds) { // Tests AtLeast(n). TEST(AtLeastTest, OnNegativeNumber) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - AtLeast(-1); - }, "The invocation lower bound must be >= 0"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + AtLeast(-1); + }, + "The invocation lower bound must be >= 0"); } TEST(AtLeastTest, OnZero) { @@ -147,8 +146,7 @@ TEST(AtLeastTest, OnZero) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "any number of times", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "any number of times", ss.str()); } TEST(AtLeastTest, OnPositiveNumber) { @@ -164,18 +162,15 @@ TEST(AtLeastTest, OnPositiveNumber) { stringstream ss1; AtLeast(1).DescribeTo(&ss1); - EXPECT_PRED_FORMAT2(IsSubstring, "at least once", - ss1.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "at least once", ss1.str()); stringstream ss2; c.DescribeTo(&ss2); - EXPECT_PRED_FORMAT2(IsSubstring, "at least twice", - ss2.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "at least twice", ss2.str()); stringstream ss3; AtLeast(3).DescribeTo(&ss3); - EXPECT_PRED_FORMAT2(IsSubstring, "at least 3 times", - ss3.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "at least 3 times", ss3.str()); } TEST(AtLeastTest, HasCorrectBounds) { @@ -187,9 +182,11 @@ TEST(AtLeastTest, HasCorrectBounds) { // Tests AtMost(n). 
TEST(AtMostTest, OnNegativeNumber) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - AtMost(-1); - }, "The invocation upper bound must be >= 0"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + AtMost(-1); + }, + "The invocation upper bound must be >= 0"); } TEST(AtMostTest, OnZero) { @@ -202,8 +199,7 @@ TEST(AtMostTest, OnZero) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "never called", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str()); } TEST(AtMostTest, OnPositiveNumber) { @@ -219,18 +215,15 @@ TEST(AtMostTest, OnPositiveNumber) { stringstream ss1; AtMost(1).DescribeTo(&ss1); - EXPECT_PRED_FORMAT2(IsSubstring, "called at most once", - ss1.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called at most once", ss1.str()); stringstream ss2; c.DescribeTo(&ss2); - EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", - ss2.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", ss2.str()); stringstream ss3; AtMost(3).DescribeTo(&ss3); - EXPECT_PRED_FORMAT2(IsSubstring, "called at most 3 times", - ss3.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called at most 3 times", ss3.str()); } TEST(AtMostTest, HasCorrectBounds) { @@ -242,22 +235,28 @@ TEST(AtMostTest, HasCorrectBounds) { // Tests Between(m, n). 
TEST(BetweenTest, OnNegativeStart) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - Between(-1, 2); - }, "The invocation lower bound must be >= 0, but is actually -1"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + Between(-1, 2); + }, + "The invocation lower bound must be >= 0, but is actually -1"); } TEST(BetweenTest, OnNegativeEnd) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - Between(1, -2); - }, "The invocation upper bound must be >= 0, but is actually -2"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + Between(1, -2); + }, + "The invocation upper bound must be >= 0, but is actually -2"); } TEST(BetweenTest, OnStartBiggerThanEnd) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - Between(2, 1); - }, "The invocation upper bound (1) must be >= " - "the invocation lower bound (2)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + Between(2, 1); + }, + "The invocation upper bound (1) must be >= " + "the invocation lower bound (2)"); } TEST(BetweenTest, OnZeroStartAndZeroEnd) { @@ -271,8 +270,7 @@ TEST(BetweenTest, OnZeroStartAndZeroEnd) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "never called", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str()); } TEST(BetweenTest, OnZeroStartAndNonZeroEnd) { @@ -289,8 +287,7 @@ TEST(BetweenTest, OnZeroStartAndNonZeroEnd) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", ss.str()); } TEST(BetweenTest, OnSameStartAndEnd) { @@ -307,8 +304,7 @@ TEST(BetweenTest, OnSameStartAndEnd) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", ss.str()); } TEST(BetweenTest, OnDifferentStartAndEnd) { @@ -328,8 +324,7 @@ TEST(BetweenTest, OnDifferentStartAndEnd) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "called between 3 and 5 times", - ss.str()); + 
EXPECT_PRED_FORMAT2(IsSubstring, "called between 3 and 5 times", ss.str()); } TEST(BetweenTest, HasCorrectBounds) { @@ -341,9 +336,11 @@ TEST(BetweenTest, HasCorrectBounds) { // Tests Exactly(n). TEST(ExactlyTest, OnNegativeNumber) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - Exactly(-1); - }, "The invocation lower bound must be >= 0"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + Exactly(-1); + }, + "The invocation lower bound must be >= 0"); } TEST(ExactlyTest, OnZero) { @@ -356,8 +353,7 @@ TEST(ExactlyTest, OnZero) { stringstream ss; c.DescribeTo(&ss); - EXPECT_PRED_FORMAT2(IsSubstring, "never called", - ss.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str()); } TEST(ExactlyTest, OnPositiveNumber) { @@ -370,18 +366,15 @@ TEST(ExactlyTest, OnPositiveNumber) { stringstream ss1; Exactly(1).DescribeTo(&ss1); - EXPECT_PRED_FORMAT2(IsSubstring, "called once", - ss1.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called once", ss1.str()); stringstream ss2; c.DescribeTo(&ss2); - EXPECT_PRED_FORMAT2(IsSubstring, "called twice", - ss2.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called twice", ss2.str()); stringstream ss3; Exactly(3).DescribeTo(&ss3); - EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", - ss3.str()); + EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", ss3.str()); } TEST(ExactlyTest, HasCorrectBounds) { diff --git a/ext/googletest/googlemock/test/gmock-function-mocker_test.cc b/ext/googletest/googlemock/test/gmock-function-mocker_test.cc index cf76fa99f2..286115fef0 100644 --- a/ext/googletest/googlemock/test/gmock-function-mocker_test.cc +++ b/ext/googletest/googlemock/test/gmock-function-mocker_test.cc @@ -27,6 +27,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Silence C4503 (decorated name length exceeded) for MSVC. 
+#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4503) +#endif // Google Mock - a framework for writing C++ mock classes. // @@ -37,7 +42,7 @@ // MSDN says the header file to be included for STDMETHOD is BaseTyps.h but // we are getting compiler errors if we use basetyps.h, hence including // objbase.h for definition of STDMETHOD. -# include +#include #endif // GTEST_OS_WINDOWS #include @@ -65,7 +70,7 @@ using testing::Return; using testing::ReturnRef; using testing::TypedEq; -template +template class TemplatedCopyable { public: TemplatedCopyable() {} @@ -82,7 +87,7 @@ class FooInterface { virtual int Nullary() = 0; virtual bool Unary(int x) = 0; - virtual long Binary(short x, int y) = 0; // NOLINT + virtual long Binary(short x, int y) = 0; // NOLINT virtual int Decimal(bool b, char c, short d, int e, long f, // NOLINT float g, double h, unsigned i, char* j, const std::string& k) = 0; @@ -133,8 +138,8 @@ class FooInterface { // signature. This was fixed in Visual Studio 2008. However, the compiler // still emits a warning that alerts about this change in behavior. 
#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable : 4373) +#pragma warning(push) +#pragma warning(disable : 4373) #endif class MockFoo : public FooInterface { public: @@ -203,7 +208,8 @@ class MockFoo : public FooInterface { MOCK_METHOD(int, RefQualifiedOverloaded, (), (ref(&&), override)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFoo); + MockFoo(const MockFoo&) = delete; + MockFoo& operator=(const MockFoo&) = delete; }; class LegacyMockFoo : public FooInterface { @@ -275,11 +281,12 @@ class LegacyMockFoo : public FooInterface { int RefQualifiedOverloaded() && override { return 0; } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(LegacyMockFoo); + LegacyMockFoo(const LegacyMockFoo&) = delete; + LegacyMockFoo& operator=(const LegacyMockFoo&) = delete; }; #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif template @@ -493,7 +500,8 @@ class MockB { MOCK_METHOD(void, DoB, ()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockB); + MockB(const MockB&) = delete; + MockB& operator=(const MockB&) = delete; }; class LegacyMockB { @@ -503,7 +511,8 @@ class LegacyMockB { MOCK_METHOD0(DoB, void()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(LegacyMockB); + LegacyMockB(const LegacyMockB&) = delete; + LegacyMockB& operator=(const LegacyMockB&) = delete; }; template @@ -558,7 +567,8 @@ class MockStack : public StackInterface { MOCK_METHOD((std::map), ReturnTypeWithComma, (int), (const)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockStack); + MockStack(const MockStack&) = delete; + MockStack& operator=(const MockStack&) = delete; }; template @@ -576,7 +586,8 @@ class LegacyMockStack : public StackInterface { MOCK_CONST_METHOD1_T(ReturnTypeWithComma, std::map(int)); // NOLINT private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(LegacyMockStack); + LegacyMockStack(const LegacyMockStack&) = delete; + LegacyMockStack& operator=(const LegacyMockStack&) = delete; }; template @@ -595,10 +606,8 @@ TYPED_TEST(TemplateMockTest, Works) { .WillOnce(Return(0)); 
EXPECT_CALL(mock, Push(_)); int n = 5; - EXPECT_CALL(mock, GetTop()) - .WillOnce(ReturnRef(n)); - EXPECT_CALL(mock, Pop()) - .Times(AnyNumber()); + EXPECT_CALL(mock, GetTop()).WillOnce(ReturnRef(n)); + EXPECT_CALL(mock, Pop()).Times(AnyNumber()); EXPECT_EQ(0, mock.GetSize()); mock.Push(5); @@ -612,10 +621,8 @@ TYPED_TEST(TemplateMockTest, MethodWithCommaInReturnTypeWorks) { TypeParam mock; const std::map a_map; - EXPECT_CALL(mock, ReturnTypeWithComma()) - .WillOnce(Return(a_map)); - EXPECT_CALL(mock, ReturnTypeWithComma(1)) - .WillOnce(Return(a_map)); + EXPECT_CALL(mock, ReturnTypeWithComma()).WillOnce(Return(a_map)); + EXPECT_CALL(mock, ReturnTypeWithComma(1)).WillOnce(Return(a_map)); EXPECT_EQ(a_map, mock.ReturnTypeWithComma()); EXPECT_EQ(a_map, mock.ReturnTypeWithComma(1)); @@ -650,7 +657,8 @@ class MockStackWithCallType : public StackInterfaceWithCallType { (Calltype(STDMETHODCALLTYPE), override, const)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockStackWithCallType); + MockStackWithCallType(const MockStackWithCallType&) = delete; + MockStackWithCallType& operator=(const MockStackWithCallType&) = delete; }; template @@ -664,7 +672,9 @@ class LegacyMockStackWithCallType : public StackInterfaceWithCallType { MOCK_CONST_METHOD0_T_WITH_CALLTYPE(STDMETHODCALLTYPE, GetTop, const T&()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(LegacyMockStackWithCallType); + LegacyMockStackWithCallType(const LegacyMockStackWithCallType&) = delete; + LegacyMockStackWithCallType& operator=(const LegacyMockStackWithCallType&) = + delete; }; template @@ -685,10 +695,8 @@ TYPED_TEST(TemplateMockTestWithCallType, Works) { .WillOnce(Return(0)); EXPECT_CALL(mock, Push(_)); int n = 5; - EXPECT_CALL(mock, GetTop()) - .WillOnce(ReturnRef(n)); - EXPECT_CALL(mock, Pop()) - .Times(AnyNumber()); + EXPECT_CALL(mock, GetTop()).WillOnce(ReturnRef(n)); + EXPECT_CALL(mock, Pop()).Times(AnyNumber()); EXPECT_EQ(0, mock.GetSize()); mock.Push(5); @@ -716,7 +724,9 @@ class MockOverloadedOnArgNumber { 
MY_MOCK_METHODS1_; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockOverloadedOnArgNumber); + MockOverloadedOnArgNumber(const MockOverloadedOnArgNumber&) = delete; + MockOverloadedOnArgNumber& operator=(const MockOverloadedOnArgNumber&) = + delete; }; class LegacyMockOverloadedOnArgNumber { @@ -726,7 +736,10 @@ class LegacyMockOverloadedOnArgNumber { LEGACY_MY_MOCK_METHODS1_; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(LegacyMockOverloadedOnArgNumber); + LegacyMockOverloadedOnArgNumber(const LegacyMockOverloadedOnArgNumber&) = + delete; + LegacyMockOverloadedOnArgNumber& operator=( + const LegacyMockOverloadedOnArgNumber&) = delete; }; template @@ -747,9 +760,9 @@ TYPED_TEST(OverloadedMockMethodTest, CanOverloadOnArgNumberInMacroBody) { EXPECT_TRUE(mock.Overloaded(true, 1)); } -#define MY_MOCK_METHODS2_ \ - MOCK_CONST_METHOD1(Overloaded, int(int n)); \ - MOCK_METHOD1(Overloaded, int(int n)) +#define MY_MOCK_METHODS2_ \ + MOCK_CONST_METHOD1(Overloaded, int(int n)); \ + MOCK_METHOD1(Overloaded, int(int n)) class MockOverloadedOnConstness { public: @@ -758,7 +771,9 @@ class MockOverloadedOnConstness { MY_MOCK_METHODS2_; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockOverloadedOnConstness); + MockOverloadedOnConstness(const MockOverloadedOnConstness&) = delete; + MockOverloadedOnConstness& operator=(const MockOverloadedOnConstness&) = + delete; }; TEST(MockMethodOverloadedMockMethodTest, CanOverloadOnConstnessInMacroBody) { @@ -779,9 +794,7 @@ TEST(MockMethodMockFunctionTest, WorksForVoidNullary) { TEST(MockMethodMockFunctionTest, WorksForNonVoidNullary) { MockFunction foo; - EXPECT_CALL(foo, Call()) - .WillOnce(Return(1)) - .WillOnce(Return(2)); + EXPECT_CALL(foo, Call()).WillOnce(Return(1)).WillOnce(Return(2)); EXPECT_EQ(1, foo.Call()); EXPECT_EQ(2, foo.Call()); } @@ -794,19 +807,17 @@ TEST(MockMethodMockFunctionTest, WorksForVoidUnary) { TEST(MockMethodMockFunctionTest, WorksForNonVoidBinary) { MockFunction foo; - EXPECT_CALL(foo, Call(false, 42)) - 
.WillOnce(Return(1)) - .WillOnce(Return(2)); - EXPECT_CALL(foo, Call(true, Ge(100))) - .WillOnce(Return(3)); + EXPECT_CALL(foo, Call(false, 42)).WillOnce(Return(1)).WillOnce(Return(2)); + EXPECT_CALL(foo, Call(true, Ge(100))).WillOnce(Return(3)); EXPECT_EQ(1, foo.Call(false, 42)); EXPECT_EQ(2, foo.Call(false, 42)); EXPECT_EQ(3, foo.Call(true, 120)); } TEST(MockMethodMockFunctionTest, WorksFor10Arguments) { - MockFunction foo; + MockFunction + foo; EXPECT_CALL(foo, Call(_, 'a', _, _, _, _, _, _, _, _)) .WillOnce(Return(1)) .WillOnce(Return(2)); @@ -816,9 +827,7 @@ TEST(MockMethodMockFunctionTest, WorksFor10Arguments) { TEST(MockMethodMockFunctionTest, AsStdFunction) { MockFunction foo; - auto call = [](const std::function &f, int i) { - return f(i); - }; + auto call = [](const std::function& f, int i) { return f(i); }; EXPECT_CALL(foo, Call(1)).WillOnce(Return(-1)); EXPECT_CALL(foo, Call(2)).WillOnce(Return(-2)); EXPECT_EQ(-1, call(foo.AsStdFunction(), 1)); @@ -836,10 +845,8 @@ TEST(MockMethodMockFunctionTest, AsStdFunctionReturnsReference) { } TEST(MockMethodMockFunctionTest, AsStdFunctionWithReferenceParameter) { - MockFunction foo; - auto call = [](const std::function &f, int &i) { - return f(i); - }; + MockFunction foo; + auto call = [](const std::function& f, int& i) { return f(i); }; int i = 42; EXPECT_CALL(foo, Call(i)).WillOnce(Return(-1)); EXPECT_EQ(-1, call(foo.AsStdFunction(), i)); @@ -888,8 +895,7 @@ TYPED_TEST( } template -struct AlternateCallable { -}; +struct AlternateCallable {}; TYPED_TEST(MockMethodMockFunctionSignatureTest, IsMockFunctionTemplateArgumentDeducedForAlternateCallable) { @@ -898,16 +904,14 @@ TYPED_TEST(MockMethodMockFunctionSignatureTest, EXPECT_TRUE(IsMockFunctionTemplateArgumentDeducedTo(foo)); } -TYPED_TEST( - MockMethodMockFunctionSignatureTest, - IsMockFunctionCallMethodSignatureTheSameForAlternateCallable) { +TYPED_TEST(MockMethodMockFunctionSignatureTest, + IsMockFunctionCallMethodSignatureTheSameForAlternateCallable) { using 
ForRawSignature = decltype(&MockFunction::Call); using ForStdFunction = decltype(&MockFunction>::Call); EXPECT_TRUE((std::is_same::value)); } - struct MockMethodSizes0 { MOCK_METHOD(void, func, ()); }; @@ -925,22 +929,21 @@ struct MockMethodSizes4 { }; struct LegacyMockMethodSizes0 { - MOCK_METHOD0(func, void()); + MOCK_METHOD0(func, void()); }; struct LegacyMockMethodSizes1 { - MOCK_METHOD1(func, void(int)); + MOCK_METHOD1(func, void(int)); }; struct LegacyMockMethodSizes2 { - MOCK_METHOD2(func, void(int, int)); + MOCK_METHOD2(func, void(int, int)); }; struct LegacyMockMethodSizes3 { - MOCK_METHOD3(func, void(int, int, int)); + MOCK_METHOD3(func, void(int, int, int)); }; struct LegacyMockMethodSizes4 { - MOCK_METHOD4(func, void(int, int, int, int)); + MOCK_METHOD4(func, void(int, int, int, int)); }; - TEST(MockMethodMockFunctionTest, MockMethodSizeOverhead) { EXPECT_EQ(sizeof(MockMethodSizes0), sizeof(MockMethodSizes1)); EXPECT_EQ(sizeof(MockMethodSizes0), sizeof(MockMethodSizes2)); diff --git a/ext/googletest/googlemock/test/gmock-internal-utils_test.cc b/ext/googletest/googlemock/test/gmock-internal-utils_test.cc index bd7e3353d9..932bece5af 100644 --- a/ext/googletest/googlemock/test/gmock-internal-utils_test.cc +++ b/ext/googletest/googlemock/test/gmock-internal-utils_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file tests the internal utilities. @@ -58,7 +57,7 @@ #undef GTEST_IMPLEMENTATION_ #if GTEST_OS_CYGWIN -# include // For ssize_t. NOLINT +#include // For ssize_t. 
NOLINT #endif namespace proto2 { @@ -70,24 +69,23 @@ namespace internal { namespace { -TEST(JoinAsTupleTest, JoinsEmptyTuple) { - EXPECT_EQ("", JoinAsTuple(Strings())); +TEST(JoinAsKeyValueTupleTest, JoinsEmptyTuple) { + EXPECT_EQ("", JoinAsKeyValueTuple({}, Strings())); } -TEST(JoinAsTupleTest, JoinsOneTuple) { - const char* fields[] = {"1"}; - EXPECT_EQ("1", JoinAsTuple(Strings(fields, fields + 1))); +TEST(JoinAsKeyValueTupleTest, JoinsOneTuple) { + EXPECT_EQ("(a: 1)", JoinAsKeyValueTuple({"a"}, {"1"})); } -TEST(JoinAsTupleTest, JoinsTwoTuple) { - const char* fields[] = {"1", "a"}; - EXPECT_EQ("(1, a)", JoinAsTuple(Strings(fields, fields + 2))); +TEST(JoinAsKeyValueTupleTest, JoinsTwoTuple) { + EXPECT_EQ("(a: 1, b: 2)", JoinAsKeyValueTuple({"a", "b"}, {"1", "2"})); } -TEST(JoinAsTupleTest, JoinsTenTuple) { - const char* fields[] = {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}; - EXPECT_EQ("(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)", - JoinAsTuple(Strings(fields, fields + 10))); +TEST(JoinAsKeyValueTupleTest, JoinsTenTuple) { + EXPECT_EQ( + "(a: 1, b: 2, c: 3, d: 4, e: 5, f: 6, g: 7, h: 8, i: 9, j: 10)", + JoinAsKeyValueTuple({"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}, + {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"})); } TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameContainsNoWord) { @@ -140,6 +138,12 @@ TEST(GetRawPointerTest, WorksForRawPointers) { EXPECT_EQ(&n, GetRawPointer(&n)); } +TEST(GetRawPointerTest, WorksForStdReferenceWrapper) { + int n = 1; + EXPECT_EQ(&n, GetRawPointer(std::ref(n))); + EXPECT_EQ(&n, GetRawPointer(std::cref(n))); +} + // Tests KindOf. 
class Base {}; @@ -150,19 +154,19 @@ TEST(KindOfTest, Bool) { } TEST(KindOfTest, Integer) { - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(char)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(signed char)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned char)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(short)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned short)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(int)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned int)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(long)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned long)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(long long)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(char)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(signed char)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned char)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(short)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned short)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(int)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned int)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(long)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned long)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(long long)); // NOLINT EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned long long)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(wchar_t)); // NOLINT - EXPECT_EQ(kInteger, GMOCK_KIND_OF_(size_t)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(wchar_t)); // NOLINT + EXPECT_EQ(kInteger, GMOCK_KIND_OF_(size_t)); // NOLINT #if GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN // ssize_t is not defined on Windows and possibly some other OSes. 
EXPECT_EQ(kInteger, GMOCK_KIND_OF_(ssize_t)); // NOLINT @@ -170,15 +174,15 @@ TEST(KindOfTest, Integer) { } TEST(KindOfTest, FloatingPoint) { - EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(float)); // NOLINT - EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(double)); // NOLINT + EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(float)); // NOLINT + EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(double)); // NOLINT EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(long double)); // NOLINT } TEST(KindOfTest, Other) { - EXPECT_EQ(kOther, GMOCK_KIND_OF_(void*)); // NOLINT + EXPECT_EQ(kOther, GMOCK_KIND_OF_(void*)); // NOLINT EXPECT_EQ(kOther, GMOCK_KIND_OF_(char**)); // NOLINT - EXPECT_EQ(kOther, GMOCK_KIND_OF_(Base)); // NOLINT + EXPECT_EQ(kOther, GMOCK_KIND_OF_(Base)); // NOLINT } // Tests LosslessArithmeticConvertible. @@ -209,26 +213,26 @@ TEST(LosslessArithmeticConvertibleTest, IntegerToInteger) { EXPECT_TRUE((LosslessArithmeticConvertible::value)); // Unsigned => larger unsigned is fine. - EXPECT_TRUE((LosslessArithmeticConvertible< - unsigned short, uint64_t>::value)); // NOLINT + EXPECT_TRUE((LosslessArithmeticConvertible::value)); // NOLINT // Signed => unsigned is not fine. - EXPECT_FALSE((LosslessArithmeticConvertible< - short, uint64_t>::value)); // NOLINT - EXPECT_FALSE((LosslessArithmeticConvertible< - signed char, unsigned int>::value)); // NOLINT + EXPECT_FALSE( + (LosslessArithmeticConvertible::value)); // NOLINT + EXPECT_FALSE((LosslessArithmeticConvertible::value)); // NOLINT // Same size and same signedness: fine too. - EXPECT_TRUE((LosslessArithmeticConvertible< - unsigned char, unsigned char>::value)); + EXPECT_TRUE( + (LosslessArithmeticConvertible::value)); EXPECT_TRUE((LosslessArithmeticConvertible::value)); EXPECT_TRUE((LosslessArithmeticConvertible::value)); - EXPECT_TRUE((LosslessArithmeticConvertible< - unsigned long, unsigned long>::value)); // NOLINT + EXPECT_TRUE((LosslessArithmeticConvertible::value)); // NOLINT // Same size, different signedness: not fine. 
- EXPECT_FALSE((LosslessArithmeticConvertible< - unsigned char, signed char>::value)); + EXPECT_FALSE( + (LosslessArithmeticConvertible::value)); EXPECT_FALSE((LosslessArithmeticConvertible::value)); EXPECT_FALSE((LosslessArithmeticConvertible::value)); @@ -243,8 +247,8 @@ TEST(LosslessArithmeticConvertibleTest, IntegerToFloatingPoint) { // the format of the latter is implementation-defined. EXPECT_FALSE((LosslessArithmeticConvertible::value)); EXPECT_FALSE((LosslessArithmeticConvertible::value)); - EXPECT_FALSE((LosslessArithmeticConvertible< - short, long double>::value)); // NOLINT + EXPECT_FALSE( + (LosslessArithmeticConvertible::value)); // NOLINT } TEST(LosslessArithmeticConvertibleTest, FloatingPointToBool) { @@ -272,7 +276,7 @@ TEST(LosslessArithmeticConvertibleTest, FloatingPointToFloatingPoint) { EXPECT_FALSE((LosslessArithmeticConvertible::value)); GTEST_INTENTIONAL_CONST_COND_PUSH_() if (sizeof(double) == sizeof(long double)) { // NOLINT - GTEST_INTENTIONAL_CONST_COND_POP_() + GTEST_INTENTIONAL_CONST_COND_POP_() // In some implementations (e.g. MSVC), double and long double // have the same size. 
EXPECT_TRUE((LosslessArithmeticConvertible::value)); @@ -291,7 +295,7 @@ TEST(TupleMatchesTest, WorksForSize0) { } TEST(TupleMatchesTest, WorksForSize1) { - std::tuple > matchers(Eq(1)); + std::tuple> matchers(Eq(1)); std::tuple values1(1), values2(2); EXPECT_TRUE(TupleMatches(matchers, values1)); @@ -299,7 +303,7 @@ TEST(TupleMatchesTest, WorksForSize1) { } TEST(TupleMatchesTest, WorksForSize2) { - std::tuple, Matcher > matchers(Eq(1), Eq('a')); + std::tuple, Matcher> matchers(Eq(1), Eq('a')); std::tuple values1(1, 'a'), values2(1, 'b'), values3(2, 'a'), values4(2, 'b'); @@ -312,7 +316,7 @@ TEST(TupleMatchesTest, WorksForSize2) { TEST(TupleMatchesTest, WorksForSize5) { std::tuple, Matcher, Matcher, Matcher, // NOLINT - Matcher > + Matcher> matchers(Eq(1), Eq('a'), Eq(true), Eq(2L), Eq("hi")); std::tuple // NOLINT values1(1, 'a', true, 2L, "hi"), values2(1, 'a', true, 2L, "hello"), @@ -331,13 +335,10 @@ TEST(AssertTest, SucceedsOnTrue) { // Tests that Assert(false, ...) generates a fatal failure. TEST(AssertTest, FailsFatallyOnFalse) { - EXPECT_DEATH_IF_SUPPORTED({ - Assert(false, __FILE__, __LINE__, "This should fail."); - }, ""); + EXPECT_DEATH_IF_SUPPORTED( + { Assert(false, __FILE__, __LINE__, "This should fail."); }, ""); - EXPECT_DEATH_IF_SUPPORTED({ - Assert(false, __FILE__, __LINE__); - }, ""); + EXPECT_DEATH_IF_SUPPORTED({ Assert(false, __FILE__, __LINE__); }, ""); } // Tests that Expect(true, ...) succeeds. @@ -348,40 +349,44 @@ TEST(ExpectTest, SucceedsOnTrue) { // Tests that Expect(false, ...) generates a non-fatal failure. 
TEST(ExpectTest, FailsNonfatallyOnFalse) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - Expect(false, __FILE__, __LINE__, "This should fail."); - }, "This should fail"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + Expect(false, __FILE__, __LINE__, "This should fail."); + }, + "This should fail"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - Expect(false, __FILE__, __LINE__); - }, "Expectation failed"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + Expect(false, __FILE__, __LINE__); + }, + "Expectation failed"); } // Tests LogIsVisible(). class LogIsVisibleTest : public ::testing::Test { protected: - void SetUp() override { original_verbose_ = GMOCK_FLAG(verbose); } + void SetUp() override { original_verbose_ = GMOCK_FLAG_GET(verbose); } - void TearDown() override { GMOCK_FLAG(verbose) = original_verbose_; } + void TearDown() override { GMOCK_FLAG_SET(verbose, original_verbose_); } std::string original_verbose_; }; TEST_F(LogIsVisibleTest, AlwaysReturnsTrueIfVerbosityIsInfo) { - GMOCK_FLAG(verbose) = kInfoVerbosity; + GMOCK_FLAG_SET(verbose, kInfoVerbosity); EXPECT_TRUE(LogIsVisible(kInfo)); EXPECT_TRUE(LogIsVisible(kWarning)); } TEST_F(LogIsVisibleTest, AlwaysReturnsFalseIfVerbosityIsError) { - GMOCK_FLAG(verbose) = kErrorVerbosity; + GMOCK_FLAG_SET(verbose, kErrorVerbosity); EXPECT_FALSE(LogIsVisible(kInfo)); EXPECT_FALSE(LogIsVisible(kWarning)); } TEST_F(LogIsVisibleTest, WorksWhenVerbosityIsWarning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); EXPECT_FALSE(LogIsVisible(kInfo)); EXPECT_TRUE(LogIsVisible(kWarning)); } @@ -394,31 +399,31 @@ TEST_F(LogIsVisibleTest, WorksWhenVerbosityIsWarning) { // and log severity. 
void TestLogWithSeverity(const std::string& verbosity, LogSeverity severity, bool should_print) { - const std::string old_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = verbosity; + const std::string old_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, verbosity); CaptureStdout(); Log(severity, "Test log.\n", 0); if (should_print) { - EXPECT_THAT(GetCapturedStdout().c_str(), - ContainsRegex( - severity == kWarning ? - "^\nGMOCK WARNING:\nTest log\\.\nStack trace:\n" : - "^\nTest log\\.\nStack trace:\n")); + EXPECT_THAT( + GetCapturedStdout().c_str(), + ContainsRegex(severity == kWarning + ? "^\nGMOCK WARNING:\nTest log\\.\nStack trace:\n" + : "^\nTest log\\.\nStack trace:\n")); } else { EXPECT_STREQ("", GetCapturedStdout().c_str()); } - GMOCK_FLAG(verbose) = old_flag; + GMOCK_FLAG_SET(verbose, old_flag); } // Tests that when the stack_frames_to_skip parameter is negative, // Log() doesn't include the stack trace in the output. TEST(LogTest, NoStackTraceWhenStackFramesToSkipIsNegative) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = kInfoVerbosity; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, kInfoVerbosity); CaptureStdout(); Log(kInfo, "Test log.\n", -1); EXPECT_STREQ("\nTest log.\n", GetCapturedStdout().c_str()); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } struct MockStackTraceGetter : testing::internal::OsStackTraceGetterInterface { @@ -450,13 +455,13 @@ TEST(LogTest, NoSkippingStackFrameInOptMode) { EXPECT_THAT(log, HasSubstr(expected_message)); int skip_count = atoi(log.substr(expected_message.size()).c_str()); -# if defined(NDEBUG) +#if defined(NDEBUG) // In opt mode, no stack frame should be skipped. const int expected_skip_count = 0; -# else +#else // In dbg mode, the stack frames should be skipped. 
const int expected_skip_count = 100; -# endif +#endif // Note that each inner implementation layer will +1 the number to remove // itself from the trace. This means that the value is a little higher than @@ -498,12 +503,12 @@ TEST(LogTest, OnlyWarningsArePrintedWhenVerbosityIsInvalid) { // Verifies that Log() behaves correctly for the given verbosity level // and log severity. -std::string GrabOutput(void(*logger)(), const char* verbosity) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = verbosity; +std::string GrabOutput(void (*logger)(), const char* verbosity) { + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, verbosity); CaptureStdout(); logger(); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); return GetCapturedStdout(); } @@ -533,7 +538,7 @@ TEST(ExpectCallTest, DoesNotLogWhenVerbosityIsWarning) { // Verifies that EXPECT_CALL doesn't log // if the --gmock_verbose flag is set to "error". -TEST(ExpectCallTest, DoesNotLogWhenVerbosityIsError) { +TEST(ExpectCallTest, DoesNotLogWhenVerbosityIsError) { EXPECT_STREQ("", GrabOutput(ExpectCallLogger, kErrorVerbosity).c_str()); } @@ -577,9 +582,9 @@ TEST(OnCallTest, LogsAnythingArgument) { TEST(StlContainerViewTest, WorksForStlContainer) { StaticAssertTypeEq, - StlContainerView >::type>(); + StlContainerView>::type>(); StaticAssertTypeEq&, - StlContainerView >::const_reference>(); + StlContainerView>::const_reference>(); typedef std::vector Chars; Chars v1; @@ -592,17 +597,16 @@ TEST(StlContainerViewTest, WorksForStlContainer) { } TEST(StlContainerViewTest, WorksForStaticNativeArray) { - StaticAssertTypeEq, - StlContainerView::type>(); + StaticAssertTypeEq, StlContainerView::type>(); StaticAssertTypeEq, - StlContainerView::type>(); + StlContainerView::type>(); StaticAssertTypeEq, - StlContainerView::type>(); + StlContainerView::type>(); StaticAssertTypeEq, - StlContainerView::const_reference>(); + 
StlContainerView::const_reference>(); - int a1[3] = { 0, 1, 2 }; + int a1[3] = {0, 1, 2}; NativeArray a2 = StlContainerView::ConstReference(a1); EXPECT_EQ(3U, a2.size()); EXPECT_EQ(a1, a2.begin()); @@ -620,24 +624,24 @@ TEST(StlContainerViewTest, WorksForStaticNativeArray) { TEST(StlContainerViewTest, WorksForDynamicNativeArray) { StaticAssertTypeEq, - StlContainerView >::type>(); + StlContainerView>::type>(); StaticAssertTypeEq< NativeArray, - StlContainerView, int> >::type>(); + StlContainerView, int>>::type>(); StaticAssertTypeEq< const NativeArray, - StlContainerView >::const_reference>(); + StlContainerView>::const_reference>(); - int a1[3] = { 0, 1, 2 }; + int a1[3] = {0, 1, 2}; const int* const p1 = a1; NativeArray a2 = - StlContainerView >::ConstReference( + StlContainerView>::ConstReference( std::make_tuple(p1, 3)); EXPECT_EQ(3U, a2.size()); EXPECT_EQ(a1, a2.begin()); - const NativeArray a3 = StlContainerView >::Copy( + const NativeArray a3 = StlContainerView>::Copy( std::make_tuple(static_cast(a1), 3)); ASSERT_EQ(3U, a3.size()); EXPECT_EQ(0, a3.begin()[0]); @@ -716,6 +720,46 @@ TEST(FunctionTest, LongArgumentList) { F::MakeResultIgnoredValue>::value)); } +TEST(Base64Unescape, InvalidString) { + std::string unescaped; + EXPECT_FALSE(Base64Unescape("(invalid)", &unescaped)); +} + +TEST(Base64Unescape, ShortString) { + std::string unescaped; + EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQh", &unescaped)); + EXPECT_EQ("Hello world!", unescaped); +} + +TEST(Base64Unescape, ShortStringWithPadding) { + std::string unescaped; + EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQ=", &unescaped)); + EXPECT_EQ("Hello world", unescaped); +} + +TEST(Base64Unescape, ShortStringWithoutPadding) { + std::string unescaped; + EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQ", &unescaped)); + EXPECT_EQ("Hello world", unescaped); +} + +TEST(Base64Unescape, LongStringWithWhiteSpaces) { + std::string escaped = + R"(TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlz + 
IHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2Yg + dGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGlu + dWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRo + ZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=)"; + std::string expected = + "Man is distinguished, not only by his reason, but by this singular " + "passion from other animals, which is a lust of the mind, that by a " + "perseverance of delight in the continued and indefatigable generation " + "of knowledge, exceeds the short vehemence of any carnal pleasure."; + std::string unescaped; + EXPECT_TRUE(Base64Unescape(escaped, &unescaped)); + EXPECT_EQ(expected, unescaped); +} + } // namespace } // namespace internal } // namespace testing diff --git a/ext/googletest/googlemock/test/gmock-matchers-arithmetic_test.cc b/ext/googletest/googlemock/test/gmock-matchers-arithmetic_test.cc new file mode 100644 index 0000000000..a4c1def6eb --- /dev/null +++ b/ext/googletest/googlemock/test/gmock-matchers-arithmetic_test.cc @@ -0,0 +1,1517 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file tests some commonly used argument matchers. + +// Silence warning C4244: 'initializing': conversion from 'int' to 'short', +// possible loss of data and C4100, unreferenced local parameter +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4244) +#pragma warning(disable : 4100) +#endif + +#include "test/gmock-matchers_test.h" + +namespace testing { +namespace gmock_matchers_test { +namespace { + +typedef ::std::tuple Tuple2; // NOLINT + +// Tests that Eq() matches a 2-tuple where the first field == the +// second field. +TEST(Eq2Test, MatchesEqualArguments) { + Matcher m = Eq(); + EXPECT_TRUE(m.Matches(Tuple2(5L, 5))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 6))); +} + +// Tests that Eq() describes itself properly. +TEST(Eq2Test, CanDescribeSelf) { + Matcher m = Eq(); + EXPECT_EQ("are an equal pair", Describe(m)); +} + +// Tests that Ge() matches a 2-tuple where the first field >= the +// second field. 
+TEST(Ge2Test, MatchesGreaterThanOrEqualArguments) { + Matcher m = Ge(); + EXPECT_TRUE(m.Matches(Tuple2(5L, 4))); + EXPECT_TRUE(m.Matches(Tuple2(5L, 5))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 6))); +} + +// Tests that Ge() describes itself properly. +TEST(Ge2Test, CanDescribeSelf) { + Matcher m = Ge(); + EXPECT_EQ("are a pair where the first >= the second", Describe(m)); +} + +// Tests that Gt() matches a 2-tuple where the first field > the +// second field. +TEST(Gt2Test, MatchesGreaterThanArguments) { + Matcher m = Gt(); + EXPECT_TRUE(m.Matches(Tuple2(5L, 4))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 5))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 6))); +} + +// Tests that Gt() describes itself properly. +TEST(Gt2Test, CanDescribeSelf) { + Matcher m = Gt(); + EXPECT_EQ("are a pair where the first > the second", Describe(m)); +} + +// Tests that Le() matches a 2-tuple where the first field <= the +// second field. +TEST(Le2Test, MatchesLessThanOrEqualArguments) { + Matcher m = Le(); + EXPECT_TRUE(m.Matches(Tuple2(5L, 6))); + EXPECT_TRUE(m.Matches(Tuple2(5L, 5))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 4))); +} + +// Tests that Le() describes itself properly. +TEST(Le2Test, CanDescribeSelf) { + Matcher m = Le(); + EXPECT_EQ("are a pair where the first <= the second", Describe(m)); +} + +// Tests that Lt() matches a 2-tuple where the first field < the +// second field. +TEST(Lt2Test, MatchesLessThanArguments) { + Matcher m = Lt(); + EXPECT_TRUE(m.Matches(Tuple2(5L, 6))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 5))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 4))); +} + +// Tests that Lt() describes itself properly. +TEST(Lt2Test, CanDescribeSelf) { + Matcher m = Lt(); + EXPECT_EQ("are a pair where the first < the second", Describe(m)); +} + +// Tests that Ne() matches a 2-tuple where the first field != the +// second field. 
+TEST(Ne2Test, MatchesUnequalArguments) { + Matcher m = Ne(); + EXPECT_TRUE(m.Matches(Tuple2(5L, 6))); + EXPECT_TRUE(m.Matches(Tuple2(5L, 4))); + EXPECT_FALSE(m.Matches(Tuple2(5L, 5))); +} + +// Tests that Ne() describes itself properly. +TEST(Ne2Test, CanDescribeSelf) { + Matcher m = Ne(); + EXPECT_EQ("are an unequal pair", Describe(m)); +} + +TEST(PairMatchBaseTest, WorksWithMoveOnly) { + using Pointers = std::tuple, std::unique_ptr>; + Matcher matcher = Eq(); + Pointers pointers; + // Tested values don't matter; the point is that matcher does not copy the + // matched values. + EXPECT_TRUE(matcher.Matches(pointers)); +} + +// Tests that IsNan() matches a NaN, with float. +TEST(IsNan, FloatMatchesNan) { + float quiet_nan = std::numeric_limits::quiet_NaN(); + float other_nan = std::nanf("1"); + float real_value = 1.0f; + + Matcher m = IsNan(); + EXPECT_TRUE(m.Matches(quiet_nan)); + EXPECT_TRUE(m.Matches(other_nan)); + EXPECT_FALSE(m.Matches(real_value)); + + Matcher m_ref = IsNan(); + EXPECT_TRUE(m_ref.Matches(quiet_nan)); + EXPECT_TRUE(m_ref.Matches(other_nan)); + EXPECT_FALSE(m_ref.Matches(real_value)); + + Matcher m_cref = IsNan(); + EXPECT_TRUE(m_cref.Matches(quiet_nan)); + EXPECT_TRUE(m_cref.Matches(other_nan)); + EXPECT_FALSE(m_cref.Matches(real_value)); +} + +// Tests that IsNan() matches a NaN, with double. +TEST(IsNan, DoubleMatchesNan) { + double quiet_nan = std::numeric_limits::quiet_NaN(); + double other_nan = std::nan("1"); + double real_value = 1.0; + + Matcher m = IsNan(); + EXPECT_TRUE(m.Matches(quiet_nan)); + EXPECT_TRUE(m.Matches(other_nan)); + EXPECT_FALSE(m.Matches(real_value)); + + Matcher m_ref = IsNan(); + EXPECT_TRUE(m_ref.Matches(quiet_nan)); + EXPECT_TRUE(m_ref.Matches(other_nan)); + EXPECT_FALSE(m_ref.Matches(real_value)); + + Matcher m_cref = IsNan(); + EXPECT_TRUE(m_cref.Matches(quiet_nan)); + EXPECT_TRUE(m_cref.Matches(other_nan)); + EXPECT_FALSE(m_cref.Matches(real_value)); +} + +// Tests that IsNan() matches a NaN, with long double. 
+TEST(IsNan, LongDoubleMatchesNan) { + long double quiet_nan = std::numeric_limits::quiet_NaN(); + long double other_nan = std::nan("1"); + long double real_value = 1.0; + + Matcher m = IsNan(); + EXPECT_TRUE(m.Matches(quiet_nan)); + EXPECT_TRUE(m.Matches(other_nan)); + EXPECT_FALSE(m.Matches(real_value)); + + Matcher m_ref = IsNan(); + EXPECT_TRUE(m_ref.Matches(quiet_nan)); + EXPECT_TRUE(m_ref.Matches(other_nan)); + EXPECT_FALSE(m_ref.Matches(real_value)); + + Matcher m_cref = IsNan(); + EXPECT_TRUE(m_cref.Matches(quiet_nan)); + EXPECT_TRUE(m_cref.Matches(other_nan)); + EXPECT_FALSE(m_cref.Matches(real_value)); +} + +// Tests that IsNan() works with Not. +TEST(IsNan, NotMatchesNan) { + Matcher mf = Not(IsNan()); + EXPECT_FALSE(mf.Matches(std::numeric_limits::quiet_NaN())); + EXPECT_FALSE(mf.Matches(std::nanf("1"))); + EXPECT_TRUE(mf.Matches(1.0)); + + Matcher md = Not(IsNan()); + EXPECT_FALSE(md.Matches(std::numeric_limits::quiet_NaN())); + EXPECT_FALSE(md.Matches(std::nan("1"))); + EXPECT_TRUE(md.Matches(1.0)); + + Matcher mld = Not(IsNan()); + EXPECT_FALSE(mld.Matches(std::numeric_limits::quiet_NaN())); + EXPECT_FALSE(mld.Matches(std::nanl("1"))); + EXPECT_TRUE(mld.Matches(1.0)); +} + +// Tests that IsNan() can describe itself. +TEST(IsNan, CanDescribeSelf) { + Matcher mf = IsNan(); + EXPECT_EQ("is NaN", Describe(mf)); + + Matcher md = IsNan(); + EXPECT_EQ("is NaN", Describe(md)); + + Matcher mld = IsNan(); + EXPECT_EQ("is NaN", Describe(mld)); +} + +// Tests that IsNan() can describe itself with Not. +TEST(IsNan, CanDescribeSelfWithNot) { + Matcher mf = Not(IsNan()); + EXPECT_EQ("isn't NaN", Describe(mf)); + + Matcher md = Not(IsNan()); + EXPECT_EQ("isn't NaN", Describe(md)); + + Matcher mld = Not(IsNan()); + EXPECT_EQ("isn't NaN", Describe(mld)); +} + +// Tests that FloatEq() matches a 2-tuple where +// FloatEq(first field) matches the second field. 
+TEST(FloatEq2Test, MatchesEqualArguments) { + typedef ::std::tuple Tpl; + Matcher m = FloatEq(); + EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(0.3f, 0.1f + 0.1f + 0.1f))); + EXPECT_FALSE(m.Matches(Tpl(1.1f, 1.0f))); +} + +// Tests that FloatEq() describes itself properly. +TEST(FloatEq2Test, CanDescribeSelf) { + Matcher&> m = FloatEq(); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that NanSensitiveFloatEq() matches a 2-tuple where +// NanSensitiveFloatEq(first field) matches the second field. +TEST(NanSensitiveFloatEqTest, MatchesEqualArgumentsWithNaN) { + typedef ::std::tuple Tpl; + Matcher m = NanSensitiveFloatEq(); + EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(1.1f, 1.0f))); + EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); +} + +// Tests that NanSensitiveFloatEq() describes itself properly. +TEST(NanSensitiveFloatEqTest, CanDescribeSelfWithNaNs) { + Matcher&> m = NanSensitiveFloatEq(); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that DoubleEq() matches a 2-tuple where +// DoubleEq(first field) matches the second field. +TEST(DoubleEq2Test, MatchesEqualArguments) { + typedef ::std::tuple Tpl; + Matcher m = DoubleEq(); + EXPECT_TRUE(m.Matches(Tpl(1.0, 1.0))); + EXPECT_TRUE(m.Matches(Tpl(0.3, 0.1 + 0.1 + 0.1))); + EXPECT_FALSE(m.Matches(Tpl(1.1, 1.0))); +} + +// Tests that DoubleEq() describes itself properly. +TEST(DoubleEq2Test, CanDescribeSelf) { + Matcher&> m = DoubleEq(); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that NanSensitiveDoubleEq() matches a 2-tuple where +// NanSensitiveDoubleEq(first field) matches the second field. 
+TEST(NanSensitiveDoubleEqTest, MatchesEqualArgumentsWithNaN) { + typedef ::std::tuple Tpl; + Matcher m = NanSensitiveDoubleEq(); + EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(1.1f, 1.0f))); + EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); +} + +// Tests that DoubleEq() describes itself properly. +TEST(NanSensitiveDoubleEqTest, CanDescribeSelfWithNaNs) { + Matcher&> m = NanSensitiveDoubleEq(); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that FloatEq() matches a 2-tuple where +// FloatNear(first field, max_abs_error) matches the second field. +TEST(FloatNear2Test, MatchesEqualArguments) { + typedef ::std::tuple Tpl; + Matcher m = FloatNear(0.5f); + EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(1.3f, 1.0f))); + EXPECT_FALSE(m.Matches(Tpl(1.8f, 1.0f))); +} + +// Tests that FloatNear() describes itself properly. +TEST(FloatNear2Test, CanDescribeSelf) { + Matcher&> m = FloatNear(0.5f); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that NanSensitiveFloatNear() matches a 2-tuple where +// NanSensitiveFloatNear(first field) matches the second field. +TEST(NanSensitiveFloatNearTest, MatchesNearbyArgumentsWithNaN) { + typedef ::std::tuple Tpl; + Matcher m = NanSensitiveFloatNear(0.5f); + EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(1.1f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(1.6f, 1.0f))); + EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); +} + +// Tests that NanSensitiveFloatNear() describes itself properly. 
+TEST(NanSensitiveFloatNearTest, CanDescribeSelfWithNaNs) { + Matcher&> m = NanSensitiveFloatNear(0.5f); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that FloatEq() matches a 2-tuple where +// DoubleNear(first field, max_abs_error) matches the second field. +TEST(DoubleNear2Test, MatchesEqualArguments) { + typedef ::std::tuple Tpl; + Matcher m = DoubleNear(0.5); + EXPECT_TRUE(m.Matches(Tpl(1.0, 1.0))); + EXPECT_TRUE(m.Matches(Tpl(1.3, 1.0))); + EXPECT_FALSE(m.Matches(Tpl(1.8, 1.0))); +} + +// Tests that DoubleNear() describes itself properly. +TEST(DoubleNear2Test, CanDescribeSelf) { + Matcher&> m = DoubleNear(0.5); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that NanSensitiveDoubleNear() matches a 2-tuple where +// NanSensitiveDoubleNear(first field) matches the second field. +TEST(NanSensitiveDoubleNearTest, MatchesNearbyArgumentsWithNaN) { + typedef ::std::tuple Tpl; + Matcher m = NanSensitiveDoubleNear(0.5f); + EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(1.1f, 1.0f))); + EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(1.6f, 1.0f))); + EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); + EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); +} + +// Tests that NanSensitiveDoubleNear() describes itself properly. +TEST(NanSensitiveDoubleNearTest, CanDescribeSelfWithNaNs) { + Matcher&> m = NanSensitiveDoubleNear(0.5f); + EXPECT_EQ("are an almost-equal pair", Describe(m)); +} + +// Tests that Not(m) matches any value that doesn't match m. +TEST(NotTest, NegatesMatcher) { + Matcher m; + m = Not(Eq(2)); + EXPECT_TRUE(m.Matches(3)); + EXPECT_FALSE(m.Matches(2)); +} + +// Tests that Not(m) describes itself properly. 
+TEST(NotTest, CanDescribeSelf) { + Matcher m = Not(Eq(5)); + EXPECT_EQ("isn't equal to 5", Describe(m)); +} + +// Tests that monomorphic matchers are safely cast by the Not matcher. +TEST(NotTest, NotMatcherSafelyCastsMonomorphicMatchers) { + // greater_than_5 is a monomorphic matcher. + Matcher greater_than_5 = Gt(5); + + Matcher m = Not(greater_than_5); + Matcher m2 = Not(greater_than_5); + Matcher m3 = Not(m); +} + +// Helper to allow easy testing of AllOf matchers with num parameters. +void AllOfMatches(int num, const Matcher& m) { + SCOPED_TRACE(Describe(m)); + EXPECT_TRUE(m.Matches(0)); + for (int i = 1; i <= num; ++i) { + EXPECT_FALSE(m.Matches(i)); + } + EXPECT_TRUE(m.Matches(num + 1)); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(AllOfTest); + +// Tests that AllOf(m1, ..., mn) matches any value that matches all of +// the given matchers. +TEST(AllOfTest, MatchesWhenAllMatch) { + Matcher m; + m = AllOf(Le(2), Ge(1)); + EXPECT_TRUE(m.Matches(1)); + EXPECT_TRUE(m.Matches(2)); + EXPECT_FALSE(m.Matches(0)); + EXPECT_FALSE(m.Matches(3)); + + m = AllOf(Gt(0), Ne(1), Ne(2)); + EXPECT_TRUE(m.Matches(3)); + EXPECT_FALSE(m.Matches(2)); + EXPECT_FALSE(m.Matches(1)); + EXPECT_FALSE(m.Matches(0)); + + m = AllOf(Gt(0), Ne(1), Ne(2), Ne(3)); + EXPECT_TRUE(m.Matches(4)); + EXPECT_FALSE(m.Matches(3)); + EXPECT_FALSE(m.Matches(2)); + EXPECT_FALSE(m.Matches(1)); + EXPECT_FALSE(m.Matches(0)); + + m = AllOf(Ge(0), Lt(10), Ne(3), Ne(5), Ne(7)); + EXPECT_TRUE(m.Matches(0)); + EXPECT_TRUE(m.Matches(1)); + EXPECT_FALSE(m.Matches(3)); + + // The following tests for varying number of sub-matchers. Due to the way + // the sub-matchers are handled it is enough to test every sub-matcher once + // with sub-matchers using the same matcher type. Varying matcher types are + // checked for above. 
+ AllOfMatches(2, AllOf(Ne(1), Ne(2))); + AllOfMatches(3, AllOf(Ne(1), Ne(2), Ne(3))); + AllOfMatches(4, AllOf(Ne(1), Ne(2), Ne(3), Ne(4))); + AllOfMatches(5, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5))); + AllOfMatches(6, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6))); + AllOfMatches(7, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7))); + AllOfMatches(8, + AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8))); + AllOfMatches( + 9, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), Ne(9))); + AllOfMatches(10, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), + Ne(9), Ne(10))); + AllOfMatches( + 50, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), Ne(9), + Ne(10), Ne(11), Ne(12), Ne(13), Ne(14), Ne(15), Ne(16), Ne(17), + Ne(18), Ne(19), Ne(20), Ne(21), Ne(22), Ne(23), Ne(24), Ne(25), + Ne(26), Ne(27), Ne(28), Ne(29), Ne(30), Ne(31), Ne(32), Ne(33), + Ne(34), Ne(35), Ne(36), Ne(37), Ne(38), Ne(39), Ne(40), Ne(41), + Ne(42), Ne(43), Ne(44), Ne(45), Ne(46), Ne(47), Ne(48), Ne(49), + Ne(50))); +} + +// Tests that AllOf(m1, ..., mn) describes itself properly. +TEST(AllOfTest, CanDescribeSelf) { + Matcher m; + m = AllOf(Le(2), Ge(1)); + EXPECT_EQ("(is <= 2) and (is >= 1)", Describe(m)); + + m = AllOf(Gt(0), Ne(1), Ne(2)); + std::string expected_descr1 = + "(is > 0) and (isn't equal to 1) and (isn't equal to 2)"; + EXPECT_EQ(expected_descr1, Describe(m)); + + m = AllOf(Gt(0), Ne(1), Ne(2), Ne(3)); + std::string expected_descr2 = + "(is > 0) and (isn't equal to 1) and (isn't equal to 2) and (isn't equal " + "to 3)"; + EXPECT_EQ(expected_descr2, Describe(m)); + + m = AllOf(Ge(0), Lt(10), Ne(3), Ne(5), Ne(7)); + std::string expected_descr3 = + "(is >= 0) and (is < 10) and (isn't equal to 3) and (isn't equal to 5) " + "and (isn't equal to 7)"; + EXPECT_EQ(expected_descr3, Describe(m)); +} + +// Tests that AllOf(m1, ..., mn) describes its negation properly. 
+TEST(AllOfTest, CanDescribeNegation) { + Matcher m; + m = AllOf(Le(2), Ge(1)); + std::string expected_descr4 = "(isn't <= 2) or (isn't >= 1)"; + EXPECT_EQ(expected_descr4, DescribeNegation(m)); + + m = AllOf(Gt(0), Ne(1), Ne(2)); + std::string expected_descr5 = + "(isn't > 0) or (is equal to 1) or (is equal to 2)"; + EXPECT_EQ(expected_descr5, DescribeNegation(m)); + + m = AllOf(Gt(0), Ne(1), Ne(2), Ne(3)); + std::string expected_descr6 = + "(isn't > 0) or (is equal to 1) or (is equal to 2) or (is equal to 3)"; + EXPECT_EQ(expected_descr6, DescribeNegation(m)); + + m = AllOf(Ge(0), Lt(10), Ne(3), Ne(5), Ne(7)); + std::string expected_desr7 = + "(isn't >= 0) or (isn't < 10) or (is equal to 3) or (is equal to 5) or " + "(is equal to 7)"; + EXPECT_EQ(expected_desr7, DescribeNegation(m)); + + m = AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), Ne(9), + Ne(10), Ne(11)); + AllOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); + EXPECT_THAT(Describe(m), EndsWith("and (isn't equal to 11)")); + AllOfMatches(11, m); +} + +// Tests that monomorphic matchers are safely cast by the AllOf matcher. +TEST(AllOfTest, AllOfMatcherSafelyCastsMonomorphicMatchers) { + // greater_than_5 and less_than_10 are monomorphic matchers. + Matcher greater_than_5 = Gt(5); + Matcher less_than_10 = Lt(10); + + Matcher m = AllOf(greater_than_5, less_than_10); + Matcher m2 = AllOf(greater_than_5, less_than_10); + Matcher m3 = AllOf(greater_than_5, m2); + + // Tests that BothOf works when composing itself. + Matcher m4 = AllOf(greater_than_5, less_than_10, less_than_10); + Matcher m5 = AllOf(greater_than_5, less_than_10, less_than_10); +} + +TEST_P(AllOfTestP, ExplainsResult) { + Matcher m; + + // Successful match. Both matchers need to explain. The second + // matcher doesn't give an explanation, so only the first matcher's + // explanation is printed. + m = AllOf(GreaterThan(10), Lt(30)); + EXPECT_EQ("which is 15 more than 10", Explain(m, 25)); + + // Successful match. 
Both matchers need to explain. + m = AllOf(GreaterThan(10), GreaterThan(20)); + EXPECT_EQ("which is 20 more than 10, and which is 10 more than 20", + Explain(m, 30)); + + // Successful match. All matchers need to explain. The second + // matcher doesn't given an explanation. + m = AllOf(GreaterThan(10), Lt(30), GreaterThan(20)); + EXPECT_EQ("which is 15 more than 10, and which is 5 more than 20", + Explain(m, 25)); + + // Successful match. All matchers need to explain. + m = AllOf(GreaterThan(10), GreaterThan(20), GreaterThan(30)); + EXPECT_EQ( + "which is 30 more than 10, and which is 20 more than 20, " + "and which is 10 more than 30", + Explain(m, 40)); + + // Failed match. The first matcher, which failed, needs to + // explain. + m = AllOf(GreaterThan(10), GreaterThan(20)); + EXPECT_EQ("which is 5 less than 10", Explain(m, 5)); + + // Failed match. The second matcher, which failed, needs to + // explain. Since it doesn't given an explanation, nothing is + // printed. + m = AllOf(GreaterThan(10), Lt(30)); + EXPECT_EQ("", Explain(m, 40)); + + // Failed match. The second matcher, which failed, needs to + // explain. + m = AllOf(GreaterThan(10), GreaterThan(20)); + EXPECT_EQ("which is 5 less than 20", Explain(m, 15)); +} + +// Helper to allow easy testing of AnyOf matchers with num parameters. +static void AnyOfMatches(int num, const Matcher& m) { + SCOPED_TRACE(Describe(m)); + EXPECT_FALSE(m.Matches(0)); + for (int i = 1; i <= num; ++i) { + EXPECT_TRUE(m.Matches(i)); + } + EXPECT_FALSE(m.Matches(num + 1)); +} + +static void AnyOfStringMatches(int num, const Matcher& m) { + SCOPED_TRACE(Describe(m)); + EXPECT_FALSE(m.Matches(std::to_string(0))); + + for (int i = 1; i <= num; ++i) { + EXPECT_TRUE(m.Matches(std::to_string(i))); + } + EXPECT_FALSE(m.Matches(std::to_string(num + 1))); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(AnyOfTest); + +// Tests that AnyOf(m1, ..., mn) matches any value that matches at +// least one of the given matchers. 
+TEST(AnyOfTest, MatchesWhenAnyMatches) { + Matcher m; + m = AnyOf(Le(1), Ge(3)); + EXPECT_TRUE(m.Matches(1)); + EXPECT_TRUE(m.Matches(4)); + EXPECT_FALSE(m.Matches(2)); + + m = AnyOf(Lt(0), Eq(1), Eq(2)); + EXPECT_TRUE(m.Matches(-1)); + EXPECT_TRUE(m.Matches(1)); + EXPECT_TRUE(m.Matches(2)); + EXPECT_FALSE(m.Matches(0)); + + m = AnyOf(Lt(0), Eq(1), Eq(2), Eq(3)); + EXPECT_TRUE(m.Matches(-1)); + EXPECT_TRUE(m.Matches(1)); + EXPECT_TRUE(m.Matches(2)); + EXPECT_TRUE(m.Matches(3)); + EXPECT_FALSE(m.Matches(0)); + + m = AnyOf(Le(0), Gt(10), 3, 5, 7); + EXPECT_TRUE(m.Matches(0)); + EXPECT_TRUE(m.Matches(11)); + EXPECT_TRUE(m.Matches(3)); + EXPECT_FALSE(m.Matches(2)); + + // The following tests for varying number of sub-matchers. Due to the way + // the sub-matchers are handled it is enough to test every sub-matcher once + // with sub-matchers using the same matcher type. Varying matcher types are + // checked for above. + AnyOfMatches(2, AnyOf(1, 2)); + AnyOfMatches(3, AnyOf(1, 2, 3)); + AnyOfMatches(4, AnyOf(1, 2, 3, 4)); + AnyOfMatches(5, AnyOf(1, 2, 3, 4, 5)); + AnyOfMatches(6, AnyOf(1, 2, 3, 4, 5, 6)); + AnyOfMatches(7, AnyOf(1, 2, 3, 4, 5, 6, 7)); + AnyOfMatches(8, AnyOf(1, 2, 3, 4, 5, 6, 7, 8)); + AnyOfMatches(9, AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9)); + AnyOfMatches(10, AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); +} + +// Tests the variadic version of the AnyOfMatcher. +TEST(AnyOfTest, VariadicMatchesWhenAnyMatches) { + // Also make sure AnyOf is defined in the right namespace and does not depend + // on ADL. 
+ Matcher m = ::testing::AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); + + EXPECT_THAT(Describe(m), EndsWith("or (is equal to 11)")); + AnyOfMatches(11, m); + AnyOfMatches(50, AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50)); + AnyOfStringMatches( + 50, AnyOf("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", + "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", + "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", + "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", + "43", "44", "45", "46", "47", "48", "49", "50")); +} + +TEST(ConditionalTest, MatchesFirstIfCondition) { + Matcher eq_red = Eq("red"); + Matcher ne_red = Ne("red"); + Matcher m = Conditional(true, eq_red, ne_red); + EXPECT_TRUE(m.Matches("red")); + EXPECT_FALSE(m.Matches("green")); + + StringMatchResultListener listener; + StringMatchResultListener expected; + EXPECT_FALSE(m.MatchAndExplain("green", &listener)); + EXPECT_FALSE(eq_red.MatchAndExplain("green", &expected)); + EXPECT_THAT(listener.str(), Eq(expected.str())); +} + +TEST(ConditionalTest, MatchesSecondIfCondition) { + Matcher eq_red = Eq("red"); + Matcher ne_red = Ne("red"); + Matcher m = Conditional(false, eq_red, ne_red); + EXPECT_FALSE(m.Matches("red")); + EXPECT_TRUE(m.Matches("green")); + + StringMatchResultListener listener; + StringMatchResultListener expected; + EXPECT_FALSE(m.MatchAndExplain("red", &listener)); + EXPECT_FALSE(ne_red.MatchAndExplain("red", &expected)); + EXPECT_THAT(listener.str(), Eq(expected.str())); +} + +// Tests that AnyOf(m1, ..., mn) describes itself properly. 
+TEST(AnyOfTest, CanDescribeSelf) { + Matcher m; + m = AnyOf(Le(1), Ge(3)); + + EXPECT_EQ("(is <= 1) or (is >= 3)", Describe(m)); + + m = AnyOf(Lt(0), Eq(1), Eq(2)); + EXPECT_EQ("(is < 0) or (is equal to 1) or (is equal to 2)", Describe(m)); + + m = AnyOf(Lt(0), Eq(1), Eq(2), Eq(3)); + EXPECT_EQ("(is < 0) or (is equal to 1) or (is equal to 2) or (is equal to 3)", + Describe(m)); + + m = AnyOf(Le(0), Gt(10), 3, 5, 7); + EXPECT_EQ( + "(is <= 0) or (is > 10) or (is equal to 3) or (is equal to 5) or (is " + "equal to 7)", + Describe(m)); +} + +// Tests that AnyOf(m1, ..., mn) describes its negation properly. +TEST(AnyOfTest, CanDescribeNegation) { + Matcher m; + m = AnyOf(Le(1), Ge(3)); + EXPECT_EQ("(isn't <= 1) and (isn't >= 3)", DescribeNegation(m)); + + m = AnyOf(Lt(0), Eq(1), Eq(2)); + EXPECT_EQ("(isn't < 0) and (isn't equal to 1) and (isn't equal to 2)", + DescribeNegation(m)); + + m = AnyOf(Lt(0), Eq(1), Eq(2), Eq(3)); + EXPECT_EQ( + "(isn't < 0) and (isn't equal to 1) and (isn't equal to 2) and (isn't " + "equal to 3)", + DescribeNegation(m)); + + m = AnyOf(Le(0), Gt(10), 3, 5, 7); + EXPECT_EQ( + "(isn't <= 0) and (isn't > 10) and (isn't equal to 3) and (isn't equal " + "to 5) and (isn't equal to 7)", + DescribeNegation(m)); +} + +// Tests that monomorphic matchers are safely cast by the AnyOf matcher. +TEST(AnyOfTest, AnyOfMatcherSafelyCastsMonomorphicMatchers) { + // greater_than_5 and less_than_10 are monomorphic matchers. + Matcher greater_than_5 = Gt(5); + Matcher less_than_10 = Lt(10); + + Matcher m = AnyOf(greater_than_5, less_than_10); + Matcher m2 = AnyOf(greater_than_5, less_than_10); + Matcher m3 = AnyOf(greater_than_5, m2); + + // Tests that EitherOf works when composing itself. + Matcher m4 = AnyOf(greater_than_5, less_than_10, less_than_10); + Matcher m5 = AnyOf(greater_than_5, less_than_10, less_than_10); +} + +TEST_P(AnyOfTestP, ExplainsResult) { + Matcher m; + + // Failed match. Both matchers need to explain. 
The second + // matcher doesn't give an explanation, so only the first matcher's + // explanation is printed. + m = AnyOf(GreaterThan(10), Lt(0)); + EXPECT_EQ("which is 5 less than 10", Explain(m, 5)); + + // Failed match. Both matchers need to explain. + m = AnyOf(GreaterThan(10), GreaterThan(20)); + EXPECT_EQ("which is 5 less than 10, and which is 15 less than 20", + Explain(m, 5)); + + // Failed match. All matchers need to explain. The second + // matcher doesn't given an explanation. + m = AnyOf(GreaterThan(10), Gt(20), GreaterThan(30)); + EXPECT_EQ("which is 5 less than 10, and which is 25 less than 30", + Explain(m, 5)); + + // Failed match. All matchers need to explain. + m = AnyOf(GreaterThan(10), GreaterThan(20), GreaterThan(30)); + EXPECT_EQ( + "which is 5 less than 10, and which is 15 less than 20, " + "and which is 25 less than 30", + Explain(m, 5)); + + // Successful match. The first matcher, which succeeded, needs to + // explain. + m = AnyOf(GreaterThan(10), GreaterThan(20)); + EXPECT_EQ("which is 5 more than 10", Explain(m, 15)); + + // Successful match. The second matcher, which succeeded, needs to + // explain. Since it doesn't given an explanation, nothing is + // printed. + m = AnyOf(GreaterThan(10), Lt(30)); + EXPECT_EQ("", Explain(m, 0)); + + // Successful match. The second matcher, which succeeded, needs to + // explain. + m = AnyOf(GreaterThan(30), GreaterThan(20)); + EXPECT_EQ("which is 5 more than 20", Explain(m, 25)); +} + +// The following predicate function and predicate functor are for +// testing the Truly(predicate) matcher. + +// Returns non-zero if the input is positive. Note that the return +// type of this function is not bool. It's OK as Truly() accepts any +// unary function or functor whose return type can be implicitly +// converted to bool. +int IsPositive(double x) { return x > 0 ? 1 : 0; } + +// This functor returns true if the input is greater than the given +// number. 
+class IsGreaterThan { + public: + explicit IsGreaterThan(int threshold) : threshold_(threshold) {} + + bool operator()(int n) const { return n > threshold_; } + + private: + int threshold_; +}; + +// For testing Truly(). +const int foo = 0; + +// This predicate returns true if and only if the argument references foo and +// has a zero value. +bool ReferencesFooAndIsZero(const int& n) { return (&n == &foo) && (n == 0); } + +// Tests that Truly(predicate) matches what satisfies the given +// predicate. +TEST(TrulyTest, MatchesWhatSatisfiesThePredicate) { + Matcher m = Truly(IsPositive); + EXPECT_TRUE(m.Matches(2.0)); + EXPECT_FALSE(m.Matches(-1.5)); +} + +// Tests that Truly(predicate_functor) works too. +TEST(TrulyTest, CanBeUsedWithFunctor) { + Matcher m = Truly(IsGreaterThan(5)); + EXPECT_TRUE(m.Matches(6)); + EXPECT_FALSE(m.Matches(4)); +} + +// A class that can be implicitly converted to bool. +class ConvertibleToBool { + public: + explicit ConvertibleToBool(int number) : number_(number) {} + operator bool() const { return number_ != 0; } + + private: + int number_; +}; + +ConvertibleToBool IsNotZero(int number) { return ConvertibleToBool(number); } + +// Tests that the predicate used in Truly() may return a class that's +// implicitly convertible to bool, even when the class has no +// operator!(). +TEST(TrulyTest, PredicateCanReturnAClassConvertibleToBool) { + Matcher m = Truly(IsNotZero); + EXPECT_TRUE(m.Matches(1)); + EXPECT_FALSE(m.Matches(0)); +} + +// Tests that Truly(predicate) can describe itself properly. +TEST(TrulyTest, CanDescribeSelf) { + Matcher m = Truly(IsPositive); + EXPECT_EQ("satisfies the given predicate", Describe(m)); +} + +// Tests that Truly(predicate) works when the matcher takes its +// argument by reference. 
+TEST(TrulyTest, WorksForByRefArguments) { + Matcher m = Truly(ReferencesFooAndIsZero); + EXPECT_TRUE(m.Matches(foo)); + int n = 0; + EXPECT_FALSE(m.Matches(n)); +} + +// Tests that Truly(predicate) provides a helpful reason when it fails. +TEST(TrulyTest, ExplainsFailures) { + StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult(Truly(IsPositive), -1, &listener)); + EXPECT_EQ(listener.str(), "didn't satisfy the given predicate"); +} + +// Tests that Matches(m) is a predicate satisfied by whatever that +// matches matcher m. +TEST(MatchesTest, IsSatisfiedByWhatMatchesTheMatcher) { + EXPECT_TRUE(Matches(Ge(0))(1)); + EXPECT_FALSE(Matches(Eq('a'))('b')); +} + +// Tests that Matches(m) works when the matcher takes its argument by +// reference. +TEST(MatchesTest, WorksOnByRefArguments) { + int m = 0, n = 0; + EXPECT_TRUE(Matches(AllOf(Ref(n), Eq(0)))(n)); + EXPECT_FALSE(Matches(Ref(m))(n)); +} + +// Tests that a Matcher on non-reference type can be used in +// Matches(). +TEST(MatchesTest, WorksWithMatcherOnNonRefType) { + Matcher eq5 = Eq(5); + EXPECT_TRUE(Matches(eq5)(5)); + EXPECT_FALSE(Matches(eq5)(2)); +} + +// Tests Value(value, matcher). Since Value() is a simple wrapper for +// Matches(), which has been tested already, we don't spend a lot of +// effort on testing Value(). 
+TEST(ValueTest, WorksWithPolymorphicMatcher) { + EXPECT_TRUE(Value("hi", StartsWith("h"))); + EXPECT_FALSE(Value(5, Gt(10))); +} + +TEST(ValueTest, WorksWithMonomorphicMatcher) { + const Matcher is_zero = Eq(0); + EXPECT_TRUE(Value(0, is_zero)); + EXPECT_FALSE(Value('a', is_zero)); + + int n = 0; + const Matcher ref_n = Ref(n); + EXPECT_TRUE(Value(n, ref_n)); + EXPECT_FALSE(Value(1, ref_n)); +} + +TEST(AllArgsTest, WorksForTuple) { + EXPECT_THAT(std::make_tuple(1, 2L), AllArgs(Lt())); + EXPECT_THAT(std::make_tuple(2L, 1), Not(AllArgs(Lt()))); +} + +TEST(AllArgsTest, WorksForNonTuple) { + EXPECT_THAT(42, AllArgs(Gt(0))); + EXPECT_THAT('a', Not(AllArgs(Eq('b')))); +} + +class AllArgsHelper { + public: + AllArgsHelper() {} + + MOCK_METHOD2(Helper, int(char x, int y)); + + private: + AllArgsHelper(const AllArgsHelper&) = delete; + AllArgsHelper& operator=(const AllArgsHelper&) = delete; +}; + +TEST(AllArgsTest, WorksInWithClause) { + AllArgsHelper helper; + ON_CALL(helper, Helper(_, _)).With(AllArgs(Lt())).WillByDefault(Return(1)); + EXPECT_CALL(helper, Helper(_, _)); + EXPECT_CALL(helper, Helper(_, _)).With(AllArgs(Gt())).WillOnce(Return(2)); + + EXPECT_EQ(1, helper.Helper('\1', 2)); + EXPECT_EQ(2, helper.Helper('a', 1)); +} + +class OptionalMatchersHelper { + public: + OptionalMatchersHelper() {} + + MOCK_METHOD0(NoArgs, int()); + + MOCK_METHOD1(OneArg, int(int y)); + + MOCK_METHOD2(TwoArgs, int(char x, int y)); + + MOCK_METHOD1(Overloaded, int(char x)); + MOCK_METHOD2(Overloaded, int(char x, int y)); + + private: + OptionalMatchersHelper(const OptionalMatchersHelper&) = delete; + OptionalMatchersHelper& operator=(const OptionalMatchersHelper&) = delete; +}; + +TEST(AllArgsTest, WorksWithoutMatchers) { + OptionalMatchersHelper helper; + + ON_CALL(helper, NoArgs).WillByDefault(Return(10)); + ON_CALL(helper, OneArg).WillByDefault(Return(20)); + ON_CALL(helper, TwoArgs).WillByDefault(Return(30)); + + EXPECT_EQ(10, helper.NoArgs()); + EXPECT_EQ(20, helper.OneArg(1)); + 
EXPECT_EQ(30, helper.TwoArgs('\1', 2)); + + EXPECT_CALL(helper, NoArgs).Times(1); + EXPECT_CALL(helper, OneArg).WillOnce(Return(100)); + EXPECT_CALL(helper, OneArg(17)).WillOnce(Return(200)); + EXPECT_CALL(helper, TwoArgs).Times(0); + + EXPECT_EQ(10, helper.NoArgs()); + EXPECT_EQ(100, helper.OneArg(1)); + EXPECT_EQ(200, helper.OneArg(17)); +} + +// Tests floating-point matchers. +template +class FloatingPointTest : public testing::Test { + protected: + typedef testing::internal::FloatingPoint Floating; + typedef typename Floating::Bits Bits; + + FloatingPointTest() + : max_ulps_(Floating::kMaxUlps), + zero_bits_(Floating(0).bits()), + one_bits_(Floating(1).bits()), + infinity_bits_(Floating(Floating::Infinity()).bits()), + close_to_positive_zero_( + Floating::ReinterpretBits(zero_bits_ + max_ulps_ / 2)), + close_to_negative_zero_( + -Floating::ReinterpretBits(zero_bits_ + max_ulps_ - max_ulps_ / 2)), + further_from_negative_zero_(-Floating::ReinterpretBits( + zero_bits_ + max_ulps_ + 1 - max_ulps_ / 2)), + close_to_one_(Floating::ReinterpretBits(one_bits_ + max_ulps_)), + further_from_one_(Floating::ReinterpretBits(one_bits_ + max_ulps_ + 1)), + infinity_(Floating::Infinity()), + close_to_infinity_( + Floating::ReinterpretBits(infinity_bits_ - max_ulps_)), + further_from_infinity_( + Floating::ReinterpretBits(infinity_bits_ - max_ulps_ - 1)), + max_(Floating::Max()), + nan1_(Floating::ReinterpretBits(Floating::kExponentBitMask | 1)), + nan2_(Floating::ReinterpretBits(Floating::kExponentBitMask | 200)) {} + + void TestSize() { EXPECT_EQ(sizeof(RawType), sizeof(Bits)); } + + // A battery of tests for FloatingEqMatcher::Matches. + // matcher_maker is a pointer to a function which creates a FloatingEqMatcher. 
+ void TestMatches( + testing::internal::FloatingEqMatcher (*matcher_maker)(RawType)) { + Matcher m1 = matcher_maker(0.0); + EXPECT_TRUE(m1.Matches(-0.0)); + EXPECT_TRUE(m1.Matches(close_to_positive_zero_)); + EXPECT_TRUE(m1.Matches(close_to_negative_zero_)); + EXPECT_FALSE(m1.Matches(1.0)); + + Matcher m2 = matcher_maker(close_to_positive_zero_); + EXPECT_FALSE(m2.Matches(further_from_negative_zero_)); + + Matcher m3 = matcher_maker(1.0); + EXPECT_TRUE(m3.Matches(close_to_one_)); + EXPECT_FALSE(m3.Matches(further_from_one_)); + + // Test commutativity: matcher_maker(0.0).Matches(1.0) was tested above. + EXPECT_FALSE(m3.Matches(0.0)); + + Matcher m4 = matcher_maker(-infinity_); + EXPECT_TRUE(m4.Matches(-close_to_infinity_)); + + Matcher m5 = matcher_maker(infinity_); + EXPECT_TRUE(m5.Matches(close_to_infinity_)); + + // This is interesting as the representations of infinity_ and nan1_ + // are only 1 DLP apart. + EXPECT_FALSE(m5.Matches(nan1_)); + + // matcher_maker can produce a Matcher, which is needed in + // some cases. + Matcher m6 = matcher_maker(0.0); + EXPECT_TRUE(m6.Matches(-0.0)); + EXPECT_TRUE(m6.Matches(close_to_positive_zero_)); + EXPECT_FALSE(m6.Matches(1.0)); + + // matcher_maker can produce a Matcher, which is needed in some + // cases. + Matcher m7 = matcher_maker(0.0); + RawType x = 0.0; + EXPECT_TRUE(m7.Matches(x)); + x = 0.01f; + EXPECT_FALSE(m7.Matches(x)); + } + + // Pre-calculated numbers to be used by the tests. + + const Bits max_ulps_; + + const Bits zero_bits_; // The bits that represent 0.0. + const Bits one_bits_; // The bits that represent 1.0. + const Bits infinity_bits_; // The bits that represent +infinity. + + // Some numbers close to 0.0. + const RawType close_to_positive_zero_; + const RawType close_to_negative_zero_; + const RawType further_from_negative_zero_; + + // Some numbers close to 1.0. + const RawType close_to_one_; + const RawType further_from_one_; + + // Some numbers close to +infinity. 
+ const RawType infinity_; + const RawType close_to_infinity_; + const RawType further_from_infinity_; + + // Maximum representable value that's not infinity. + const RawType max_; + + // Some NaNs. + const RawType nan1_; + const RawType nan2_; +}; + +// Tests floating-point matchers with fixed epsilons. +template +class FloatingPointNearTest : public FloatingPointTest { + protected: + typedef FloatingPointTest ParentType; + + // A battery of tests for FloatingEqMatcher::Matches with a fixed epsilon. + // matcher_maker is a pointer to a function which creates a FloatingEqMatcher. + void TestNearMatches(testing::internal::FloatingEqMatcher ( + *matcher_maker)(RawType, RawType)) { + Matcher m1 = matcher_maker(0.0, 0.0); + EXPECT_TRUE(m1.Matches(0.0)); + EXPECT_TRUE(m1.Matches(-0.0)); + EXPECT_FALSE(m1.Matches(ParentType::close_to_positive_zero_)); + EXPECT_FALSE(m1.Matches(ParentType::close_to_negative_zero_)); + EXPECT_FALSE(m1.Matches(1.0)); + + Matcher m2 = matcher_maker(0.0, 1.0); + EXPECT_TRUE(m2.Matches(0.0)); + EXPECT_TRUE(m2.Matches(-0.0)); + EXPECT_TRUE(m2.Matches(1.0)); + EXPECT_TRUE(m2.Matches(-1.0)); + EXPECT_FALSE(m2.Matches(ParentType::close_to_one_)); + EXPECT_FALSE(m2.Matches(-ParentType::close_to_one_)); + + // Check that inf matches inf, regardless of the of the specified max + // absolute error. + Matcher m3 = matcher_maker(ParentType::infinity_, 0.0); + EXPECT_TRUE(m3.Matches(ParentType::infinity_)); + EXPECT_FALSE(m3.Matches(ParentType::close_to_infinity_)); + EXPECT_FALSE(m3.Matches(-ParentType::infinity_)); + + Matcher m4 = matcher_maker(-ParentType::infinity_, 0.0); + EXPECT_TRUE(m4.Matches(-ParentType::infinity_)); + EXPECT_FALSE(m4.Matches(-ParentType::close_to_infinity_)); + EXPECT_FALSE(m4.Matches(ParentType::infinity_)); + + // Test various overflow scenarios. 
+ Matcher m5 = matcher_maker(ParentType::max_, ParentType::max_); + EXPECT_TRUE(m5.Matches(ParentType::max_)); + EXPECT_FALSE(m5.Matches(-ParentType::max_)); + + Matcher m6 = matcher_maker(-ParentType::max_, ParentType::max_); + EXPECT_FALSE(m6.Matches(ParentType::max_)); + EXPECT_TRUE(m6.Matches(-ParentType::max_)); + + Matcher m7 = matcher_maker(ParentType::max_, 0); + EXPECT_TRUE(m7.Matches(ParentType::max_)); + EXPECT_FALSE(m7.Matches(-ParentType::max_)); + + Matcher m8 = matcher_maker(-ParentType::max_, 0); + EXPECT_FALSE(m8.Matches(ParentType::max_)); + EXPECT_TRUE(m8.Matches(-ParentType::max_)); + + // The difference between max() and -max() normally overflows to infinity, + // but it should still match if the max_abs_error is also infinity. + Matcher m9 = + matcher_maker(ParentType::max_, ParentType::infinity_); + EXPECT_TRUE(m8.Matches(-ParentType::max_)); + + // matcher_maker can produce a Matcher, which is needed in + // some cases. + Matcher m10 = matcher_maker(0.0, 1.0); + EXPECT_TRUE(m10.Matches(-0.0)); + EXPECT_TRUE(m10.Matches(ParentType::close_to_positive_zero_)); + EXPECT_FALSE(m10.Matches(ParentType::close_to_one_)); + + // matcher_maker can produce a Matcher, which is needed in some + // cases. + Matcher m11 = matcher_maker(0.0, 1.0); + RawType x = 0.0; + EXPECT_TRUE(m11.Matches(x)); + x = 1.0f; + EXPECT_TRUE(m11.Matches(x)); + x = -1.0f; + EXPECT_TRUE(m11.Matches(x)); + x = 1.1f; + EXPECT_FALSE(m11.Matches(x)); + x = -1.1f; + EXPECT_FALSE(m11.Matches(x)); + } +}; + +// Instantiate FloatingPointTest for testing floats. +typedef FloatingPointTest FloatTest; + +TEST_F(FloatTest, FloatEqApproximatelyMatchesFloats) { TestMatches(&FloatEq); } + +TEST_F(FloatTest, NanSensitiveFloatEqApproximatelyMatchesFloats) { + TestMatches(&NanSensitiveFloatEq); +} + +TEST_F(FloatTest, FloatEqCannotMatchNaN) { + // FloatEq never matches NaN. 
+ Matcher m = FloatEq(nan1_); + EXPECT_FALSE(m.Matches(nan1_)); + EXPECT_FALSE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST_F(FloatTest, NanSensitiveFloatEqCanMatchNaN) { + // NanSensitiveFloatEq will match NaN. + Matcher m = NanSensitiveFloatEq(nan1_); + EXPECT_TRUE(m.Matches(nan1_)); + EXPECT_TRUE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST_F(FloatTest, FloatEqCanDescribeSelf) { + Matcher m1 = FloatEq(2.0f); + EXPECT_EQ("is approximately 2", Describe(m1)); + EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); + + Matcher m2 = FloatEq(0.5f); + EXPECT_EQ("is approximately 0.5", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); + + Matcher m3 = FloatEq(nan1_); + EXPECT_EQ("never matches", Describe(m3)); + EXPECT_EQ("is anything", DescribeNegation(m3)); +} + +TEST_F(FloatTest, NanSensitiveFloatEqCanDescribeSelf) { + Matcher m1 = NanSensitiveFloatEq(2.0f); + EXPECT_EQ("is approximately 2", Describe(m1)); + EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); + + Matcher m2 = NanSensitiveFloatEq(0.5f); + EXPECT_EQ("is approximately 0.5", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); + + Matcher m3 = NanSensitiveFloatEq(nan1_); + EXPECT_EQ("is NaN", Describe(m3)); + EXPECT_EQ("isn't NaN", DescribeNegation(m3)); +} + +// Instantiate FloatingPointTest for testing floats with a user-specified +// max absolute error. 
+typedef FloatingPointNearTest FloatNearTest; + +TEST_F(FloatNearTest, FloatNearMatches) { TestNearMatches(&FloatNear); } + +TEST_F(FloatNearTest, NanSensitiveFloatNearApproximatelyMatchesFloats) { + TestNearMatches(&NanSensitiveFloatNear); +} + +TEST_F(FloatNearTest, FloatNearCanDescribeSelf) { + Matcher m1 = FloatNear(2.0f, 0.5f); + EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); + EXPECT_EQ("isn't approximately 2 (absolute error > 0.5)", + DescribeNegation(m1)); + + Matcher m2 = FloatNear(0.5f, 0.5f); + EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5 (absolute error > 0.5)", + DescribeNegation(m2)); + + Matcher m3 = FloatNear(nan1_, 0.0); + EXPECT_EQ("never matches", Describe(m3)); + EXPECT_EQ("is anything", DescribeNegation(m3)); +} + +TEST_F(FloatNearTest, NanSensitiveFloatNearCanDescribeSelf) { + Matcher m1 = NanSensitiveFloatNear(2.0f, 0.5f); + EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); + EXPECT_EQ("isn't approximately 2 (absolute error > 0.5)", + DescribeNegation(m1)); + + Matcher m2 = NanSensitiveFloatNear(0.5f, 0.5f); + EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5 (absolute error > 0.5)", + DescribeNegation(m2)); + + Matcher m3 = NanSensitiveFloatNear(nan1_, 0.1f); + EXPECT_EQ("is NaN", Describe(m3)); + EXPECT_EQ("isn't NaN", DescribeNegation(m3)); +} + +TEST_F(FloatNearTest, FloatNearCannotMatchNaN) { + // FloatNear never matches NaN. + Matcher m = FloatNear(ParentType::nan1_, 0.1f); + EXPECT_FALSE(m.Matches(nan1_)); + EXPECT_FALSE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST_F(FloatNearTest, NanSensitiveFloatNearCanMatchNaN) { + // NanSensitiveFloatNear will match NaN. 
+ Matcher m = NanSensitiveFloatNear(nan1_, 0.1f); + EXPECT_TRUE(m.Matches(nan1_)); + EXPECT_TRUE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +// Instantiate FloatingPointTest for testing doubles. +typedef FloatingPointTest DoubleTest; + +TEST_F(DoubleTest, DoubleEqApproximatelyMatchesDoubles) { + TestMatches(&DoubleEq); +} + +TEST_F(DoubleTest, NanSensitiveDoubleEqApproximatelyMatchesDoubles) { + TestMatches(&NanSensitiveDoubleEq); +} + +TEST_F(DoubleTest, DoubleEqCannotMatchNaN) { + // DoubleEq never matches NaN. + Matcher m = DoubleEq(nan1_); + EXPECT_FALSE(m.Matches(nan1_)); + EXPECT_FALSE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST_F(DoubleTest, NanSensitiveDoubleEqCanMatchNaN) { + // NanSensitiveDoubleEq will match NaN. + Matcher m = NanSensitiveDoubleEq(nan1_); + EXPECT_TRUE(m.Matches(nan1_)); + EXPECT_TRUE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST_F(DoubleTest, DoubleEqCanDescribeSelf) { + Matcher m1 = DoubleEq(2.0); + EXPECT_EQ("is approximately 2", Describe(m1)); + EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); + + Matcher m2 = DoubleEq(0.5); + EXPECT_EQ("is approximately 0.5", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); + + Matcher m3 = DoubleEq(nan1_); + EXPECT_EQ("never matches", Describe(m3)); + EXPECT_EQ("is anything", DescribeNegation(m3)); +} + +TEST_F(DoubleTest, NanSensitiveDoubleEqCanDescribeSelf) { + Matcher m1 = NanSensitiveDoubleEq(2.0); + EXPECT_EQ("is approximately 2", Describe(m1)); + EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); + + Matcher m2 = NanSensitiveDoubleEq(0.5); + EXPECT_EQ("is approximately 0.5", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); + + Matcher m3 = NanSensitiveDoubleEq(nan1_); + EXPECT_EQ("is NaN", Describe(m3)); + EXPECT_EQ("isn't NaN", DescribeNegation(m3)); +} + +// Instantiate FloatingPointTest for testing floats with a user-specified +// max absolute error. 
+typedef FloatingPointNearTest DoubleNearTest; + +TEST_F(DoubleNearTest, DoubleNearMatches) { TestNearMatches(&DoubleNear); } + +TEST_F(DoubleNearTest, NanSensitiveDoubleNearApproximatelyMatchesDoubles) { + TestNearMatches(&NanSensitiveDoubleNear); +} + +TEST_F(DoubleNearTest, DoubleNearCanDescribeSelf) { + Matcher m1 = DoubleNear(2.0, 0.5); + EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); + EXPECT_EQ("isn't approximately 2 (absolute error > 0.5)", + DescribeNegation(m1)); + + Matcher m2 = DoubleNear(0.5, 0.5); + EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5 (absolute error > 0.5)", + DescribeNegation(m2)); + + Matcher m3 = DoubleNear(nan1_, 0.0); + EXPECT_EQ("never matches", Describe(m3)); + EXPECT_EQ("is anything", DescribeNegation(m3)); +} + +TEST_F(DoubleNearTest, ExplainsResultWhenMatchFails) { + EXPECT_EQ("", Explain(DoubleNear(2.0, 0.1), 2.05)); + EXPECT_EQ("which is 0.2 from 2", Explain(DoubleNear(2.0, 0.1), 2.2)); + EXPECT_EQ("which is -0.3 from 2", Explain(DoubleNear(2.0, 0.1), 1.7)); + + const std::string explanation = + Explain(DoubleNear(2.1, 1e-10), 2.1 + 1.2e-10); + // Different C++ implementations may print floating-point numbers + // slightly differently. 
+ EXPECT_TRUE(explanation == "which is 1.2e-10 from 2.1" || // GCC + explanation == "which is 1.2e-010 from 2.1") // MSVC + << " where explanation is \"" << explanation << "\"."; +} + +TEST_F(DoubleNearTest, NanSensitiveDoubleNearCanDescribeSelf) { + Matcher m1 = NanSensitiveDoubleNear(2.0, 0.5); + EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); + EXPECT_EQ("isn't approximately 2 (absolute error > 0.5)", + DescribeNegation(m1)); + + Matcher m2 = NanSensitiveDoubleNear(0.5, 0.5); + EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); + EXPECT_EQ("isn't approximately 0.5 (absolute error > 0.5)", + DescribeNegation(m2)); + + Matcher m3 = NanSensitiveDoubleNear(nan1_, 0.1); + EXPECT_EQ("is NaN", Describe(m3)); + EXPECT_EQ("isn't NaN", DescribeNegation(m3)); +} + +TEST_F(DoubleNearTest, DoubleNearCannotMatchNaN) { + // DoubleNear never matches NaN. + Matcher m = DoubleNear(ParentType::nan1_, 0.1); + EXPECT_FALSE(m.Matches(nan1_)); + EXPECT_FALSE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST_F(DoubleNearTest, NanSensitiveDoubleNearCanMatchNaN) { + // NanSensitiveDoubleNear will match NaN. + Matcher m = NanSensitiveDoubleNear(nan1_, 0.1); + EXPECT_TRUE(m.Matches(nan1_)); + EXPECT_TRUE(m.Matches(nan2_)); + EXPECT_FALSE(m.Matches(1.0)); +} + +TEST(NotTest, WorksOnMoveOnlyType) { + std::unique_ptr p(new int(3)); + EXPECT_THAT(p, Pointee(Eq(3))); + EXPECT_THAT(p, Not(Pointee(Eq(2)))); +} + +TEST(AllOfTest, HugeMatcher) { + // Verify that using AllOf with many arguments doesn't cause + // the compiler to exceed template instantiation depth limit. + EXPECT_THAT(0, testing::AllOf(_, _, _, _, _, _, _, _, _, + testing::AllOf(_, _, _, _, _, _, _, _, _, _))); +} + +TEST(AnyOfTest, HugeMatcher) { + // Verify that using AnyOf with many arguments doesn't cause + // the compiler to exceed template instantiation depth limit. 
+ EXPECT_THAT(0, testing::AnyOf(_, _, _, _, _, _, _, _, _, + testing::AnyOf(_, _, _, _, _, _, _, _, _, _))); +} + +namespace adl_test { + +// Verifies that the implementation of ::testing::AllOf and ::testing::AnyOf +// don't issue unqualified recursive calls. If they do, the argument dependent +// name lookup will cause AllOf/AnyOf in the 'adl_test' namespace to be found +// as a candidate and the compilation will break due to an ambiguous overload. + +// The matcher must be in the same namespace as AllOf/AnyOf to make argument +// dependent lookup find those. +MATCHER(M, "") { + (void)arg; + return true; +} + +template +bool AllOf(const T1& /*t1*/, const T2& /*t2*/) { + return true; +} + +TEST(AllOfTest, DoesNotCallAllOfUnqualified) { + EXPECT_THAT(42, + testing::AllOf(M(), M(), M(), M(), M(), M(), M(), M(), M(), M())); +} + +template +bool AnyOf(const T1&, const T2&) { + return true; +} + +TEST(AnyOfTest, DoesNotCallAnyOfUnqualified) { + EXPECT_THAT(42, + testing::AnyOf(M(), M(), M(), M(), M(), M(), M(), M(), M(), M())); +} + +} // namespace adl_test + +TEST(AllOfTest, WorksOnMoveOnlyType) { + std::unique_ptr p(new int(3)); + EXPECT_THAT(p, AllOf(Pointee(Eq(3)), Pointee(Gt(0)), Pointee(Lt(5)))); + EXPECT_THAT(p, Not(AllOf(Pointee(Eq(3)), Pointee(Gt(0)), Pointee(Lt(3))))); +} + +TEST(AnyOfTest, WorksOnMoveOnlyType) { + std::unique_ptr p(new int(3)); + EXPECT_THAT(p, AnyOf(Pointee(Eq(5)), Pointee(Lt(0)), Pointee(Lt(5)))); + EXPECT_THAT(p, Not(AnyOf(Pointee(Eq(5)), Pointee(Lt(0)), Pointee(Gt(5))))); +} + +} // namespace +} // namespace gmock_matchers_test +} // namespace testing + +#ifdef _MSC_VER +#pragma warning(pop) +#endif diff --git a/ext/googletest/googlemock/test/gmock-matchers-comparisons_test.cc b/ext/googletest/googlemock/test/gmock-matchers-comparisons_test.cc new file mode 100644 index 0000000000..eb8f3f6319 --- /dev/null +++ b/ext/googletest/googlemock/test/gmock-matchers-comparisons_test.cc @@ -0,0 +1,2318 @@ +// Copyright 2007, Google Inc. 
+// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file tests some commonly used argument matchers. 
+ +// Silence warning C4244: 'initializing': conversion from 'int' to 'short', +// possible loss of data and C4100, unreferenced local parameter +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4244) +#pragma warning(disable : 4100) +#endif + +#include "test/gmock-matchers_test.h" + +namespace testing { +namespace gmock_matchers_test { +namespace { + +INSTANTIATE_GTEST_MATCHER_TEST_P(MonotonicMatcherTest); + +TEST_P(MonotonicMatcherTestP, IsPrintable) { + stringstream ss; + ss << GreaterThan(5); + EXPECT_EQ("is > 5", ss.str()); +} + +TEST(MatchResultListenerTest, StreamingWorks) { + StringMatchResultListener listener; + listener << "hi" << 5; + EXPECT_EQ("hi5", listener.str()); + + listener.Clear(); + EXPECT_EQ("", listener.str()); + + listener << 42; + EXPECT_EQ("42", listener.str()); + + // Streaming shouldn't crash when the underlying ostream is NULL. + DummyMatchResultListener dummy; + dummy << "hi" << 5; +} + +TEST(MatchResultListenerTest, CanAccessUnderlyingStream) { + EXPECT_TRUE(DummyMatchResultListener().stream() == nullptr); + EXPECT_TRUE(StreamMatchResultListener(nullptr).stream() == nullptr); + + EXPECT_EQ(&std::cout, StreamMatchResultListener(&std::cout).stream()); +} + +TEST(MatchResultListenerTest, IsInterestedWorks) { + EXPECT_TRUE(StringMatchResultListener().IsInterested()); + EXPECT_TRUE(StreamMatchResultListener(&std::cout).IsInterested()); + + EXPECT_FALSE(DummyMatchResultListener().IsInterested()); + EXPECT_FALSE(StreamMatchResultListener(nullptr).IsInterested()); +} + +// Makes sure that the MatcherInterface interface doesn't +// change. 
+class EvenMatcherImpl : public MatcherInterface { + public: + bool MatchAndExplain(int x, + MatchResultListener* /* listener */) const override { + return x % 2 == 0; + } + + void DescribeTo(ostream* os) const override { *os << "is an even number"; } + + // We deliberately don't define DescribeNegationTo() and + // ExplainMatchResultTo() here, to make sure the definition of these + // two methods is optional. +}; + +// Makes sure that the MatcherInterface API doesn't change. +TEST(MatcherInterfaceTest, CanBeImplementedUsingPublishedAPI) { + EvenMatcherImpl m; +} + +// Tests implementing a monomorphic matcher using MatchAndExplain(). + +class NewEvenMatcherImpl : public MatcherInterface { + public: + bool MatchAndExplain(int x, MatchResultListener* listener) const override { + const bool match = x % 2 == 0; + // Verifies that we can stream to a listener directly. + *listener << "value % " << 2; + if (listener->stream() != nullptr) { + // Verifies that we can stream to a listener's underlying stream + // too. + *listener->stream() << " == " << (x % 2); + } + return match; + } + + void DescribeTo(ostream* os) const override { *os << "is an even number"; } +}; + +TEST(MatcherInterfaceTest, CanBeImplementedUsingNewAPI) { + Matcher m = MakeMatcher(new NewEvenMatcherImpl); + EXPECT_TRUE(m.Matches(2)); + EXPECT_FALSE(m.Matches(3)); + EXPECT_EQ("value % 2 == 0", Explain(m, 2)); + EXPECT_EQ("value % 2 == 1", Explain(m, 3)); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(MatcherTest); + +// Tests default-constructing a matcher. +TEST(MatcherTest, CanBeDefaultConstructed) { Matcher m; } + +// Tests that Matcher can be constructed from a MatcherInterface*. +TEST(MatcherTest, CanBeConstructedFromMatcherInterface) { + const MatcherInterface* impl = new EvenMatcherImpl; + Matcher m(impl); + EXPECT_TRUE(m.Matches(4)); + EXPECT_FALSE(m.Matches(5)); +} + +// Tests that value can be used in place of Eq(value). 
+TEST(MatcherTest, CanBeImplicitlyConstructedFromValue) { + Matcher m1 = 5; + EXPECT_TRUE(m1.Matches(5)); + EXPECT_FALSE(m1.Matches(6)); +} + +// Tests that NULL can be used in place of Eq(NULL). +TEST(MatcherTest, CanBeImplicitlyConstructedFromNULL) { + Matcher m1 = nullptr; + EXPECT_TRUE(m1.Matches(nullptr)); + int n = 0; + EXPECT_FALSE(m1.Matches(&n)); +} + +// Tests that matchers can be constructed from a variable that is not properly +// defined. This should be illegal, but many users rely on this accidentally. +struct Undefined { + virtual ~Undefined() = 0; + static const int kInt = 1; +}; + +TEST(MatcherTest, CanBeConstructedFromUndefinedVariable) { + Matcher m1 = Undefined::kInt; + EXPECT_TRUE(m1.Matches(1)); + EXPECT_FALSE(m1.Matches(2)); +} + +// Test that a matcher parameterized with an abstract class compiles. +TEST(MatcherTest, CanAcceptAbstractClass) { Matcher m = _; } + +// Tests that matchers are copyable. +TEST(MatcherTest, IsCopyable) { + // Tests the copy constructor. + Matcher m1 = Eq(false); + EXPECT_TRUE(m1.Matches(false)); + EXPECT_FALSE(m1.Matches(true)); + + // Tests the assignment operator. + m1 = Eq(true); + EXPECT_TRUE(m1.Matches(true)); + EXPECT_FALSE(m1.Matches(false)); +} + +// Tests that Matcher::DescribeTo() calls +// MatcherInterface::DescribeTo(). +TEST(MatcherTest, CanDescribeItself) { + EXPECT_EQ("is an even number", Describe(Matcher(new EvenMatcherImpl))); +} + +// Tests Matcher::MatchAndExplain(). +TEST_P(MatcherTestP, MatchAndExplain) { + Matcher m = GreaterThan(0); + StringMatchResultListener listener1; + EXPECT_TRUE(m.MatchAndExplain(42, &listener1)); + EXPECT_EQ("which is 42 more than 0", listener1.str()); + + StringMatchResultListener listener2; + EXPECT_FALSE(m.MatchAndExplain(-9, &listener2)); + EXPECT_EQ("which is 9 less than 0", listener2.str()); +} + +// Tests that a C-string literal can be implicitly converted to a +// Matcher or Matcher. 
+TEST(StringMatcherTest, CanBeImplicitlyConstructedFromCStringLiteral) { + Matcher m1 = "hi"; + EXPECT_TRUE(m1.Matches("hi")); + EXPECT_FALSE(m1.Matches("hello")); + + Matcher m2 = "hi"; + EXPECT_TRUE(m2.Matches("hi")); + EXPECT_FALSE(m2.Matches("hello")); +} + +// Tests that a string object can be implicitly converted to a +// Matcher or Matcher. +TEST(StringMatcherTest, CanBeImplicitlyConstructedFromString) { + Matcher m1 = std::string("hi"); + EXPECT_TRUE(m1.Matches("hi")); + EXPECT_FALSE(m1.Matches("hello")); + + Matcher m2 = std::string("hi"); + EXPECT_TRUE(m2.Matches("hi")); + EXPECT_FALSE(m2.Matches("hello")); +} + +#if GTEST_INTERNAL_HAS_STRING_VIEW +// Tests that a C-string literal can be implicitly converted to a +// Matcher or Matcher. +TEST(StringViewMatcherTest, CanBeImplicitlyConstructedFromCStringLiteral) { + Matcher m1 = "cats"; + EXPECT_TRUE(m1.Matches("cats")); + EXPECT_FALSE(m1.Matches("dogs")); + + Matcher m2 = "cats"; + EXPECT_TRUE(m2.Matches("cats")); + EXPECT_FALSE(m2.Matches("dogs")); +} + +// Tests that a std::string object can be implicitly converted to a +// Matcher or Matcher. +TEST(StringViewMatcherTest, CanBeImplicitlyConstructedFromString) { + Matcher m1 = std::string("cats"); + EXPECT_TRUE(m1.Matches("cats")); + EXPECT_FALSE(m1.Matches("dogs")); + + Matcher m2 = std::string("cats"); + EXPECT_TRUE(m2.Matches("cats")); + EXPECT_FALSE(m2.Matches("dogs")); +} + +// Tests that a StringView object can be implicitly converted to a +// Matcher or Matcher. +TEST(StringViewMatcherTest, CanBeImplicitlyConstructedFromStringView) { + Matcher m1 = internal::StringView("cats"); + EXPECT_TRUE(m1.Matches("cats")); + EXPECT_FALSE(m1.Matches("dogs")); + + Matcher m2 = internal::StringView("cats"); + EXPECT_TRUE(m2.Matches("cats")); + EXPECT_FALSE(m2.Matches("dogs")); +} +#endif // GTEST_INTERNAL_HAS_STRING_VIEW + +// Tests that a std::reference_wrapper object can be implicitly +// converted to a Matcher or Matcher via Eq(). 
+TEST(StringMatcherTest, + CanBeImplicitlyConstructedFromEqReferenceWrapperString) { + std::string value = "cats"; + Matcher m1 = Eq(std::ref(value)); + EXPECT_TRUE(m1.Matches("cats")); + EXPECT_FALSE(m1.Matches("dogs")); + + Matcher m2 = Eq(std::ref(value)); + EXPECT_TRUE(m2.Matches("cats")); + EXPECT_FALSE(m2.Matches("dogs")); +} + +// Tests that MakeMatcher() constructs a Matcher from a +// MatcherInterface* without requiring the user to explicitly +// write the type. +TEST(MakeMatcherTest, ConstructsMatcherFromMatcherInterface) { + const MatcherInterface* dummy_impl = new EvenMatcherImpl; + Matcher m = MakeMatcher(dummy_impl); +} + +// Tests that MakePolymorphicMatcher() can construct a polymorphic +// matcher from its implementation using the old API. +const int g_bar = 1; +class ReferencesBarOrIsZeroImpl { + public: + template + bool MatchAndExplain(const T& x, MatchResultListener* /* listener */) const { + const void* p = &x; + return p == &g_bar || x == 0; + } + + void DescribeTo(ostream* os) const { *os << "g_bar or zero"; } + + void DescribeNegationTo(ostream* os) const { + *os << "doesn't reference g_bar and is not zero"; + } +}; + +// This function verifies that MakePolymorphicMatcher() returns a +// PolymorphicMatcher where T is the argument's type. +PolymorphicMatcher ReferencesBarOrIsZero() { + return MakePolymorphicMatcher(ReferencesBarOrIsZeroImpl()); +} + +TEST(MakePolymorphicMatcherTest, ConstructsMatcherUsingOldAPI) { + // Using a polymorphic matcher to match a reference type. + Matcher m1 = ReferencesBarOrIsZero(); + EXPECT_TRUE(m1.Matches(0)); + // Verifies that the identity of a by-reference argument is preserved. + EXPECT_TRUE(m1.Matches(g_bar)); + EXPECT_FALSE(m1.Matches(1)); + EXPECT_EQ("g_bar or zero", Describe(m1)); + + // Using a polymorphic matcher to match a value type. 
+ Matcher m2 = ReferencesBarOrIsZero(); + EXPECT_TRUE(m2.Matches(0.0)); + EXPECT_FALSE(m2.Matches(0.1)); + EXPECT_EQ("g_bar or zero", Describe(m2)); +} + +// Tests implementing a polymorphic matcher using MatchAndExplain(). + +class PolymorphicIsEvenImpl { + public: + void DescribeTo(ostream* os) const { *os << "is even"; } + + void DescribeNegationTo(ostream* os) const { *os << "is odd"; } + + template + bool MatchAndExplain(const T& x, MatchResultListener* listener) const { + // Verifies that we can stream to the listener directly. + *listener << "% " << 2; + if (listener->stream() != nullptr) { + // Verifies that we can stream to the listener's underlying stream + // too. + *listener->stream() << " == " << (x % 2); + } + return (x % 2) == 0; + } +}; + +PolymorphicMatcher PolymorphicIsEven() { + return MakePolymorphicMatcher(PolymorphicIsEvenImpl()); +} + +TEST(MakePolymorphicMatcherTest, ConstructsMatcherUsingNewAPI) { + // Using PolymorphicIsEven() as a Matcher. + const Matcher m1 = PolymorphicIsEven(); + EXPECT_TRUE(m1.Matches(42)); + EXPECT_FALSE(m1.Matches(43)); + EXPECT_EQ("is even", Describe(m1)); + + const Matcher not_m1 = Not(m1); + EXPECT_EQ("is odd", Describe(not_m1)); + + EXPECT_EQ("% 2 == 0", Explain(m1, 42)); + + // Using PolymorphicIsEven() as a Matcher. + const Matcher m2 = PolymorphicIsEven(); + EXPECT_TRUE(m2.Matches('\x42')); + EXPECT_FALSE(m2.Matches('\x43')); + EXPECT_EQ("is even", Describe(m2)); + + const Matcher not_m2 = Not(m2); + EXPECT_EQ("is odd", Describe(not_m2)); + + EXPECT_EQ("% 2 == 0", Explain(m2, '\x42')); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(MatcherCastTest); + +// Tests that MatcherCast(m) works when m is a polymorphic matcher. 
+TEST_P(MatcherCastTestP, FromPolymorphicMatcher) { + Matcher m; + if (use_gtest_matcher_) { + m = MatcherCast(GtestGreaterThan(int64_t{5})); + } else { + m = MatcherCast(Gt(int64_t{5})); + } + EXPECT_TRUE(m.Matches(6)); + EXPECT_FALSE(m.Matches(4)); +} + +// For testing casting matchers between compatible types. +class IntValue { + public: + // An int can be statically (although not implicitly) cast to a + // IntValue. + explicit IntValue(int a_value) : value_(a_value) {} + + int value() const { return value_; } + + private: + int value_; +}; + +// For testing casting matchers between compatible types. +bool IsPositiveIntValue(const IntValue& foo) { return foo.value() > 0; } + +// Tests that MatcherCast(m) works when m is a Matcher where T +// can be statically converted to U. +TEST(MatcherCastTest, FromCompatibleType) { + Matcher m1 = Eq(2.0); + Matcher m2 = MatcherCast(m1); + EXPECT_TRUE(m2.Matches(2)); + EXPECT_FALSE(m2.Matches(3)); + + Matcher m3 = Truly(IsPositiveIntValue); + Matcher m4 = MatcherCast(m3); + // In the following, the arguments 1 and 0 are statically converted + // to IntValue objects, and then tested by the IsPositiveIntValue() + // predicate. + EXPECT_TRUE(m4.Matches(1)); + EXPECT_FALSE(m4.Matches(0)); +} + +// Tests that MatcherCast(m) works when m is a Matcher. +TEST(MatcherCastTest, FromConstReferenceToNonReference) { + Matcher m1 = Eq(0); + Matcher m2 = MatcherCast(m1); + EXPECT_TRUE(m2.Matches(0)); + EXPECT_FALSE(m2.Matches(1)); +} + +// Tests that MatcherCast(m) works when m is a Matcher. +TEST(MatcherCastTest, FromReferenceToNonReference) { + Matcher m1 = Eq(0); + Matcher m2 = MatcherCast(m1); + EXPECT_TRUE(m2.Matches(0)); + EXPECT_FALSE(m2.Matches(1)); +} + +// Tests that MatcherCast(m) works when m is a Matcher. 
+TEST(MatcherCastTest, FromNonReferenceToConstReference) { + Matcher m1 = Eq(0); + Matcher m2 = MatcherCast(m1); + EXPECT_TRUE(m2.Matches(0)); + EXPECT_FALSE(m2.Matches(1)); +} + +// Tests that MatcherCast(m) works when m is a Matcher. +TEST(MatcherCastTest, FromNonReferenceToReference) { + Matcher m1 = Eq(0); + Matcher m2 = MatcherCast(m1); + int n = 0; + EXPECT_TRUE(m2.Matches(n)); + n = 1; + EXPECT_FALSE(m2.Matches(n)); +} + +// Tests that MatcherCast(m) works when m is a Matcher. +TEST(MatcherCastTest, FromSameType) { + Matcher m1 = Eq(0); + Matcher m2 = MatcherCast(m1); + EXPECT_TRUE(m2.Matches(0)); + EXPECT_FALSE(m2.Matches(1)); +} + +// Tests that MatcherCast(m) works when m is a value of the same type as the +// value type of the Matcher. +TEST(MatcherCastTest, FromAValue) { + Matcher m = MatcherCast(42); + EXPECT_TRUE(m.Matches(42)); + EXPECT_FALSE(m.Matches(239)); +} + +// Tests that MatcherCast(m) works when m is a value of the type implicitly +// convertible to the value type of the Matcher. +TEST(MatcherCastTest, FromAnImplicitlyConvertibleValue) { + const int kExpected = 'c'; + Matcher m = MatcherCast('c'); + EXPECT_TRUE(m.Matches(kExpected)); + EXPECT_FALSE(m.Matches(kExpected + 1)); +} + +struct NonImplicitlyConstructibleTypeWithOperatorEq { + friend bool operator==( + const NonImplicitlyConstructibleTypeWithOperatorEq& /* ignored */, + int rhs) { + return 42 == rhs; + } + friend bool operator==( + int lhs, + const NonImplicitlyConstructibleTypeWithOperatorEq& /* ignored */) { + return lhs == 42; + } +}; + +// Tests that MatcherCast(m) works when m is a neither a matcher nor +// implicitly convertible to the value type of the Matcher, but the value type +// of the matcher has operator==() overload accepting m. 
+TEST(MatcherCastTest, NonImplicitlyConstructibleTypeWithOperatorEq) { + Matcher m1 = + MatcherCast(42); + EXPECT_TRUE(m1.Matches(NonImplicitlyConstructibleTypeWithOperatorEq())); + + Matcher m2 = + MatcherCast(239); + EXPECT_FALSE(m2.Matches(NonImplicitlyConstructibleTypeWithOperatorEq())); + + // When updating the following lines please also change the comment to + // namespace convertible_from_any. + Matcher m3 = + MatcherCast(NonImplicitlyConstructibleTypeWithOperatorEq()); + EXPECT_TRUE(m3.Matches(42)); + EXPECT_FALSE(m3.Matches(239)); +} + +// ConvertibleFromAny does not work with MSVC. resulting in +// error C2440: 'initializing': cannot convert from 'Eq' to 'M' +// No constructor could take the source type, or constructor overload +// resolution was ambiguous + +#if !defined _MSC_VER + +// The below ConvertibleFromAny struct is implicitly constructible from anything +// and when in the same namespace can interact with other tests. In particular, +// if it is in the same namespace as other tests and one removes +// NonImplicitlyConstructibleTypeWithOperatorEq::operator==(int lhs, ...); +// then the corresponding test still compiles (and it should not!) by implicitly +// converting NonImplicitlyConstructibleTypeWithOperatorEq to ConvertibleFromAny +// in m3.Matcher(). +namespace convertible_from_any { +// Implicitly convertible from any type. 
+struct ConvertibleFromAny { + ConvertibleFromAny(int a_value) : value(a_value) {} + template + ConvertibleFromAny(const T& /*a_value*/) : value(-1) { + ADD_FAILURE() << "Conversion constructor called"; + } + int value; +}; + +bool operator==(const ConvertibleFromAny& a, const ConvertibleFromAny& b) { + return a.value == b.value; +} + +ostream& operator<<(ostream& os, const ConvertibleFromAny& a) { + return os << a.value; +} + +TEST(MatcherCastTest, ConversionConstructorIsUsed) { + Matcher m = MatcherCast(1); + EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); + EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); +} + +TEST(MatcherCastTest, FromConvertibleFromAny) { + Matcher m = + MatcherCast(Eq(ConvertibleFromAny(1))); + EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); + EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); +} +} // namespace convertible_from_any + +#endif // !defined _MSC_VER + +struct IntReferenceWrapper { + IntReferenceWrapper(const int& a_value) : value(&a_value) {} + const int* value; +}; + +bool operator==(const IntReferenceWrapper& a, const IntReferenceWrapper& b) { + return a.value == b.value; +} + +TEST(MatcherCastTest, ValueIsNotCopied) { + int n = 42; + Matcher m = MatcherCast(n); + // Verify that the matcher holds a reference to n, not to its temporary copy. + EXPECT_TRUE(m.Matches(n)); +} + +class Base { + public: + virtual ~Base() {} + Base() {} + + private: + Base(const Base&) = delete; + Base& operator=(const Base&) = delete; +}; + +class Derived : public Base { + public: + Derived() : Base() {} + int i; +}; + +class OtherDerived : public Base {}; + +INSTANTIATE_GTEST_MATCHER_TEST_P(SafeMatcherCastTest); + +// Tests that SafeMatcherCast(m) works when m is a polymorphic matcher. 
+TEST_P(SafeMatcherCastTestP, FromPolymorphicMatcher) { + Matcher m2; + if (use_gtest_matcher_) { + m2 = SafeMatcherCast(GtestGreaterThan(32)); + } else { + m2 = SafeMatcherCast(Gt(32)); + } + EXPECT_TRUE(m2.Matches('A')); + EXPECT_FALSE(m2.Matches('\n')); +} + +// Tests that SafeMatcherCast(m) works when m is a Matcher where +// T and U are arithmetic types and T can be losslessly converted to +// U. +TEST(SafeMatcherCastTest, FromLosslesslyConvertibleArithmeticType) { + Matcher m1 = DoubleEq(1.0); + Matcher m2 = SafeMatcherCast(m1); + EXPECT_TRUE(m2.Matches(1.0f)); + EXPECT_FALSE(m2.Matches(2.0f)); + + Matcher m3 = SafeMatcherCast(TypedEq('a')); + EXPECT_TRUE(m3.Matches('a')); + EXPECT_FALSE(m3.Matches('b')); +} + +// Tests that SafeMatcherCast(m) works when m is a Matcher where T and U +// are pointers or references to a derived and a base class, correspondingly. +TEST(SafeMatcherCastTest, FromBaseClass) { + Derived d, d2; + Matcher m1 = Eq(&d); + Matcher m2 = SafeMatcherCast(m1); + EXPECT_TRUE(m2.Matches(&d)); + EXPECT_FALSE(m2.Matches(&d2)); + + Matcher m3 = Ref(d); + Matcher m4 = SafeMatcherCast(m3); + EXPECT_TRUE(m4.Matches(d)); + EXPECT_FALSE(m4.Matches(d2)); +} + +// Tests that SafeMatcherCast(m) works when m is a Matcher. +TEST(SafeMatcherCastTest, FromConstReferenceToReference) { + int n = 0; + Matcher m1 = Ref(n); + Matcher m2 = SafeMatcherCast(m1); + int n1 = 0; + EXPECT_TRUE(m2.Matches(n)); + EXPECT_FALSE(m2.Matches(n1)); +} + +// Tests that MatcherCast(m) works when m is a Matcher. +TEST(SafeMatcherCastTest, FromNonReferenceToConstReference) { + Matcher> m1 = IsNull(); + Matcher&> m2 = + SafeMatcherCast&>(m1); + EXPECT_TRUE(m2.Matches(std::unique_ptr())); + EXPECT_FALSE(m2.Matches(std::unique_ptr(new int))); +} + +// Tests that SafeMatcherCast(m) works when m is a Matcher. 
+TEST(SafeMatcherCastTest, FromNonReferenceToReference) { + Matcher m1 = Eq(0); + Matcher m2 = SafeMatcherCast(m1); + int n = 0; + EXPECT_TRUE(m2.Matches(n)); + n = 1; + EXPECT_FALSE(m2.Matches(n)); +} + +// Tests that SafeMatcherCast(m) works when m is a Matcher. +TEST(SafeMatcherCastTest, FromSameType) { + Matcher m1 = Eq(0); + Matcher m2 = SafeMatcherCast(m1); + EXPECT_TRUE(m2.Matches(0)); + EXPECT_FALSE(m2.Matches(1)); +} + +#if !defined _MSC_VER + +namespace convertible_from_any { +TEST(SafeMatcherCastTest, ConversionConstructorIsUsed) { + Matcher m = SafeMatcherCast(1); + EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); + EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); +} + +TEST(SafeMatcherCastTest, FromConvertibleFromAny) { + Matcher m = + SafeMatcherCast(Eq(ConvertibleFromAny(1))); + EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); + EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); +} +} // namespace convertible_from_any + +#endif // !defined _MSC_VER + +TEST(SafeMatcherCastTest, ValueIsNotCopied) { + int n = 42; + Matcher m = SafeMatcherCast(n); + // Verify that the matcher holds a reference to n, not to its temporary copy. + EXPECT_TRUE(m.Matches(n)); +} + +TEST(ExpectThat, TakesLiterals) { + EXPECT_THAT(1, 1); + EXPECT_THAT(1.0, 1.0); + EXPECT_THAT(std::string(), ""); +} + +TEST(ExpectThat, TakesFunctions) { + struct Helper { + static void Func() {} + }; + void (*func)() = Helper::Func; + EXPECT_THAT(func, Helper::Func); + EXPECT_THAT(func, &Helper::Func); +} + +// Tests that A() matches any value of type T. +TEST(ATest, MatchesAnyValue) { + // Tests a matcher for a value type. + Matcher m1 = A(); + EXPECT_TRUE(m1.Matches(91.43)); + EXPECT_TRUE(m1.Matches(-15.32)); + + // Tests a matcher for a reference type. 
+ int a = 2; + int b = -6; + Matcher m2 = A(); + EXPECT_TRUE(m2.Matches(a)); + EXPECT_TRUE(m2.Matches(b)); +} + +TEST(ATest, WorksForDerivedClass) { + Base base; + Derived derived; + EXPECT_THAT(&base, A()); + // This shouldn't compile: EXPECT_THAT(&base, A()); + EXPECT_THAT(&derived, A()); + EXPECT_THAT(&derived, A()); +} + +// Tests that A() describes itself properly. +TEST(ATest, CanDescribeSelf) { EXPECT_EQ("is anything", Describe(A())); } + +// Tests that An() matches any value of type T. +TEST(AnTest, MatchesAnyValue) { + // Tests a matcher for a value type. + Matcher m1 = An(); + EXPECT_TRUE(m1.Matches(9143)); + EXPECT_TRUE(m1.Matches(-1532)); + + // Tests a matcher for a reference type. + int a = 2; + int b = -6; + Matcher m2 = An(); + EXPECT_TRUE(m2.Matches(a)); + EXPECT_TRUE(m2.Matches(b)); +} + +// Tests that An() describes itself properly. +TEST(AnTest, CanDescribeSelf) { EXPECT_EQ("is anything", Describe(An())); } + +// Tests that _ can be used as a matcher for any type and matches any +// value of that type. +TEST(UnderscoreTest, MatchesAnyValue) { + // Uses _ as a matcher for a value type. + Matcher m1 = _; + EXPECT_TRUE(m1.Matches(123)); + EXPECT_TRUE(m1.Matches(-242)); + + // Uses _ as a matcher for a reference type. + bool a = false; + const bool b = true; + Matcher m2 = _; + EXPECT_TRUE(m2.Matches(a)); + EXPECT_TRUE(m2.Matches(b)); +} + +// Tests that _ describes itself properly. +TEST(UnderscoreTest, CanDescribeSelf) { + Matcher m = _; + EXPECT_EQ("is anything", Describe(m)); +} + +// Tests that Eq(x) matches any value equal to x. +TEST(EqTest, MatchesEqualValue) { + // 2 C-strings with same content but different addresses. + const char a1[] = "hi"; + const char a2[] = "hi"; + + Matcher m1 = Eq(a1); + EXPECT_TRUE(m1.Matches(a1)); + EXPECT_FALSE(m1.Matches(a2)); +} + +// Tests that Eq(v) describes itself properly. 
+ +class Unprintable { + public: + Unprintable() : c_('a') {} + + bool operator==(const Unprintable& /* rhs */) const { return true; } + // -Wunused-private-field: dummy accessor for `c_`. + char dummy_c() { return c_; } + + private: + char c_; +}; + +TEST(EqTest, CanDescribeSelf) { + Matcher m = Eq(Unprintable()); + EXPECT_EQ("is equal to 1-byte object <61>", Describe(m)); +} + +// Tests that Eq(v) can be used to match any type that supports +// comparing with type T, where T is v's type. +TEST(EqTest, IsPolymorphic) { + Matcher m1 = Eq(1); + EXPECT_TRUE(m1.Matches(1)); + EXPECT_FALSE(m1.Matches(2)); + + Matcher m2 = Eq(1); + EXPECT_TRUE(m2.Matches('\1')); + EXPECT_FALSE(m2.Matches('a')); +} + +// Tests that TypedEq(v) matches values of type T that's equal to v. +TEST(TypedEqTest, ChecksEqualityForGivenType) { + Matcher m1 = TypedEq('a'); + EXPECT_TRUE(m1.Matches('a')); + EXPECT_FALSE(m1.Matches('b')); + + Matcher m2 = TypedEq(6); + EXPECT_TRUE(m2.Matches(6)); + EXPECT_FALSE(m2.Matches(7)); +} + +// Tests that TypedEq(v) describes itself properly. +TEST(TypedEqTest, CanDescribeSelf) { + EXPECT_EQ("is equal to 2", Describe(TypedEq(2))); +} + +// Tests that TypedEq(v) has type Matcher. + +// Type::IsTypeOf(v) compiles if and only if the type of value v is T, where +// T is a "bare" type (i.e. not in the form of const U or U&). If v's type is +// not T, the compiler will generate a message about "undefined reference". +template +struct Type { + static bool IsTypeOf(const T& /* v */) { return true; } + + template + static void IsTypeOf(T2 v); +}; + +TEST(TypedEqTest, HasSpecifiedType) { + // Verfies that the type of TypedEq(v) is Matcher. + Type>::IsTypeOf(TypedEq(5)); + Type>::IsTypeOf(TypedEq(5)); +} + +// Tests that Ge(v) matches anything >= v. +TEST(GeTest, ImplementsGreaterThanOrEqual) { + Matcher m1 = Ge(0); + EXPECT_TRUE(m1.Matches(1)); + EXPECT_TRUE(m1.Matches(0)); + EXPECT_FALSE(m1.Matches(-1)); +} + +// Tests that Ge(v) describes itself properly. 
+TEST(GeTest, CanDescribeSelf) { + Matcher m = Ge(5); + EXPECT_EQ("is >= 5", Describe(m)); +} + +// Tests that Gt(v) matches anything > v. +TEST(GtTest, ImplementsGreaterThan) { + Matcher m1 = Gt(0); + EXPECT_TRUE(m1.Matches(1.0)); + EXPECT_FALSE(m1.Matches(0.0)); + EXPECT_FALSE(m1.Matches(-1.0)); +} + +// Tests that Gt(v) describes itself properly. +TEST(GtTest, CanDescribeSelf) { + Matcher m = Gt(5); + EXPECT_EQ("is > 5", Describe(m)); +} + +// Tests that Le(v) matches anything <= v. +TEST(LeTest, ImplementsLessThanOrEqual) { + Matcher m1 = Le('b'); + EXPECT_TRUE(m1.Matches('a')); + EXPECT_TRUE(m1.Matches('b')); + EXPECT_FALSE(m1.Matches('c')); +} + +// Tests that Le(v) describes itself properly. +TEST(LeTest, CanDescribeSelf) { + Matcher m = Le(5); + EXPECT_EQ("is <= 5", Describe(m)); +} + +// Tests that Lt(v) matches anything < v. +TEST(LtTest, ImplementsLessThan) { + Matcher m1 = Lt("Hello"); + EXPECT_TRUE(m1.Matches("Abc")); + EXPECT_FALSE(m1.Matches("Hello")); + EXPECT_FALSE(m1.Matches("Hello, world!")); +} + +// Tests that Lt(v) describes itself properly. +TEST(LtTest, CanDescribeSelf) { + Matcher m = Lt(5); + EXPECT_EQ("is < 5", Describe(m)); +} + +// Tests that Ne(v) matches anything != v. +TEST(NeTest, ImplementsNotEqual) { + Matcher m1 = Ne(0); + EXPECT_TRUE(m1.Matches(1)); + EXPECT_TRUE(m1.Matches(-1)); + EXPECT_FALSE(m1.Matches(0)); +} + +// Tests that Ne(v) describes itself properly. 
+TEST(NeTest, CanDescribeSelf) { + Matcher m = Ne(5); + EXPECT_EQ("isn't equal to 5", Describe(m)); +} + +class MoveOnly { + public: + explicit MoveOnly(int i) : i_(i) {} + MoveOnly(const MoveOnly&) = delete; + MoveOnly(MoveOnly&&) = default; + MoveOnly& operator=(const MoveOnly&) = delete; + MoveOnly& operator=(MoveOnly&&) = default; + + bool operator==(const MoveOnly& other) const { return i_ == other.i_; } + bool operator!=(const MoveOnly& other) const { return i_ != other.i_; } + bool operator<(const MoveOnly& other) const { return i_ < other.i_; } + bool operator<=(const MoveOnly& other) const { return i_ <= other.i_; } + bool operator>(const MoveOnly& other) const { return i_ > other.i_; } + bool operator>=(const MoveOnly& other) const { return i_ >= other.i_; } + + private: + int i_; +}; + +struct MoveHelper { + MOCK_METHOD1(Call, void(MoveOnly)); +}; + +// Disable this test in VS 2015 (version 14), where it fails when SEH is enabled +#if defined(_MSC_VER) && (_MSC_VER < 1910) +TEST(ComparisonBaseTest, DISABLED_WorksWithMoveOnly) { +#else +TEST(ComparisonBaseTest, WorksWithMoveOnly) { +#endif + MoveOnly m{0}; + MoveHelper helper; + + EXPECT_CALL(helper, Call(Eq(ByRef(m)))); + helper.Call(MoveOnly(0)); + EXPECT_CALL(helper, Call(Ne(ByRef(m)))); + helper.Call(MoveOnly(1)); + EXPECT_CALL(helper, Call(Le(ByRef(m)))); + helper.Call(MoveOnly(0)); + EXPECT_CALL(helper, Call(Lt(ByRef(m)))); + helper.Call(MoveOnly(-1)); + EXPECT_CALL(helper, Call(Ge(ByRef(m)))); + helper.Call(MoveOnly(0)); + EXPECT_CALL(helper, Call(Gt(ByRef(m)))); + helper.Call(MoveOnly(1)); +} + +// Tests that IsNull() matches any NULL pointer of any type. 
+TEST(IsNullTest, MatchesNullPointer) { + Matcher m1 = IsNull(); + int* p1 = nullptr; + int n = 0; + EXPECT_TRUE(m1.Matches(p1)); + EXPECT_FALSE(m1.Matches(&n)); + + Matcher m2 = IsNull(); + const char* p2 = nullptr; + EXPECT_TRUE(m2.Matches(p2)); + EXPECT_FALSE(m2.Matches("hi")); + + Matcher m3 = IsNull(); + void* p3 = nullptr; + EXPECT_TRUE(m3.Matches(p3)); + EXPECT_FALSE(m3.Matches(reinterpret_cast(0xbeef))); +} + +TEST(IsNullTest, StdFunction) { + const Matcher> m = IsNull(); + + EXPECT_TRUE(m.Matches(std::function())); + EXPECT_FALSE(m.Matches([] {})); +} + +// Tests that IsNull() describes itself properly. +TEST(IsNullTest, CanDescribeSelf) { + Matcher m = IsNull(); + EXPECT_EQ("is NULL", Describe(m)); + EXPECT_EQ("isn't NULL", DescribeNegation(m)); +} + +// Tests that NotNull() matches any non-NULL pointer of any type. +TEST(NotNullTest, MatchesNonNullPointer) { + Matcher m1 = NotNull(); + int* p1 = nullptr; + int n = 0; + EXPECT_FALSE(m1.Matches(p1)); + EXPECT_TRUE(m1.Matches(&n)); + + Matcher m2 = NotNull(); + const char* p2 = nullptr; + EXPECT_FALSE(m2.Matches(p2)); + EXPECT_TRUE(m2.Matches("hi")); +} + +TEST(NotNullTest, LinkedPtr) { + const Matcher> m = NotNull(); + const std::shared_ptr null_p; + const std::shared_ptr non_null_p(new int); + + EXPECT_FALSE(m.Matches(null_p)); + EXPECT_TRUE(m.Matches(non_null_p)); +} + +TEST(NotNullTest, ReferenceToConstLinkedPtr) { + const Matcher&> m = NotNull(); + const std::shared_ptr null_p; + const std::shared_ptr non_null_p(new double); + + EXPECT_FALSE(m.Matches(null_p)); + EXPECT_TRUE(m.Matches(non_null_p)); +} + +TEST(NotNullTest, StdFunction) { + const Matcher> m = NotNull(); + + EXPECT_TRUE(m.Matches([] {})); + EXPECT_FALSE(m.Matches(std::function())); +} + +// Tests that NotNull() describes itself properly. +TEST(NotNullTest, CanDescribeSelf) { + Matcher m = NotNull(); + EXPECT_EQ("isn't NULL", Describe(m)); +} + +// Tests that Ref(variable) matches an argument that references +// 'variable'. 
+TEST(RefTest, MatchesSameVariable) { + int a = 0; + int b = 0; + Matcher m = Ref(a); + EXPECT_TRUE(m.Matches(a)); + EXPECT_FALSE(m.Matches(b)); +} + +// Tests that Ref(variable) describes itself properly. +TEST(RefTest, CanDescribeSelf) { + int n = 5; + Matcher m = Ref(n); + stringstream ss; + ss << "references the variable @" << &n << " 5"; + EXPECT_EQ(ss.str(), Describe(m)); +} + +// Test that Ref(non_const_varialbe) can be used as a matcher for a +// const reference. +TEST(RefTest, CanBeUsedAsMatcherForConstReference) { + int a = 0; + int b = 0; + Matcher m = Ref(a); + EXPECT_TRUE(m.Matches(a)); + EXPECT_FALSE(m.Matches(b)); +} + +// Tests that Ref(variable) is covariant, i.e. Ref(derived) can be +// used wherever Ref(base) can be used (Ref(derived) is a sub-type +// of Ref(base), but not vice versa. + +TEST(RefTest, IsCovariant) { + Base base, base2; + Derived derived; + Matcher m1 = Ref(base); + EXPECT_TRUE(m1.Matches(base)); + EXPECT_FALSE(m1.Matches(base2)); + EXPECT_FALSE(m1.Matches(derived)); + + m1 = Ref(derived); + EXPECT_TRUE(m1.Matches(derived)); + EXPECT_FALSE(m1.Matches(base)); + EXPECT_FALSE(m1.Matches(base2)); +} + +TEST(RefTest, ExplainsResult) { + int n = 0; + EXPECT_THAT(Explain(Matcher(Ref(n)), n), + StartsWith("which is located @")); + + int m = 0; + EXPECT_THAT(Explain(Matcher(Ref(n)), m), + StartsWith("which is located @")); +} + +// Tests string comparison matchers. + +template +std::string FromStringLike(internal::StringLike str) { + return std::string(str); +} + +TEST(StringLike, TestConversions) { + EXPECT_EQ("foo", FromStringLike("foo")); + EXPECT_EQ("foo", FromStringLike(std::string("foo"))); +#if GTEST_INTERNAL_HAS_STRING_VIEW + EXPECT_EQ("foo", FromStringLike(internal::StringView("foo"))); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW + + // Non deducible types. 
+ EXPECT_EQ("", FromStringLike({})); + EXPECT_EQ("foo", FromStringLike({'f', 'o', 'o'})); + const char buf[] = "foo"; + EXPECT_EQ("foo", FromStringLike({buf, buf + 3})); +} + +TEST(StrEqTest, MatchesEqualString) { + Matcher m = StrEq(std::string("Hello")); + EXPECT_TRUE(m.Matches("Hello")); + EXPECT_FALSE(m.Matches("hello")); + EXPECT_FALSE(m.Matches(nullptr)); + + Matcher m2 = StrEq("Hello"); + EXPECT_TRUE(m2.Matches("Hello")); + EXPECT_FALSE(m2.Matches("Hi")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + Matcher m3 = + StrEq(internal::StringView("Hello")); + EXPECT_TRUE(m3.Matches(internal::StringView("Hello"))); + EXPECT_FALSE(m3.Matches(internal::StringView("hello"))); + EXPECT_FALSE(m3.Matches(internal::StringView())); + + Matcher m_empty = StrEq(""); + EXPECT_TRUE(m_empty.Matches(internal::StringView(""))); + EXPECT_TRUE(m_empty.Matches(internal::StringView())); + EXPECT_FALSE(m_empty.Matches(internal::StringView("hello"))); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(StrEqTest, CanDescribeSelf) { + Matcher m = StrEq("Hi-\'\"?\\\a\b\f\n\r\t\v\xD3"); + EXPECT_EQ("is equal to \"Hi-\'\\\"?\\\\\\a\\b\\f\\n\\r\\t\\v\\xD3\"", + Describe(m)); + + std::string str("01204500800"); + str[3] = '\0'; + Matcher m2 = StrEq(str); + EXPECT_EQ("is equal to \"012\\04500800\"", Describe(m2)); + str[0] = str[6] = str[7] = str[9] = str[10] = '\0'; + Matcher m3 = StrEq(str); + EXPECT_EQ("is equal to \"\\012\\045\\0\\08\\0\\0\"", Describe(m3)); +} + +TEST(StrNeTest, MatchesUnequalString) { + Matcher m = StrNe("Hello"); + EXPECT_TRUE(m.Matches("")); + EXPECT_TRUE(m.Matches(nullptr)); + EXPECT_FALSE(m.Matches("Hello")); + + Matcher m2 = StrNe(std::string("Hello")); + EXPECT_TRUE(m2.Matches("hello")); + EXPECT_FALSE(m2.Matches("Hello")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + Matcher m3 = StrNe(internal::StringView("Hello")); + EXPECT_TRUE(m3.Matches(internal::StringView(""))); + EXPECT_TRUE(m3.Matches(internal::StringView())); + 
EXPECT_FALSE(m3.Matches(internal::StringView("Hello"))); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(StrNeTest, CanDescribeSelf) { + Matcher m = StrNe("Hi"); + EXPECT_EQ("isn't equal to \"Hi\"", Describe(m)); +} + +TEST(StrCaseEqTest, MatchesEqualStringIgnoringCase) { + Matcher m = StrCaseEq(std::string("Hello")); + EXPECT_TRUE(m.Matches("Hello")); + EXPECT_TRUE(m.Matches("hello")); + EXPECT_FALSE(m.Matches("Hi")); + EXPECT_FALSE(m.Matches(nullptr)); + + Matcher m2 = StrCaseEq("Hello"); + EXPECT_TRUE(m2.Matches("hello")); + EXPECT_FALSE(m2.Matches("Hi")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + Matcher m3 = + StrCaseEq(internal::StringView("Hello")); + EXPECT_TRUE(m3.Matches(internal::StringView("Hello"))); + EXPECT_TRUE(m3.Matches(internal::StringView("hello"))); + EXPECT_FALSE(m3.Matches(internal::StringView("Hi"))); + EXPECT_FALSE(m3.Matches(internal::StringView())); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(StrCaseEqTest, MatchesEqualStringWith0IgnoringCase) { + std::string str1("oabocdooeoo"); + std::string str2("OABOCDOOEOO"); + Matcher m0 = StrCaseEq(str1); + EXPECT_FALSE(m0.Matches(str2 + std::string(1, '\0'))); + + str1[3] = str2[3] = '\0'; + Matcher m1 = StrCaseEq(str1); + EXPECT_TRUE(m1.Matches(str2)); + + str1[0] = str1[6] = str1[7] = str1[10] = '\0'; + str2[0] = str2[6] = str2[7] = str2[10] = '\0'; + Matcher m2 = StrCaseEq(str1); + str1[9] = str2[9] = '\0'; + EXPECT_FALSE(m2.Matches(str2)); + + Matcher m3 = StrCaseEq(str1); + EXPECT_TRUE(m3.Matches(str2)); + + EXPECT_FALSE(m3.Matches(str2 + "x")); + str2.append(1, '\0'); + EXPECT_FALSE(m3.Matches(str2)); + EXPECT_FALSE(m3.Matches(std::string(str2, 0, 9))); +} + +TEST(StrCaseEqTest, CanDescribeSelf) { + Matcher m = StrCaseEq("Hi"); + EXPECT_EQ("is equal to (ignoring case) \"Hi\"", Describe(m)); +} + +TEST(StrCaseNeTest, MatchesUnequalStringIgnoringCase) { + Matcher m = StrCaseNe("Hello"); + EXPECT_TRUE(m.Matches("Hi")); + EXPECT_TRUE(m.Matches(nullptr)); + 
EXPECT_FALSE(m.Matches("Hello")); + EXPECT_FALSE(m.Matches("hello")); + + Matcher m2 = StrCaseNe(std::string("Hello")); + EXPECT_TRUE(m2.Matches("")); + EXPECT_FALSE(m2.Matches("Hello")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + Matcher m3 = + StrCaseNe(internal::StringView("Hello")); + EXPECT_TRUE(m3.Matches(internal::StringView("Hi"))); + EXPECT_TRUE(m3.Matches(internal::StringView())); + EXPECT_FALSE(m3.Matches(internal::StringView("Hello"))); + EXPECT_FALSE(m3.Matches(internal::StringView("hello"))); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(StrCaseNeTest, CanDescribeSelf) { + Matcher m = StrCaseNe("Hi"); + EXPECT_EQ("isn't equal to (ignoring case) \"Hi\"", Describe(m)); +} + +// Tests that HasSubstr() works for matching string-typed values. +TEST(HasSubstrTest, WorksForStringClasses) { + const Matcher m1 = HasSubstr("foo"); + EXPECT_TRUE(m1.Matches(std::string("I love food."))); + EXPECT_FALSE(m1.Matches(std::string("tofo"))); + + const Matcher m2 = HasSubstr("foo"); + EXPECT_TRUE(m2.Matches(std::string("I love food."))); + EXPECT_FALSE(m2.Matches(std::string("tofo"))); + + const Matcher m_empty = HasSubstr(""); + EXPECT_TRUE(m_empty.Matches(std::string())); + EXPECT_TRUE(m_empty.Matches(std::string("not empty"))); +} + +// Tests that HasSubstr() works for matching C-string-typed values. +TEST(HasSubstrTest, WorksForCStrings) { + const Matcher m1 = HasSubstr("foo"); + EXPECT_TRUE(m1.Matches(const_cast("I love food."))); + EXPECT_FALSE(m1.Matches(const_cast("tofo"))); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = HasSubstr("foo"); + EXPECT_TRUE(m2.Matches("I love food.")); + EXPECT_FALSE(m2.Matches("tofo")); + EXPECT_FALSE(m2.Matches(nullptr)); + + const Matcher m_empty = HasSubstr(""); + EXPECT_TRUE(m_empty.Matches("not empty")); + EXPECT_TRUE(m_empty.Matches("")); + EXPECT_FALSE(m_empty.Matches(nullptr)); +} + +#if GTEST_INTERNAL_HAS_STRING_VIEW +// Tests that HasSubstr() works for matching StringView-typed values. 
+TEST(HasSubstrTest, WorksForStringViewClasses) { + const Matcher m1 = + HasSubstr(internal::StringView("foo")); + EXPECT_TRUE(m1.Matches(internal::StringView("I love food."))); + EXPECT_FALSE(m1.Matches(internal::StringView("tofo"))); + EXPECT_FALSE(m1.Matches(internal::StringView())); + + const Matcher m2 = HasSubstr("foo"); + EXPECT_TRUE(m2.Matches(internal::StringView("I love food."))); + EXPECT_FALSE(m2.Matches(internal::StringView("tofo"))); + EXPECT_FALSE(m2.Matches(internal::StringView())); + + const Matcher m3 = HasSubstr(""); + EXPECT_TRUE(m3.Matches(internal::StringView("foo"))); + EXPECT_TRUE(m3.Matches(internal::StringView(""))); + EXPECT_TRUE(m3.Matches(internal::StringView())); +} +#endif // GTEST_INTERNAL_HAS_STRING_VIEW + +// Tests that HasSubstr(s) describes itself properly. +TEST(HasSubstrTest, CanDescribeSelf) { + Matcher m = HasSubstr("foo\n\""); + EXPECT_EQ("has substring \"foo\\n\\\"\"", Describe(m)); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(KeyTest); + +TEST(KeyTest, CanDescribeSelf) { + Matcher&> m = Key("foo"); + EXPECT_EQ("has a key that is equal to \"foo\"", Describe(m)); + EXPECT_EQ("doesn't have a key that is equal to \"foo\"", DescribeNegation(m)); +} + +TEST_P(KeyTestP, ExplainsResult) { + Matcher> m = Key(GreaterThan(10)); + EXPECT_EQ("whose first field is a value which is 5 less than 10", + Explain(m, make_pair(5, true))); + EXPECT_EQ("whose first field is a value which is 5 more than 10", + Explain(m, make_pair(15, true))); +} + +TEST(KeyTest, MatchesCorrectly) { + pair p(25, "foo"); + EXPECT_THAT(p, Key(25)); + EXPECT_THAT(p, Not(Key(42))); + EXPECT_THAT(p, Key(Ge(20))); + EXPECT_THAT(p, Not(Key(Lt(25)))); +} + +TEST(KeyTest, WorksWithMoveOnly) { + pair, std::unique_ptr> p; + EXPECT_THAT(p, Key(Eq(nullptr))); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(PairTest); + +template +struct Tag {}; + +struct PairWithGet { + int member_1; + std::string member_2; + using first_type = int; + using second_type = std::string; + + const int& 
GetImpl(Tag<0>) const { return member_1; } + const std::string& GetImpl(Tag<1>) const { return member_2; } +}; +template +auto get(const PairWithGet& value) -> decltype(value.GetImpl(Tag())) { + return value.GetImpl(Tag()); +} +TEST(PairTest, MatchesPairWithGetCorrectly) { + PairWithGet p{25, "foo"}; + EXPECT_THAT(p, Key(25)); + EXPECT_THAT(p, Not(Key(42))); + EXPECT_THAT(p, Key(Ge(20))); + EXPECT_THAT(p, Not(Key(Lt(25)))); + + std::vector v = {{11, "Foo"}, {29, "gMockIsBestMock"}}; + EXPECT_THAT(v, Contains(Key(29))); +} + +TEST(KeyTest, SafelyCastsInnerMatcher) { + Matcher is_positive = Gt(0); + Matcher is_negative = Lt(0); + pair p('a', true); + EXPECT_THAT(p, Key(is_positive)); + EXPECT_THAT(p, Not(Key(is_negative))); +} + +TEST(KeyTest, InsideContainsUsingMap) { + map container; + container.insert(make_pair(1, 'a')); + container.insert(make_pair(2, 'b')); + container.insert(make_pair(4, 'c')); + EXPECT_THAT(container, Contains(Key(1))); + EXPECT_THAT(container, Not(Contains(Key(3)))); +} + +TEST(KeyTest, InsideContainsUsingMultimap) { + multimap container; + container.insert(make_pair(1, 'a')); + container.insert(make_pair(2, 'b')); + container.insert(make_pair(4, 'c')); + + EXPECT_THAT(container, Not(Contains(Key(25)))); + container.insert(make_pair(25, 'd')); + EXPECT_THAT(container, Contains(Key(25))); + container.insert(make_pair(25, 'e')); + EXPECT_THAT(container, Contains(Key(25))); + + EXPECT_THAT(container, Contains(Key(1))); + EXPECT_THAT(container, Not(Contains(Key(3)))); +} + +TEST(PairTest, Typing) { + // Test verifies the following type conversions can be compiled. 
+ Matcher&> m1 = Pair("foo", 42); + Matcher> m2 = Pair("foo", 42); + Matcher> m3 = Pair("foo", 42); + + Matcher> m4 = Pair(25, "42"); + Matcher> m5 = Pair("25", 42); +} + +TEST(PairTest, CanDescribeSelf) { + Matcher&> m1 = Pair("foo", 42); + EXPECT_EQ( + "has a first field that is equal to \"foo\"" + ", and has a second field that is equal to 42", + Describe(m1)); + EXPECT_EQ( + "has a first field that isn't equal to \"foo\"" + ", or has a second field that isn't equal to 42", + DescribeNegation(m1)); + // Double and triple negation (1 or 2 times not and description of negation). + Matcher&> m2 = Not(Pair(Not(13), 42)); + EXPECT_EQ( + "has a first field that isn't equal to 13" + ", and has a second field that is equal to 42", + DescribeNegation(m2)); +} + +TEST_P(PairTestP, CanExplainMatchResultTo) { + // If neither field matches, Pair() should explain about the first + // field. + const Matcher> m = Pair(GreaterThan(0), GreaterThan(0)); + EXPECT_EQ("whose first field does not match, which is 1 less than 0", + Explain(m, make_pair(-1, -2))); + + // If the first field matches but the second doesn't, Pair() should + // explain about the second field. + EXPECT_EQ("whose second field does not match, which is 2 less than 0", + Explain(m, make_pair(1, -2))); + + // If the first field doesn't match but the second does, Pair() + // should explain about the first field. + EXPECT_EQ("whose first field does not match, which is 1 less than 0", + Explain(m, make_pair(-1, 2))); + + // If both fields match, Pair() should explain about them both. + EXPECT_EQ( + "whose both fields match, where the first field is a value " + "which is 1 more than 0, and the second field is a value " + "which is 2 more than 0", + Explain(m, make_pair(1, 2))); + + // If only the first match has an explanation, only this explanation should + // be printed. 
+ const Matcher> explain_first = Pair(GreaterThan(0), 0); + EXPECT_EQ( + "whose both fields match, where the first field is a value " + "which is 1 more than 0", + Explain(explain_first, make_pair(1, 0))); + + // If only the second match has an explanation, only this explanation should + // be printed. + const Matcher> explain_second = Pair(0, GreaterThan(0)); + EXPECT_EQ( + "whose both fields match, where the second field is a value " + "which is 1 more than 0", + Explain(explain_second, make_pair(0, 1))); +} + +TEST(PairTest, MatchesCorrectly) { + pair p(25, "foo"); + + // Both fields match. + EXPECT_THAT(p, Pair(25, "foo")); + EXPECT_THAT(p, Pair(Ge(20), HasSubstr("o"))); + + // 'first' doesnt' match, but 'second' matches. + EXPECT_THAT(p, Not(Pair(42, "foo"))); + EXPECT_THAT(p, Not(Pair(Lt(25), "foo"))); + + // 'first' matches, but 'second' doesn't match. + EXPECT_THAT(p, Not(Pair(25, "bar"))); + EXPECT_THAT(p, Not(Pair(25, Not("foo")))); + + // Neither field matches. + EXPECT_THAT(p, Not(Pair(13, "bar"))); + EXPECT_THAT(p, Not(Pair(Lt(13), HasSubstr("a")))); +} + +TEST(PairTest, WorksWithMoveOnly) { + pair, std::unique_ptr> p; + p.second.reset(new int(7)); + EXPECT_THAT(p, Pair(Eq(nullptr), Ne(nullptr))); +} + +TEST(PairTest, SafelyCastsInnerMatchers) { + Matcher is_positive = Gt(0); + Matcher is_negative = Lt(0); + pair p('a', true); + EXPECT_THAT(p, Pair(is_positive, _)); + EXPECT_THAT(p, Not(Pair(is_negative, _))); + EXPECT_THAT(p, Pair(_, is_positive)); + EXPECT_THAT(p, Not(Pair(_, is_negative))); +} + +TEST(PairTest, InsideContainsUsingMap) { + map container; + container.insert(make_pair(1, 'a')); + container.insert(make_pair(2, 'b')); + container.insert(make_pair(4, 'c')); + EXPECT_THAT(container, Contains(Pair(1, 'a'))); + EXPECT_THAT(container, Contains(Pair(1, _))); + EXPECT_THAT(container, Contains(Pair(_, 'a'))); + EXPECT_THAT(container, Not(Contains(Pair(3, _)))); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(FieldsAreTest); + +TEST(FieldsAreTest, 
MatchesCorrectly) { + std::tuple p(25, "foo", .5); + + // All fields match. + EXPECT_THAT(p, FieldsAre(25, "foo", .5)); + EXPECT_THAT(p, FieldsAre(Ge(20), HasSubstr("o"), DoubleEq(.5))); + + // Some don't match. + EXPECT_THAT(p, Not(FieldsAre(26, "foo", .5))); + EXPECT_THAT(p, Not(FieldsAre(25, "fo", .5))); + EXPECT_THAT(p, Not(FieldsAre(25, "foo", .6))); +} + +TEST(FieldsAreTest, CanDescribeSelf) { + Matcher&> m1 = FieldsAre("foo", 42); + EXPECT_EQ( + "has field #0 that is equal to \"foo\"" + ", and has field #1 that is equal to 42", + Describe(m1)); + EXPECT_EQ( + "has field #0 that isn't equal to \"foo\"" + ", or has field #1 that isn't equal to 42", + DescribeNegation(m1)); +} + +TEST_P(FieldsAreTestP, CanExplainMatchResultTo) { + // The first one that fails is the one that gives the error. + Matcher> m = + FieldsAre(GreaterThan(0), GreaterThan(0), GreaterThan(0)); + + EXPECT_EQ("whose field #0 does not match, which is 1 less than 0", + Explain(m, std::make_tuple(-1, -2, -3))); + EXPECT_EQ("whose field #1 does not match, which is 2 less than 0", + Explain(m, std::make_tuple(1, -2, -3))); + EXPECT_EQ("whose field #2 does not match, which is 3 less than 0", + Explain(m, std::make_tuple(1, 2, -3))); + + // If they all match, we get a long explanation of success. + EXPECT_EQ( + "whose all elements match, " + "where field #0 is a value which is 1 more than 0" + ", and field #1 is a value which is 2 more than 0" + ", and field #2 is a value which is 3 more than 0", + Explain(m, std::make_tuple(1, 2, 3))); + + // Only print those that have an explanation. + m = FieldsAre(GreaterThan(0), 0, GreaterThan(0)); + EXPECT_EQ( + "whose all elements match, " + "where field #0 is a value which is 1 more than 0" + ", and field #2 is a value which is 3 more than 0", + Explain(m, std::make_tuple(1, 0, 3))); + + // If only one has an explanation, then print that one. 
+ m = FieldsAre(0, GreaterThan(0), 0); + EXPECT_EQ( + "whose all elements match, " + "where field #1 is a value which is 1 more than 0", + Explain(m, std::make_tuple(0, 1, 0))); +} + +#if defined(__cpp_structured_bindings) && __cpp_structured_bindings >= 201606 +TEST(FieldsAreTest, StructuredBindings) { + // testing::FieldsAre can also match aggregates and such with C++17 and up. + struct MyType { + int i; + std::string str; + }; + EXPECT_THAT((MyType{17, "foo"}), FieldsAre(Eq(17), HasSubstr("oo"))); + + // Test all the supported arities. + struct MyVarType1 { + int a; + }; + EXPECT_THAT(MyVarType1{}, FieldsAre(0)); + struct MyVarType2 { + int a, b; + }; + EXPECT_THAT(MyVarType2{}, FieldsAre(0, 0)); + struct MyVarType3 { + int a, b, c; + }; + EXPECT_THAT(MyVarType3{}, FieldsAre(0, 0, 0)); + struct MyVarType4 { + int a, b, c, d; + }; + EXPECT_THAT(MyVarType4{}, FieldsAre(0, 0, 0, 0)); + struct MyVarType5 { + int a, b, c, d, e; + }; + EXPECT_THAT(MyVarType5{}, FieldsAre(0, 0, 0, 0, 0)); + struct MyVarType6 { + int a, b, c, d, e, f; + }; + EXPECT_THAT(MyVarType6{}, FieldsAre(0, 0, 0, 0, 0, 0)); + struct MyVarType7 { + int a, b, c, d, e, f, g; + }; + EXPECT_THAT(MyVarType7{}, FieldsAre(0, 0, 0, 0, 0, 0, 0)); + struct MyVarType8 { + int a, b, c, d, e, f, g, h; + }; + EXPECT_THAT(MyVarType8{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType9 { + int a, b, c, d, e, f, g, h, i; + }; + EXPECT_THAT(MyVarType9{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType10 { + int a, b, c, d, e, f, g, h, i, j; + }; + EXPECT_THAT(MyVarType10{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType11 { + int a, b, c, d, e, f, g, h, i, j, k; + }; + EXPECT_THAT(MyVarType11{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType12 { + int a, b, c, d, e, f, g, h, i, j, k, l; + }; + EXPECT_THAT(MyVarType12{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType13 { + int a, b, c, d, e, f, g, h, i, j, k, l, m; + }; + EXPECT_THAT(MyVarType13{}, 
FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType14 { + int a, b, c, d, e, f, g, h, i, j, k, l, m, n; + }; + EXPECT_THAT(MyVarType14{}, + FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType15 { + int a, b, c, d, e, f, g, h, i, j, k, l, m, n, o; + }; + EXPECT_THAT(MyVarType15{}, + FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + struct MyVarType16 { + int a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p; + }; + EXPECT_THAT(MyVarType16{}, + FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); +} +#endif + +TEST(PairTest, UseGetInsteadOfMembers) { + PairWithGet pair{7, "ABC"}; + EXPECT_THAT(pair, Pair(7, "ABC")); + EXPECT_THAT(pair, Pair(Ge(7), HasSubstr("AB"))); + EXPECT_THAT(pair, Not(Pair(Lt(7), "ABC"))); + + std::vector v = {{11, "Foo"}, {29, "gMockIsBestMock"}}; + EXPECT_THAT(v, + ElementsAre(Pair(11, std::string("Foo")), Pair(Ge(10), Not("")))); +} + +// Tests StartsWith(s). + +TEST(StartsWithTest, MatchesStringWithGivenPrefix) { + const Matcher m1 = StartsWith(std::string("")); + EXPECT_TRUE(m1.Matches("Hi")); + EXPECT_TRUE(m1.Matches("")); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = StartsWith("Hi"); + EXPECT_TRUE(m2.Matches("Hi")); + EXPECT_TRUE(m2.Matches("Hi Hi!")); + EXPECT_TRUE(m2.Matches("High")); + EXPECT_FALSE(m2.Matches("H")); + EXPECT_FALSE(m2.Matches(" Hi")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + const Matcher m_empty = + StartsWith(internal::StringView("")); + EXPECT_TRUE(m_empty.Matches(internal::StringView())); + EXPECT_TRUE(m_empty.Matches(internal::StringView(""))); + EXPECT_TRUE(m_empty.Matches(internal::StringView("not empty"))); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(StartsWithTest, CanDescribeSelf) { + Matcher m = StartsWith("Hi"); + EXPECT_EQ("starts with \"Hi\"", Describe(m)); +} + +// Tests EndsWith(s). 
+ +TEST(EndsWithTest, MatchesStringWithGivenSuffix) { + const Matcher m1 = EndsWith(""); + EXPECT_TRUE(m1.Matches("Hi")); + EXPECT_TRUE(m1.Matches("")); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = EndsWith(std::string("Hi")); + EXPECT_TRUE(m2.Matches("Hi")); + EXPECT_TRUE(m2.Matches("Wow Hi Hi")); + EXPECT_TRUE(m2.Matches("Super Hi")); + EXPECT_FALSE(m2.Matches("i")); + EXPECT_FALSE(m2.Matches("Hi ")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + const Matcher m4 = + EndsWith(internal::StringView("")); + EXPECT_TRUE(m4.Matches("Hi")); + EXPECT_TRUE(m4.Matches("")); + EXPECT_TRUE(m4.Matches(internal::StringView())); + EXPECT_TRUE(m4.Matches(internal::StringView(""))); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(EndsWithTest, CanDescribeSelf) { + Matcher m = EndsWith("Hi"); + EXPECT_EQ("ends with \"Hi\"", Describe(m)); +} + +// Tests WhenBase64Unescaped. + +TEST(WhenBase64UnescapedTest, MatchesUnescapedBase64Strings) { + const Matcher m1 = WhenBase64Unescaped(EndsWith("!")); + EXPECT_FALSE(m1.Matches("invalid base64")); + EXPECT_FALSE(m1.Matches("aGVsbG8gd29ybGQ=")); // hello world + EXPECT_TRUE(m1.Matches("aGVsbG8gd29ybGQh")); // hello world! + + const Matcher m2 = WhenBase64Unescaped(EndsWith("!")); + EXPECT_FALSE(m2.Matches("invalid base64")); + EXPECT_FALSE(m2.Matches("aGVsbG8gd29ybGQ=")); // hello world + EXPECT_TRUE(m2.Matches("aGVsbG8gd29ybGQh")); // hello world! + +#if GTEST_INTERNAL_HAS_STRING_VIEW + const Matcher m3 = + WhenBase64Unescaped(EndsWith("!")); + EXPECT_FALSE(m3.Matches("invalid base64")); + EXPECT_FALSE(m3.Matches("aGVsbG8gd29ybGQ=")); // hello world + EXPECT_TRUE(m3.Matches("aGVsbG8gd29ybGQh")); // hello world! +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(WhenBase64UnescapedTest, CanDescribeSelf) { + const Matcher m = WhenBase64Unescaped(EndsWith("!")); + EXPECT_EQ("matches after Base64Unescape ends with \"!\"", Describe(m)); +} + +// Tests MatchesRegex(). 
+ +TEST(MatchesRegexTest, MatchesStringMatchingGivenRegex) { + const Matcher m1 = MatchesRegex("a.*z"); + EXPECT_TRUE(m1.Matches("az")); + EXPECT_TRUE(m1.Matches("abcz")); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = MatchesRegex(new RE("a.*z")); + EXPECT_TRUE(m2.Matches("azbz")); + EXPECT_FALSE(m2.Matches("az1")); + EXPECT_FALSE(m2.Matches("1az")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + const Matcher m3 = MatchesRegex("a.*z"); + EXPECT_TRUE(m3.Matches(internal::StringView("az"))); + EXPECT_TRUE(m3.Matches(internal::StringView("abcz"))); + EXPECT_FALSE(m3.Matches(internal::StringView("1az"))); + EXPECT_FALSE(m3.Matches(internal::StringView())); + const Matcher m4 = + MatchesRegex(internal::StringView("")); + EXPECT_TRUE(m4.Matches(internal::StringView(""))); + EXPECT_TRUE(m4.Matches(internal::StringView())); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(MatchesRegexTest, CanDescribeSelf) { + Matcher m1 = MatchesRegex(std::string("Hi.*")); + EXPECT_EQ("matches regular expression \"Hi.*\"", Describe(m1)); + + Matcher m2 = MatchesRegex(new RE("a.*")); + EXPECT_EQ("matches regular expression \"a.*\"", Describe(m2)); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + Matcher m3 = MatchesRegex(new RE("0.*")); + EXPECT_EQ("matches regular expression \"0.*\"", Describe(m3)); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +// Tests ContainsRegex(). 
+ +TEST(ContainsRegexTest, MatchesStringContainingGivenRegex) { + const Matcher m1 = ContainsRegex(std::string("a.*z")); + EXPECT_TRUE(m1.Matches("az")); + EXPECT_TRUE(m1.Matches("0abcz1")); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = ContainsRegex(new RE("a.*z")); + EXPECT_TRUE(m2.Matches("azbz")); + EXPECT_TRUE(m2.Matches("az1")); + EXPECT_FALSE(m2.Matches("1a")); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + const Matcher m3 = ContainsRegex(new RE("a.*z")); + EXPECT_TRUE(m3.Matches(internal::StringView("azbz"))); + EXPECT_TRUE(m3.Matches(internal::StringView("az1"))); + EXPECT_FALSE(m3.Matches(internal::StringView("1a"))); + EXPECT_FALSE(m3.Matches(internal::StringView())); + const Matcher m4 = + ContainsRegex(internal::StringView("")); + EXPECT_TRUE(m4.Matches(internal::StringView(""))); + EXPECT_TRUE(m4.Matches(internal::StringView())); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(ContainsRegexTest, CanDescribeSelf) { + Matcher m1 = ContainsRegex("Hi.*"); + EXPECT_EQ("contains regular expression \"Hi.*\"", Describe(m1)); + + Matcher m2 = ContainsRegex(new RE("a.*")); + EXPECT_EQ("contains regular expression \"a.*\"", Describe(m2)); + +#if GTEST_INTERNAL_HAS_STRING_VIEW + Matcher m3 = ContainsRegex(new RE("0.*")); + EXPECT_EQ("contains regular expression \"0.*\"", Describe(m3)); +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +// Tests for wide strings. 
+#if GTEST_HAS_STD_WSTRING +TEST(StdWideStrEqTest, MatchesEqual) { + Matcher m = StrEq(::std::wstring(L"Hello")); + EXPECT_TRUE(m.Matches(L"Hello")); + EXPECT_FALSE(m.Matches(L"hello")); + EXPECT_FALSE(m.Matches(nullptr)); + + Matcher m2 = StrEq(L"Hello"); + EXPECT_TRUE(m2.Matches(L"Hello")); + EXPECT_FALSE(m2.Matches(L"Hi")); + + Matcher m3 = StrEq(L"\xD3\x576\x8D3\xC74D"); + EXPECT_TRUE(m3.Matches(L"\xD3\x576\x8D3\xC74D")); + EXPECT_FALSE(m3.Matches(L"\xD3\x576\x8D3\xC74E")); + + ::std::wstring str(L"01204500800"); + str[3] = L'\0'; + Matcher m4 = StrEq(str); + EXPECT_TRUE(m4.Matches(str)); + str[0] = str[6] = str[7] = str[9] = str[10] = L'\0'; + Matcher m5 = StrEq(str); + EXPECT_TRUE(m5.Matches(str)); +} + +TEST(StdWideStrEqTest, CanDescribeSelf) { + Matcher<::std::wstring> m = StrEq(L"Hi-\'\"?\\\a\b\f\n\r\t\v"); + EXPECT_EQ("is equal to L\"Hi-\'\\\"?\\\\\\a\\b\\f\\n\\r\\t\\v\"", + Describe(m)); + + Matcher<::std::wstring> m2 = StrEq(L"\xD3\x576\x8D3\xC74D"); + EXPECT_EQ("is equal to L\"\\xD3\\x576\\x8D3\\xC74D\"", Describe(m2)); + + ::std::wstring str(L"01204500800"); + str[3] = L'\0'; + Matcher m4 = StrEq(str); + EXPECT_EQ("is equal to L\"012\\04500800\"", Describe(m4)); + str[0] = str[6] = str[7] = str[9] = str[10] = L'\0'; + Matcher m5 = StrEq(str); + EXPECT_EQ("is equal to L\"\\012\\045\\0\\08\\0\\0\"", Describe(m5)); +} + +TEST(StdWideStrNeTest, MatchesUnequalString) { + Matcher m = StrNe(L"Hello"); + EXPECT_TRUE(m.Matches(L"")); + EXPECT_TRUE(m.Matches(nullptr)); + EXPECT_FALSE(m.Matches(L"Hello")); + + Matcher<::std::wstring> m2 = StrNe(::std::wstring(L"Hello")); + EXPECT_TRUE(m2.Matches(L"hello")); + EXPECT_FALSE(m2.Matches(L"Hello")); +} + +TEST(StdWideStrNeTest, CanDescribeSelf) { + Matcher m = StrNe(L"Hi"); + EXPECT_EQ("isn't equal to L\"Hi\"", Describe(m)); +} + +TEST(StdWideStrCaseEqTest, MatchesEqualStringIgnoringCase) { + Matcher m = StrCaseEq(::std::wstring(L"Hello")); + EXPECT_TRUE(m.Matches(L"Hello")); + EXPECT_TRUE(m.Matches(L"hello")); + 
EXPECT_FALSE(m.Matches(L"Hi")); + EXPECT_FALSE(m.Matches(nullptr)); + + Matcher m2 = StrCaseEq(L"Hello"); + EXPECT_TRUE(m2.Matches(L"hello")); + EXPECT_FALSE(m2.Matches(L"Hi")); +} + +TEST(StdWideStrCaseEqTest, MatchesEqualStringWith0IgnoringCase) { + ::std::wstring str1(L"oabocdooeoo"); + ::std::wstring str2(L"OABOCDOOEOO"); + Matcher m0 = StrCaseEq(str1); + EXPECT_FALSE(m0.Matches(str2 + ::std::wstring(1, L'\0'))); + + str1[3] = str2[3] = L'\0'; + Matcher m1 = StrCaseEq(str1); + EXPECT_TRUE(m1.Matches(str2)); + + str1[0] = str1[6] = str1[7] = str1[10] = L'\0'; + str2[0] = str2[6] = str2[7] = str2[10] = L'\0'; + Matcher m2 = StrCaseEq(str1); + str1[9] = str2[9] = L'\0'; + EXPECT_FALSE(m2.Matches(str2)); + + Matcher m3 = StrCaseEq(str1); + EXPECT_TRUE(m3.Matches(str2)); + + EXPECT_FALSE(m3.Matches(str2 + L"x")); + str2.append(1, L'\0'); + EXPECT_FALSE(m3.Matches(str2)); + EXPECT_FALSE(m3.Matches(::std::wstring(str2, 0, 9))); +} + +TEST(StdWideStrCaseEqTest, CanDescribeSelf) { + Matcher<::std::wstring> m = StrCaseEq(L"Hi"); + EXPECT_EQ("is equal to (ignoring case) L\"Hi\"", Describe(m)); +} + +TEST(StdWideStrCaseNeTest, MatchesUnequalStringIgnoringCase) { + Matcher m = StrCaseNe(L"Hello"); + EXPECT_TRUE(m.Matches(L"Hi")); + EXPECT_TRUE(m.Matches(nullptr)); + EXPECT_FALSE(m.Matches(L"Hello")); + EXPECT_FALSE(m.Matches(L"hello")); + + Matcher<::std::wstring> m2 = StrCaseNe(::std::wstring(L"Hello")); + EXPECT_TRUE(m2.Matches(L"")); + EXPECT_FALSE(m2.Matches(L"Hello")); +} + +TEST(StdWideStrCaseNeTest, CanDescribeSelf) { + Matcher m = StrCaseNe(L"Hi"); + EXPECT_EQ("isn't equal to (ignoring case) L\"Hi\"", Describe(m)); +} + +// Tests that HasSubstr() works for matching wstring-typed values. 
+TEST(StdWideHasSubstrTest, WorksForStringClasses) { + const Matcher<::std::wstring> m1 = HasSubstr(L"foo"); + EXPECT_TRUE(m1.Matches(::std::wstring(L"I love food."))); + EXPECT_FALSE(m1.Matches(::std::wstring(L"tofo"))); + + const Matcher m2 = HasSubstr(L"foo"); + EXPECT_TRUE(m2.Matches(::std::wstring(L"I love food."))); + EXPECT_FALSE(m2.Matches(::std::wstring(L"tofo"))); +} + +// Tests that HasSubstr() works for matching C-wide-string-typed values. +TEST(StdWideHasSubstrTest, WorksForCStrings) { + const Matcher m1 = HasSubstr(L"foo"); + EXPECT_TRUE(m1.Matches(const_cast(L"I love food."))); + EXPECT_FALSE(m1.Matches(const_cast(L"tofo"))); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = HasSubstr(L"foo"); + EXPECT_TRUE(m2.Matches(L"I love food.")); + EXPECT_FALSE(m2.Matches(L"tofo")); + EXPECT_FALSE(m2.Matches(nullptr)); +} + +// Tests that HasSubstr(s) describes itself properly. +TEST(StdWideHasSubstrTest, CanDescribeSelf) { + Matcher<::std::wstring> m = HasSubstr(L"foo\n\""); + EXPECT_EQ("has substring L\"foo\\n\\\"\"", Describe(m)); +} + +// Tests StartsWith(s). + +TEST(StdWideStartsWithTest, MatchesStringWithGivenPrefix) { + const Matcher m1 = StartsWith(::std::wstring(L"")); + EXPECT_TRUE(m1.Matches(L"Hi")); + EXPECT_TRUE(m1.Matches(L"")); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = StartsWith(L"Hi"); + EXPECT_TRUE(m2.Matches(L"Hi")); + EXPECT_TRUE(m2.Matches(L"Hi Hi!")); + EXPECT_TRUE(m2.Matches(L"High")); + EXPECT_FALSE(m2.Matches(L"H")); + EXPECT_FALSE(m2.Matches(L" Hi")); +} + +TEST(StdWideStartsWithTest, CanDescribeSelf) { + Matcher m = StartsWith(L"Hi"); + EXPECT_EQ("starts with L\"Hi\"", Describe(m)); +} + +// Tests EndsWith(s). 
+ +TEST(StdWideEndsWithTest, MatchesStringWithGivenSuffix) { + const Matcher m1 = EndsWith(L""); + EXPECT_TRUE(m1.Matches(L"Hi")); + EXPECT_TRUE(m1.Matches(L"")); + EXPECT_FALSE(m1.Matches(nullptr)); + + const Matcher m2 = EndsWith(::std::wstring(L"Hi")); + EXPECT_TRUE(m2.Matches(L"Hi")); + EXPECT_TRUE(m2.Matches(L"Wow Hi Hi")); + EXPECT_TRUE(m2.Matches(L"Super Hi")); + EXPECT_FALSE(m2.Matches(L"i")); + EXPECT_FALSE(m2.Matches(L"Hi ")); +} + +TEST(StdWideEndsWithTest, CanDescribeSelf) { + Matcher m = EndsWith(L"Hi"); + EXPECT_EQ("ends with L\"Hi\"", Describe(m)); +} + +#endif // GTEST_HAS_STD_WSTRING + +TEST(ExplainMatchResultTest, WorksWithPolymorphicMatcher) { + StringMatchResultListener listener1; + EXPECT_TRUE(ExplainMatchResult(PolymorphicIsEven(), 42, &listener1)); + EXPECT_EQ("% 2 == 0", listener1.str()); + + StringMatchResultListener listener2; + EXPECT_FALSE(ExplainMatchResult(Ge(42), 1.5, &listener2)); + EXPECT_EQ("", listener2.str()); +} + +TEST(ExplainMatchResultTest, WorksWithMonomorphicMatcher) { + const Matcher is_even = PolymorphicIsEven(); + StringMatchResultListener listener1; + EXPECT_TRUE(ExplainMatchResult(is_even, 42, &listener1)); + EXPECT_EQ("% 2 == 0", listener1.str()); + + const Matcher is_zero = Eq(0); + StringMatchResultListener listener2; + EXPECT_FALSE(ExplainMatchResult(is_zero, 1.5, &listener2)); + EXPECT_EQ("", listener2.str()); +} + +MATCHER(ConstructNoArg, "") { return true; } +MATCHER_P(Construct1Arg, arg1, "") { return true; } +MATCHER_P2(Construct2Args, arg1, arg2, "") { return true; } + +TEST(MatcherConstruct, ExplicitVsImplicit) { + { + // No arg constructor can be constructed with empty brace. + ConstructNoArgMatcher m = {}; + (void)m; + // And with no args + ConstructNoArgMatcher m2; + (void)m2; + } + { + // The one arg constructor has an explicit constructor. + // This is to prevent the implicit conversion. 
+ using M = Construct1ArgMatcherP; + EXPECT_TRUE((std::is_constructible::value)); + EXPECT_FALSE((std::is_convertible::value)); + } + { + // Multiple arg matchers can be constructed with an implicit construction. + Construct2ArgsMatcherP2 m = {1, 2.2}; + (void)m; + } +} + +MATCHER_P(Really, inner_matcher, "") { + return ExplainMatchResult(inner_matcher, arg, result_listener); +} + +TEST(ExplainMatchResultTest, WorksInsideMATCHER) { + EXPECT_THAT(0, Really(Eq(0))); +} + +TEST(DescribeMatcherTest, WorksWithValue) { + EXPECT_EQ("is equal to 42", DescribeMatcher(42)); + EXPECT_EQ("isn't equal to 42", DescribeMatcher(42, true)); +} + +TEST(DescribeMatcherTest, WorksWithMonomorphicMatcher) { + const Matcher monomorphic = Le(0); + EXPECT_EQ("is <= 0", DescribeMatcher(monomorphic)); + EXPECT_EQ("isn't <= 0", DescribeMatcher(monomorphic, true)); +} + +TEST(DescribeMatcherTest, WorksWithPolymorphicMatcher) { + EXPECT_EQ("is even", DescribeMatcher(PolymorphicIsEven())); + EXPECT_EQ("is odd", DescribeMatcher(PolymorphicIsEven(), true)); +} + +MATCHER_P(FieldIIs, inner_matcher, "") { + return ExplainMatchResult(inner_matcher, arg.i, result_listener); +} + +#if GTEST_HAS_RTTI +TEST(WhenDynamicCastToTest, SameType) { + Derived derived; + derived.i = 4; + + // Right type. A pointer is passed down. + Base* as_base_ptr = &derived; + EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(Not(IsNull()))); + EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(Pointee(FieldIIs(4)))); + EXPECT_THAT(as_base_ptr, + Not(WhenDynamicCastTo(Pointee(FieldIIs(5))))); +} + +TEST(WhenDynamicCastToTest, WrongTypes) { + Base base; + Derived derived; + OtherDerived other_derived; + + // Wrong types. NULL is passed. 
+ EXPECT_THAT(&base, Not(WhenDynamicCastTo(Pointee(_)))); + EXPECT_THAT(&base, WhenDynamicCastTo(IsNull())); + Base* as_base_ptr = &derived; + EXPECT_THAT(as_base_ptr, Not(WhenDynamicCastTo(Pointee(_)))); + EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(IsNull())); + as_base_ptr = &other_derived; + EXPECT_THAT(as_base_ptr, Not(WhenDynamicCastTo(Pointee(_)))); + EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(IsNull())); +} + +TEST(WhenDynamicCastToTest, AlreadyNull) { + // Already NULL. + Base* as_base_ptr = nullptr; + EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(IsNull())); +} + +struct AmbiguousCastTypes { + class VirtualDerived : public virtual Base {}; + class DerivedSub1 : public VirtualDerived {}; + class DerivedSub2 : public VirtualDerived {}; + class ManyDerivedInHierarchy : public DerivedSub1, public DerivedSub2 {}; +}; + +TEST(WhenDynamicCastToTest, AmbiguousCast) { + AmbiguousCastTypes::DerivedSub1 sub1; + AmbiguousCastTypes::ManyDerivedInHierarchy many_derived; + // Multiply derived from Base. dynamic_cast<> returns NULL. + Base* as_base_ptr = + static_cast(&many_derived); + EXPECT_THAT(as_base_ptr, + WhenDynamicCastTo(IsNull())); + as_base_ptr = &sub1; + EXPECT_THAT( + as_base_ptr, + WhenDynamicCastTo(Not(IsNull()))); +} + +TEST(WhenDynamicCastToTest, Describe) { + Matcher matcher = WhenDynamicCastTo(Pointee(_)); + const std::string prefix = + "when dynamic_cast to " + internal::GetTypeName() + ", "; + EXPECT_EQ(prefix + "points to a value that is anything", Describe(matcher)); + EXPECT_EQ(prefix + "does not point to a value that is anything", + DescribeNegation(matcher)); +} + +TEST(WhenDynamicCastToTest, Explain) { + Matcher matcher = WhenDynamicCastTo(Pointee(_)); + Base* null = nullptr; + EXPECT_THAT(Explain(matcher, null), HasSubstr("NULL")); + Derived derived; + EXPECT_TRUE(matcher.Matches(&derived)); + EXPECT_THAT(Explain(matcher, &derived), HasSubstr("which points to ")); + + // With references, the matcher itself can fail. Test for that one. 
+ Matcher ref_matcher = WhenDynamicCastTo(_); + EXPECT_THAT(Explain(ref_matcher, derived), + HasSubstr("which cannot be dynamic_cast")); +} + +TEST(WhenDynamicCastToTest, GoodReference) { + Derived derived; + derived.i = 4; + Base& as_base_ref = derived; + EXPECT_THAT(as_base_ref, WhenDynamicCastTo(FieldIIs(4))); + EXPECT_THAT(as_base_ref, WhenDynamicCastTo(Not(FieldIIs(5)))); +} + +TEST(WhenDynamicCastToTest, BadReference) { + Derived derived; + Base& as_base_ref = derived; + EXPECT_THAT(as_base_ref, Not(WhenDynamicCastTo(_))); +} +#endif // GTEST_HAS_RTTI + +class DivisibleByImpl { + public: + explicit DivisibleByImpl(int a_divider) : divider_(a_divider) {} + + // For testing using ExplainMatchResultTo() with polymorphic matchers. + template + bool MatchAndExplain(const T& n, MatchResultListener* listener) const { + *listener << "which is " << (n % divider_) << " modulo " << divider_; + return (n % divider_) == 0; + } + + void DescribeTo(ostream* os) const { *os << "is divisible by " << divider_; } + + void DescribeNegationTo(ostream* os) const { + *os << "is not divisible by " << divider_; + } + + void set_divider(int a_divider) { divider_ = a_divider; } + int divider() const { return divider_; } + + private: + int divider_; +}; + +PolymorphicMatcher DivisibleBy(int n) { + return MakePolymorphicMatcher(DivisibleByImpl(n)); +} + +// Tests that when AllOf() fails, only the first failing matcher is +// asked to explain why. +TEST(ExplainMatchResultTest, AllOf_False_False) { + const Matcher m = AllOf(DivisibleBy(4), DivisibleBy(3)); + EXPECT_EQ("which is 1 modulo 4", Explain(m, 5)); +} + +// Tests that when AllOf() fails, only the first failing matcher is +// asked to explain why. +TEST(ExplainMatchResultTest, AllOf_False_True) { + const Matcher m = AllOf(DivisibleBy(4), DivisibleBy(3)); + EXPECT_EQ("which is 2 modulo 4", Explain(m, 6)); +} + +// Tests that when AllOf() fails, only the first failing matcher is +// asked to explain why. 
+TEST(ExplainMatchResultTest, AllOf_True_False) { + const Matcher m = AllOf(Ge(1), DivisibleBy(3)); + EXPECT_EQ("which is 2 modulo 3", Explain(m, 5)); +} + +// Tests that when AllOf() succeeds, all matchers are asked to explain +// why. +TEST(ExplainMatchResultTest, AllOf_True_True) { + const Matcher m = AllOf(DivisibleBy(2), DivisibleBy(3)); + EXPECT_EQ("which is 0 modulo 2, and which is 0 modulo 3", Explain(m, 6)); +} + +TEST(ExplainMatchResultTest, AllOf_True_True_2) { + const Matcher m = AllOf(Ge(2), Le(3)); + EXPECT_EQ("", Explain(m, 2)); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(ExplainmatcherResultTest); + +TEST_P(ExplainmatcherResultTestP, MonomorphicMatcher) { + const Matcher m = GreaterThan(5); + EXPECT_EQ("which is 1 more than 5", Explain(m, 6)); +} + +// Tests PolymorphicMatcher::mutable_impl(). +TEST(PolymorphicMatcherTest, CanAccessMutableImpl) { + PolymorphicMatcher m(DivisibleByImpl(42)); + DivisibleByImpl& impl = m.mutable_impl(); + EXPECT_EQ(42, impl.divider()); + + impl.set_divider(0); + EXPECT_EQ(0, m.mutable_impl().divider()); +} + +// Tests PolymorphicMatcher::impl(). +TEST(PolymorphicMatcherTest, CanAccessImpl) { + const PolymorphicMatcher m(DivisibleByImpl(42)); + const DivisibleByImpl& impl = m.impl(); + EXPECT_EQ(42, impl.divider()); +} + +} // namespace +} // namespace gmock_matchers_test +} // namespace testing + +#ifdef _MSC_VER +#pragma warning(pop) +#endif diff --git a/ext/googletest/googlemock/test/gmock-matchers-containers_test.cc b/ext/googletest/googlemock/test/gmock-matchers-containers_test.cc new file mode 100644 index 0000000000..f50159f802 --- /dev/null +++ b/ext/googletest/googlemock/test/gmock-matchers-containers_test.cc @@ -0,0 +1,3129 @@ +// Copyright 2007, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file tests some commonly used argument matchers. 
+ +// Silence warning C4244: 'initializing': conversion from 'int' to 'short', +// possible loss of data and C4100, unreferenced local parameter +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4244) +#pragma warning(disable : 4100) +#endif + +#include "test/gmock-matchers_test.h" + +namespace testing { +namespace gmock_matchers_test { +namespace { + +std::vector> MakeUniquePtrs(const std::vector& ints) { + std::vector> pointers; + for (int i : ints) pointers.emplace_back(new int(i)); + return pointers; +} + +std::string OfType(const std::string& type_name) { +#if GTEST_HAS_RTTI + return IsReadableTypeName(type_name) ? " (of type " + type_name + ")" : ""; +#else + return ""; +#endif +} + +TEST(ContainsTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(Contains(Pointee(2)))); + helper.Call(MakeUniquePtrs({1, 2})); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(ElementsAreTest); + +// Tests the variadic version of the ElementsAreMatcher +TEST(ElementsAreTest, HugeMatcher) { + vector test_vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + + EXPECT_THAT(test_vector, + ElementsAre(Eq(1), Eq(2), Lt(13), Eq(4), Eq(5), Eq(6), Eq(7), + Eq(8), Eq(9), Eq(10), Gt(1), Eq(12))); +} + +// Tests the variadic version of the UnorderedElementsAreMatcher +TEST(ElementsAreTest, HugeMatcherStr) { + vector test_vector{ + "literal_string", "", "", "", "", "", "", "", "", "", "", ""}; + + EXPECT_THAT(test_vector, UnorderedElementsAre("literal_string", _, _, _, _, _, + _, _, _, _, _, _)); +} + +// Tests the variadic version of the UnorderedElementsAreMatcher +TEST(ElementsAreTest, HugeMatcherUnordered) { + vector test_vector{2, 1, 8, 5, 4, 6, 7, 3, 9, 12, 11, 10}; + + EXPECT_THAT(test_vector, UnorderedElementsAre( + Eq(2), Eq(1), Gt(7), Eq(5), Eq(4), Eq(6), Eq(7), + Eq(3), Eq(9), Eq(12), Eq(11), Ne(122))); +} + +// Tests that ASSERT_THAT() and EXPECT_THAT() work when the value +// matches the matcher. 
+TEST(MatcherAssertionTest, WorksWhenMatcherIsSatisfied) { + ASSERT_THAT(5, Ge(2)) << "This should succeed."; + ASSERT_THAT("Foo", EndsWith("oo")); + EXPECT_THAT(2, AllOf(Le(7), Ge(0))) << "This should succeed too."; + EXPECT_THAT("Hello", StartsWith("Hell")); +} + +// Tests that ASSERT_THAT() and EXPECT_THAT() work when the value +// doesn't match the matcher. +TEST(MatcherAssertionTest, WorksWhenMatcherIsNotSatisfied) { + // 'n' must be static as it is used in an EXPECT_FATAL_FAILURE(), + // which cannot reference auto variables. + static unsigned short n; // NOLINT + n = 5; + + EXPECT_FATAL_FAILURE(ASSERT_THAT(n, Gt(10)), + "Value of: n\n" + "Expected: is > 10\n" + " Actual: 5" + + OfType("unsigned short")); + n = 0; + EXPECT_NONFATAL_FAILURE(EXPECT_THAT(n, AllOf(Le(7), Ge(5))), + "Value of: n\n" + "Expected: (is <= 7) and (is >= 5)\n" + " Actual: 0" + + OfType("unsigned short")); +} + +// Tests that ASSERT_THAT() and EXPECT_THAT() work when the argument +// has a reference type. +TEST(MatcherAssertionTest, WorksForByRefArguments) { + // We use a static variable here as EXPECT_FATAL_FAILURE() cannot + // reference auto variables. + static int n; + n = 0; + EXPECT_THAT(n, AllOf(Le(7), Ref(n))); + EXPECT_FATAL_FAILURE(ASSERT_THAT(n, Not(Ref(n))), + "Value of: n\n" + "Expected: does not reference the variable @"); + // Tests the "Actual" part. + EXPECT_FATAL_FAILURE(ASSERT_THAT(n, Not(Ref(n))), + "Actual: 0" + OfType("int") + ", which is located @"); +} + +// Tests that ASSERT_THAT() and EXPECT_THAT() work when the matcher is +// monomorphic. 
+TEST(MatcherAssertionTest, WorksForMonomorphicMatcher) { + Matcher starts_with_he = StartsWith("he"); + ASSERT_THAT("hello", starts_with_he); + + Matcher ends_with_ok = EndsWith("ok"); + ASSERT_THAT("book", ends_with_ok); + const std::string bad = "bad"; + EXPECT_NONFATAL_FAILURE(EXPECT_THAT(bad, ends_with_ok), + "Value of: bad\n" + "Expected: ends with \"ok\"\n" + " Actual: \"bad\""); + Matcher is_greater_than_5 = Gt(5); + EXPECT_NONFATAL_FAILURE(EXPECT_THAT(5, is_greater_than_5), + "Value of: 5\n" + "Expected: is > 5\n" + " Actual: 5" + + OfType("int")); +} + +TEST(PointeeTest, RawPointer) { + const Matcher m = Pointee(Ge(0)); + + int n = 1; + EXPECT_TRUE(m.Matches(&n)); + n = -1; + EXPECT_FALSE(m.Matches(&n)); + EXPECT_FALSE(m.Matches(nullptr)); +} + +TEST(PointeeTest, RawPointerToConst) { + const Matcher m = Pointee(Ge(0)); + + double x = 1; + EXPECT_TRUE(m.Matches(&x)); + x = -1; + EXPECT_FALSE(m.Matches(&x)); + EXPECT_FALSE(m.Matches(nullptr)); +} + +TEST(PointeeTest, ReferenceToConstRawPointer) { + const Matcher m = Pointee(Ge(0)); + + int n = 1; + EXPECT_TRUE(m.Matches(&n)); + n = -1; + EXPECT_FALSE(m.Matches(&n)); + EXPECT_FALSE(m.Matches(nullptr)); +} + +TEST(PointeeTest, ReferenceToNonConstRawPointer) { + const Matcher m = Pointee(Ge(0)); + + double x = 1.0; + double* p = &x; + EXPECT_TRUE(m.Matches(p)); + x = -1; + EXPECT_FALSE(m.Matches(p)); + p = nullptr; + EXPECT_FALSE(m.Matches(p)); +} + +TEST(PointeeTest, SmartPointer) { + const Matcher> m = Pointee(Ge(0)); + + std::unique_ptr n(new int(1)); + EXPECT_TRUE(m.Matches(n)); +} + +TEST(PointeeTest, SmartPointerToConst) { + const Matcher> m = Pointee(Ge(0)); + + // There's no implicit conversion from unique_ptr to const + // unique_ptr, so we must pass a unique_ptr into the + // matcher. 
+ std::unique_ptr n(new int(1)); + EXPECT_TRUE(m.Matches(n)); +} + +TEST(PointerTest, RawPointer) { + int n = 1; + const Matcher m = Pointer(Eq(&n)); + + EXPECT_TRUE(m.Matches(&n)); + + int* p = nullptr; + EXPECT_FALSE(m.Matches(p)); + EXPECT_FALSE(m.Matches(nullptr)); +} + +TEST(PointerTest, RawPointerToConst) { + int n = 1; + const Matcher m = Pointer(Eq(&n)); + + EXPECT_TRUE(m.Matches(&n)); + + int* p = nullptr; + EXPECT_FALSE(m.Matches(p)); + EXPECT_FALSE(m.Matches(nullptr)); +} + +TEST(PointerTest, SmartPointer) { + std::unique_ptr n(new int(10)); + int* raw_n = n.get(); + const Matcher> m = Pointer(Eq(raw_n)); + + EXPECT_TRUE(m.Matches(n)); +} + +TEST(PointerTest, SmartPointerToConst) { + std::unique_ptr n(new int(10)); + const int* raw_n = n.get(); + const Matcher> m = Pointer(Eq(raw_n)); + + // There's no implicit conversion from unique_ptr to const + // unique_ptr, so we must pass a unique_ptr into the + // matcher. + std::unique_ptr p(new int(10)); + EXPECT_FALSE(m.Matches(p)); +} + +// Minimal const-propagating pointer. +template +class ConstPropagatingPtr { + public: + typedef T element_type; + + ConstPropagatingPtr() : val_() {} + explicit ConstPropagatingPtr(T* t) : val_(t) {} + ConstPropagatingPtr(const ConstPropagatingPtr& other) : val_(other.val_) {} + + T* get() { return val_; } + T& operator*() { return *val_; } + // Most smart pointers return non-const T* and T& from the next methods. 
+ const T* get() const { return val_; } + const T& operator*() const { return *val_; } + + private: + T* val_; +}; + +INSTANTIATE_GTEST_MATCHER_TEST_P(PointeeTest); + +TEST(PointeeTest, WorksWithConstPropagatingPointers) { + const Matcher> m = Pointee(Lt(5)); + int three = 3; + const ConstPropagatingPtr co(&three); + ConstPropagatingPtr o(&three); + EXPECT_TRUE(m.Matches(o)); + EXPECT_TRUE(m.Matches(co)); + *o = 6; + EXPECT_FALSE(m.Matches(o)); + EXPECT_FALSE(m.Matches(ConstPropagatingPtr())); +} + +TEST(PointeeTest, NeverMatchesNull) { + const Matcher m = Pointee(_); + EXPECT_FALSE(m.Matches(nullptr)); +} + +// Tests that we can write Pointee(value) instead of Pointee(Eq(value)). +TEST(PointeeTest, MatchesAgainstAValue) { + const Matcher m = Pointee(5); + + int n = 5; + EXPECT_TRUE(m.Matches(&n)); + n = -1; + EXPECT_FALSE(m.Matches(&n)); + EXPECT_FALSE(m.Matches(nullptr)); +} + +TEST(PointeeTest, CanDescribeSelf) { + const Matcher m = Pointee(Gt(3)); + EXPECT_EQ("points to a value that is > 3", Describe(m)); + EXPECT_EQ("does not point to a value that is > 3", DescribeNegation(m)); +} + +TEST_P(PointeeTestP, CanExplainMatchResult) { + const Matcher m = Pointee(StartsWith("Hi")); + + EXPECT_EQ("", Explain(m, static_cast(nullptr))); + + const Matcher m2 = Pointee(GreaterThan(1)); // NOLINT + long n = 3; // NOLINT + EXPECT_EQ("which points to 3" + OfType("long") + ", which is 2 more than 1", + Explain(m2, &n)); +} + +TEST(PointeeTest, AlwaysExplainsPointee) { + const Matcher m = Pointee(0); + int n = 42; + EXPECT_EQ("which points to 42" + OfType("int"), Explain(m, &n)); +} + +// An uncopyable class. 
+class Uncopyable { + public: + Uncopyable() : value_(-1) {} + explicit Uncopyable(int a_value) : value_(a_value) {} + + int value() const { return value_; } + void set_value(int i) { value_ = i; } + + private: + int value_; + Uncopyable(const Uncopyable&) = delete; + Uncopyable& operator=(const Uncopyable&) = delete; +}; + +// Returns true if and only if x.value() is positive. +bool ValueIsPositive(const Uncopyable& x) { return x.value() > 0; } + +MATCHER_P(UncopyableIs, inner_matcher, "") { + return ExplainMatchResult(inner_matcher, arg.value(), result_listener); +} + +// A user-defined struct for testing Field(). +struct AStruct { + AStruct() : x(0), y(1.0), z(5), p(nullptr) {} + AStruct(const AStruct& rhs) + : x(rhs.x), y(rhs.y), z(rhs.z.value()), p(rhs.p) {} + + int x; // A non-const field. + const double y; // A const field. + Uncopyable z; // An uncopyable field. + const char* p; // A pointer field. +}; + +// A derived struct for testing Field(). +struct DerivedStruct : public AStruct { + char ch; +}; + +INSTANTIATE_GTEST_MATCHER_TEST_P(FieldTest); + +// Tests that Field(&Foo::field, ...) works when field is non-const. +TEST(FieldTest, WorksForNonConstField) { + Matcher m = Field(&AStruct::x, Ge(0)); + Matcher m_with_name = Field("x", &AStruct::x, Ge(0)); + + AStruct a; + EXPECT_TRUE(m.Matches(a)); + EXPECT_TRUE(m_with_name.Matches(a)); + a.x = -1; + EXPECT_FALSE(m.Matches(a)); + EXPECT_FALSE(m_with_name.Matches(a)); +} + +// Tests that Field(&Foo::field, ...) works when field is const. +TEST(FieldTest, WorksForConstField) { + AStruct a; + + Matcher m = Field(&AStruct::y, Ge(0.0)); + Matcher m_with_name = Field("y", &AStruct::y, Ge(0.0)); + EXPECT_TRUE(m.Matches(a)); + EXPECT_TRUE(m_with_name.Matches(a)); + m = Field(&AStruct::y, Le(0.0)); + m_with_name = Field("y", &AStruct::y, Le(0.0)); + EXPECT_FALSE(m.Matches(a)); + EXPECT_FALSE(m_with_name.Matches(a)); +} + +// Tests that Field(&Foo::field, ...) works when field is not copyable. 
+TEST(FieldTest, WorksForUncopyableField) { + AStruct a; + + Matcher m = Field(&AStruct::z, Truly(ValueIsPositive)); + EXPECT_TRUE(m.Matches(a)); + m = Field(&AStruct::z, Not(Truly(ValueIsPositive))); + EXPECT_FALSE(m.Matches(a)); +} + +// Tests that Field(&Foo::field, ...) works when field is a pointer. +TEST(FieldTest, WorksForPointerField) { + // Matching against NULL. + Matcher m = Field(&AStruct::p, static_cast(nullptr)); + AStruct a; + EXPECT_TRUE(m.Matches(a)); + a.p = "hi"; + EXPECT_FALSE(m.Matches(a)); + + // Matching a pointer that is not NULL. + m = Field(&AStruct::p, StartsWith("hi")); + a.p = "hill"; + EXPECT_TRUE(m.Matches(a)); + a.p = "hole"; + EXPECT_FALSE(m.Matches(a)); +} + +// Tests that Field() works when the object is passed by reference. +TEST(FieldTest, WorksForByRefArgument) { + Matcher m = Field(&AStruct::x, Ge(0)); + + AStruct a; + EXPECT_TRUE(m.Matches(a)); + a.x = -1; + EXPECT_FALSE(m.Matches(a)); +} + +// Tests that Field(&Foo::field, ...) works when the argument's type +// is a sub-type of Foo. +TEST(FieldTest, WorksForArgumentOfSubType) { + // Note that the matcher expects DerivedStruct but we say AStruct + // inside Field(). + Matcher m = Field(&AStruct::x, Ge(0)); + + DerivedStruct d; + EXPECT_TRUE(m.Matches(d)); + d.x = -1; + EXPECT_FALSE(m.Matches(d)); +} + +// Tests that Field(&Foo::field, m) works when field's type and m's +// argument type are compatible but not the same. +TEST(FieldTest, WorksForCompatibleMatcherType) { + // The field is an int, but the inner matcher expects a signed char. + Matcher m = Field(&AStruct::x, Matcher(Ge(0))); + + AStruct a; + EXPECT_TRUE(m.Matches(a)); + a.x = -1; + EXPECT_FALSE(m.Matches(a)); +} + +// Tests that Field() can describe itself. 
+TEST(FieldTest, CanDescribeSelf) { + Matcher m = Field(&AStruct::x, Ge(0)); + + EXPECT_EQ("is an object whose given field is >= 0", Describe(m)); + EXPECT_EQ("is an object whose given field isn't >= 0", DescribeNegation(m)); +} + +TEST(FieldTest, CanDescribeSelfWithFieldName) { + Matcher m = Field("field_name", &AStruct::x, Ge(0)); + + EXPECT_EQ("is an object whose field `field_name` is >= 0", Describe(m)); + EXPECT_EQ("is an object whose field `field_name` isn't >= 0", + DescribeNegation(m)); +} + +// Tests that Field() can explain the match result. +TEST_P(FieldTestP, CanExplainMatchResult) { + Matcher m = Field(&AStruct::x, Ge(0)); + + AStruct a; + a.x = 1; + EXPECT_EQ("whose given field is 1" + OfType("int"), Explain(m, a)); + + m = Field(&AStruct::x, GreaterThan(0)); + EXPECT_EQ( + "whose given field is 1" + OfType("int") + ", which is 1 more than 0", + Explain(m, a)); +} + +TEST_P(FieldTestP, CanExplainMatchResultWithFieldName) { + Matcher m = Field("field_name", &AStruct::x, Ge(0)); + + AStruct a; + a.x = 1; + EXPECT_EQ("whose field `field_name` is 1" + OfType("int"), Explain(m, a)); + + m = Field("field_name", &AStruct::x, GreaterThan(0)); + EXPECT_EQ("whose field `field_name` is 1" + OfType("int") + + ", which is 1 more than 0", + Explain(m, a)); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(FieldForPointerTest); + +// Tests that Field() works when the argument is a pointer to const. +TEST(FieldForPointerTest, WorksForPointerToConst) { + Matcher m = Field(&AStruct::x, Ge(0)); + + AStruct a; + EXPECT_TRUE(m.Matches(&a)); + a.x = -1; + EXPECT_FALSE(m.Matches(&a)); +} + +// Tests that Field() works when the argument is a pointer to non-const. +TEST(FieldForPointerTest, WorksForPointerToNonConst) { + Matcher m = Field(&AStruct::x, Ge(0)); + + AStruct a; + EXPECT_TRUE(m.Matches(&a)); + a.x = -1; + EXPECT_FALSE(m.Matches(&a)); +} + +// Tests that Field() works when the argument is a reference to a const pointer. 
+TEST(FieldForPointerTest, WorksForReferenceToConstPointer) { + Matcher m = Field(&AStruct::x, Ge(0)); + + AStruct a; + EXPECT_TRUE(m.Matches(&a)); + a.x = -1; + EXPECT_FALSE(m.Matches(&a)); +} + +// Tests that Field() does not match the NULL pointer. +TEST(FieldForPointerTest, DoesNotMatchNull) { + Matcher m = Field(&AStruct::x, _); + EXPECT_FALSE(m.Matches(nullptr)); +} + +// Tests that Field(&Foo::field, ...) works when the argument's type +// is a sub-type of const Foo*. +TEST(FieldForPointerTest, WorksForArgumentOfSubType) { + // Note that the matcher expects DerivedStruct but we say AStruct + // inside Field(). + Matcher m = Field(&AStruct::x, Ge(0)); + + DerivedStruct d; + EXPECT_TRUE(m.Matches(&d)); + d.x = -1; + EXPECT_FALSE(m.Matches(&d)); +} + +// Tests that Field() can describe itself when used to match a pointer. +TEST(FieldForPointerTest, CanDescribeSelf) { + Matcher m = Field(&AStruct::x, Ge(0)); + + EXPECT_EQ("is an object whose given field is >= 0", Describe(m)); + EXPECT_EQ("is an object whose given field isn't >= 0", DescribeNegation(m)); +} + +TEST(FieldForPointerTest, CanDescribeSelfWithFieldName) { + Matcher m = Field("field_name", &AStruct::x, Ge(0)); + + EXPECT_EQ("is an object whose field `field_name` is >= 0", Describe(m)); + EXPECT_EQ("is an object whose field `field_name` isn't >= 0", + DescribeNegation(m)); +} + +// Tests that Field() can explain the result of matching a pointer. 
+TEST_P(FieldForPointerTestP, CanExplainMatchResult) { + Matcher m = Field(&AStruct::x, Ge(0)); + + AStruct a; + a.x = 1; + EXPECT_EQ("", Explain(m, static_cast(nullptr))); + EXPECT_EQ("which points to an object whose given field is 1" + OfType("int"), + Explain(m, &a)); + + m = Field(&AStruct::x, GreaterThan(0)); + EXPECT_EQ("which points to an object whose given field is 1" + OfType("int") + + ", which is 1 more than 0", + Explain(m, &a)); +} + +TEST_P(FieldForPointerTestP, CanExplainMatchResultWithFieldName) { + Matcher m = Field("field_name", &AStruct::x, Ge(0)); + + AStruct a; + a.x = 1; + EXPECT_EQ("", Explain(m, static_cast(nullptr))); + EXPECT_EQ( + "which points to an object whose field `field_name` is 1" + OfType("int"), + Explain(m, &a)); + + m = Field("field_name", &AStruct::x, GreaterThan(0)); + EXPECT_EQ("which points to an object whose field `field_name` is 1" + + OfType("int") + ", which is 1 more than 0", + Explain(m, &a)); +} + +// A user-defined class for testing Property(). +class AClass { + public: + AClass() : n_(0) {} + + // A getter that returns a non-reference. + int n() const { return n_; } + + void set_n(int new_n) { n_ = new_n; } + + // A getter that returns a reference to const. + const std::string& s() const { return s_; } + + const std::string& s_ref() const& { return s_; } + + void set_s(const std::string& new_s) { s_ = new_s; } + + // A getter that returns a reference to non-const. + double& x() const { return x_; } + + private: + int n_; + std::string s_; + + static double x_; +}; + +double AClass::x_ = 0.0; + +// A derived class for testing Property(). +class DerivedClass : public AClass { + public: + int k() const { return k_; } + + private: + int k_; +}; + +INSTANTIATE_GTEST_MATCHER_TEST_P(PropertyTest); + +// Tests that Property(&Foo::property, ...) works when property() +// returns a non-reference. 
+TEST(PropertyTest, WorksForNonReferenceProperty) { + Matcher m = Property(&AClass::n, Ge(0)); + Matcher m_with_name = Property("n", &AClass::n, Ge(0)); + + AClass a; + a.set_n(1); + EXPECT_TRUE(m.Matches(a)); + EXPECT_TRUE(m_with_name.Matches(a)); + + a.set_n(-1); + EXPECT_FALSE(m.Matches(a)); + EXPECT_FALSE(m_with_name.Matches(a)); +} + +// Tests that Property(&Foo::property, ...) works when property() +// returns a reference to const. +TEST(PropertyTest, WorksForReferenceToConstProperty) { + Matcher m = Property(&AClass::s, StartsWith("hi")); + Matcher m_with_name = + Property("s", &AClass::s, StartsWith("hi")); + + AClass a; + a.set_s("hill"); + EXPECT_TRUE(m.Matches(a)); + EXPECT_TRUE(m_with_name.Matches(a)); + + a.set_s("hole"); + EXPECT_FALSE(m.Matches(a)); + EXPECT_FALSE(m_with_name.Matches(a)); +} + +// Tests that Property(&Foo::property, ...) works when property() is +// ref-qualified. +TEST(PropertyTest, WorksForRefQualifiedProperty) { + Matcher m = Property(&AClass::s_ref, StartsWith("hi")); + Matcher m_with_name = + Property("s", &AClass::s_ref, StartsWith("hi")); + + AClass a; + a.set_s("hill"); + EXPECT_TRUE(m.Matches(a)); + EXPECT_TRUE(m_with_name.Matches(a)); + + a.set_s("hole"); + EXPECT_FALSE(m.Matches(a)); + EXPECT_FALSE(m_with_name.Matches(a)); +} + +// Tests that Property(&Foo::property, ...) works when property() +// returns a reference to non-const. +TEST(PropertyTest, WorksForReferenceToNonConstProperty) { + double x = 0.0; + AClass a; + + Matcher m = Property(&AClass::x, Ref(x)); + EXPECT_FALSE(m.Matches(a)); + + m = Property(&AClass::x, Not(Ref(x))); + EXPECT_TRUE(m.Matches(a)); +} + +// Tests that Property(&Foo::property, ...) works when the argument is +// passed by value. +TEST(PropertyTest, WorksForByValueArgument) { + Matcher m = Property(&AClass::s, StartsWith("hi")); + + AClass a; + a.set_s("hill"); + EXPECT_TRUE(m.Matches(a)); + + a.set_s("hole"); + EXPECT_FALSE(m.Matches(a)); +} + +// Tests that Property(&Foo::property, ...) 
works when the argument's +// type is a sub-type of Foo. +TEST(PropertyTest, WorksForArgumentOfSubType) { + // The matcher expects a DerivedClass, but inside the Property() we + // say AClass. + Matcher m = Property(&AClass::n, Ge(0)); + + DerivedClass d; + d.set_n(1); + EXPECT_TRUE(m.Matches(d)); + + d.set_n(-1); + EXPECT_FALSE(m.Matches(d)); +} + +// Tests that Property(&Foo::property, m) works when property()'s type +// and m's argument type are compatible but different. +TEST(PropertyTest, WorksForCompatibleMatcherType) { + // n() returns an int but the inner matcher expects a signed char. + Matcher m = Property(&AClass::n, Matcher(Ge(0))); + + Matcher m_with_name = + Property("n", &AClass::n, Matcher(Ge(0))); + + AClass a; + EXPECT_TRUE(m.Matches(a)); + EXPECT_TRUE(m_with_name.Matches(a)); + a.set_n(-1); + EXPECT_FALSE(m.Matches(a)); + EXPECT_FALSE(m_with_name.Matches(a)); +} + +// Tests that Property() can describe itself. +TEST(PropertyTest, CanDescribeSelf) { + Matcher m = Property(&AClass::n, Ge(0)); + + EXPECT_EQ("is an object whose given property is >= 0", Describe(m)); + EXPECT_EQ("is an object whose given property isn't >= 0", + DescribeNegation(m)); +} + +TEST(PropertyTest, CanDescribeSelfWithPropertyName) { + Matcher m = Property("fancy_name", &AClass::n, Ge(0)); + + EXPECT_EQ("is an object whose property `fancy_name` is >= 0", Describe(m)); + EXPECT_EQ("is an object whose property `fancy_name` isn't >= 0", + DescribeNegation(m)); +} + +// Tests that Property() can explain the match result. 
+TEST_P(PropertyTestP, CanExplainMatchResult) { + Matcher m = Property(&AClass::n, Ge(0)); + + AClass a; + a.set_n(1); + EXPECT_EQ("whose given property is 1" + OfType("int"), Explain(m, a)); + + m = Property(&AClass::n, GreaterThan(0)); + EXPECT_EQ( + "whose given property is 1" + OfType("int") + ", which is 1 more than 0", + Explain(m, a)); +} + +TEST_P(PropertyTestP, CanExplainMatchResultWithPropertyName) { + Matcher m = Property("fancy_name", &AClass::n, Ge(0)); + + AClass a; + a.set_n(1); + EXPECT_EQ("whose property `fancy_name` is 1" + OfType("int"), Explain(m, a)); + + m = Property("fancy_name", &AClass::n, GreaterThan(0)); + EXPECT_EQ("whose property `fancy_name` is 1" + OfType("int") + + ", which is 1 more than 0", + Explain(m, a)); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(PropertyForPointerTest); + +// Tests that Property() works when the argument is a pointer to const. +TEST(PropertyForPointerTest, WorksForPointerToConst) { + Matcher m = Property(&AClass::n, Ge(0)); + + AClass a; + a.set_n(1); + EXPECT_TRUE(m.Matches(&a)); + + a.set_n(-1); + EXPECT_FALSE(m.Matches(&a)); +} + +// Tests that Property() works when the argument is a pointer to non-const. +TEST(PropertyForPointerTest, WorksForPointerToNonConst) { + Matcher m = Property(&AClass::s, StartsWith("hi")); + + AClass a; + a.set_s("hill"); + EXPECT_TRUE(m.Matches(&a)); + + a.set_s("hole"); + EXPECT_FALSE(m.Matches(&a)); +} + +// Tests that Property() works when the argument is a reference to a +// const pointer. +TEST(PropertyForPointerTest, WorksForReferenceToConstPointer) { + Matcher m = Property(&AClass::s, StartsWith("hi")); + + AClass a; + a.set_s("hill"); + EXPECT_TRUE(m.Matches(&a)); + + a.set_s("hole"); + EXPECT_FALSE(m.Matches(&a)); +} + +// Tests that Property() does not match the NULL pointer. +TEST(PropertyForPointerTest, WorksForReferenceToNonConstProperty) { + Matcher m = Property(&AClass::x, _); + EXPECT_FALSE(m.Matches(nullptr)); +} + +// Tests that Property(&Foo::property, ...) 
works when the argument's +// type is a sub-type of const Foo*. +TEST(PropertyForPointerTest, WorksForArgumentOfSubType) { + // The matcher expects a DerivedClass, but inside the Property() we + // say AClass. + Matcher m = Property(&AClass::n, Ge(0)); + + DerivedClass d; + d.set_n(1); + EXPECT_TRUE(m.Matches(&d)); + + d.set_n(-1); + EXPECT_FALSE(m.Matches(&d)); +} + +// Tests that Property() can describe itself when used to match a pointer. +TEST(PropertyForPointerTest, CanDescribeSelf) { + Matcher m = Property(&AClass::n, Ge(0)); + + EXPECT_EQ("is an object whose given property is >= 0", Describe(m)); + EXPECT_EQ("is an object whose given property isn't >= 0", + DescribeNegation(m)); +} + +TEST(PropertyForPointerTest, CanDescribeSelfWithPropertyDescription) { + Matcher m = Property("fancy_name", &AClass::n, Ge(0)); + + EXPECT_EQ("is an object whose property `fancy_name` is >= 0", Describe(m)); + EXPECT_EQ("is an object whose property `fancy_name` isn't >= 0", + DescribeNegation(m)); +} + +// Tests that Property() can explain the result of matching a pointer. 
+TEST_P(PropertyForPointerTestP, CanExplainMatchResult) { + Matcher m = Property(&AClass::n, Ge(0)); + + AClass a; + a.set_n(1); + EXPECT_EQ("", Explain(m, static_cast(nullptr))); + EXPECT_EQ( + "which points to an object whose given property is 1" + OfType("int"), + Explain(m, &a)); + + m = Property(&AClass::n, GreaterThan(0)); + EXPECT_EQ("which points to an object whose given property is 1" + + OfType("int") + ", which is 1 more than 0", + Explain(m, &a)); +} + +TEST_P(PropertyForPointerTestP, CanExplainMatchResultWithPropertyName) { + Matcher m = Property("fancy_name", &AClass::n, Ge(0)); + + AClass a; + a.set_n(1); + EXPECT_EQ("", Explain(m, static_cast(nullptr))); + EXPECT_EQ("which points to an object whose property `fancy_name` is 1" + + OfType("int"), + Explain(m, &a)); + + m = Property("fancy_name", &AClass::n, GreaterThan(0)); + EXPECT_EQ("which points to an object whose property `fancy_name` is 1" + + OfType("int") + ", which is 1 more than 0", + Explain(m, &a)); +} + +// Tests ResultOf. + +// Tests that ResultOf(f, ...) compiles and works as expected when f is a +// function pointer. +std::string IntToStringFunction(int input) { + return input == 1 ? "foo" : "bar"; +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(ResultOfTest); + +TEST(ResultOfTest, WorksForFunctionPointers) { + Matcher matcher = ResultOf(&IntToStringFunction, Eq(std::string("foo"))); + + EXPECT_TRUE(matcher.Matches(1)); + EXPECT_FALSE(matcher.Matches(2)); +} + +// Tests that ResultOf() can describe itself. +TEST(ResultOfTest, CanDescribeItself) { + Matcher matcher = ResultOf(&IntToStringFunction, StrEq("foo")); + + EXPECT_EQ( + "is mapped by the given callable to a value that " + "is equal to \"foo\"", + Describe(matcher)); + EXPECT_EQ( + "is mapped by the given callable to a value that " + "isn't equal to \"foo\"", + DescribeNegation(matcher)); +} + +// Tests that ResultOf() can describe itself when provided a result description. 
+TEST(ResultOfTest, CanDescribeItselfWithResultDescription) { + Matcher matcher = + ResultOf("string conversion", &IntToStringFunction, StrEq("foo")); + + EXPECT_EQ("whose string conversion is equal to \"foo\"", Describe(matcher)); + EXPECT_EQ("whose string conversion isn't equal to \"foo\"", + DescribeNegation(matcher)); +} + +// Tests that ResultOf() can explain the match result. +int IntFunction(int input) { return input == 42 ? 80 : 90; } + +TEST_P(ResultOfTestP, CanExplainMatchResult) { + Matcher matcher = ResultOf(&IntFunction, Ge(85)); + EXPECT_EQ("which is mapped by the given callable to 90" + OfType("int"), + Explain(matcher, 36)); + + matcher = ResultOf(&IntFunction, GreaterThan(85)); + EXPECT_EQ("which is mapped by the given callable to 90" + OfType("int") + + ", which is 5 more than 85", + Explain(matcher, 36)); +} + +TEST_P(ResultOfTestP, CanExplainMatchResultWithResultDescription) { + Matcher matcher = ResultOf("magic int conversion", &IntFunction, Ge(85)); + EXPECT_EQ("whose magic int conversion is 90" + OfType("int"), + Explain(matcher, 36)); + + matcher = ResultOf("magic int conversion", &IntFunction, GreaterThan(85)); + EXPECT_EQ("whose magic int conversion is 90" + OfType("int") + + ", which is 5 more than 85", + Explain(matcher, 36)); +} + +// Tests that ResultOf(f, ...) compiles and works as expected when f(x) +// returns a non-reference. +TEST(ResultOfTest, WorksForNonReferenceResults) { + Matcher matcher = ResultOf(&IntFunction, Eq(80)); + + EXPECT_TRUE(matcher.Matches(42)); + EXPECT_FALSE(matcher.Matches(36)); +} + +// Tests that ResultOf(f, ...) compiles and works as expected when f(x) +// returns a reference to non-const. 
+double& DoubleFunction(double& input) { return input; } // NOLINT + +Uncopyable& RefUncopyableFunction(Uncopyable& obj) { // NOLINT + return obj; +} + +TEST(ResultOfTest, WorksForReferenceToNonConstResults) { + double x = 3.14; + double x2 = x; + Matcher matcher = ResultOf(&DoubleFunction, Ref(x)); + + EXPECT_TRUE(matcher.Matches(x)); + EXPECT_FALSE(matcher.Matches(x2)); + + // Test that ResultOf works with uncopyable objects + Uncopyable obj(0); + Uncopyable obj2(0); + Matcher matcher2 = ResultOf(&RefUncopyableFunction, Ref(obj)); + + EXPECT_TRUE(matcher2.Matches(obj)); + EXPECT_FALSE(matcher2.Matches(obj2)); +} + +// Tests that ResultOf(f, ...) compiles and works as expected when f(x) +// returns a reference to const. +const std::string& StringFunction(const std::string& input) { return input; } + +TEST(ResultOfTest, WorksForReferenceToConstResults) { + std::string s = "foo"; + std::string s2 = s; + Matcher matcher = ResultOf(&StringFunction, Ref(s)); + + EXPECT_TRUE(matcher.Matches(s)); + EXPECT_FALSE(matcher.Matches(s2)); +} + +// Tests that ResultOf(f, m) works when f(x) and m's +// argument types are compatible but different. +TEST(ResultOfTest, WorksForCompatibleMatcherTypes) { + // IntFunction() returns int but the inner matcher expects a signed char. + Matcher matcher = ResultOf(IntFunction, Matcher(Ge(85))); + + EXPECT_TRUE(matcher.Matches(36)); + EXPECT_FALSE(matcher.Matches(42)); +} + +// Tests that the program aborts when ResultOf is passed +// a NULL function pointer. +TEST(ResultOfDeathTest, DiesOnNullFunctionPointers) { + EXPECT_DEATH_IF_SUPPORTED( + ResultOf(static_cast(nullptr), + Eq(std::string("foo"))), + "NULL function pointer is passed into ResultOf\\(\\)\\."); +} + +// Tests that ResultOf(f, ...) compiles and works as expected when f is a +// function reference. 
+TEST(ResultOfTest, WorksForFunctionReferences) { + Matcher matcher = ResultOf(IntToStringFunction, StrEq("foo")); + EXPECT_TRUE(matcher.Matches(1)); + EXPECT_FALSE(matcher.Matches(2)); +} + +// Tests that ResultOf(f, ...) compiles and works as expected when f is a +// function object. +struct Functor { + std::string operator()(int input) const { return IntToStringFunction(input); } +}; + +TEST(ResultOfTest, WorksForFunctors) { + Matcher matcher = ResultOf(Functor(), Eq(std::string("foo"))); + + EXPECT_TRUE(matcher.Matches(1)); + EXPECT_FALSE(matcher.Matches(2)); +} + +// Tests that ResultOf(f, ...) compiles and works as expected when f is a +// functor with more than one operator() defined. ResultOf() must work +// for each defined operator(). +struct PolymorphicFunctor { + typedef int result_type; + int operator()(int n) { return n; } + int operator()(const char* s) { return static_cast(strlen(s)); } + std::string operator()(int* p) { return p ? "good ptr" : "null"; } +}; + +TEST(ResultOfTest, WorksForPolymorphicFunctors) { + Matcher matcher_int = ResultOf(PolymorphicFunctor(), Ge(5)); + + EXPECT_TRUE(matcher_int.Matches(10)); + EXPECT_FALSE(matcher_int.Matches(2)); + + Matcher matcher_string = ResultOf(PolymorphicFunctor(), Ge(5)); + + EXPECT_TRUE(matcher_string.Matches("long string")); + EXPECT_FALSE(matcher_string.Matches("shrt")); +} + +TEST(ResultOfTest, WorksForPolymorphicFunctorsIgnoringResultType) { + Matcher matcher = ResultOf(PolymorphicFunctor(), "good ptr"); + + int n = 0; + EXPECT_TRUE(matcher.Matches(&n)); + EXPECT_FALSE(matcher.Matches(nullptr)); +} + +TEST(ResultOfTest, WorksForLambdas) { + Matcher matcher = ResultOf( + [](int str_len) { + return std::string(static_cast(str_len), 'x'); + }, + "xxx"); + EXPECT_TRUE(matcher.Matches(3)); + EXPECT_FALSE(matcher.Matches(1)); +} + +TEST(ResultOfTest, WorksForNonCopyableArguments) { + Matcher> matcher = ResultOf( + [](const std::unique_ptr& str_len) { + return std::string(static_cast(*str_len), 'x'); + 
}, + "xxx"); + EXPECT_TRUE(matcher.Matches(std::unique_ptr(new int(3)))); + EXPECT_FALSE(matcher.Matches(std::unique_ptr(new int(1)))); +} + +const int* ReferencingFunction(const int& n) { return &n; } + +struct ReferencingFunctor { + typedef const int* result_type; + result_type operator()(const int& n) { return &n; } +}; + +TEST(ResultOfTest, WorksForReferencingCallables) { + const int n = 1; + const int n2 = 1; + Matcher matcher2 = ResultOf(ReferencingFunction, Eq(&n)); + EXPECT_TRUE(matcher2.Matches(n)); + EXPECT_FALSE(matcher2.Matches(n2)); + + Matcher matcher3 = ResultOf(ReferencingFunctor(), Eq(&n)); + EXPECT_TRUE(matcher3.Matches(n)); + EXPECT_FALSE(matcher3.Matches(n2)); +} + +TEST(SizeIsTest, ImplementsSizeIs) { + vector container; + EXPECT_THAT(container, SizeIs(0)); + EXPECT_THAT(container, Not(SizeIs(1))); + container.push_back(0); + EXPECT_THAT(container, Not(SizeIs(0))); + EXPECT_THAT(container, SizeIs(1)); + container.push_back(0); + EXPECT_THAT(container, Not(SizeIs(0))); + EXPECT_THAT(container, SizeIs(2)); +} + +TEST(SizeIsTest, WorksWithMap) { + map container; + EXPECT_THAT(container, SizeIs(0)); + EXPECT_THAT(container, Not(SizeIs(1))); + container.insert(make_pair("foo", 1)); + EXPECT_THAT(container, Not(SizeIs(0))); + EXPECT_THAT(container, SizeIs(1)); + container.insert(make_pair("bar", 2)); + EXPECT_THAT(container, Not(SizeIs(0))); + EXPECT_THAT(container, SizeIs(2)); +} + +TEST(SizeIsTest, WorksWithReferences) { + vector container; + Matcher&> m = SizeIs(1); + EXPECT_THAT(container, Not(m)); + container.push_back(0); + EXPECT_THAT(container, m); +} + +TEST(SizeIsTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(SizeIs(3))); + helper.Call(MakeUniquePtrs({1, 2, 3})); +} + +// SizeIs should work for any type that provides a size() member function. +// For example, a size_type member type should not need to be provided. 
+struct MinimalistCustomType { + int size() const { return 1; } +}; +TEST(SizeIsTest, WorksWithMinimalistCustomType) { + MinimalistCustomType container; + EXPECT_THAT(container, SizeIs(1)); + EXPECT_THAT(container, Not(SizeIs(0))); +} + +TEST(SizeIsTest, CanDescribeSelf) { + Matcher> m = SizeIs(2); + EXPECT_EQ("size is equal to 2", Describe(m)); + EXPECT_EQ("size isn't equal to 2", DescribeNegation(m)); +} + +TEST(SizeIsTest, ExplainsResult) { + Matcher> m1 = SizeIs(2); + Matcher> m2 = SizeIs(Lt(2u)); + Matcher> m3 = SizeIs(AnyOf(0, 3)); + Matcher> m4 = SizeIs(Gt(1u)); + vector container; + EXPECT_EQ("whose size 0 doesn't match", Explain(m1, container)); + EXPECT_EQ("whose size 0 matches", Explain(m2, container)); + EXPECT_EQ("whose size 0 matches", Explain(m3, container)); + EXPECT_EQ("whose size 0 doesn't match", Explain(m4, container)); + container.push_back(0); + container.push_back(0); + EXPECT_EQ("whose size 2 matches", Explain(m1, container)); + EXPECT_EQ("whose size 2 doesn't match", Explain(m2, container)); + EXPECT_EQ("whose size 2 doesn't match", Explain(m3, container)); + EXPECT_EQ("whose size 2 matches", Explain(m4, container)); +} + +TEST(WhenSortedByTest, WorksForEmptyContainer) { + const vector numbers; + EXPECT_THAT(numbers, WhenSortedBy(less(), ElementsAre())); + EXPECT_THAT(numbers, Not(WhenSortedBy(less(), ElementsAre(1)))); +} + +TEST(WhenSortedByTest, WorksForNonEmptyContainer) { + vector numbers; + numbers.push_back(3); + numbers.push_back(1); + numbers.push_back(2); + numbers.push_back(2); + EXPECT_THAT(numbers, + WhenSortedBy(greater(), ElementsAre(3, 2, 2, 1))); + EXPECT_THAT(numbers, + Not(WhenSortedBy(greater(), ElementsAre(1, 2, 2, 3)))); +} + +TEST(WhenSortedByTest, WorksForNonVectorContainer) { + list words; + words.push_back("say"); + words.push_back("hello"); + words.push_back("world"); + EXPECT_THAT(words, WhenSortedBy(less(), + ElementsAre("hello", "say", "world"))); + EXPECT_THAT(words, Not(WhenSortedBy(less(), + 
ElementsAre("say", "hello", "world")))); +} + +TEST(WhenSortedByTest, WorksForNativeArray) { + const int numbers[] = {1, 3, 2, 4}; + const int sorted_numbers[] = {1, 2, 3, 4}; + EXPECT_THAT(numbers, WhenSortedBy(less(), ElementsAre(1, 2, 3, 4))); + EXPECT_THAT(numbers, + WhenSortedBy(less(), ElementsAreArray(sorted_numbers))); + EXPECT_THAT(numbers, Not(WhenSortedBy(less(), ElementsAre(1, 3, 2, 4)))); +} + +TEST(WhenSortedByTest, CanDescribeSelf) { + const Matcher> m = WhenSortedBy(less(), ElementsAre(1, 2)); + EXPECT_EQ( + "(when sorted) has 2 elements where\n" + "element #0 is equal to 1,\n" + "element #1 is equal to 2", + Describe(m)); + EXPECT_EQ( + "(when sorted) doesn't have 2 elements, or\n" + "element #0 isn't equal to 1, or\n" + "element #1 isn't equal to 2", + DescribeNegation(m)); +} + +TEST(WhenSortedByTest, ExplainsMatchResult) { + const int a[] = {2, 1}; + EXPECT_EQ("which is { 1, 2 } when sorted, whose element #0 doesn't match", + Explain(WhenSortedBy(less(), ElementsAre(2, 3)), a)); + EXPECT_EQ("which is { 1, 2 } when sorted", + Explain(WhenSortedBy(less(), ElementsAre(1, 2)), a)); +} + +// WhenSorted() is a simple wrapper on WhenSortedBy(). Hence we don't +// need to test it as exhaustively as we test the latter. 
+ +TEST(WhenSortedTest, WorksForEmptyContainer) { + const vector numbers; + EXPECT_THAT(numbers, WhenSorted(ElementsAre())); + EXPECT_THAT(numbers, Not(WhenSorted(ElementsAre(1)))); +} + +TEST(WhenSortedTest, WorksForNonEmptyContainer) { + list words; + words.push_back("3"); + words.push_back("1"); + words.push_back("2"); + words.push_back("2"); + EXPECT_THAT(words, WhenSorted(ElementsAre("1", "2", "2", "3"))); + EXPECT_THAT(words, Not(WhenSorted(ElementsAre("3", "1", "2", "2")))); +} + +TEST(WhenSortedTest, WorksForMapTypes) { + map word_counts; + word_counts["and"] = 1; + word_counts["the"] = 1; + word_counts["buffalo"] = 2; + EXPECT_THAT(word_counts, + WhenSorted(ElementsAre(Pair("and", 1), Pair("buffalo", 2), + Pair("the", 1)))); + EXPECT_THAT(word_counts, + Not(WhenSorted(ElementsAre(Pair("and", 1), Pair("the", 1), + Pair("buffalo", 2))))); +} + +TEST(WhenSortedTest, WorksForMultiMapTypes) { + multimap ifib; + ifib.insert(make_pair(8, 6)); + ifib.insert(make_pair(2, 3)); + ifib.insert(make_pair(1, 1)); + ifib.insert(make_pair(3, 4)); + ifib.insert(make_pair(1, 2)); + ifib.insert(make_pair(5, 5)); + EXPECT_THAT(ifib, + WhenSorted(ElementsAre(Pair(1, 1), Pair(1, 2), Pair(2, 3), + Pair(3, 4), Pair(5, 5), Pair(8, 6)))); + EXPECT_THAT(ifib, + Not(WhenSorted(ElementsAre(Pair(8, 6), Pair(2, 3), Pair(1, 1), + Pair(3, 4), Pair(1, 2), Pair(5, 5))))); +} + +TEST(WhenSortedTest, WorksForPolymorphicMatcher) { + std::deque d; + d.push_back(2); + d.push_back(1); + EXPECT_THAT(d, WhenSorted(ElementsAre(1, 2))); + EXPECT_THAT(d, Not(WhenSorted(ElementsAre(2, 1)))); +} + +TEST(WhenSortedTest, WorksForVectorConstRefMatcher) { + std::deque d; + d.push_back(2); + d.push_back(1); + Matcher&> vector_match = ElementsAre(1, 2); + EXPECT_THAT(d, WhenSorted(vector_match)); + Matcher&> not_vector_match = ElementsAre(2, 1); + EXPECT_THAT(d, Not(WhenSorted(not_vector_match))); +} + +// Deliberately bare pseudo-container. +// Offers only begin() and end() accessors, yielding InputIterator. 
+template +class Streamlike { + private: + class ConstIter; + + public: + typedef ConstIter const_iterator; + typedef T value_type; + + template + Streamlike(InIter first, InIter last) : remainder_(first, last) {} + + const_iterator begin() const { + return const_iterator(this, remainder_.begin()); + } + const_iterator end() const { return const_iterator(this, remainder_.end()); } + + private: + class ConstIter { + public: + using iterator_category = std::input_iterator_tag; + using value_type = T; + using difference_type = ptrdiff_t; + using pointer = const value_type*; + using reference = const value_type&; + + ConstIter(const Streamlike* s, typename std::list::iterator pos) + : s_(s), pos_(pos) {} + + const value_type& operator*() const { return *pos_; } + const value_type* operator->() const { return &*pos_; } + ConstIter& operator++() { + s_->remainder_.erase(pos_++); + return *this; + } + + // *iter++ is required to work (see std::istreambuf_iterator). + // (void)iter++ is also required to work. 
+ class PostIncrProxy { + public: + explicit PostIncrProxy(const value_type& value) : value_(value) {} + value_type operator*() const { return value_; } + + private: + value_type value_; + }; + PostIncrProxy operator++(int) { + PostIncrProxy proxy(**this); + ++(*this); + return proxy; + } + + friend bool operator==(const ConstIter& a, const ConstIter& b) { + return a.s_ == b.s_ && a.pos_ == b.pos_; + } + friend bool operator!=(const ConstIter& a, const ConstIter& b) { + return !(a == b); + } + + private: + const Streamlike* s_; + typename std::list::iterator pos_; + }; + + friend std::ostream& operator<<(std::ostream& os, const Streamlike& s) { + os << "["; + typedef typename std::list::const_iterator Iter; + const char* sep = ""; + for (Iter it = s.remainder_.begin(); it != s.remainder_.end(); ++it) { + os << sep << *it; + sep = ","; + } + os << "]"; + return os; + } + + mutable std::list remainder_; // modified by iteration +}; + +TEST(StreamlikeTest, Iteration) { + const int a[5] = {2, 1, 4, 5, 3}; + Streamlike s(a, a + 5); + Streamlike::const_iterator it = s.begin(); + const int* ip = a; + while (it != s.end()) { + SCOPED_TRACE(ip - a); + EXPECT_EQ(*ip++, *it++); + } +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(BeginEndDistanceIsTest); + +TEST(BeginEndDistanceIsTest, WorksWithForwardList) { + std::forward_list container; + EXPECT_THAT(container, BeginEndDistanceIs(0)); + EXPECT_THAT(container, Not(BeginEndDistanceIs(1))); + container.push_front(0); + EXPECT_THAT(container, Not(BeginEndDistanceIs(0))); + EXPECT_THAT(container, BeginEndDistanceIs(1)); + container.push_front(0); + EXPECT_THAT(container, Not(BeginEndDistanceIs(0))); + EXPECT_THAT(container, BeginEndDistanceIs(2)); +} + +TEST(BeginEndDistanceIsTest, WorksWithNonStdList) { + const int a[5] = {1, 2, 3, 4, 5}; + Streamlike s(a, a + 5); + EXPECT_THAT(s, BeginEndDistanceIs(5)); +} + +TEST(BeginEndDistanceIsTest, CanDescribeSelf) { + Matcher> m = BeginEndDistanceIs(2); + EXPECT_EQ("distance between begin() and 
end() is equal to 2", Describe(m)); + EXPECT_EQ("distance between begin() and end() isn't equal to 2", + DescribeNegation(m)); +} + +TEST(BeginEndDistanceIsTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(BeginEndDistanceIs(2))); + helper.Call(MakeUniquePtrs({1, 2})); +} + +TEST_P(BeginEndDistanceIsTestP, ExplainsResult) { + Matcher> m1 = BeginEndDistanceIs(2); + Matcher> m2 = BeginEndDistanceIs(Lt(2)); + Matcher> m3 = BeginEndDistanceIs(AnyOf(0, 3)); + Matcher> m4 = BeginEndDistanceIs(GreaterThan(1)); + vector container; + EXPECT_EQ("whose distance between begin() and end() 0 doesn't match", + Explain(m1, container)); + EXPECT_EQ("whose distance between begin() and end() 0 matches", + Explain(m2, container)); + EXPECT_EQ("whose distance between begin() and end() 0 matches", + Explain(m3, container)); + EXPECT_EQ( + "whose distance between begin() and end() 0 doesn't match, which is 1 " + "less than 1", + Explain(m4, container)); + container.push_back(0); + container.push_back(0); + EXPECT_EQ("whose distance between begin() and end() 2 matches", + Explain(m1, container)); + EXPECT_EQ("whose distance between begin() and end() 2 doesn't match", + Explain(m2, container)); + EXPECT_EQ("whose distance between begin() and end() 2 doesn't match", + Explain(m3, container)); + EXPECT_EQ( + "whose distance between begin() and end() 2 matches, which is 1 more " + "than 1", + Explain(m4, container)); +} + +TEST(WhenSortedTest, WorksForStreamlike) { + // Streamlike 'container' provides only minimal iterator support. + // Its iterators are tagged with input_iterator_tag. 
+ const int a[5] = {2, 1, 4, 5, 3}; + Streamlike s(std::begin(a), std::end(a)); + EXPECT_THAT(s, WhenSorted(ElementsAre(1, 2, 3, 4, 5))); + EXPECT_THAT(s, Not(WhenSorted(ElementsAre(2, 1, 4, 5, 3)))); +} + +TEST(WhenSortedTest, WorksForVectorConstRefMatcherOnStreamlike) { + const int a[] = {2, 1, 4, 5, 3}; + Streamlike s(std::begin(a), std::end(a)); + Matcher&> vector_match = ElementsAre(1, 2, 3, 4, 5); + EXPECT_THAT(s, WhenSorted(vector_match)); + EXPECT_THAT(s, Not(WhenSorted(ElementsAre(2, 1, 4, 5, 3)))); +} + +TEST(IsSupersetOfTest, WorksForNativeArray) { + const int subset[] = {1, 4}; + const int superset[] = {1, 2, 4}; + const int disjoint[] = {1, 0, 3}; + EXPECT_THAT(subset, IsSupersetOf(subset)); + EXPECT_THAT(subset, Not(IsSupersetOf(superset))); + EXPECT_THAT(superset, IsSupersetOf(subset)); + EXPECT_THAT(subset, Not(IsSupersetOf(disjoint))); + EXPECT_THAT(disjoint, Not(IsSupersetOf(subset))); +} + +TEST(IsSupersetOfTest, WorksWithDuplicates) { + const int not_enough[] = {1, 2}; + const int enough[] = {1, 1, 2}; + const int expected[] = {1, 1}; + EXPECT_THAT(not_enough, Not(IsSupersetOf(expected))); + EXPECT_THAT(enough, IsSupersetOf(expected)); +} + +TEST(IsSupersetOfTest, WorksForEmpty) { + vector numbers; + vector expected; + EXPECT_THAT(numbers, IsSupersetOf(expected)); + expected.push_back(1); + EXPECT_THAT(numbers, Not(IsSupersetOf(expected))); + expected.clear(); + numbers.push_back(1); + numbers.push_back(2); + EXPECT_THAT(numbers, IsSupersetOf(expected)); + expected.push_back(1); + EXPECT_THAT(numbers, IsSupersetOf(expected)); + expected.push_back(2); + EXPECT_THAT(numbers, IsSupersetOf(expected)); + expected.push_back(3); + EXPECT_THAT(numbers, Not(IsSupersetOf(expected))); +} + +TEST(IsSupersetOfTest, WorksForStreamlike) { + const int a[5] = {1, 2, 3, 4, 5}; + Streamlike s(std::begin(a), std::end(a)); + + vector expected; + expected.push_back(1); + expected.push_back(2); + expected.push_back(5); + EXPECT_THAT(s, IsSupersetOf(expected)); + + 
expected.push_back(0); + EXPECT_THAT(s, Not(IsSupersetOf(expected))); +} + +TEST(IsSupersetOfTest, TakesStlContainer) { + const int actual[] = {3, 1, 2}; + + ::std::list expected; + expected.push_back(1); + expected.push_back(3); + EXPECT_THAT(actual, IsSupersetOf(expected)); + + expected.push_back(4); + EXPECT_THAT(actual, Not(IsSupersetOf(expected))); +} + +TEST(IsSupersetOfTest, Describe) { + typedef std::vector IntVec; + IntVec expected; + expected.push_back(111); + expected.push_back(222); + expected.push_back(333); + EXPECT_THAT( + Describe(IsSupersetOf(expected)), + Eq("a surjection from elements to requirements exists such that:\n" + " - an element is equal to 111\n" + " - an element is equal to 222\n" + " - an element is equal to 333")); +} + +TEST(IsSupersetOfTest, DescribeNegation) { + typedef std::vector IntVec; + IntVec expected; + expected.push_back(111); + expected.push_back(222); + expected.push_back(333); + EXPECT_THAT( + DescribeNegation(IsSupersetOf(expected)), + Eq("no surjection from elements to requirements exists such that:\n" + " - an element is equal to 111\n" + " - an element is equal to 222\n" + " - an element is equal to 333")); +} + +TEST(IsSupersetOfTest, MatchAndExplain) { + std::vector v; + v.push_back(2); + v.push_back(3); + std::vector expected; + expected.push_back(1); + expected.push_back(2); + StringMatchResultListener listener; + ASSERT_FALSE(ExplainMatchResult(IsSupersetOf(expected), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), + Eq("where the following matchers don't match any elements:\n" + "matcher #0: is equal to 1")); + + v.push_back(1); + listener.Clear(); + ASSERT_TRUE(ExplainMatchResult(IsSupersetOf(expected), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), Eq("where:\n" + " - element #0 is matched by matcher #1,\n" + " - element #2 is matched by matcher #0")); +} + +TEST(IsSupersetOfTest, WorksForRhsInitializerList) { + const int numbers[] = {1, 3, 6, 2, 4, 5}; + 
EXPECT_THAT(numbers, IsSupersetOf({1, 2})); + EXPECT_THAT(numbers, Not(IsSupersetOf({3, 0}))); +} + +TEST(IsSupersetOfTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(IsSupersetOf({Pointee(1)}))); + helper.Call(MakeUniquePtrs({1, 2})); + EXPECT_CALL(helper, Call(Not(IsSupersetOf({Pointee(1), Pointee(2)})))); + helper.Call(MakeUniquePtrs({2})); +} + +TEST(IsSubsetOfTest, WorksForNativeArray) { + const int subset[] = {1, 4}; + const int superset[] = {1, 2, 4}; + const int disjoint[] = {1, 0, 3}; + EXPECT_THAT(subset, IsSubsetOf(subset)); + EXPECT_THAT(subset, IsSubsetOf(superset)); + EXPECT_THAT(superset, Not(IsSubsetOf(subset))); + EXPECT_THAT(subset, Not(IsSubsetOf(disjoint))); + EXPECT_THAT(disjoint, Not(IsSubsetOf(subset))); +} + +TEST(IsSubsetOfTest, WorksWithDuplicates) { + const int not_enough[] = {1, 2}; + const int enough[] = {1, 1, 2}; + const int actual[] = {1, 1}; + EXPECT_THAT(actual, Not(IsSubsetOf(not_enough))); + EXPECT_THAT(actual, IsSubsetOf(enough)); +} + +TEST(IsSubsetOfTest, WorksForEmpty) { + vector numbers; + vector expected; + EXPECT_THAT(numbers, IsSubsetOf(expected)); + expected.push_back(1); + EXPECT_THAT(numbers, IsSubsetOf(expected)); + expected.clear(); + numbers.push_back(1); + numbers.push_back(2); + EXPECT_THAT(numbers, Not(IsSubsetOf(expected))); + expected.push_back(1); + EXPECT_THAT(numbers, Not(IsSubsetOf(expected))); + expected.push_back(2); + EXPECT_THAT(numbers, IsSubsetOf(expected)); + expected.push_back(3); + EXPECT_THAT(numbers, IsSubsetOf(expected)); +} + +TEST(IsSubsetOfTest, WorksForStreamlike) { + const int a[5] = {1, 2}; + Streamlike s(std::begin(a), std::end(a)); + + vector expected; + expected.push_back(1); + EXPECT_THAT(s, Not(IsSubsetOf(expected))); + expected.push_back(2); + expected.push_back(5); + EXPECT_THAT(s, IsSubsetOf(expected)); +} + +TEST(IsSubsetOfTest, TakesStlContainer) { + const int actual[] = {3, 1, 2}; + + ::std::list expected; + expected.push_back(1); + 
expected.push_back(3); + EXPECT_THAT(actual, Not(IsSubsetOf(expected))); + + expected.push_back(2); + expected.push_back(4); + EXPECT_THAT(actual, IsSubsetOf(expected)); +} + +TEST(IsSubsetOfTest, Describe) { + typedef std::vector IntVec; + IntVec expected; + expected.push_back(111); + expected.push_back(222); + expected.push_back(333); + + EXPECT_THAT( + Describe(IsSubsetOf(expected)), + Eq("an injection from elements to requirements exists such that:\n" + " - an element is equal to 111\n" + " - an element is equal to 222\n" + " - an element is equal to 333")); +} + +TEST(IsSubsetOfTest, DescribeNegation) { + typedef std::vector IntVec; + IntVec expected; + expected.push_back(111); + expected.push_back(222); + expected.push_back(333); + EXPECT_THAT( + DescribeNegation(IsSubsetOf(expected)), + Eq("no injection from elements to requirements exists such that:\n" + " - an element is equal to 111\n" + " - an element is equal to 222\n" + " - an element is equal to 333")); +} + +TEST(IsSubsetOfTest, MatchAndExplain) { + std::vector v; + v.push_back(2); + v.push_back(3); + std::vector expected; + expected.push_back(1); + expected.push_back(2); + StringMatchResultListener listener; + ASSERT_FALSE(ExplainMatchResult(IsSubsetOf(expected), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), + Eq("where the following elements don't match any matchers:\n" + "element #1: 3")); + + expected.push_back(3); + listener.Clear(); + ASSERT_TRUE(ExplainMatchResult(IsSubsetOf(expected), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), Eq("where:\n" + " - element #0 is matched by matcher #1,\n" + " - element #1 is matched by matcher #2")); +} + +TEST(IsSubsetOfTest, WorksForRhsInitializerList) { + const int numbers[] = {1, 2, 3}; + EXPECT_THAT(numbers, IsSubsetOf({1, 2, 3, 4})); + EXPECT_THAT(numbers, Not(IsSubsetOf({1, 2}))); +} + +TEST(IsSubsetOfTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(IsSubsetOf({Pointee(1), 
Pointee(2)}))); + helper.Call(MakeUniquePtrs({1})); + EXPECT_CALL(helper, Call(Not(IsSubsetOf({Pointee(1)})))); + helper.Call(MakeUniquePtrs({2})); +} + +// Tests using ElementsAre() and ElementsAreArray() with stream-like +// "containers". + +TEST(ElemensAreStreamTest, WorksForStreamlike) { + const int a[5] = {1, 2, 3, 4, 5}; + Streamlike s(std::begin(a), std::end(a)); + EXPECT_THAT(s, ElementsAre(1, 2, 3, 4, 5)); + EXPECT_THAT(s, Not(ElementsAre(2, 1, 4, 5, 3))); +} + +TEST(ElemensAreArrayStreamTest, WorksForStreamlike) { + const int a[5] = {1, 2, 3, 4, 5}; + Streamlike s(std::begin(a), std::end(a)); + + vector expected; + expected.push_back(1); + expected.push_back(2); + expected.push_back(3); + expected.push_back(4); + expected.push_back(5); + EXPECT_THAT(s, ElementsAreArray(expected)); + + expected[3] = 0; + EXPECT_THAT(s, Not(ElementsAreArray(expected))); +} + +TEST(ElementsAreTest, WorksWithUncopyable) { + Uncopyable objs[2]; + objs[0].set_value(-3); + objs[1].set_value(1); + EXPECT_THAT(objs, ElementsAre(UncopyableIs(-3), Truly(ValueIsPositive))); +} + +TEST(ElementsAreTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(ElementsAre(Pointee(1), Pointee(2)))); + helper.Call(MakeUniquePtrs({1, 2})); + + EXPECT_CALL(helper, Call(ElementsAreArray({Pointee(3), Pointee(4)}))); + helper.Call(MakeUniquePtrs({3, 4})); +} + +TEST(ElementsAreTest, TakesStlContainer) { + const int actual[] = {3, 1, 2}; + + ::std::list expected; + expected.push_back(3); + expected.push_back(1); + expected.push_back(2); + EXPECT_THAT(actual, ElementsAreArray(expected)); + + expected.push_back(4); + EXPECT_THAT(actual, Not(ElementsAreArray(expected))); +} + +// Tests for UnorderedElementsAreArray() + +TEST(UnorderedElementsAreArrayTest, SucceedsWhenExpected) { + const int a[] = {0, 1, 2, 3, 4}; + std::vector s(std::begin(a), std::end(a)); + do { + StringMatchResultListener listener; + EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(a), s, &listener)) + 
<< listener.str(); + } while (std::next_permutation(s.begin(), s.end())); +} + +TEST(UnorderedElementsAreArrayTest, VectorBool) { + const bool a[] = {0, 1, 0, 1, 1}; + const bool b[] = {1, 0, 1, 1, 0}; + std::vector expected(std::begin(a), std::end(a)); + std::vector actual(std::begin(b), std::end(b)); + StringMatchResultListener listener; + EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(expected), actual, + &listener)) + << listener.str(); +} + +TEST(UnorderedElementsAreArrayTest, WorksForStreamlike) { + // Streamlike 'container' provides only minimal iterator support. + // Its iterators are tagged with input_iterator_tag, and it has no + // size() or empty() methods. + const int a[5] = {2, 1, 4, 5, 3}; + Streamlike s(std::begin(a), std::end(a)); + + ::std::vector expected; + expected.push_back(1); + expected.push_back(2); + expected.push_back(3); + expected.push_back(4); + expected.push_back(5); + EXPECT_THAT(s, UnorderedElementsAreArray(expected)); + + expected.push_back(6); + EXPECT_THAT(s, Not(UnorderedElementsAreArray(expected))); +} + +TEST(UnorderedElementsAreArrayTest, TakesStlContainer) { + const int actual[] = {3, 1, 2}; + + ::std::list expected; + expected.push_back(1); + expected.push_back(2); + expected.push_back(3); + EXPECT_THAT(actual, UnorderedElementsAreArray(expected)); + + expected.push_back(4); + EXPECT_THAT(actual, Not(UnorderedElementsAreArray(expected))); +} + +TEST(UnorderedElementsAreArrayTest, TakesInitializerList) { + const int a[5] = {2, 1, 4, 5, 3}; + EXPECT_THAT(a, UnorderedElementsAreArray({1, 2, 3, 4, 5})); + EXPECT_THAT(a, Not(UnorderedElementsAreArray({1, 2, 3, 4, 6}))); +} + +TEST(UnorderedElementsAreArrayTest, TakesInitializerListOfCStrings) { + const std::string a[5] = {"a", "b", "c", "d", "e"}; + EXPECT_THAT(a, UnorderedElementsAreArray({"a", "b", "c", "d", "e"})); + EXPECT_THAT(a, Not(UnorderedElementsAreArray({"a", "b", "c", "d", "ef"}))); +} + +TEST(UnorderedElementsAreArrayTest, 
TakesInitializerListOfSameTypedMatchers) { + const int a[5] = {2, 1, 4, 5, 3}; + EXPECT_THAT(a, + UnorderedElementsAreArray({Eq(1), Eq(2), Eq(3), Eq(4), Eq(5)})); + EXPECT_THAT( + a, Not(UnorderedElementsAreArray({Eq(1), Eq(2), Eq(3), Eq(4), Eq(6)}))); +} + +TEST(UnorderedElementsAreArrayTest, + TakesInitializerListOfDifferentTypedMatchers) { + const int a[5] = {2, 1, 4, 5, 3}; + // The compiler cannot infer the type of the initializer list if its + // elements have different types. We must explicitly specify the + // unified element type in this case. + EXPECT_THAT(a, UnorderedElementsAreArray>( + {Eq(1), Ne(-2), Ge(3), Le(4), Eq(5)})); + EXPECT_THAT(a, Not(UnorderedElementsAreArray>( + {Eq(1), Ne(-2), Ge(3), Le(4), Eq(6)}))); +} + +TEST(UnorderedElementsAreArrayTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, + Call(UnorderedElementsAreArray({Pointee(1), Pointee(2)}))); + helper.Call(MakeUniquePtrs({2, 1})); +} + +class UnorderedElementsAreTest : public testing::Test { + protected: + typedef std::vector IntVec; +}; + +TEST_F(UnorderedElementsAreTest, WorksWithUncopyable) { + Uncopyable objs[2]; + objs[0].set_value(-3); + objs[1].set_value(1); + EXPECT_THAT(objs, + UnorderedElementsAre(Truly(ValueIsPositive), UncopyableIs(-3))); +} + +TEST_F(UnorderedElementsAreTest, SucceedsWhenExpected) { + const int a[] = {1, 2, 3}; + std::vector s(std::begin(a), std::end(a)); + do { + StringMatchResultListener listener; + EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAre(1, 2, 3), s, &listener)) + << listener.str(); + } while (std::next_permutation(s.begin(), s.end())); +} + +TEST_F(UnorderedElementsAreTest, FailsWhenAnElementMatchesNoMatcher) { + const int a[] = {1, 2, 3}; + std::vector s(std::begin(a), std::end(a)); + std::vector> mv; + mv.push_back(1); + mv.push_back(2); + mv.push_back(2); + // The element with value '3' matches nothing: fail fast. 
+ StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAreArray(mv), s, &listener)) + << listener.str(); +} + +TEST_F(UnorderedElementsAreTest, WorksForStreamlike) { + // Streamlike 'container' provides only minimal iterator support. + // Its iterators are tagged with input_iterator_tag, and it has no + // size() or empty() methods. + const int a[5] = {2, 1, 4, 5, 3}; + Streamlike s(std::begin(a), std::end(a)); + + EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); + EXPECT_THAT(s, Not(UnorderedElementsAre(2, 2, 3, 4, 5))); +} + +TEST_F(UnorderedElementsAreTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(UnorderedElementsAre(Pointee(1), Pointee(2)))); + helper.Call(MakeUniquePtrs({2, 1})); +} + +// One naive implementation of the matcher runs in O(N!) time, which is too +// slow for many real-world inputs. This test shows that our matcher can match +// 100 inputs very quickly (a few milliseconds). An O(100!) is 10^158 +// iterations and obviously effectively incomputable. +// [ RUN ] UnorderedElementsAreTest.Performance +// [ OK ] UnorderedElementsAreTest.Performance (4 ms) +TEST_F(UnorderedElementsAreTest, Performance) { + std::vector s; + std::vector> mv; + for (int i = 0; i < 100; ++i) { + s.push_back(i); + mv.push_back(_); + } + mv[50] = Eq(0); + StringMatchResultListener listener; + EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(mv), s, &listener)) + << listener.str(); +} + +// Another variant of 'Performance' with similar expectations. 
+// [ RUN ] UnorderedElementsAreTest.PerformanceHalfStrict +// [ OK ] UnorderedElementsAreTest.PerformanceHalfStrict (4 ms) +TEST_F(UnorderedElementsAreTest, PerformanceHalfStrict) { + std::vector s; + std::vector> mv; + for (int i = 0; i < 100; ++i) { + s.push_back(i); + if (i & 1) { + mv.push_back(_); + } else { + mv.push_back(i); + } + } + StringMatchResultListener listener; + EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(mv), s, &listener)) + << listener.str(); +} + +TEST_F(UnorderedElementsAreTest, FailMessageCountWrong) { + std::vector v; + v.push_back(4); + StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2, 3), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), Eq("which has 1 element")); +} + +TEST_F(UnorderedElementsAreTest, FailMessageCountWrongZero) { + std::vector v; + StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2, 3), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), Eq("")); +} + +TEST_F(UnorderedElementsAreTest, FailMessageUnmatchedMatchers) { + std::vector v; + v.push_back(1); + v.push_back(1); + StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), + Eq("where the following matchers don't match any elements:\n" + "matcher #1: is equal to 2")); +} + +TEST_F(UnorderedElementsAreTest, FailMessageUnmatchedElements) { + std::vector v; + v.push_back(1); + v.push_back(2); + StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 1), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), + Eq("where the following elements don't match any matchers:\n" + "element #1: 2")); +} + +TEST_F(UnorderedElementsAreTest, FailMessageUnmatchedMatcherAndElement) { + std::vector v; + v.push_back(2); + v.push_back(3); + StringMatchResultListener listener; + 
EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2), v, &listener)) + << listener.str(); + EXPECT_THAT(listener.str(), + Eq("where" + " the following matchers don't match any elements:\n" + "matcher #0: is equal to 1\n" + "and" + " where" + " the following elements don't match any matchers:\n" + "element #1: 3")); +} + +// Test helper for formatting element, matcher index pairs in expectations. +static std::string EMString(int element, int matcher) { + stringstream ss; + ss << "(element #" << element << ", matcher #" << matcher << ")"; + return ss.str(); +} + +TEST_F(UnorderedElementsAreTest, FailMessageImperfectMatchOnly) { + // A situation where all elements and matchers have a match + // associated with them, but the max matching is not perfect. + std::vector v; + v.push_back("a"); + v.push_back("b"); + v.push_back("c"); + StringMatchResultListener listener; + EXPECT_FALSE(ExplainMatchResult( + UnorderedElementsAre("a", "a", AnyOf("b", "c")), v, &listener)) + << listener.str(); + + std::string prefix = + "where no permutation of the elements can satisfy all matchers, " + "and the closest match is 2 of 3 matchers with the " + "pairings:\n"; + + // We have to be a bit loose here, because there are 4 valid max matches. 
+ EXPECT_THAT( + listener.str(), + AnyOf( + prefix + "{\n " + EMString(0, 0) + ",\n " + EMString(1, 2) + "\n}", + prefix + "{\n " + EMString(0, 1) + ",\n " + EMString(1, 2) + "\n}", + prefix + "{\n " + EMString(0, 0) + ",\n " + EMString(2, 2) + "\n}", + prefix + "{\n " + EMString(0, 1) + ",\n " + EMString(2, 2) + + "\n}")); +} + +TEST_F(UnorderedElementsAreTest, Describe) { + EXPECT_THAT(Describe(UnorderedElementsAre()), Eq("is empty")); + EXPECT_THAT(Describe(UnorderedElementsAre(345)), + Eq("has 1 element and that element is equal to 345")); + EXPECT_THAT(Describe(UnorderedElementsAre(111, 222, 333)), + Eq("has 3 elements and there exists some permutation " + "of elements such that:\n" + " - element #0 is equal to 111, and\n" + " - element #1 is equal to 222, and\n" + " - element #2 is equal to 333")); +} + +TEST_F(UnorderedElementsAreTest, DescribeNegation) { + EXPECT_THAT(DescribeNegation(UnorderedElementsAre()), + Eq("isn't empty")); + EXPECT_THAT( + DescribeNegation(UnorderedElementsAre(345)), + Eq("doesn't have 1 element, or has 1 element that isn't equal to 345")); + EXPECT_THAT(DescribeNegation(UnorderedElementsAre(123, 234, 345)), + Eq("doesn't have 3 elements, or there exists no permutation " + "of elements such that:\n" + " - element #0 is equal to 123, and\n" + " - element #1 is equal to 234, and\n" + " - element #2 is equal to 345")); +} + +// Tests Each(). 
+ +INSTANTIATE_GTEST_MATCHER_TEST_P(EachTest); + +TEST_P(EachTestP, ExplainsMatchResultCorrectly) { + set a; // empty + + Matcher> m = Each(2); + EXPECT_EQ("", Explain(m, a)); + + Matcher n = Each(1); // NOLINT + + const int b[1] = {1}; + EXPECT_EQ("", Explain(n, b)); + + n = Each(3); + EXPECT_EQ("whose element #0 doesn't match", Explain(n, b)); + + a.insert(1); + a.insert(2); + a.insert(3); + m = Each(GreaterThan(0)); + EXPECT_EQ("", Explain(m, a)); + + m = Each(GreaterThan(10)); + EXPECT_EQ("whose element #0 doesn't match, which is 9 less than 10", + Explain(m, a)); +} + +TEST(EachTest, DescribesItselfCorrectly) { + Matcher> m = Each(1); + EXPECT_EQ("only contains elements that is equal to 1", Describe(m)); + + Matcher> m2 = Not(m); + EXPECT_EQ("contains some element that isn't equal to 1", Describe(m2)); +} + +TEST(EachTest, MatchesVectorWhenAllElementsMatch) { + vector some_vector; + EXPECT_THAT(some_vector, Each(1)); + some_vector.push_back(3); + EXPECT_THAT(some_vector, Not(Each(1))); + EXPECT_THAT(some_vector, Each(3)); + some_vector.push_back(1); + some_vector.push_back(2); + EXPECT_THAT(some_vector, Not(Each(3))); + EXPECT_THAT(some_vector, Each(Lt(3.5))); + + vector another_vector; + another_vector.push_back("fee"); + EXPECT_THAT(another_vector, Each(std::string("fee"))); + another_vector.push_back("fie"); + another_vector.push_back("foe"); + another_vector.push_back("fum"); + EXPECT_THAT(another_vector, Not(Each(std::string("fee")))); +} + +TEST(EachTest, MatchesMapWhenAllElementsMatch) { + map my_map; + const char* bar = "a string"; + my_map[bar] = 2; + EXPECT_THAT(my_map, Each(make_pair(bar, 2))); + + map another_map; + EXPECT_THAT(another_map, Each(make_pair(std::string("fee"), 1))); + another_map["fee"] = 1; + EXPECT_THAT(another_map, Each(make_pair(std::string("fee"), 1))); + another_map["fie"] = 2; + another_map["foe"] = 3; + another_map["fum"] = 4; + EXPECT_THAT(another_map, Not(Each(make_pair(std::string("fee"), 1)))); + EXPECT_THAT(another_map, 
Not(Each(make_pair(std::string("fum"), 1)))); + EXPECT_THAT(another_map, Each(Pair(_, Gt(0)))); +} + +TEST(EachTest, AcceptsMatcher) { + const int a[] = {1, 2, 3}; + EXPECT_THAT(a, Each(Gt(0))); + EXPECT_THAT(a, Not(Each(Gt(1)))); +} + +TEST(EachTest, WorksForNativeArrayAsTuple) { + const int a[] = {1, 2}; + const int* const pointer = a; + EXPECT_THAT(std::make_tuple(pointer, 2), Each(Gt(0))); + EXPECT_THAT(std::make_tuple(pointer, 2), Not(Each(Gt(1)))); +} + +TEST(EachTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(Each(Pointee(Gt(0))))); + helper.Call(MakeUniquePtrs({1, 2})); +} + +// For testing Pointwise(). +class IsHalfOfMatcher { + public: + template + bool MatchAndExplain(const std::tuple& a_pair, + MatchResultListener* listener) const { + if (std::get<0>(a_pair) == std::get<1>(a_pair) / 2) { + *listener << "where the second is " << std::get<1>(a_pair); + return true; + } else { + *listener << "where the second/2 is " << std::get<1>(a_pair) / 2; + return false; + } + } + + void DescribeTo(ostream* os) const { + *os << "are a pair where the first is half of the second"; + } + + void DescribeNegationTo(ostream* os) const { + *os << "are a pair where the first isn't half of the second"; + } +}; + +PolymorphicMatcher IsHalfOf() { + return MakePolymorphicMatcher(IsHalfOfMatcher()); +} + +TEST(PointwiseTest, DescribesSelf) { + vector rhs; + rhs.push_back(1); + rhs.push_back(2); + rhs.push_back(3); + const Matcher&> m = Pointwise(IsHalfOf(), rhs); + EXPECT_EQ( + "contains 3 values, where each value and its corresponding value " + "in { 1, 2, 3 } are a pair where the first is half of the second", + Describe(m)); + EXPECT_EQ( + "doesn't contain exactly 3 values, or contains a value x at some " + "index i where x and the i-th value of { 1, 2, 3 } are a pair " + "where the first isn't half of the second", + DescribeNegation(m)); +} + +TEST(PointwiseTest, MakesCopyOfRhs) { + list rhs; + rhs.push_back(2); + rhs.push_back(4); + + int lhs[] 
= {1, 2}; + const Matcher m = Pointwise(IsHalfOf(), rhs); + EXPECT_THAT(lhs, m); + + // Changing rhs now shouldn't affect m, which made a copy of rhs. + rhs.push_back(6); + EXPECT_THAT(lhs, m); +} + +TEST(PointwiseTest, WorksForLhsNativeArray) { + const int lhs[] = {1, 2, 3}; + vector rhs; + rhs.push_back(2); + rhs.push_back(4); + rhs.push_back(6); + EXPECT_THAT(lhs, Pointwise(Lt(), rhs)); + EXPECT_THAT(lhs, Not(Pointwise(Gt(), rhs))); +} + +TEST(PointwiseTest, WorksForRhsNativeArray) { + const int rhs[] = {1, 2, 3}; + vector lhs; + lhs.push_back(2); + lhs.push_back(4); + lhs.push_back(6); + EXPECT_THAT(lhs, Pointwise(Gt(), rhs)); + EXPECT_THAT(lhs, Not(Pointwise(Lt(), rhs))); +} + +// Test is effective only with sanitizers. +TEST(PointwiseTest, WorksForVectorOfBool) { + vector rhs(3, false); + rhs[1] = true; + vector lhs = rhs; + EXPECT_THAT(lhs, Pointwise(Eq(), rhs)); + rhs[0] = true; + EXPECT_THAT(lhs, Not(Pointwise(Eq(), rhs))); +} + +TEST(PointwiseTest, WorksForRhsInitializerList) { + const vector lhs{2, 4, 6}; + EXPECT_THAT(lhs, Pointwise(Gt(), {1, 2, 3})); + EXPECT_THAT(lhs, Not(Pointwise(Lt(), {3, 3, 7}))); +} + +TEST(PointwiseTest, RejectsWrongSize) { + const double lhs[2] = {1, 2}; + const int rhs[1] = {0}; + EXPECT_THAT(lhs, Not(Pointwise(Gt(), rhs))); + EXPECT_EQ("which contains 2 values", Explain(Pointwise(Gt(), rhs), lhs)); + + const int rhs2[3] = {0, 1, 2}; + EXPECT_THAT(lhs, Not(Pointwise(Gt(), rhs2))); +} + +TEST(PointwiseTest, RejectsWrongContent) { + const double lhs[3] = {1, 2, 3}; + const int rhs[3] = {2, 6, 4}; + EXPECT_THAT(lhs, Not(Pointwise(IsHalfOf(), rhs))); + EXPECT_EQ( + "where the value pair (2, 6) at index #1 don't match, " + "where the second/2 is 3", + Explain(Pointwise(IsHalfOf(), rhs), lhs)); +} + +TEST(PointwiseTest, AcceptsCorrectContent) { + const double lhs[3] = {1, 2, 3}; + const int rhs[3] = {2, 4, 6}; + EXPECT_THAT(lhs, Pointwise(IsHalfOf(), rhs)); + EXPECT_EQ("", Explain(Pointwise(IsHalfOf(), rhs), lhs)); +} + 
+TEST(PointwiseTest, AllowsMonomorphicInnerMatcher) { + const double lhs[3] = {1, 2, 3}; + const int rhs[3] = {2, 4, 6}; + const Matcher> m1 = IsHalfOf(); + EXPECT_THAT(lhs, Pointwise(m1, rhs)); + EXPECT_EQ("", Explain(Pointwise(m1, rhs), lhs)); + + // This type works as a std::tuple can be + // implicitly cast to std::tuple. + const Matcher> m2 = IsHalfOf(); + EXPECT_THAT(lhs, Pointwise(m2, rhs)); + EXPECT_EQ("", Explain(Pointwise(m2, rhs), lhs)); +} + +MATCHER(PointeeEquals, "Points to an equal value") { + return ExplainMatchResult(::testing::Pointee(::testing::get<1>(arg)), + ::testing::get<0>(arg), result_listener); +} + +TEST(PointwiseTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(Pointwise(PointeeEquals(), std::vector{1, 2}))); + helper.Call(MakeUniquePtrs({1, 2})); +} + +TEST(UnorderedPointwiseTest, DescribesSelf) { + vector rhs; + rhs.push_back(1); + rhs.push_back(2); + rhs.push_back(3); + const Matcher&> m = UnorderedPointwise(IsHalfOf(), rhs); + EXPECT_EQ( + "has 3 elements and there exists some permutation of elements such " + "that:\n" + " - element #0 and 1 are a pair where the first is half of the second, " + "and\n" + " - element #1 and 2 are a pair where the first is half of the second, " + "and\n" + " - element #2 and 3 are a pair where the first is half of the second", + Describe(m)); + EXPECT_EQ( + "doesn't have 3 elements, or there exists no permutation of elements " + "such that:\n" + " - element #0 and 1 are a pair where the first is half of the second, " + "and\n" + " - element #1 and 2 are a pair where the first is half of the second, " + "and\n" + " - element #2 and 3 are a pair where the first is half of the second", + DescribeNegation(m)); +} + +TEST(UnorderedPointwiseTest, MakesCopyOfRhs) { + list rhs; + rhs.push_back(2); + rhs.push_back(4); + + int lhs[] = {2, 1}; + const Matcher m = UnorderedPointwise(IsHalfOf(), rhs); + EXPECT_THAT(lhs, m); + + // Changing rhs now shouldn't affect m, which made a copy 
of rhs. + rhs.push_back(6); + EXPECT_THAT(lhs, m); +} + +TEST(UnorderedPointwiseTest, WorksForLhsNativeArray) { + const int lhs[] = {1, 2, 3}; + vector rhs; + rhs.push_back(4); + rhs.push_back(6); + rhs.push_back(2); + EXPECT_THAT(lhs, UnorderedPointwise(Lt(), rhs)); + EXPECT_THAT(lhs, Not(UnorderedPointwise(Gt(), rhs))); +} + +TEST(UnorderedPointwiseTest, WorksForRhsNativeArray) { + const int rhs[] = {1, 2, 3}; + vector lhs; + lhs.push_back(4); + lhs.push_back(2); + lhs.push_back(6); + EXPECT_THAT(lhs, UnorderedPointwise(Gt(), rhs)); + EXPECT_THAT(lhs, Not(UnorderedPointwise(Lt(), rhs))); +} + +TEST(UnorderedPointwiseTest, WorksForRhsInitializerList) { + const vector lhs{2, 4, 6}; + EXPECT_THAT(lhs, UnorderedPointwise(Gt(), {5, 1, 3})); + EXPECT_THAT(lhs, Not(UnorderedPointwise(Lt(), {1, 1, 7}))); +} + +TEST(UnorderedPointwiseTest, RejectsWrongSize) { + const double lhs[2] = {1, 2}; + const int rhs[1] = {0}; + EXPECT_THAT(lhs, Not(UnorderedPointwise(Gt(), rhs))); + EXPECT_EQ("which has 2 elements", + Explain(UnorderedPointwise(Gt(), rhs), lhs)); + + const int rhs2[3] = {0, 1, 2}; + EXPECT_THAT(lhs, Not(UnorderedPointwise(Gt(), rhs2))); +} + +TEST(UnorderedPointwiseTest, RejectsWrongContent) { + const double lhs[3] = {1, 2, 3}; + const int rhs[3] = {2, 6, 6}; + EXPECT_THAT(lhs, Not(UnorderedPointwise(IsHalfOf(), rhs))); + EXPECT_EQ( + "where the following elements don't match any matchers:\n" + "element #1: 2", + Explain(UnorderedPointwise(IsHalfOf(), rhs), lhs)); +} + +TEST(UnorderedPointwiseTest, AcceptsCorrectContentInSameOrder) { + const double lhs[3] = {1, 2, 3}; + const int rhs[3] = {2, 4, 6}; + EXPECT_THAT(lhs, UnorderedPointwise(IsHalfOf(), rhs)); +} + +TEST(UnorderedPointwiseTest, AcceptsCorrectContentInDifferentOrder) { + const double lhs[3] = {1, 2, 3}; + const int rhs[3] = {6, 4, 2}; + EXPECT_THAT(lhs, UnorderedPointwise(IsHalfOf(), rhs)); +} + +TEST(UnorderedPointwiseTest, AllowsMonomorphicInnerMatcher) { + const double lhs[3] = {1, 2, 3}; + const int 
rhs[3] = {4, 6, 2}; + const Matcher> m1 = IsHalfOf(); + EXPECT_THAT(lhs, UnorderedPointwise(m1, rhs)); + + // This type works as a std::tuple can be + // implicitly cast to std::tuple. + const Matcher> m2 = IsHalfOf(); + EXPECT_THAT(lhs, UnorderedPointwise(m2, rhs)); +} + +TEST(UnorderedPointwiseTest, WorksWithMoveOnly) { + ContainerHelper helper; + EXPECT_CALL(helper, Call(UnorderedPointwise(PointeeEquals(), + std::vector{1, 2}))); + helper.Call(MakeUniquePtrs({2, 1})); +} + +TEST(PointeeTest, WorksOnMoveOnlyType) { + std::unique_ptr p(new int(3)); + EXPECT_THAT(p, Pointee(Eq(3))); + EXPECT_THAT(p, Not(Pointee(Eq(2)))); +} + +class PredicateFormatterFromMatcherTest : public ::testing::Test { + protected: + enum Behavior { kInitialSuccess, kAlwaysFail, kFlaky }; + + // A matcher that can return different results when used multiple times on the + // same input. No real matcher should do this; but this lets us test that we + // detect such behavior and fail appropriately. + class MockMatcher : public MatcherInterface { + public: + bool MatchAndExplain(Behavior behavior, + MatchResultListener* listener) const override { + *listener << "[MatchAndExplain]"; + switch (behavior) { + case kInitialSuccess: + // The first call to MatchAndExplain should use a "not interested" + // listener; so this is expected to return |true|. There should be no + // subsequent calls. + return !listener->IsInterested(); + + case kAlwaysFail: + return false; + + case kFlaky: + // The first call to MatchAndExplain should use a "not interested" + // listener; so this will return |false|. Subsequent calls should have + // an "interested" listener; so this will return |true|, thus + // simulating a flaky matcher. 
+ return listener->IsInterested(); + } + + GTEST_LOG_(FATAL) << "This should never be reached"; + return false; + } + + void DescribeTo(ostream* os) const override { *os << "[DescribeTo]"; } + + void DescribeNegationTo(ostream* os) const override { + *os << "[DescribeNegationTo]"; + } + }; + + AssertionResult RunPredicateFormatter(Behavior behavior) { + auto matcher = MakeMatcher(new MockMatcher); + PredicateFormatterFromMatcher> predicate_formatter( + matcher); + return predicate_formatter("dummy-name", behavior); + } +}; + +TEST_F(PredicateFormatterFromMatcherTest, ShortCircuitOnSuccess) { + AssertionResult result = RunPredicateFormatter(kInitialSuccess); + EXPECT_TRUE(result); // Implicit cast to bool. + std::string expect; + EXPECT_EQ(expect, result.message()); +} + +TEST_F(PredicateFormatterFromMatcherTest, NoShortCircuitOnFailure) { + AssertionResult result = RunPredicateFormatter(kAlwaysFail); + EXPECT_FALSE(result); // Implicit cast to bool. + std::string expect = + "Value of: dummy-name\nExpected: [DescribeTo]\n" + " Actual: 1" + + OfType(internal::GetTypeName()) + ", [MatchAndExplain]"; + EXPECT_EQ(expect, result.message()); +} + +TEST_F(PredicateFormatterFromMatcherTest, DetectsFlakyShortCircuit) { + AssertionResult result = RunPredicateFormatter(kFlaky); + EXPECT_FALSE(result); // Implicit cast to bool. + std::string expect = + "Value of: dummy-name\nExpected: [DescribeTo]\n" + " The matcher failed on the initial attempt; but passed when rerun to " + "generate the explanation.\n" + " Actual: 2" + + OfType(internal::GetTypeName()) + ", [MatchAndExplain]"; + EXPECT_EQ(expect, result.message()); +} + +// Tests for ElementsAre(). 
+ +TEST(ElementsAreTest, CanDescribeExpectingNoElement) { + Matcher&> m = ElementsAre(); + EXPECT_EQ("is empty", Describe(m)); +} + +TEST(ElementsAreTest, CanDescribeExpectingOneElement) { + Matcher> m = ElementsAre(Gt(5)); + EXPECT_EQ("has 1 element that is > 5", Describe(m)); +} + +TEST(ElementsAreTest, CanDescribeExpectingManyElements) { + Matcher> m = ElementsAre(StrEq("one"), "two"); + EXPECT_EQ( + "has 2 elements where\n" + "element #0 is equal to \"one\",\n" + "element #1 is equal to \"two\"", + Describe(m)); +} + +TEST(ElementsAreTest, CanDescribeNegationOfExpectingNoElement) { + Matcher> m = ElementsAre(); + EXPECT_EQ("isn't empty", DescribeNegation(m)); +} + +TEST(ElementsAreTest, CanDescribeNegationOfExpectingOneElement) { + Matcher&> m = ElementsAre(Gt(5)); + EXPECT_EQ( + "doesn't have 1 element, or\n" + "element #0 isn't > 5", + DescribeNegation(m)); +} + +TEST(ElementsAreTest, CanDescribeNegationOfExpectingManyElements) { + Matcher&> m = ElementsAre("one", "two"); + EXPECT_EQ( + "doesn't have 2 elements, or\n" + "element #0 isn't equal to \"one\", or\n" + "element #1 isn't equal to \"two\"", + DescribeNegation(m)); +} + +TEST(ElementsAreTest, DoesNotExplainTrivialMatch) { + Matcher&> m = ElementsAre(1, Ne(2)); + + list test_list; + test_list.push_back(1); + test_list.push_back(3); + EXPECT_EQ("", Explain(m, test_list)); // No need to explain anything. +} + +TEST_P(ElementsAreTestP, ExplainsNonTrivialMatch) { + Matcher&> m = + ElementsAre(GreaterThan(1), 0, GreaterThan(2)); + + const int a[] = {10, 0, 100}; + vector test_vector(std::begin(a), std::end(a)); + EXPECT_EQ( + "whose element #0 matches, which is 9 more than 1,\n" + "and whose element #2 matches, which is 98 more than 2", + Explain(m, test_vector)); +} + +TEST(ElementsAreTest, CanExplainMismatchWrongSize) { + Matcher&> m = ElementsAre(1, 3); + + list test_list; + // No need to explain when the container is empty. 
+ EXPECT_EQ("", Explain(m, test_list)); + + test_list.push_back(1); + EXPECT_EQ("which has 1 element", Explain(m, test_list)); +} + +TEST_P(ElementsAreTestP, CanExplainMismatchRightSize) { + Matcher&> m = ElementsAre(1, GreaterThan(5)); + + vector v; + v.push_back(2); + v.push_back(1); + EXPECT_EQ("whose element #0 doesn't match", Explain(m, v)); + + v[0] = 1; + EXPECT_EQ("whose element #1 doesn't match, which is 4 less than 5", + Explain(m, v)); +} + +TEST(ElementsAreTest, MatchesOneElementVector) { + vector test_vector; + test_vector.push_back("test string"); + + EXPECT_THAT(test_vector, ElementsAre(StrEq("test string"))); +} + +TEST(ElementsAreTest, MatchesOneElementList) { + list test_list; + test_list.push_back("test string"); + + EXPECT_THAT(test_list, ElementsAre("test string")); +} + +TEST(ElementsAreTest, MatchesThreeElementVector) { + vector test_vector; + test_vector.push_back("one"); + test_vector.push_back("two"); + test_vector.push_back("three"); + + EXPECT_THAT(test_vector, ElementsAre("one", StrEq("two"), _)); +} + +TEST(ElementsAreTest, MatchesOneElementEqMatcher) { + vector test_vector; + test_vector.push_back(4); + + EXPECT_THAT(test_vector, ElementsAre(Eq(4))); +} + +TEST(ElementsAreTest, MatchesOneElementAnyMatcher) { + vector test_vector; + test_vector.push_back(4); + + EXPECT_THAT(test_vector, ElementsAre(_)); +} + +TEST(ElementsAreTest, MatchesOneElementValue) { + vector test_vector; + test_vector.push_back(4); + + EXPECT_THAT(test_vector, ElementsAre(4)); +} + +TEST(ElementsAreTest, MatchesThreeElementsMixedMatchers) { + vector test_vector; + test_vector.push_back(1); + test_vector.push_back(2); + test_vector.push_back(3); + + EXPECT_THAT(test_vector, ElementsAre(1, Eq(2), _)); +} + +TEST(ElementsAreTest, MatchesTenElementVector) { + const int a[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + vector test_vector(std::begin(a), std::end(a)); + + EXPECT_THAT(test_vector, + // The element list can contain values and/or matchers + // of different types. 
+ ElementsAre(0, Ge(0), _, 3, 4, Ne(2), Eq(6), 7, 8, _)); +} + +TEST(ElementsAreTest, DoesNotMatchWrongSize) { + vector test_vector; + test_vector.push_back("test string"); + test_vector.push_back("test string"); + + Matcher> m = ElementsAre(StrEq("test string")); + EXPECT_FALSE(m.Matches(test_vector)); +} + +TEST(ElementsAreTest, DoesNotMatchWrongValue) { + vector test_vector; + test_vector.push_back("other string"); + + Matcher> m = ElementsAre(StrEq("test string")); + EXPECT_FALSE(m.Matches(test_vector)); +} + +TEST(ElementsAreTest, DoesNotMatchWrongOrder) { + vector test_vector; + test_vector.push_back("one"); + test_vector.push_back("three"); + test_vector.push_back("two"); + + Matcher> m = + ElementsAre(StrEq("one"), StrEq("two"), StrEq("three")); + EXPECT_FALSE(m.Matches(test_vector)); +} + +TEST(ElementsAreTest, WorksForNestedContainer) { + constexpr std::array strings = {{"Hi", "world"}}; + + vector> nested; + for (const auto& s : strings) { + nested.emplace_back(s, s + strlen(s)); + } + + EXPECT_THAT(nested, ElementsAre(ElementsAre('H', Ne('e')), + ElementsAre('w', 'o', _, _, 'd'))); + EXPECT_THAT(nested, Not(ElementsAre(ElementsAre('H', 'e'), + ElementsAre('w', 'o', _, _, 'd')))); +} + +TEST(ElementsAreTest, WorksWithByRefElementMatchers) { + int a[] = {0, 1, 2}; + vector v(std::begin(a), std::end(a)); + + EXPECT_THAT(v, ElementsAre(Ref(v[0]), Ref(v[1]), Ref(v[2]))); + EXPECT_THAT(v, Not(ElementsAre(Ref(v[0]), Ref(v[1]), Ref(a[2])))); +} + +TEST(ElementsAreTest, WorksWithContainerPointerUsingPointee) { + int a[] = {0, 1, 2}; + vector v(std::begin(a), std::end(a)); + + EXPECT_THAT(&v, Pointee(ElementsAre(0, 1, _))); + EXPECT_THAT(&v, Not(Pointee(ElementsAre(0, _, 3)))); +} + +TEST(ElementsAreTest, WorksWithNativeArrayPassedByReference) { + int array[] = {0, 1, 2}; + EXPECT_THAT(array, ElementsAre(0, 1, _)); + EXPECT_THAT(array, Not(ElementsAre(1, _, _))); + EXPECT_THAT(array, Not(ElementsAre(0, _))); +} + +class NativeArrayPassedAsPointerAndSize { + 
public: + NativeArrayPassedAsPointerAndSize() {} + + MOCK_METHOD(void, Helper, (int* array, int size)); + + private: + NativeArrayPassedAsPointerAndSize(const NativeArrayPassedAsPointerAndSize&) = + delete; + NativeArrayPassedAsPointerAndSize& operator=( + const NativeArrayPassedAsPointerAndSize&) = delete; +}; + +TEST(ElementsAreTest, WorksWithNativeArrayPassedAsPointerAndSize) { + int array[] = {0, 1}; + ::std::tuple array_as_tuple(array, 2); + EXPECT_THAT(array_as_tuple, ElementsAre(0, 1)); + EXPECT_THAT(array_as_tuple, Not(ElementsAre(0))); + + NativeArrayPassedAsPointerAndSize helper; + EXPECT_CALL(helper, Helper(_, _)).With(ElementsAre(0, 1)); + helper.Helper(array, 2); +} + +TEST(ElementsAreTest, WorksWithTwoDimensionalNativeArray) { + const char a2[][3] = {"hi", "lo"}; + EXPECT_THAT(a2, ElementsAre(ElementsAre('h', 'i', '\0'), + ElementsAre('l', 'o', '\0'))); + EXPECT_THAT(a2, ElementsAre(StrEq("hi"), StrEq("lo"))); + EXPECT_THAT(a2, ElementsAre(Not(ElementsAre('h', 'o', '\0')), + ElementsAre('l', 'o', '\0'))); +} + +TEST(ElementsAreTest, AcceptsStringLiteral) { + std::string array[] = {"hi", "one", "two"}; + EXPECT_THAT(array, ElementsAre("hi", "one", "two")); + EXPECT_THAT(array, Not(ElementsAre("hi", "one", "too"))); +} + +// Declared here with the size unknown. Defined AFTER the following test. +extern const char kHi[]; + +TEST(ElementsAreTest, AcceptsArrayWithUnknownSize) { + // The size of kHi is not known in this test, but ElementsAre() should + // still accept it. + + std::string array1[] = {"hi"}; + EXPECT_THAT(array1, ElementsAre(kHi)); + + std::string array2[] = {"ho"}; + EXPECT_THAT(array2, Not(ElementsAre(kHi))); +} + +const char kHi[] = "hi"; + +TEST(ElementsAreTest, MakesCopyOfArguments) { + int x = 1; + int y = 2; + // This should make a copy of x and y. + ::testing::internal::ElementsAreMatcher> + polymorphic_matcher = ElementsAre(x, y); + // Changing x and y now shouldn't affect the meaning of the above matcher. 
+ x = y = 0; + const int array1[] = {1, 2}; + EXPECT_THAT(array1, polymorphic_matcher); + const int array2[] = {0, 0}; + EXPECT_THAT(array2, Not(polymorphic_matcher)); +} + +// Tests for ElementsAreArray(). Since ElementsAreArray() shares most +// of the implementation with ElementsAre(), we don't test it as +// thoroughly here. + +TEST(ElementsAreArrayTest, CanBeCreatedWithValueArray) { + const int a[] = {1, 2, 3}; + + vector test_vector(std::begin(a), std::end(a)); + EXPECT_THAT(test_vector, ElementsAreArray(a)); + + test_vector[2] = 0; + EXPECT_THAT(test_vector, Not(ElementsAreArray(a))); +} + +TEST(ElementsAreArrayTest, CanBeCreatedWithArraySize) { + std::array a = {{"one", "two", "three"}}; + + vector test_vector(std::begin(a), std::end(a)); + EXPECT_THAT(test_vector, ElementsAreArray(a.data(), a.size())); + + const char** p = a.data(); + test_vector[0] = "1"; + EXPECT_THAT(test_vector, Not(ElementsAreArray(p, a.size()))); +} + +TEST(ElementsAreArrayTest, CanBeCreatedWithoutArraySize) { + const char* a[] = {"one", "two", "three"}; + + vector test_vector(std::begin(a), std::end(a)); + EXPECT_THAT(test_vector, ElementsAreArray(a)); + + test_vector[0] = "1"; + EXPECT_THAT(test_vector, Not(ElementsAreArray(a))); +} + +TEST(ElementsAreArrayTest, CanBeCreatedWithMatcherArray) { + const Matcher kMatcherArray[] = {StrEq("one"), StrEq("two"), + StrEq("three")}; + + vector test_vector; + test_vector.push_back("one"); + test_vector.push_back("two"); + test_vector.push_back("three"); + EXPECT_THAT(test_vector, ElementsAreArray(kMatcherArray)); + + test_vector.push_back("three"); + EXPECT_THAT(test_vector, Not(ElementsAreArray(kMatcherArray))); +} + +TEST(ElementsAreArrayTest, CanBeCreatedWithVector) { + const int a[] = {1, 2, 3}; + vector test_vector(std::begin(a), std::end(a)); + const vector expected(std::begin(a), std::end(a)); + EXPECT_THAT(test_vector, ElementsAreArray(expected)); + test_vector.push_back(4); + EXPECT_THAT(test_vector, 
Not(ElementsAreArray(expected))); +} + +TEST(ElementsAreArrayTest, TakesInitializerList) { + const int a[5] = {1, 2, 3, 4, 5}; + EXPECT_THAT(a, ElementsAreArray({1, 2, 3, 4, 5})); + EXPECT_THAT(a, Not(ElementsAreArray({1, 2, 3, 5, 4}))); + EXPECT_THAT(a, Not(ElementsAreArray({1, 2, 3, 4, 6}))); +} + +TEST(ElementsAreArrayTest, TakesInitializerListOfCStrings) { + const std::string a[5] = {"a", "b", "c", "d", "e"}; + EXPECT_THAT(a, ElementsAreArray({"a", "b", "c", "d", "e"})); + EXPECT_THAT(a, Not(ElementsAreArray({"a", "b", "c", "e", "d"}))); + EXPECT_THAT(a, Not(ElementsAreArray({"a", "b", "c", "d", "ef"}))); +} + +TEST(ElementsAreArrayTest, TakesInitializerListOfSameTypedMatchers) { + const int a[5] = {1, 2, 3, 4, 5}; + EXPECT_THAT(a, ElementsAreArray({Eq(1), Eq(2), Eq(3), Eq(4), Eq(5)})); + EXPECT_THAT(a, Not(ElementsAreArray({Eq(1), Eq(2), Eq(3), Eq(4), Eq(6)}))); +} + +TEST(ElementsAreArrayTest, TakesInitializerListOfDifferentTypedMatchers) { + const int a[5] = {1, 2, 3, 4, 5}; + // The compiler cannot infer the type of the initializer list if its + // elements have different types. We must explicitly specify the + // unified element type in this case. 
+ EXPECT_THAT( + a, ElementsAreArray>({Eq(1), Ne(-2), Ge(3), Le(4), Eq(5)})); + EXPECT_THAT(a, Not(ElementsAreArray>( + {Eq(1), Ne(-2), Ge(3), Le(4), Eq(6)}))); +} + +TEST(ElementsAreArrayTest, CanBeCreatedWithMatcherVector) { + const int a[] = {1, 2, 3}; + const Matcher kMatchers[] = {Eq(1), Eq(2), Eq(3)}; + vector test_vector(std::begin(a), std::end(a)); + const vector> expected(std::begin(kMatchers), + std::end(kMatchers)); + EXPECT_THAT(test_vector, ElementsAreArray(expected)); + test_vector.push_back(4); + EXPECT_THAT(test_vector, Not(ElementsAreArray(expected))); +} + +TEST(ElementsAreArrayTest, CanBeCreatedWithIteratorRange) { + const int a[] = {1, 2, 3}; + const vector test_vector(std::begin(a), std::end(a)); + const vector expected(std::begin(a), std::end(a)); + EXPECT_THAT(test_vector, ElementsAreArray(expected.begin(), expected.end())); + // Pointers are iterators, too. + EXPECT_THAT(test_vector, ElementsAreArray(std::begin(a), std::end(a))); + // The empty range of NULL pointers should also be okay. + int* const null_int = nullptr; + EXPECT_THAT(test_vector, Not(ElementsAreArray(null_int, null_int))); + EXPECT_THAT((vector()), ElementsAreArray(null_int, null_int)); +} + +// Since ElementsAre() and ElementsAreArray() share much of the +// implementation, we only do a test for native arrays here. 
+TEST(ElementsAreArrayTest, WorksWithNativeArray) { + ::std::string a[] = {"hi", "ho"}; + ::std::string b[] = {"hi", "ho"}; + + EXPECT_THAT(a, ElementsAreArray(b)); + EXPECT_THAT(a, ElementsAreArray(b, 2)); + EXPECT_THAT(a, Not(ElementsAreArray(b, 1))); +} + +TEST(ElementsAreArrayTest, SourceLifeSpan) { + const int a[] = {1, 2, 3}; + vector test_vector(std::begin(a), std::end(a)); + vector expect(std::begin(a), std::end(a)); + ElementsAreArrayMatcher matcher_maker = + ElementsAreArray(expect.begin(), expect.end()); + EXPECT_THAT(test_vector, matcher_maker); + // Changing in place the values that initialized matcher_maker should not + // affect matcher_maker anymore. It should have made its own copy of them. + for (int& i : expect) { + i += 10; + } + EXPECT_THAT(test_vector, matcher_maker); + test_vector.push_back(3); + EXPECT_THAT(test_vector, Not(matcher_maker)); +} + +// Tests Contains(). + +INSTANTIATE_GTEST_MATCHER_TEST_P(ContainsTest); + +TEST(ContainsTest, ListMatchesWhenElementIsInContainer) { + list some_list; + some_list.push_back(3); + some_list.push_back(1); + some_list.push_back(2); + some_list.push_back(3); + EXPECT_THAT(some_list, Contains(1)); + EXPECT_THAT(some_list, Contains(Gt(2.5))); + EXPECT_THAT(some_list, Contains(Eq(2.0f))); + + list another_list; + another_list.push_back("fee"); + another_list.push_back("fie"); + another_list.push_back("foe"); + another_list.push_back("fum"); + EXPECT_THAT(another_list, Contains(std::string("fee"))); +} + +TEST(ContainsTest, ListDoesNotMatchWhenElementIsNotInContainer) { + list some_list; + some_list.push_back(3); + some_list.push_back(1); + EXPECT_THAT(some_list, Not(Contains(4))); +} + +TEST(ContainsTest, SetMatchesWhenElementIsInContainer) { + set some_set; + some_set.insert(3); + some_set.insert(1); + some_set.insert(2); + EXPECT_THAT(some_set, Contains(Eq(1.0))); + EXPECT_THAT(some_set, Contains(Eq(3.0f))); + EXPECT_THAT(some_set, Contains(2)); + + set another_set; + another_set.insert("fee"); + 
another_set.insert("fie"); + another_set.insert("foe"); + another_set.insert("fum"); + EXPECT_THAT(another_set, Contains(Eq(std::string("fum")))); +} + +TEST(ContainsTest, SetDoesNotMatchWhenElementIsNotInContainer) { + set some_set; + some_set.insert(3); + some_set.insert(1); + EXPECT_THAT(some_set, Not(Contains(4))); + + set c_string_set; + c_string_set.insert("hello"); + EXPECT_THAT(c_string_set, Not(Contains(std::string("goodbye")))); +} + +TEST_P(ContainsTestP, ExplainsMatchResultCorrectly) { + const int a[2] = {1, 2}; + Matcher m = Contains(2); + EXPECT_EQ("whose element #1 matches", Explain(m, a)); + + m = Contains(3); + EXPECT_EQ("", Explain(m, a)); + + m = Contains(GreaterThan(0)); + EXPECT_EQ("whose element #0 matches, which is 1 more than 0", Explain(m, a)); + + m = Contains(GreaterThan(10)); + EXPECT_EQ("", Explain(m, a)); +} + +TEST(ContainsTest, DescribesItselfCorrectly) { + Matcher> m = Contains(1); + EXPECT_EQ("contains at least one element that is equal to 1", Describe(m)); + + Matcher> m2 = Not(m); + EXPECT_EQ("doesn't contain any element that is equal to 1", Describe(m2)); +} + +TEST(ContainsTest, MapMatchesWhenElementIsInContainer) { + map my_map; + const char* bar = "a string"; + my_map[bar] = 2; + EXPECT_THAT(my_map, Contains(pair(bar, 2))); + + map another_map; + another_map["fee"] = 1; + another_map["fie"] = 2; + another_map["foe"] = 3; + another_map["fum"] = 4; + EXPECT_THAT(another_map, + Contains(pair(std::string("fee"), 1))); + EXPECT_THAT(another_map, Contains(pair("fie", 2))); +} + +TEST(ContainsTest, MapDoesNotMatchWhenElementIsNotInContainer) { + map some_map; + some_map[1] = 11; + some_map[2] = 22; + EXPECT_THAT(some_map, Not(Contains(pair(2, 23)))); +} + +TEST(ContainsTest, ArrayMatchesWhenElementIsInContainer) { + const char* string_array[] = {"fee", "fie", "foe", "fum"}; + EXPECT_THAT(string_array, Contains(Eq(std::string("fum")))); +} + +TEST(ContainsTest, ArrayDoesNotMatchWhenElementIsNotInContainer) { + int int_array[] = {1, 
2, 3, 4}; + EXPECT_THAT(int_array, Not(Contains(5))); +} + +TEST(ContainsTest, AcceptsMatcher) { + const int a[] = {1, 2, 3}; + EXPECT_THAT(a, Contains(Gt(2))); + EXPECT_THAT(a, Not(Contains(Gt(4)))); +} + +TEST(ContainsTest, WorksForNativeArrayAsTuple) { + const int a[] = {1, 2}; + const int* const pointer = a; + EXPECT_THAT(std::make_tuple(pointer, 2), Contains(1)); + EXPECT_THAT(std::make_tuple(pointer, 2), Not(Contains(Gt(3)))); +} + +TEST(ContainsTest, WorksForTwoDimensionalNativeArray) { + int a[][3] = {{1, 2, 3}, {4, 5, 6}}; + EXPECT_THAT(a, Contains(ElementsAre(4, 5, 6))); + EXPECT_THAT(a, Contains(Contains(5))); + EXPECT_THAT(a, Not(Contains(ElementsAre(3, 4, 5)))); + EXPECT_THAT(a, Contains(Not(Contains(5)))); +} + +} // namespace +} // namespace gmock_matchers_test +} // namespace testing + +#ifdef _MSC_VER +#pragma warning(pop) +#endif diff --git a/ext/googletest/googlemock/test/gmock-matchers-misc_test.cc b/ext/googletest/googlemock/test/gmock-matchers-misc_test.cc new file mode 100644 index 0000000000..c68431c139 --- /dev/null +++ b/ext/googletest/googlemock/test/gmock-matchers-misc_test.cc @@ -0,0 +1,1805 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file tests some commonly used argument matchers. + +// Silence warning C4244: 'initializing': conversion from 'int' to 'short', +// possible loss of data and C4100, unreferenced local parameter +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4244) +#pragma warning(disable : 4100) +#endif + +#include "test/gmock-matchers_test.h" + +namespace testing { +namespace gmock_matchers_test { +namespace { + +TEST(AddressTest, NonConst) { + int n = 1; + const Matcher m = Address(Eq(&n)); + + EXPECT_TRUE(m.Matches(n)); + + int other = 5; + + EXPECT_FALSE(m.Matches(other)); + + int& n_ref = n; + + EXPECT_TRUE(m.Matches(n_ref)); +} + +TEST(AddressTest, Const) { + const int n = 1; + const Matcher m = Address(Eq(&n)); + + EXPECT_TRUE(m.Matches(n)); + + int other = 5; + + EXPECT_FALSE(m.Matches(other)); +} + +TEST(AddressTest, MatcherDoesntCopy) { + std::unique_ptr n(new int(1)); + const Matcher> m = Address(Eq(&n)); + + EXPECT_TRUE(m.Matches(n)); +} + +TEST(AddressTest, Describe) { + Matcher matcher = Address(_); + EXPECT_EQ("has address that is anything", Describe(matcher)); + EXPECT_EQ("does not 
have address that is anything", + DescribeNegation(matcher)); +} + +// The following two tests verify that values without a public copy +// ctor can be used as arguments to matchers like Eq(), Ge(), and etc +// with the help of ByRef(). + +class NotCopyable { + public: + explicit NotCopyable(int a_value) : value_(a_value) {} + + int value() const { return value_; } + + bool operator==(const NotCopyable& rhs) const { + return value() == rhs.value(); + } + + bool operator>=(const NotCopyable& rhs) const { + return value() >= rhs.value(); + } + + private: + int value_; + + NotCopyable(const NotCopyable&) = delete; + NotCopyable& operator=(const NotCopyable&) = delete; +}; + +TEST(ByRefTest, AllowsNotCopyableConstValueInMatchers) { + const NotCopyable const_value1(1); + const Matcher m = Eq(ByRef(const_value1)); + + const NotCopyable n1(1), n2(2); + EXPECT_TRUE(m.Matches(n1)); + EXPECT_FALSE(m.Matches(n2)); +} + +TEST(ByRefTest, AllowsNotCopyableValueInMatchers) { + NotCopyable value2(2); + const Matcher m = Ge(ByRef(value2)); + + NotCopyable n1(1), n2(2); + EXPECT_FALSE(m.Matches(n1)); + EXPECT_TRUE(m.Matches(n2)); +} + +TEST(IsEmptyTest, ImplementsIsEmpty) { + vector container; + EXPECT_THAT(container, IsEmpty()); + container.push_back(0); + EXPECT_THAT(container, Not(IsEmpty())); + container.push_back(1); + EXPECT_THAT(container, Not(IsEmpty())); +} + +TEST(IsEmptyTest, WorksWithString) { + std::string text; + EXPECT_THAT(text, IsEmpty()); + text = "foo"; + EXPECT_THAT(text, Not(IsEmpty())); + text = std::string("\0", 1); + EXPECT_THAT(text, Not(IsEmpty())); +} + +TEST(IsEmptyTest, CanDescribeSelf) { + Matcher> m = IsEmpty(); + EXPECT_EQ("is empty", Describe(m)); + EXPECT_EQ("isn't empty", DescribeNegation(m)); +} + +TEST(IsEmptyTest, ExplainsResult) { + Matcher> m = IsEmpty(); + vector container; + EXPECT_EQ("", Explain(m, container)); + container.push_back(0); + EXPECT_EQ("whose size is 1", Explain(m, container)); +} + +TEST(IsEmptyTest, WorksWithMoveOnly) { + 
ContainerHelper helper; + EXPECT_CALL(helper, Call(IsEmpty())); + helper.Call({}); +} + +TEST(IsTrueTest, IsTrueIsFalse) { + EXPECT_THAT(true, IsTrue()); + EXPECT_THAT(false, IsFalse()); + EXPECT_THAT(true, Not(IsFalse())); + EXPECT_THAT(false, Not(IsTrue())); + EXPECT_THAT(0, Not(IsTrue())); + EXPECT_THAT(0, IsFalse()); + EXPECT_THAT(nullptr, Not(IsTrue())); + EXPECT_THAT(nullptr, IsFalse()); + EXPECT_THAT(-1, IsTrue()); + EXPECT_THAT(-1, Not(IsFalse())); + EXPECT_THAT(1, IsTrue()); + EXPECT_THAT(1, Not(IsFalse())); + EXPECT_THAT(2, IsTrue()); + EXPECT_THAT(2, Not(IsFalse())); + int a = 42; + EXPECT_THAT(a, IsTrue()); + EXPECT_THAT(a, Not(IsFalse())); + EXPECT_THAT(&a, IsTrue()); + EXPECT_THAT(&a, Not(IsFalse())); + EXPECT_THAT(false, Not(IsTrue())); + EXPECT_THAT(true, Not(IsFalse())); + EXPECT_THAT(std::true_type(), IsTrue()); + EXPECT_THAT(std::true_type(), Not(IsFalse())); + EXPECT_THAT(std::false_type(), IsFalse()); + EXPECT_THAT(std::false_type(), Not(IsTrue())); + EXPECT_THAT(nullptr, Not(IsTrue())); + EXPECT_THAT(nullptr, IsFalse()); + std::unique_ptr null_unique; + std::unique_ptr nonnull_unique(new int(0)); + EXPECT_THAT(null_unique, Not(IsTrue())); + EXPECT_THAT(null_unique, IsFalse()); + EXPECT_THAT(nonnull_unique, IsTrue()); + EXPECT_THAT(nonnull_unique, Not(IsFalse())); +} + +#if GTEST_HAS_TYPED_TEST +// Tests ContainerEq with different container types, and +// different element types. + +template +class ContainerEqTest : public testing::Test {}; + +typedef testing::Types, vector, multiset, list> + ContainerEqTestTypes; + +TYPED_TEST_SUITE(ContainerEqTest, ContainerEqTestTypes); + +// Tests that the filled container is equal to itself. +TYPED_TEST(ContainerEqTest, EqualsSelf) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + TypeParam my_set(vals, vals + 6); + const Matcher m = ContainerEq(my_set); + EXPECT_TRUE(m.Matches(my_set)); + EXPECT_EQ("", Explain(m, my_set)); +} + +// Tests that missing values are reported. 
+TYPED_TEST(ContainerEqTest, ValueMissing) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {2, 1, 8, 5}; + TypeParam my_set(vals, vals + 6); + TypeParam test_set(test_vals, test_vals + 4); + const Matcher m = ContainerEq(my_set); + EXPECT_FALSE(m.Matches(test_set)); + EXPECT_EQ("which doesn't have these expected elements: 3", + Explain(m, test_set)); +} + +// Tests that added values are reported. +TYPED_TEST(ContainerEqTest, ValueAdded) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {1, 2, 3, 5, 8, 46}; + TypeParam my_set(vals, vals + 6); + TypeParam test_set(test_vals, test_vals + 6); + const Matcher m = ContainerEq(my_set); + EXPECT_FALSE(m.Matches(test_set)); + EXPECT_EQ("which has these unexpected elements: 46", Explain(m, test_set)); +} + +// Tests that added and missing values are reported together. +TYPED_TEST(ContainerEqTest, ValueAddedAndRemoved) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {1, 2, 3, 8, 46}; + TypeParam my_set(vals, vals + 6); + TypeParam test_set(test_vals, test_vals + 5); + const Matcher m = ContainerEq(my_set); + EXPECT_FALSE(m.Matches(test_set)); + EXPECT_EQ( + "which has these unexpected elements: 46,\n" + "and doesn't have these expected elements: 5", + Explain(m, test_set)); +} + +// Tests duplicated value -- expect no explanation. +TYPED_TEST(ContainerEqTest, DuplicateDifference) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {1, 2, 3, 5, 8}; + TypeParam my_set(vals, vals + 6); + TypeParam test_set(test_vals, test_vals + 5); + const Matcher m = ContainerEq(my_set); + // Depending on the container, match may be true or false + // But in any case there should be no explanation. + EXPECT_EQ("", Explain(m, test_set)); +} +#endif // GTEST_HAS_TYPED_TEST + +// Tests that multiple missing values are reported. +// Using just vector here, so order is predictable. 
+TEST(ContainerEqExtraTest, MultipleValuesMissing) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {2, 1, 5}; + vector my_set(vals, vals + 6); + vector test_set(test_vals, test_vals + 3); + const Matcher> m = ContainerEq(my_set); + EXPECT_FALSE(m.Matches(test_set)); + EXPECT_EQ("which doesn't have these expected elements: 3, 8", + Explain(m, test_set)); +} + +// Tests that added values are reported. +// Using just vector here, so order is predictable. +TEST(ContainerEqExtraTest, MultipleValuesAdded) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {1, 2, 92, 3, 5, 8, 46}; + list my_set(vals, vals + 6); + list test_set(test_vals, test_vals + 7); + const Matcher&> m = ContainerEq(my_set); + EXPECT_FALSE(m.Matches(test_set)); + EXPECT_EQ("which has these unexpected elements: 92, 46", + Explain(m, test_set)); +} + +// Tests that added and missing values are reported together. +TEST(ContainerEqExtraTest, MultipleValuesAddedAndRemoved) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {1, 2, 3, 92, 46}; + list my_set(vals, vals + 6); + list test_set(test_vals, test_vals + 5); + const Matcher> m = ContainerEq(my_set); + EXPECT_FALSE(m.Matches(test_set)); + EXPECT_EQ( + "which has these unexpected elements: 92, 46,\n" + "and doesn't have these expected elements: 5, 8", + Explain(m, test_set)); +} + +// Tests to see that duplicate elements are detected, +// but (as above) not reported in the explanation. +TEST(ContainerEqExtraTest, MultiSetOfIntDuplicateDifference) { + static const int vals[] = {1, 1, 2, 3, 5, 8}; + static const int test_vals[] = {1, 2, 3, 5, 8}; + vector my_set(vals, vals + 6); + vector test_set(test_vals, test_vals + 5); + const Matcher> m = ContainerEq(my_set); + EXPECT_TRUE(m.Matches(my_set)); + EXPECT_FALSE(m.Matches(test_set)); + // There is nothing to report when both sets contain all the same values. 
+ EXPECT_EQ("", Explain(m, test_set)); +} + +// Tests that ContainerEq works for non-trivial associative containers, +// like maps. +TEST(ContainerEqExtraTest, WorksForMaps) { + map my_map; + my_map[0] = "a"; + my_map[1] = "b"; + + map test_map; + test_map[0] = "aa"; + test_map[1] = "b"; + + const Matcher&> m = ContainerEq(my_map); + EXPECT_TRUE(m.Matches(my_map)); + EXPECT_FALSE(m.Matches(test_map)); + + EXPECT_EQ( + "which has these unexpected elements: (0, \"aa\"),\n" + "and doesn't have these expected elements: (0, \"a\")", + Explain(m, test_map)); +} + +TEST(ContainerEqExtraTest, WorksForNativeArray) { + int a1[] = {1, 2, 3}; + int a2[] = {1, 2, 3}; + int b[] = {1, 2, 4}; + + EXPECT_THAT(a1, ContainerEq(a2)); + EXPECT_THAT(a1, Not(ContainerEq(b))); +} + +TEST(ContainerEqExtraTest, WorksForTwoDimensionalNativeArray) { + const char a1[][3] = {"hi", "lo"}; + const char a2[][3] = {"hi", "lo"}; + const char b[][3] = {"lo", "hi"}; + + // Tests using ContainerEq() in the first dimension. + EXPECT_THAT(a1, ContainerEq(a2)); + EXPECT_THAT(a1, Not(ContainerEq(b))); + + // Tests using ContainerEq() in the second dimension. 
+ EXPECT_THAT(a1, ElementsAre(ContainerEq(a2[0]), ContainerEq(a2[1]))); + EXPECT_THAT(a1, ElementsAre(Not(ContainerEq(b[0])), ContainerEq(a2[1]))); +} + +TEST(ContainerEqExtraTest, WorksForNativeArrayAsTuple) { + const int a1[] = {1, 2, 3}; + const int a2[] = {1, 2, 3}; + const int b[] = {1, 2, 3, 4}; + + const int* const p1 = a1; + EXPECT_THAT(std::make_tuple(p1, 3), ContainerEq(a2)); + EXPECT_THAT(std::make_tuple(p1, 3), Not(ContainerEq(b))); + + const int c[] = {1, 3, 2}; + EXPECT_THAT(std::make_tuple(p1, 3), Not(ContainerEq(c))); +} + +TEST(ContainerEqExtraTest, CopiesNativeArrayParameter) { + std::string a1[][3] = {{"hi", "hello", "ciao"}, {"bye", "see you", "ciao"}}; + + std::string a2[][3] = {{"hi", "hello", "ciao"}, {"bye", "see you", "ciao"}}; + + const Matcher m = ContainerEq(a2); + EXPECT_THAT(a1, m); + + a2[0][0] = "ha"; + EXPECT_THAT(a1, m); +} + +namespace { + +// Used as a check on the more complex max flow method used in the +// real testing::internal::FindMaxBipartiteMatching. This method is +// compatible but runs in worst-case factorial time, so we only +// use it in testing for small problem sizes. +template +class BacktrackingMaxBPMState { + public: + // Does not take ownership of 'g'. 
+ explicit BacktrackingMaxBPMState(const Graph* g) : graph_(g) {} + + ElementMatcherPairs Compute() { + if (graph_->LhsSize() == 0 || graph_->RhsSize() == 0) { + return best_so_far_; + } + lhs_used_.assign(graph_->LhsSize(), kUnused); + rhs_used_.assign(graph_->RhsSize(), kUnused); + for (size_t irhs = 0; irhs < graph_->RhsSize(); ++irhs) { + matches_.clear(); + RecurseInto(irhs); + if (best_so_far_.size() == graph_->RhsSize()) break; + } + return best_so_far_; + } + + private: + static const size_t kUnused = static_cast(-1); + + void PushMatch(size_t lhs, size_t rhs) { + matches_.push_back(ElementMatcherPair(lhs, rhs)); + lhs_used_[lhs] = rhs; + rhs_used_[rhs] = lhs; + if (matches_.size() > best_so_far_.size()) { + best_so_far_ = matches_; + } + } + + void PopMatch() { + const ElementMatcherPair& back = matches_.back(); + lhs_used_[back.first] = kUnused; + rhs_used_[back.second] = kUnused; + matches_.pop_back(); + } + + bool RecurseInto(size_t irhs) { + if (rhs_used_[irhs] != kUnused) { + return true; + } + for (size_t ilhs = 0; ilhs < graph_->LhsSize(); ++ilhs) { + if (lhs_used_[ilhs] != kUnused) { + continue; + } + if (!graph_->HasEdge(ilhs, irhs)) { + continue; + } + PushMatch(ilhs, irhs); + if (best_so_far_.size() == graph_->RhsSize()) { + return false; + } + for (size_t mi = irhs + 1; mi < graph_->RhsSize(); ++mi) { + if (!RecurseInto(mi)) return false; + } + PopMatch(); + } + return true; + } + + const Graph* graph_; // not owned + std::vector lhs_used_; + std::vector rhs_used_; + ElementMatcherPairs matches_; + ElementMatcherPairs best_so_far_; +}; + +template +const size_t BacktrackingMaxBPMState::kUnused; + +} // namespace + +// Implement a simple backtracking algorithm to determine if it is possible +// to find one element per matcher, without reusing elements. 
+template +ElementMatcherPairs FindBacktrackingMaxBPM(const Graph& g) { + return BacktrackingMaxBPMState(&g).Compute(); +} + +class BacktrackingBPMTest : public ::testing::Test {}; + +// Tests the MaxBipartiteMatching algorithm with square matrices. +// The single int param is the # of nodes on each of the left and right sides. +class BipartiteTest : public ::testing::TestWithParam {}; + +// Verify all match graphs up to some moderate number of edges. +TEST_P(BipartiteTest, Exhaustive) { + size_t nodes = GetParam(); + MatchMatrix graph(nodes, nodes); + do { + ElementMatcherPairs matches = internal::FindMaxBipartiteMatching(graph); + EXPECT_EQ(FindBacktrackingMaxBPM(graph).size(), matches.size()) + << "graph: " << graph.DebugString(); + // Check that all elements of matches are in the graph. + // Check that elements of first and second are unique. + std::vector seen_element(graph.LhsSize()); + std::vector seen_matcher(graph.RhsSize()); + SCOPED_TRACE(PrintToString(matches)); + for (size_t i = 0; i < matches.size(); ++i) { + size_t ilhs = matches[i].first; + size_t irhs = matches[i].second; + EXPECT_TRUE(graph.HasEdge(ilhs, irhs)); + EXPECT_FALSE(seen_element[ilhs]); + EXPECT_FALSE(seen_matcher[irhs]); + seen_element[ilhs] = true; + seen_matcher[irhs] = true; + } + } while (graph.NextGraph()); +} + +INSTANTIATE_TEST_SUITE_P(AllGraphs, BipartiteTest, + ::testing::Range(size_t{0}, size_t{5})); + +// Parameterized by a pair interpreted as (LhsSize, RhsSize). +class BipartiteNonSquareTest + : public ::testing::TestWithParam> {}; + +TEST_F(BipartiteNonSquareTest, SimpleBacktracking) { + // ....... 
+ // 0:-----\ : + // 1:---\ | : + // 2:---\ | : + // 3:-\ | | : + // :.......: + // 0 1 2 + MatchMatrix g(4, 3); + constexpr std::array, 4> kEdges = { + {{{0, 2}}, {{1, 1}}, {{2, 1}}, {{3, 0}}}}; + for (size_t i = 0; i < kEdges.size(); ++i) { + g.SetEdge(kEdges[i][0], kEdges[i][1], true); + } + EXPECT_THAT(FindBacktrackingMaxBPM(g), + ElementsAre(Pair(3, 0), Pair(AnyOf(1, 2), 1), Pair(0, 2))) + << g.DebugString(); +} + +// Verify a few nonsquare matrices. +TEST_P(BipartiteNonSquareTest, Exhaustive) { + size_t nlhs = GetParam().first; + size_t nrhs = GetParam().second; + MatchMatrix graph(nlhs, nrhs); + do { + EXPECT_EQ(FindBacktrackingMaxBPM(graph).size(), + internal::FindMaxBipartiteMatching(graph).size()) + << "graph: " << graph.DebugString() + << "\nbacktracking: " << PrintToString(FindBacktrackingMaxBPM(graph)) + << "\nmax flow: " + << PrintToString(internal::FindMaxBipartiteMatching(graph)); + } while (graph.NextGraph()); +} + +INSTANTIATE_TEST_SUITE_P( + AllGraphs, BipartiteNonSquareTest, + testing::Values(std::make_pair(1, 2), std::make_pair(2, 1), + std::make_pair(3, 2), std::make_pair(2, 3), + std::make_pair(4, 1), std::make_pair(1, 4), + std::make_pair(4, 3), std::make_pair(3, 4))); + +class BipartiteRandomTest + : public ::testing::TestWithParam> {}; + +// Verifies a large sample of larger graphs. 
+TEST_P(BipartiteRandomTest, LargerNets) { + int nodes = GetParam().first; + int iters = GetParam().second; + MatchMatrix graph(static_cast(nodes), static_cast(nodes)); + + auto seed = static_cast(GTEST_FLAG_GET(random_seed)); + if (seed == 0) { + seed = static_cast(time(nullptr)); + } + + for (; iters > 0; --iters, ++seed) { + srand(static_cast(seed)); + graph.Randomize(); + EXPECT_EQ(FindBacktrackingMaxBPM(graph).size(), + internal::FindMaxBipartiteMatching(graph).size()) + << " graph: " << graph.DebugString() + << "\nTo reproduce the failure, rerun the test with the flag" + " --" + << GTEST_FLAG_PREFIX_ << "random_seed=" << seed; + } +} + +// Test argument is a std::pair representing (nodes, iters). +INSTANTIATE_TEST_SUITE_P(Samples, BipartiteRandomTest, + testing::Values(std::make_pair(5, 10000), + std::make_pair(6, 5000), + std::make_pair(7, 2000), + std::make_pair(8, 500), + std::make_pair(9, 100))); + +// Tests IsReadableTypeName(). + +TEST(IsReadableTypeNameTest, ReturnsTrueForShortNames) { + EXPECT_TRUE(IsReadableTypeName("int")); + EXPECT_TRUE(IsReadableTypeName("const unsigned char*")); + EXPECT_TRUE(IsReadableTypeName("MyMap")); + EXPECT_TRUE(IsReadableTypeName("void (*)(int, bool)")); +} + +TEST(IsReadableTypeNameTest, ReturnsTrueForLongNonTemplateNonFunctionNames) { + EXPECT_TRUE(IsReadableTypeName("my_long_namespace::MyClassName")); + EXPECT_TRUE(IsReadableTypeName("int [5][6][7][8][9][10][11]")); + EXPECT_TRUE(IsReadableTypeName("my_namespace::MyOuterClass::MyInnerClass")); +} + +TEST(IsReadableTypeNameTest, ReturnsFalseForLongTemplateNames) { + EXPECT_FALSE( + IsReadableTypeName("basic_string >")); + EXPECT_FALSE(IsReadableTypeName("std::vector >")); +} + +TEST(IsReadableTypeNameTest, ReturnsFalseForLongFunctionTypeNames) { + EXPECT_FALSE(IsReadableTypeName("void (&)(int, bool, char, float)")); +} + +// Tests FormatMatcherDescription(). 
+ +TEST(FormatMatcherDescriptionTest, WorksForEmptyDescription) { + EXPECT_EQ("is even", + FormatMatcherDescription(false, "IsEven", {}, Strings())); + EXPECT_EQ("not (is even)", + FormatMatcherDescription(true, "IsEven", {}, Strings())); + + EXPECT_EQ("equals (a: 5)", + FormatMatcherDescription(false, "Equals", {"a"}, {"5"})); + + EXPECT_EQ( + "is in range (a: 5, b: 8)", + FormatMatcherDescription(false, "IsInRange", {"a", "b"}, {"5", "8"})); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(MatcherTupleTest); + +TEST_P(MatcherTupleTestP, ExplainsMatchFailure) { + stringstream ss1; + ExplainMatchFailureTupleTo( + std::make_tuple(Matcher(Eq('a')), GreaterThan(5)), + std::make_tuple('a', 10), &ss1); + EXPECT_EQ("", ss1.str()); // Successful match. + + stringstream ss2; + ExplainMatchFailureTupleTo( + std::make_tuple(GreaterThan(5), Matcher(Eq('a'))), + std::make_tuple(2, 'b'), &ss2); + EXPECT_EQ( + " Expected arg #0: is > 5\n" + " Actual: 2, which is 3 less than 5\n" + " Expected arg #1: is equal to 'a' (97, 0x61)\n" + " Actual: 'b' (98, 0x62)\n", + ss2.str()); // Failed match where both arguments need explanation. + + stringstream ss3; + ExplainMatchFailureTupleTo( + std::make_tuple(GreaterThan(5), Matcher(Eq('a'))), + std::make_tuple(2, 'a'), &ss3); + EXPECT_EQ( + " Expected arg #0: is > 5\n" + " Actual: 2, which is 3 less than 5\n", + ss3.str()); // Failed match where only one argument needs + // explanation. +} + +// Sample optional type implementation with minimal requirements for use with +// Optional matcher. 
+template +class SampleOptional { + public: + using value_type = T; + explicit SampleOptional(T value) + : value_(std::move(value)), has_value_(true) {} + SampleOptional() : value_(), has_value_(false) {} + operator bool() const { return has_value_; } + const T& operator*() const { return value_; } + + private: + T value_; + bool has_value_; +}; + +TEST(OptionalTest, DescribesSelf) { + const Matcher> m = Optional(Eq(1)); + EXPECT_EQ("value is equal to 1", Describe(m)); +} + +TEST(OptionalTest, ExplainsSelf) { + const Matcher> m = Optional(Eq(1)); + EXPECT_EQ("whose value 1 matches", Explain(m, SampleOptional(1))); + EXPECT_EQ("whose value 2 doesn't match", Explain(m, SampleOptional(2))); +} + +TEST(OptionalTest, MatchesNonEmptyOptional) { + const Matcher> m1 = Optional(1); + const Matcher> m2 = Optional(Eq(2)); + const Matcher> m3 = Optional(Lt(3)); + SampleOptional opt(1); + EXPECT_TRUE(m1.Matches(opt)); + EXPECT_FALSE(m2.Matches(opt)); + EXPECT_TRUE(m3.Matches(opt)); +} + +TEST(OptionalTest, DoesNotMatchNullopt) { + const Matcher> m = Optional(1); + SampleOptional empty; + EXPECT_FALSE(m.Matches(empty)); +} + +TEST(OptionalTest, WorksWithMoveOnly) { + Matcher>> m = Optional(Eq(nullptr)); + EXPECT_TRUE(m.Matches(SampleOptional>(nullptr))); +} + +class SampleVariantIntString { + public: + SampleVariantIntString(int i) : i_(i), has_int_(true) {} + SampleVariantIntString(const std::string& s) : s_(s), has_int_(false) {} + + template + friend bool holds_alternative(const SampleVariantIntString& value) { + return value.has_int_ == std::is_same::value; + } + + template + friend const T& get(const SampleVariantIntString& value) { + return value.get_impl(static_cast(nullptr)); + } + + private: + const int& get_impl(int*) const { return i_; } + const std::string& get_impl(std::string*) const { return s_; } + + int i_; + std::string s_; + bool has_int_; +}; + +TEST(VariantTest, DescribesSelf) { + const Matcher m = VariantWith(Eq(1)); + EXPECT_THAT(Describe(m), 
ContainsRegex("is a variant<> with value of type " + "'.*' and the value is equal to 1")); +} + +TEST(VariantTest, ExplainsSelf) { + const Matcher m = VariantWith(Eq(1)); + EXPECT_THAT(Explain(m, SampleVariantIntString(1)), + ContainsRegex("whose value 1")); + EXPECT_THAT(Explain(m, SampleVariantIntString("A")), + HasSubstr("whose value is not of type '")); + EXPECT_THAT(Explain(m, SampleVariantIntString(2)), + "whose value 2 doesn't match"); +} + +TEST(VariantTest, FullMatch) { + Matcher m = VariantWith(Eq(1)); + EXPECT_TRUE(m.Matches(SampleVariantIntString(1))); + + m = VariantWith(Eq("1")); + EXPECT_TRUE(m.Matches(SampleVariantIntString("1"))); +} + +TEST(VariantTest, TypeDoesNotMatch) { + Matcher m = VariantWith(Eq(1)); + EXPECT_FALSE(m.Matches(SampleVariantIntString("1"))); + + m = VariantWith(Eq("1")); + EXPECT_FALSE(m.Matches(SampleVariantIntString(1))); +} + +TEST(VariantTest, InnerDoesNotMatch) { + Matcher m = VariantWith(Eq(1)); + EXPECT_FALSE(m.Matches(SampleVariantIntString(2))); + + m = VariantWith(Eq("1")); + EXPECT_FALSE(m.Matches(SampleVariantIntString("2"))); +} + +class SampleAnyType { + public: + explicit SampleAnyType(int i) : index_(0), i_(i) {} + explicit SampleAnyType(const std::string& s) : index_(1), s_(s) {} + + template + friend const T* any_cast(const SampleAnyType* any) { + return any->get_impl(static_cast(nullptr)); + } + + private: + int index_; + int i_; + std::string s_; + + const int* get_impl(int*) const { return index_ == 0 ? &i_ : nullptr; } + const std::string* get_impl(std::string*) const { + return index_ == 1 ? 
&s_ : nullptr; + } +}; + +TEST(AnyWithTest, FullMatch) { + Matcher m = AnyWith(Eq(1)); + EXPECT_TRUE(m.Matches(SampleAnyType(1))); +} + +TEST(AnyWithTest, TestBadCastType) { + Matcher m = AnyWith(Eq("fail")); + EXPECT_FALSE(m.Matches(SampleAnyType(1))); +} + +TEST(AnyWithTest, TestUseInContainers) { + std::vector a; + a.emplace_back(1); + a.emplace_back(2); + a.emplace_back(3); + EXPECT_THAT( + a, ElementsAreArray({AnyWith(1), AnyWith(2), AnyWith(3)})); + + std::vector b; + b.emplace_back("hello"); + b.emplace_back("merhaba"); + b.emplace_back("salut"); + EXPECT_THAT(b, ElementsAreArray({AnyWith("hello"), + AnyWith("merhaba"), + AnyWith("salut")})); +} +TEST(AnyWithTest, TestCompare) { + EXPECT_THAT(SampleAnyType(1), AnyWith(Gt(0))); +} + +TEST(AnyWithTest, DescribesSelf) { + const Matcher m = AnyWith(Eq(1)); + EXPECT_THAT(Describe(m), ContainsRegex("is an 'any' type with value of type " + "'.*' and the value is equal to 1")); +} + +TEST(AnyWithTest, ExplainsSelf) { + const Matcher m = AnyWith(Eq(1)); + + EXPECT_THAT(Explain(m, SampleAnyType(1)), ContainsRegex("whose value 1")); + EXPECT_THAT(Explain(m, SampleAnyType("A")), + HasSubstr("whose value is not of type '")); + EXPECT_THAT(Explain(m, SampleAnyType(2)), "whose value 2 doesn't match"); +} + +// Tests Args(m). 
+ +TEST(ArgsTest, AcceptsZeroTemplateArg) { + const std::tuple t(5, true); + EXPECT_THAT(t, Args<>(Eq(std::tuple<>()))); + EXPECT_THAT(t, Not(Args<>(Ne(std::tuple<>())))); +} + +TEST(ArgsTest, AcceptsOneTemplateArg) { + const std::tuple t(5, true); + EXPECT_THAT(t, Args<0>(Eq(std::make_tuple(5)))); + EXPECT_THAT(t, Args<1>(Eq(std::make_tuple(true)))); + EXPECT_THAT(t, Not(Args<1>(Eq(std::make_tuple(false))))); +} + +TEST(ArgsTest, AcceptsTwoTemplateArgs) { + const std::tuple t(4, 5, 6L); // NOLINT + + EXPECT_THAT(t, (Args<0, 1>(Lt()))); + EXPECT_THAT(t, (Args<1, 2>(Lt()))); + EXPECT_THAT(t, Not(Args<0, 2>(Gt()))); +} + +TEST(ArgsTest, AcceptsRepeatedTemplateArgs) { + const std::tuple t(4, 5, 6L); // NOLINT + EXPECT_THAT(t, (Args<0, 0>(Eq()))); + EXPECT_THAT(t, Not(Args<1, 1>(Ne()))); +} + +TEST(ArgsTest, AcceptsDecreasingTemplateArgs) { + const std::tuple t(4, 5, 6L); // NOLINT + EXPECT_THAT(t, (Args<2, 0>(Gt()))); + EXPECT_THAT(t, Not(Args<2, 1>(Lt()))); +} + +MATCHER(SumIsZero, "") { + return std::get<0>(arg) + std::get<1>(arg) + std::get<2>(arg) == 0; +} + +TEST(ArgsTest, AcceptsMoreTemplateArgsThanArityOfOriginalTuple) { + EXPECT_THAT(std::make_tuple(-1, 2), (Args<0, 0, 1>(SumIsZero()))); + EXPECT_THAT(std::make_tuple(1, 2), Not(Args<0, 0, 1>(SumIsZero()))); +} + +TEST(ArgsTest, CanBeNested) { + const std::tuple t(4, 5, 6L, 6); // NOLINT + EXPECT_THAT(t, (Args<1, 2, 3>(Args<1, 2>(Eq())))); + EXPECT_THAT(t, (Args<0, 1, 3>(Args<0, 2>(Lt())))); +} + +TEST(ArgsTest, CanMatchTupleByValue) { + typedef std::tuple Tuple3; + const Matcher m = Args<1, 2>(Lt()); + EXPECT_TRUE(m.Matches(Tuple3('a', 1, 2))); + EXPECT_FALSE(m.Matches(Tuple3('b', 2, 2))); +} + +TEST(ArgsTest, CanMatchTupleByReference) { + typedef std::tuple Tuple3; + const Matcher m = Args<0, 1>(Lt()); + EXPECT_TRUE(m.Matches(Tuple3('a', 'b', 2))); + EXPECT_FALSE(m.Matches(Tuple3('b', 'b', 2))); +} + +// Validates that arg is printed as str. 
+MATCHER_P(PrintsAs, str, "") { return testing::PrintToString(arg) == str; } + +TEST(ArgsTest, AcceptsTenTemplateArgs) { + EXPECT_THAT(std::make_tuple(0, 1L, 2, 3L, 4, 5, 6, 7, 8, 9), + (Args<9, 8, 7, 6, 5, 4, 3, 2, 1, 0>( + PrintsAs("(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)")))); + EXPECT_THAT(std::make_tuple(0, 1L, 2, 3L, 4, 5, 6, 7, 8, 9), + Not(Args<9, 8, 7, 6, 5, 4, 3, 2, 1, 0>( + PrintsAs("(0, 8, 7, 6, 5, 4, 3, 2, 1, 0)")))); +} + +TEST(ArgsTest, DescirbesSelfCorrectly) { + const Matcher> m = Args<2, 0>(Lt()); + EXPECT_EQ( + "are a tuple whose fields (#2, #0) are a pair where " + "the first < the second", + Describe(m)); +} + +TEST(ArgsTest, DescirbesNestedArgsCorrectly) { + const Matcher&> m = + Args<0, 2, 3>(Args<2, 0>(Lt())); + EXPECT_EQ( + "are a tuple whose fields (#0, #2, #3) are a tuple " + "whose fields (#2, #0) are a pair where the first < the second", + Describe(m)); +} + +TEST(ArgsTest, DescribesNegationCorrectly) { + const Matcher> m = Args<1, 0>(Gt()); + EXPECT_EQ( + "are a tuple whose fields (#1, #0) aren't a pair " + "where the first > the second", + DescribeNegation(m)); +} + +TEST(ArgsTest, ExplainsMatchResultWithoutInnerExplanation) { + const Matcher> m = Args<1, 2>(Eq()); + EXPECT_EQ("whose fields (#1, #2) are (42, 42)", + Explain(m, std::make_tuple(false, 42, 42))); + EXPECT_EQ("whose fields (#1, #2) are (42, 43)", + Explain(m, std::make_tuple(false, 42, 43))); +} + +// For testing Args<>'s explanation. 
+class LessThanMatcher : public MatcherInterface> { + public: + void DescribeTo(::std::ostream* /*os*/) const override {} + + bool MatchAndExplain(std::tuple value, + MatchResultListener* listener) const override { + const int diff = std::get<0>(value) - std::get<1>(value); + if (diff > 0) { + *listener << "where the first value is " << diff + << " more than the second"; + } + return diff < 0; + } +}; + +Matcher> LessThan() { + return MakeMatcher(new LessThanMatcher); +} + +TEST(ArgsTest, ExplainsMatchResultWithInnerExplanation) { + const Matcher> m = Args<0, 2>(LessThan()); + EXPECT_EQ( + "whose fields (#0, #2) are ('a' (97, 0x61), 42), " + "where the first value is 55 more than the second", + Explain(m, std::make_tuple('a', 42, 42))); + EXPECT_EQ("whose fields (#0, #2) are ('\\0', 43)", + Explain(m, std::make_tuple('\0', 42, 43))); +} + +// Tests for the MATCHER*() macro family. + +// Tests that a simple MATCHER() definition works. + +MATCHER(IsEven, "") { return (arg % 2) == 0; } + +TEST(MatcherMacroTest, Works) { + const Matcher m = IsEven(); + EXPECT_TRUE(m.Matches(6)); + EXPECT_FALSE(m.Matches(7)); + + EXPECT_EQ("is even", Describe(m)); + EXPECT_EQ("not (is even)", DescribeNegation(m)); + EXPECT_EQ("", Explain(m, 6)); + EXPECT_EQ("", Explain(m, 7)); +} + +// This also tests that the description string can reference 'negation'. +MATCHER(IsEven2, negation ? "is odd" : "is even") { + if ((arg % 2) == 0) { + // Verifies that we can stream to result_listener, a listener + // supplied by the MATCHER macro implicitly. + *result_listener << "OK"; + return true; + } else { + *result_listener << "% 2 == " << (arg % 2); + return false; + } +} + +// This also tests that the description string can reference matcher +// parameters. +MATCHER_P2(EqSumOf, x, y, + std::string(negation ? 
"doesn't equal" : "equals") + " the sum of " + + PrintToString(x) + " and " + PrintToString(y)) { + if (arg == (x + y)) { + *result_listener << "OK"; + return true; + } else { + // Verifies that we can stream to the underlying stream of + // result_listener. + if (result_listener->stream() != nullptr) { + *result_listener->stream() << "diff == " << (x + y - arg); + } + return false; + } +} + +// Tests that the matcher description can reference 'negation' and the +// matcher parameters. +TEST(MatcherMacroTest, DescriptionCanReferenceNegationAndParameters) { + const Matcher m1 = IsEven2(); + EXPECT_EQ("is even", Describe(m1)); + EXPECT_EQ("is odd", DescribeNegation(m1)); + + const Matcher m2 = EqSumOf(5, 9); + EXPECT_EQ("equals the sum of 5 and 9", Describe(m2)); + EXPECT_EQ("doesn't equal the sum of 5 and 9", DescribeNegation(m2)); +} + +// Tests explaining match result in a MATCHER* macro. +TEST(MatcherMacroTest, CanExplainMatchResult) { + const Matcher m1 = IsEven2(); + EXPECT_EQ("OK", Explain(m1, 4)); + EXPECT_EQ("% 2 == 1", Explain(m1, 5)); + + const Matcher m2 = EqSumOf(1, 2); + EXPECT_EQ("OK", Explain(m2, 3)); + EXPECT_EQ("diff == -1", Explain(m2, 4)); +} + +// Tests that the body of MATCHER() can reference the type of the +// value being matched. + +MATCHER(IsEmptyString, "") { + StaticAssertTypeEq<::std::string, arg_type>(); + return arg.empty(); +} + +MATCHER(IsEmptyStringByRef, "") { + StaticAssertTypeEq(); + return arg.empty(); +} + +TEST(MatcherMacroTest, CanReferenceArgType) { + const Matcher<::std::string> m1 = IsEmptyString(); + EXPECT_TRUE(m1.Matches("")); + + const Matcher m2 = IsEmptyStringByRef(); + EXPECT_TRUE(m2.Matches("")); +} + +// Tests that MATCHER() can be used in a namespace. 
+ +namespace matcher_test { +MATCHER(IsOdd, "") { return (arg % 2) != 0; } +} // namespace matcher_test + +TEST(MatcherMacroTest, WorksInNamespace) { + Matcher m = matcher_test::IsOdd(); + EXPECT_FALSE(m.Matches(4)); + EXPECT_TRUE(m.Matches(5)); +} + +// Tests that Value() can be used to compose matchers. +MATCHER(IsPositiveOdd, "") { + return Value(arg, matcher_test::IsOdd()) && arg > 0; +} + +TEST(MatcherMacroTest, CanBeComposedUsingValue) { + EXPECT_THAT(3, IsPositiveOdd()); + EXPECT_THAT(4, Not(IsPositiveOdd())); + EXPECT_THAT(-1, Not(IsPositiveOdd())); +} + +// Tests that a simple MATCHER_P() definition works. + +MATCHER_P(IsGreaterThan32And, n, "") { return arg > 32 && arg > n; } + +TEST(MatcherPMacroTest, Works) { + const Matcher m = IsGreaterThan32And(5); + EXPECT_TRUE(m.Matches(36)); + EXPECT_FALSE(m.Matches(5)); + + EXPECT_EQ("is greater than 32 and (n: 5)", Describe(m)); + EXPECT_EQ("not (is greater than 32 and (n: 5))", DescribeNegation(m)); + EXPECT_EQ("", Explain(m, 36)); + EXPECT_EQ("", Explain(m, 5)); +} + +// Tests that the description is calculated correctly from the matcher name. +MATCHER_P(_is_Greater_Than32and_, n, "") { return arg > 32 && arg > n; } + +TEST(MatcherPMacroTest, GeneratesCorrectDescription) { + const Matcher m = _is_Greater_Than32and_(5); + + EXPECT_EQ("is greater than 32 and (n: 5)", Describe(m)); + EXPECT_EQ("not (is greater than 32 and (n: 5))", DescribeNegation(m)); + EXPECT_EQ("", Explain(m, 36)); + EXPECT_EQ("", Explain(m, 5)); +} + +// Tests that a MATCHER_P matcher can be explicitly instantiated with +// a reference parameter type. 
+ +class UncopyableFoo { + public: + explicit UncopyableFoo(char value) : value_(value) { (void)value_; } + + UncopyableFoo(const UncopyableFoo&) = delete; + void operator=(const UncopyableFoo&) = delete; + + private: + char value_; +}; + +MATCHER_P(ReferencesUncopyable, variable, "") { return &arg == &variable; } + +TEST(MatcherPMacroTest, WorksWhenExplicitlyInstantiatedWithReference) { + UncopyableFoo foo1('1'), foo2('2'); + const Matcher m = + ReferencesUncopyable(foo1); + + EXPECT_TRUE(m.Matches(foo1)); + EXPECT_FALSE(m.Matches(foo2)); + + // We don't want the address of the parameter printed, as most + // likely it will just annoy the user. If the address is + // interesting, the user should consider passing the parameter by + // pointer instead. + EXPECT_EQ("references uncopyable (variable: 1-byte object <31>)", + Describe(m)); +} + +// Tests that the body of MATCHER_Pn() can reference the parameter +// types. + +MATCHER_P3(ParamTypesAreIntLongAndChar, foo, bar, baz, "") { + StaticAssertTypeEq(); + StaticAssertTypeEq(); // NOLINT + StaticAssertTypeEq(); + return arg == 0; +} + +TEST(MatcherPnMacroTest, CanReferenceParamTypes) { + EXPECT_THAT(0, ParamTypesAreIntLongAndChar(10, 20L, 'a')); +} + +// Tests that a MATCHER_Pn matcher can be explicitly instantiated with +// reference parameter types. 
+ +MATCHER_P2(ReferencesAnyOf, variable1, variable2, "") { + return &arg == &variable1 || &arg == &variable2; +} + +TEST(MatcherPnMacroTest, WorksWhenExplicitlyInstantiatedWithReferences) { + UncopyableFoo foo1('1'), foo2('2'), foo3('3'); + const Matcher const_m = + ReferencesAnyOf(foo1, foo2); + + EXPECT_TRUE(const_m.Matches(foo1)); + EXPECT_TRUE(const_m.Matches(foo2)); + EXPECT_FALSE(const_m.Matches(foo3)); + + const Matcher m = + ReferencesAnyOf(foo1, foo2); + + EXPECT_TRUE(m.Matches(foo1)); + EXPECT_TRUE(m.Matches(foo2)); + EXPECT_FALSE(m.Matches(foo3)); +} + +TEST(MatcherPnMacroTest, + GeneratesCorretDescriptionWhenExplicitlyInstantiatedWithReferences) { + UncopyableFoo foo1('1'), foo2('2'); + const Matcher m = + ReferencesAnyOf(foo1, foo2); + + // We don't want the addresses of the parameters printed, as most + // likely they will just annoy the user. If the addresses are + // interesting, the user should consider passing the parameters by + // pointers instead. + EXPECT_EQ( + "references any of (variable1: 1-byte object <31>, variable2: 1-byte " + "object <32>)", + Describe(m)); +} + +// Tests that a simple MATCHER_P2() definition works. + +MATCHER_P2(IsNotInClosedRange, low, hi, "") { return arg < low || arg > hi; } + +TEST(MatcherPnMacroTest, Works) { + const Matcher m = IsNotInClosedRange(10, 20); // NOLINT + EXPECT_TRUE(m.Matches(36L)); + EXPECT_FALSE(m.Matches(15L)); + + EXPECT_EQ("is not in closed range (low: 10, hi: 20)", Describe(m)); + EXPECT_EQ("not (is not in closed range (low: 10, hi: 20))", + DescribeNegation(m)); + EXPECT_EQ("", Explain(m, 36L)); + EXPECT_EQ("", Explain(m, 15L)); +} + +// Tests that MATCHER*() definitions can be overloaded on the number +// of parameters; also tests MATCHER_Pn() where n >= 3. 
+ +MATCHER(EqualsSumOf, "") { return arg == 0; } +MATCHER_P(EqualsSumOf, a, "") { return arg == a; } +MATCHER_P2(EqualsSumOf, a, b, "") { return arg == a + b; } +MATCHER_P3(EqualsSumOf, a, b, c, "") { return arg == a + b + c; } +MATCHER_P4(EqualsSumOf, a, b, c, d, "") { return arg == a + b + c + d; } +MATCHER_P5(EqualsSumOf, a, b, c, d, e, "") { return arg == a + b + c + d + e; } +MATCHER_P6(EqualsSumOf, a, b, c, d, e, f, "") { + return arg == a + b + c + d + e + f; +} +MATCHER_P7(EqualsSumOf, a, b, c, d, e, f, g, "") { + return arg == a + b + c + d + e + f + g; +} +MATCHER_P8(EqualsSumOf, a, b, c, d, e, f, g, h, "") { + return arg == a + b + c + d + e + f + g + h; +} +MATCHER_P9(EqualsSumOf, a, b, c, d, e, f, g, h, i, "") { + return arg == a + b + c + d + e + f + g + h + i; +} +MATCHER_P10(EqualsSumOf, a, b, c, d, e, f, g, h, i, j, "") { + return arg == a + b + c + d + e + f + g + h + i + j; +} + +TEST(MatcherPnMacroTest, CanBeOverloadedOnNumberOfParameters) { + EXPECT_THAT(0, EqualsSumOf()); + EXPECT_THAT(1, EqualsSumOf(1)); + EXPECT_THAT(12, EqualsSumOf(10, 2)); + EXPECT_THAT(123, EqualsSumOf(100, 20, 3)); + EXPECT_THAT(1234, EqualsSumOf(1000, 200, 30, 4)); + EXPECT_THAT(12345, EqualsSumOf(10000, 2000, 300, 40, 5)); + EXPECT_THAT("abcdef", + EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f')); + EXPECT_THAT("abcdefg", + EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f', 'g')); + EXPECT_THAT("abcdefgh", EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", + 'f', 'g', "h")); + EXPECT_THAT("abcdefghi", EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", + 'f', 'g', "h", 'i')); + EXPECT_THAT("abcdefghij", + EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f', 'g', "h", + 'i', ::std::string("j"))); + + EXPECT_THAT(1, Not(EqualsSumOf())); + EXPECT_THAT(-1, Not(EqualsSumOf(1))); + EXPECT_THAT(-12, Not(EqualsSumOf(10, 2))); + EXPECT_THAT(-123, Not(EqualsSumOf(100, 20, 3))); + EXPECT_THAT(-1234, Not(EqualsSumOf(1000, 200, 30, 4))); + EXPECT_THAT(-12345, 
Not(EqualsSumOf(10000, 2000, 300, 40, 5))); + EXPECT_THAT("abcdef ", + Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f'))); + EXPECT_THAT("abcdefg ", Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", + "e", 'f', 'g'))); + EXPECT_THAT("abcdefgh ", Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", + "e", 'f', 'g', "h"))); + EXPECT_THAT("abcdefghi ", Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", + "e", 'f', 'g', "h", 'i'))); + EXPECT_THAT("abcdefghij ", + Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f', 'g', + "h", 'i', ::std::string("j")))); +} + +// Tests that a MATCHER_Pn() definition can be instantiated with any +// compatible parameter types. +TEST(MatcherPnMacroTest, WorksForDifferentParameterTypes) { + EXPECT_THAT(123, EqualsSumOf(100L, 20, static_cast(3))); + EXPECT_THAT("abcd", EqualsSumOf(::std::string("a"), "b", 'c', "d")); + + EXPECT_THAT(124, Not(EqualsSumOf(100L, 20, static_cast(3)))); + EXPECT_THAT("abcde", Not(EqualsSumOf(::std::string("a"), "b", 'c', "d"))); +} + +// Tests that the matcher body can promote the parameter types. + +MATCHER_P2(EqConcat, prefix, suffix, "") { + // The following lines promote the two parameters to desired types. + std::string prefix_str(prefix); + char suffix_char = static_cast(suffix); + return arg == prefix_str + suffix_char; +} + +TEST(MatcherPnMacroTest, SimpleTypePromotion) { + Matcher no_promo = EqConcat(std::string("foo"), 't'); + Matcher promo = EqConcat("foo", static_cast('t')); + EXPECT_FALSE(no_promo.Matches("fool")); + EXPECT_FALSE(promo.Matches("fool")); + EXPECT_TRUE(no_promo.Matches("foot")); + EXPECT_TRUE(promo.Matches("foot")); +} + +// Verifies the type of a MATCHER*. + +TEST(MatcherPnMacroTest, TypesAreCorrect) { + // EqualsSumOf() must be assignable to a EqualsSumOfMatcher variable. + EqualsSumOfMatcher a0 = EqualsSumOf(); + + // EqualsSumOf(1) must be assignable to a EqualsSumOfMatcherP variable. 
+ EqualsSumOfMatcherP a1 = EqualsSumOf(1); + + // EqualsSumOf(p1, ..., pk) must be assignable to a EqualsSumOfMatcherPk + // variable, and so on. + EqualsSumOfMatcherP2 a2 = EqualsSumOf(1, '2'); + EqualsSumOfMatcherP3 a3 = EqualsSumOf(1, 2, '3'); + EqualsSumOfMatcherP4 a4 = EqualsSumOf(1, 2, 3, '4'); + EqualsSumOfMatcherP5 a5 = + EqualsSumOf(1, 2, 3, 4, '5'); + EqualsSumOfMatcherP6 a6 = + EqualsSumOf(1, 2, 3, 4, 5, '6'); + EqualsSumOfMatcherP7 a7 = + EqualsSumOf(1, 2, 3, 4, 5, 6, '7'); + EqualsSumOfMatcherP8 a8 = + EqualsSumOf(1, 2, 3, 4, 5, 6, 7, '8'); + EqualsSumOfMatcherP9 a9 = + EqualsSumOf(1, 2, 3, 4, 5, 6, 7, 8, '9'); + EqualsSumOfMatcherP10 a10 = + EqualsSumOf(1, 2, 3, 4, 5, 6, 7, 8, 9, '0'); + + // Avoid "unused variable" warnings. + (void)a0; + (void)a1; + (void)a2; + (void)a3; + (void)a4; + (void)a5; + (void)a6; + (void)a7; + (void)a8; + (void)a9; + (void)a10; +} + +// Tests that matcher-typed parameters can be used in Value() inside a +// MATCHER_Pn definition. + +// Succeeds if arg matches exactly 2 of the 3 matchers. +MATCHER_P3(TwoOf, m1, m2, m3, "") { + const int count = static_cast(Value(arg, m1)) + + static_cast(Value(arg, m2)) + + static_cast(Value(arg, m3)); + return count == 2; +} + +TEST(MatcherPnMacroTest, CanUseMatcherTypedParameterInValue) { + EXPECT_THAT(42, TwoOf(Gt(0), Lt(50), Eq(10))); + EXPECT_THAT(0, Not(TwoOf(Gt(-1), Lt(1), Eq(0)))); +} + +// Tests Contains().Times(). 
+ +INSTANTIATE_GTEST_MATCHER_TEST_P(ContainsTimes); + +TEST(ContainsTimes, ListMatchesWhenElementQuantityMatches) { + list some_list; + some_list.push_back(3); + some_list.push_back(1); + some_list.push_back(2); + some_list.push_back(3); + EXPECT_THAT(some_list, Contains(3).Times(2)); + EXPECT_THAT(some_list, Contains(2).Times(1)); + EXPECT_THAT(some_list, Contains(Ge(2)).Times(3)); + EXPECT_THAT(some_list, Contains(Ge(2)).Times(Gt(2))); + EXPECT_THAT(some_list, Contains(4).Times(0)); + EXPECT_THAT(some_list, Contains(_).Times(4)); + EXPECT_THAT(some_list, Not(Contains(5).Times(1))); + EXPECT_THAT(some_list, Contains(5).Times(_)); // Times(_) always matches + EXPECT_THAT(some_list, Not(Contains(3).Times(1))); + EXPECT_THAT(some_list, Contains(3).Times(Not(1))); + EXPECT_THAT(list{}, Not(Contains(_))); +} + +TEST_P(ContainsTimesP, ExplainsMatchResultCorrectly) { + const int a[2] = {1, 2}; + Matcher m = Contains(2).Times(3); + EXPECT_EQ( + "whose element #1 matches but whose match quantity of 1 does not match", + Explain(m, a)); + + m = Contains(3).Times(0); + EXPECT_EQ("has no element that matches and whose match quantity of 0 matches", + Explain(m, a)); + + m = Contains(3).Times(4); + EXPECT_EQ( + "has no element that matches and whose match quantity of 0 does not " + "match", + Explain(m, a)); + + m = Contains(2).Times(4); + EXPECT_EQ( + "whose element #1 matches but whose match quantity of 1 does not " + "match", + Explain(m, a)); + + m = Contains(GreaterThan(0)).Times(2); + EXPECT_EQ("whose elements (0, 1) match and whose match quantity of 2 matches", + Explain(m, a)); + + m = Contains(GreaterThan(10)).Times(Gt(1)); + EXPECT_EQ( + "has no element that matches and whose match quantity of 0 does not " + "match", + Explain(m, a)); + + m = Contains(GreaterThan(0)).Times(GreaterThan(5)); + EXPECT_EQ( + "whose elements (0, 1) match but whose match quantity of 2 does not " + "match, which is 3 less than 5", + Explain(m, a)); +} + +TEST(ContainsTimes, 
DescribesItselfCorrectly) { + Matcher> m = Contains(1).Times(2); + EXPECT_EQ("quantity of elements that match is equal to 1 is equal to 2", + Describe(m)); + + Matcher> m2 = Not(m); + EXPECT_EQ("quantity of elements that match is equal to 1 isn't equal to 2", + Describe(m2)); +} + +// Tests AllOfArray() + +TEST(AllOfArrayTest, BasicForms) { + // Iterator + std::vector v0{}; + std::vector v1{1}; + std::vector v2{2, 3}; + std::vector v3{4, 4, 4}; + EXPECT_THAT(0, AllOfArray(v0.begin(), v0.end())); + EXPECT_THAT(1, AllOfArray(v1.begin(), v1.end())); + EXPECT_THAT(2, Not(AllOfArray(v1.begin(), v1.end()))); + EXPECT_THAT(3, Not(AllOfArray(v2.begin(), v2.end()))); + EXPECT_THAT(4, AllOfArray(v3.begin(), v3.end())); + // Pointer + size + int ar[6] = {1, 2, 3, 4, 4, 4}; + EXPECT_THAT(0, AllOfArray(ar, 0)); + EXPECT_THAT(1, AllOfArray(ar, 1)); + EXPECT_THAT(2, Not(AllOfArray(ar, 1))); + EXPECT_THAT(3, Not(AllOfArray(ar + 1, 3))); + EXPECT_THAT(4, AllOfArray(ar + 3, 3)); + // Array + // int ar0[0]; Not usable + int ar1[1] = {1}; + int ar2[2] = {2, 3}; + int ar3[3] = {4, 4, 4}; + // EXPECT_THAT(0, Not(AllOfArray(ar0))); // Cannot work + EXPECT_THAT(1, AllOfArray(ar1)); + EXPECT_THAT(2, Not(AllOfArray(ar1))); + EXPECT_THAT(3, Not(AllOfArray(ar2))); + EXPECT_THAT(4, AllOfArray(ar3)); + // Container + EXPECT_THAT(0, AllOfArray(v0)); + EXPECT_THAT(1, AllOfArray(v1)); + EXPECT_THAT(2, Not(AllOfArray(v1))); + EXPECT_THAT(3, Not(AllOfArray(v2))); + EXPECT_THAT(4, AllOfArray(v3)); + // Initializer + EXPECT_THAT(0, AllOfArray({})); // Requires template arg. 
+ EXPECT_THAT(1, AllOfArray({1})); + EXPECT_THAT(2, Not(AllOfArray({1}))); + EXPECT_THAT(3, Not(AllOfArray({2, 3}))); + EXPECT_THAT(4, AllOfArray({4, 4, 4})); +} + +TEST(AllOfArrayTest, Matchers) { + // vector + std::vector> matchers{Ge(1), Lt(2)}; + EXPECT_THAT(0, Not(AllOfArray(matchers))); + EXPECT_THAT(1, AllOfArray(matchers)); + EXPECT_THAT(2, Not(AllOfArray(matchers))); + // initializer_list + EXPECT_THAT(0, Not(AllOfArray({Ge(0), Ge(1)}))); + EXPECT_THAT(1, AllOfArray({Ge(0), Ge(1)})); +} + +INSTANTIATE_GTEST_MATCHER_TEST_P(AnyOfArrayTest); + +TEST(AnyOfArrayTest, BasicForms) { + // Iterator + std::vector v0{}; + std::vector v1{1}; + std::vector v2{2, 3}; + EXPECT_THAT(0, Not(AnyOfArray(v0.begin(), v0.end()))); + EXPECT_THAT(1, AnyOfArray(v1.begin(), v1.end())); + EXPECT_THAT(2, Not(AnyOfArray(v1.begin(), v1.end()))); + EXPECT_THAT(3, AnyOfArray(v2.begin(), v2.end())); + EXPECT_THAT(4, Not(AnyOfArray(v2.begin(), v2.end()))); + // Pointer + size + int ar[3] = {1, 2, 3}; + EXPECT_THAT(0, Not(AnyOfArray(ar, 0))); + EXPECT_THAT(1, AnyOfArray(ar, 1)); + EXPECT_THAT(2, Not(AnyOfArray(ar, 1))); + EXPECT_THAT(3, AnyOfArray(ar + 1, 2)); + EXPECT_THAT(4, Not(AnyOfArray(ar + 1, 2))); + // Array + // int ar0[0]; Not usable + int ar1[1] = {1}; + int ar2[2] = {2, 3}; + // EXPECT_THAT(0, Not(AnyOfArray(ar0))); // Cannot work + EXPECT_THAT(1, AnyOfArray(ar1)); + EXPECT_THAT(2, Not(AnyOfArray(ar1))); + EXPECT_THAT(3, AnyOfArray(ar2)); + EXPECT_THAT(4, Not(AnyOfArray(ar2))); + // Container + EXPECT_THAT(0, Not(AnyOfArray(v0))); + EXPECT_THAT(1, AnyOfArray(v1)); + EXPECT_THAT(2, Not(AnyOfArray(v1))); + EXPECT_THAT(3, AnyOfArray(v2)); + EXPECT_THAT(4, Not(AnyOfArray(v2))); + // Initializer + EXPECT_THAT(0, Not(AnyOfArray({}))); // Requires template arg. 
+ EXPECT_THAT(1, AnyOfArray({1})); + EXPECT_THAT(2, Not(AnyOfArray({1}))); + EXPECT_THAT(3, AnyOfArray({2, 3})); + EXPECT_THAT(4, Not(AnyOfArray({2, 3}))); +} + +TEST(AnyOfArrayTest, Matchers) { + // We negate test AllOfArrayTest.Matchers. + // vector + std::vector> matchers{Lt(1), Ge(2)}; + EXPECT_THAT(0, AnyOfArray(matchers)); + EXPECT_THAT(1, Not(AnyOfArray(matchers))); + EXPECT_THAT(2, AnyOfArray(matchers)); + // initializer_list + EXPECT_THAT(0, AnyOfArray({Lt(0), Lt(1)})); + EXPECT_THAT(1, Not(AllOfArray({Lt(0), Lt(1)}))); +} + +TEST_P(AnyOfArrayTestP, ExplainsMatchResultCorrectly) { + // AnyOfArray and AllOfArry use the same underlying template-template, + // thus it is sufficient to test one here. + const std::vector v0{}; + const std::vector v1{1}; + const std::vector v2{2, 3}; + const Matcher m0 = AnyOfArray(v0); + const Matcher m1 = AnyOfArray(v1); + const Matcher m2 = AnyOfArray(v2); + EXPECT_EQ("", Explain(m0, 0)); + EXPECT_EQ("", Explain(m1, 1)); + EXPECT_EQ("", Explain(m1, 2)); + EXPECT_EQ("", Explain(m2, 3)); + EXPECT_EQ("", Explain(m2, 4)); + EXPECT_EQ("()", Describe(m0)); + EXPECT_EQ("(is equal to 1)", Describe(m1)); + EXPECT_EQ("(is equal to 2) or (is equal to 3)", Describe(m2)); + EXPECT_EQ("()", DescribeNegation(m0)); + EXPECT_EQ("(isn't equal to 1)", DescribeNegation(m1)); + EXPECT_EQ("(isn't equal to 2) and (isn't equal to 3)", DescribeNegation(m2)); + // Explain with matchers + const Matcher g1 = AnyOfArray({GreaterThan(1)}); + const Matcher g2 = AnyOfArray({GreaterThan(1), GreaterThan(2)}); + // Explains the first positive match and all prior negative matches... 
+ EXPECT_EQ("which is 1 less than 1", Explain(g1, 0)); + EXPECT_EQ("which is the same as 1", Explain(g1, 1)); + EXPECT_EQ("which is 1 more than 1", Explain(g1, 2)); + EXPECT_EQ("which is 1 less than 1, and which is 2 less than 2", + Explain(g2, 0)); + EXPECT_EQ("which is the same as 1, and which is 1 less than 2", + Explain(g2, 1)); + EXPECT_EQ("which is 1 more than 1", // Only the first + Explain(g2, 2)); +} + +MATCHER(IsNotNull, "") { return arg != nullptr; } + +// Verifies that a matcher defined using MATCHER() can work on +// move-only types. +TEST(MatcherMacroTest, WorksOnMoveOnlyType) { + std::unique_ptr p(new int(3)); + EXPECT_THAT(p, IsNotNull()); + EXPECT_THAT(std::unique_ptr(), Not(IsNotNull())); +} + +MATCHER_P(UniquePointee, pointee, "") { return *arg == pointee; } + +// Verifies that a matcher defined using MATCHER_P*() can work on +// move-only types. +TEST(MatcherPMacroTest, WorksOnMoveOnlyType) { + std::unique_ptr p(new int(3)); + EXPECT_THAT(p, UniquePointee(3)); + EXPECT_THAT(p, Not(UniquePointee(2))); +} + +#if GTEST_HAS_EXCEPTIONS + +// std::function is used below for compatibility with older copies of +// GCC. Normally, a raw lambda is all that is needed. 
+ +// Test that examples from documentation compile +TEST(ThrowsTest, Examples) { + EXPECT_THAT( + std::function([]() { throw std::runtime_error("message"); }), + Throws()); + + EXPECT_THAT( + std::function([]() { throw std::runtime_error("message"); }), + ThrowsMessage(HasSubstr("message"))); +} + +TEST(ThrowsTest, PrintsExceptionWhat) { + EXPECT_THAT( + std::function([]() { throw std::runtime_error("ABC123XYZ"); }), + ThrowsMessage(HasSubstr("ABC123XYZ"))); +} + +TEST(ThrowsTest, DoesNotGenerateDuplicateCatchClauseWarning) { + EXPECT_THAT(std::function([]() { throw std::exception(); }), + Throws()); +} + +TEST(ThrowsTest, CallableExecutedExactlyOnce) { + size_t a = 0; + + EXPECT_THAT(std::function([&a]() { + a++; + throw 10; + }), + Throws()); + EXPECT_EQ(a, 1u); + + EXPECT_THAT(std::function([&a]() { + a++; + throw std::runtime_error("message"); + }), + Throws()); + EXPECT_EQ(a, 2u); + + EXPECT_THAT(std::function([&a]() { + a++; + throw std::runtime_error("message"); + }), + ThrowsMessage(HasSubstr("message"))); + EXPECT_EQ(a, 3u); + + EXPECT_THAT(std::function([&a]() { + a++; + throw std::runtime_error("message"); + }), + Throws( + Property(&std::runtime_error::what, HasSubstr("message")))); + EXPECT_EQ(a, 4u); +} + +TEST(ThrowsTest, Describe) { + Matcher> matcher = Throws(); + std::stringstream ss; + matcher.DescribeTo(&ss); + auto explanation = ss.str(); + EXPECT_THAT(explanation, HasSubstr("std::runtime_error")); +} + +TEST(ThrowsTest, Success) { + Matcher> matcher = Throws(); + StringMatchResultListener listener; + EXPECT_TRUE(matcher.MatchAndExplain( + []() { throw std::runtime_error("error message"); }, &listener)); + EXPECT_THAT(listener.str(), HasSubstr("std::runtime_error")); +} + +TEST(ThrowsTest, FailWrongType) { + Matcher> matcher = Throws(); + StringMatchResultListener listener; + EXPECT_FALSE(matcher.MatchAndExplain( + []() { throw std::logic_error("error message"); }, &listener)); + EXPECT_THAT(listener.str(), HasSubstr("std::logic_error")); + 
EXPECT_THAT(listener.str(), HasSubstr("\"error message\"")); +} + +TEST(ThrowsTest, FailWrongTypeNonStd) { + Matcher> matcher = Throws(); + StringMatchResultListener listener; + EXPECT_FALSE(matcher.MatchAndExplain([]() { throw 10; }, &listener)); + EXPECT_THAT(listener.str(), + HasSubstr("throws an exception of an unknown type")); +} + +TEST(ThrowsTest, FailNoThrow) { + Matcher> matcher = Throws(); + StringMatchResultListener listener; + EXPECT_FALSE(matcher.MatchAndExplain([]() { (void)0; }, &listener)); + EXPECT_THAT(listener.str(), HasSubstr("does not throw any exception")); +} + +class ThrowsPredicateTest + : public TestWithParam>> {}; + +TEST_P(ThrowsPredicateTest, Describe) { + Matcher> matcher = GetParam(); + std::stringstream ss; + matcher.DescribeTo(&ss); + auto explanation = ss.str(); + EXPECT_THAT(explanation, HasSubstr("std::runtime_error")); + EXPECT_THAT(explanation, HasSubstr("error message")); +} + +TEST_P(ThrowsPredicateTest, Success) { + Matcher> matcher = GetParam(); + StringMatchResultListener listener; + EXPECT_TRUE(matcher.MatchAndExplain( + []() { throw std::runtime_error("error message"); }, &listener)); + EXPECT_THAT(listener.str(), HasSubstr("std::runtime_error")); +} + +TEST_P(ThrowsPredicateTest, FailWrongType) { + Matcher> matcher = GetParam(); + StringMatchResultListener listener; + EXPECT_FALSE(matcher.MatchAndExplain( + []() { throw std::logic_error("error message"); }, &listener)); + EXPECT_THAT(listener.str(), HasSubstr("std::logic_error")); + EXPECT_THAT(listener.str(), HasSubstr("\"error message\"")); +} + +TEST_P(ThrowsPredicateTest, FailWrongTypeNonStd) { + Matcher> matcher = GetParam(); + StringMatchResultListener listener; + EXPECT_FALSE(matcher.MatchAndExplain([]() { throw 10; }, &listener)); + EXPECT_THAT(listener.str(), + HasSubstr("throws an exception of an unknown type")); +} + +TEST_P(ThrowsPredicateTest, FailNoThrow) { + Matcher> matcher = GetParam(); + StringMatchResultListener listener; + 
EXPECT_FALSE(matcher.MatchAndExplain([]() {}, &listener)); + EXPECT_THAT(listener.str(), HasSubstr("does not throw any exception")); +} + +INSTANTIATE_TEST_SUITE_P( + AllMessagePredicates, ThrowsPredicateTest, + Values(Matcher>( + ThrowsMessage(HasSubstr("error message"))))); + +// Tests that Throws(Matcher{}) compiles even when E2 != const E1&. +TEST(ThrowsPredicateCompilesTest, ExceptionMatcherAcceptsBroadType) { + { + Matcher> matcher = + ThrowsMessage(HasSubstr("error message")); + EXPECT_TRUE( + matcher.Matches([]() { throw std::runtime_error("error message"); })); + EXPECT_FALSE( + matcher.Matches([]() { throw std::runtime_error("wrong message"); })); + } + + { + Matcher inner = Eq(10); + Matcher> matcher = Throws(inner); + EXPECT_TRUE(matcher.Matches([]() { throw(uint32_t) 10; })); + EXPECT_FALSE(matcher.Matches([]() { throw(uint32_t) 11; })); + } +} + +// Tests that ThrowsMessage("message") is equivalent +// to ThrowsMessage(Eq("message")). +TEST(ThrowsPredicateCompilesTest, MessageMatcherAcceptsNonMatcher) { + Matcher> matcher = + ThrowsMessage("error message"); + EXPECT_TRUE( + matcher.Matches([]() { throw std::runtime_error("error message"); })); + EXPECT_FALSE(matcher.Matches( + []() { throw std::runtime_error("wrong error message"); })); +} + +#endif // GTEST_HAS_EXCEPTIONS + +} // namespace +} // namespace gmock_matchers_test +} // namespace testing + +#ifdef _MSC_VER +#pragma warning(pop) +#endif diff --git a/ext/googletest/googlemock/test/gmock-matchers_test.cc b/ext/googletest/googlemock/test/gmock-matchers_test.cc deleted file mode 100644 index e6f280d4d2..0000000000 --- a/ext/googletest/googlemock/test/gmock-matchers_test.cc +++ /dev/null @@ -1,8665 +0,0 @@ -// Copyright 2007, Google Inc. -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// Google Mock - a framework for writing C++ mock classes. -// -// This file tests some commonly used argument matchers. 
- -// Silence warning C4244: 'initializing': conversion from 'int' to 'short', -// possible loss of data and C4100, unreferenced local parameter -#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4244) -# pragma warning(disable:4100) -#endif - -#include "gmock/gmock-matchers.h" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "gmock/gmock-more-matchers.h" -#include "gmock/gmock.h" -#include "gtest/gtest-spi.h" -#include "gtest/gtest.h" - -namespace testing { -namespace gmock_matchers_test { -namespace { - -using std::greater; -using std::less; -using std::list; -using std::make_pair; -using std::map; -using std::multimap; -using std::multiset; -using std::ostream; -using std::pair; -using std::set; -using std::stringstream; -using std::vector; -using testing::internal::DummyMatchResultListener; -using testing::internal::ElementMatcherPair; -using testing::internal::ElementMatcherPairs; -using testing::internal::ElementsAreArrayMatcher; -using testing::internal::ExplainMatchFailureTupleTo; -using testing::internal::FloatingEqMatcher; -using testing::internal::FormatMatcherDescription; -using testing::internal::IsReadableTypeName; -using testing::internal::MatchMatrix; -using testing::internal::PredicateFormatterFromMatcher; -using testing::internal::RE; -using testing::internal::StreamMatchResultListener; -using testing::internal::Strings; - -// Helper for testing container-valued matchers in mock method context. It is -// important to test matchers in this context, since it requires additional type -// deduction beyond what EXPECT_THAT does, thus making it more restrictive. 
-struct ContainerHelper { - MOCK_METHOD1(Call, void(std::vector>)); -}; - -std::vector> MakeUniquePtrs(const std::vector& ints) { - std::vector> pointers; - for (int i : ints) pointers.emplace_back(new int(i)); - return pointers; -} - -// For testing ExplainMatchResultTo(). -template -class GreaterThanMatcher : public MatcherInterface { - public: - explicit GreaterThanMatcher(T rhs) : rhs_(rhs) {} - - void DescribeTo(ostream* os) const override { *os << "is > " << rhs_; } - - bool MatchAndExplain(T lhs, MatchResultListener* listener) const override { - if (lhs > rhs_) { - *listener << "which is " << (lhs - rhs_) << " more than " << rhs_; - } else if (lhs == rhs_) { - *listener << "which is the same as " << rhs_; - } else { - *listener << "which is " << (rhs_ - lhs) << " less than " << rhs_; - } - - return lhs > rhs_; - } - - private: - const T rhs_; -}; - -template -Matcher GreaterThan(T n) { - return MakeMatcher(new GreaterThanMatcher(n)); -} - -std::string OfType(const std::string& type_name) { -#if GTEST_HAS_RTTI - return IsReadableTypeName(type_name) ? " (of type " + type_name + ")" : ""; -#else - return ""; -#endif -} - -// Returns the description of the given matcher. -template -std::string Describe(const Matcher& m) { - return DescribeMatcher(m); -} - -// Returns the description of the negation of the given matcher. -template -std::string DescribeNegation(const Matcher& m) { - return DescribeMatcher(m, true); -} - -// Returns the reason why x matches, or doesn't match, m. 
-template -std::string Explain(const MatcherType& m, const Value& x) { - StringMatchResultListener listener; - ExplainMatchResult(m, x, &listener); - return listener.str(); -} - -TEST(MonotonicMatcherTest, IsPrintable) { - stringstream ss; - ss << GreaterThan(5); - EXPECT_EQ("is > 5", ss.str()); -} - -TEST(MatchResultListenerTest, StreamingWorks) { - StringMatchResultListener listener; - listener << "hi" << 5; - EXPECT_EQ("hi5", listener.str()); - - listener.Clear(); - EXPECT_EQ("", listener.str()); - - listener << 42; - EXPECT_EQ("42", listener.str()); - - // Streaming shouldn't crash when the underlying ostream is NULL. - DummyMatchResultListener dummy; - dummy << "hi" << 5; -} - -TEST(MatchResultListenerTest, CanAccessUnderlyingStream) { - EXPECT_TRUE(DummyMatchResultListener().stream() == nullptr); - EXPECT_TRUE(StreamMatchResultListener(nullptr).stream() == nullptr); - - EXPECT_EQ(&std::cout, StreamMatchResultListener(&std::cout).stream()); -} - -TEST(MatchResultListenerTest, IsInterestedWorks) { - EXPECT_TRUE(StringMatchResultListener().IsInterested()); - EXPECT_TRUE(StreamMatchResultListener(&std::cout).IsInterested()); - - EXPECT_FALSE(DummyMatchResultListener().IsInterested()); - EXPECT_FALSE(StreamMatchResultListener(nullptr).IsInterested()); -} - -// Makes sure that the MatcherInterface interface doesn't -// change. -class EvenMatcherImpl : public MatcherInterface { - public: - bool MatchAndExplain(int x, - MatchResultListener* /* listener */) const override { - return x % 2 == 0; - } - - void DescribeTo(ostream* os) const override { *os << "is an even number"; } - - // We deliberately don't define DescribeNegationTo() and - // ExplainMatchResultTo() here, to make sure the definition of these - // two methods is optional. -}; - -// Makes sure that the MatcherInterface API doesn't change. -TEST(MatcherInterfaceTest, CanBeImplementedUsingPublishedAPI) { - EvenMatcherImpl m; -} - -// Tests implementing a monomorphic matcher using MatchAndExplain(). 
- -class NewEvenMatcherImpl : public MatcherInterface { - public: - bool MatchAndExplain(int x, MatchResultListener* listener) const override { - const bool match = x % 2 == 0; - // Verifies that we can stream to a listener directly. - *listener << "value % " << 2; - if (listener->stream() != nullptr) { - // Verifies that we can stream to a listener's underlying stream - // too. - *listener->stream() << " == " << (x % 2); - } - return match; - } - - void DescribeTo(ostream* os) const override { *os << "is an even number"; } -}; - -TEST(MatcherInterfaceTest, CanBeImplementedUsingNewAPI) { - Matcher m = MakeMatcher(new NewEvenMatcherImpl); - EXPECT_TRUE(m.Matches(2)); - EXPECT_FALSE(m.Matches(3)); - EXPECT_EQ("value % 2 == 0", Explain(m, 2)); - EXPECT_EQ("value % 2 == 1", Explain(m, 3)); -} - -// Tests default-constructing a matcher. -TEST(MatcherTest, CanBeDefaultConstructed) { - Matcher m; -} - -// Tests that Matcher can be constructed from a MatcherInterface*. -TEST(MatcherTest, CanBeConstructedFromMatcherInterface) { - const MatcherInterface* impl = new EvenMatcherImpl; - Matcher m(impl); - EXPECT_TRUE(m.Matches(4)); - EXPECT_FALSE(m.Matches(5)); -} - -// Tests that value can be used in place of Eq(value). -TEST(MatcherTest, CanBeImplicitlyConstructedFromValue) { - Matcher m1 = 5; - EXPECT_TRUE(m1.Matches(5)); - EXPECT_FALSE(m1.Matches(6)); -} - -// Tests that NULL can be used in place of Eq(NULL). -TEST(MatcherTest, CanBeImplicitlyConstructedFromNULL) { - Matcher m1 = nullptr; - EXPECT_TRUE(m1.Matches(nullptr)); - int n = 0; - EXPECT_FALSE(m1.Matches(&n)); -} - -// Tests that matchers can be constructed from a variable that is not properly -// defined. This should be illegal, but many users rely on this accidentally. 
-struct Undefined { - virtual ~Undefined() = 0; - static const int kInt = 1; -}; - -TEST(MatcherTest, CanBeConstructedFromUndefinedVariable) { - Matcher m1 = Undefined::kInt; - EXPECT_TRUE(m1.Matches(1)); - EXPECT_FALSE(m1.Matches(2)); -} - -// Test that a matcher parameterized with an abstract class compiles. -TEST(MatcherTest, CanAcceptAbstractClass) { Matcher m = _; } - -// Tests that matchers are copyable. -TEST(MatcherTest, IsCopyable) { - // Tests the copy constructor. - Matcher m1 = Eq(false); - EXPECT_TRUE(m1.Matches(false)); - EXPECT_FALSE(m1.Matches(true)); - - // Tests the assignment operator. - m1 = Eq(true); - EXPECT_TRUE(m1.Matches(true)); - EXPECT_FALSE(m1.Matches(false)); -} - -// Tests that Matcher::DescribeTo() calls -// MatcherInterface::DescribeTo(). -TEST(MatcherTest, CanDescribeItself) { - EXPECT_EQ("is an even number", - Describe(Matcher(new EvenMatcherImpl))); -} - -// Tests Matcher::MatchAndExplain(). -TEST(MatcherTest, MatchAndExplain) { - Matcher m = GreaterThan(0); - StringMatchResultListener listener1; - EXPECT_TRUE(m.MatchAndExplain(42, &listener1)); - EXPECT_EQ("which is 42 more than 0", listener1.str()); - - StringMatchResultListener listener2; - EXPECT_FALSE(m.MatchAndExplain(-9, &listener2)); - EXPECT_EQ("which is 9 less than 0", listener2.str()); -} - -// Tests that a C-string literal can be implicitly converted to a -// Matcher or Matcher. -TEST(StringMatcherTest, CanBeImplicitlyConstructedFromCStringLiteral) { - Matcher m1 = "hi"; - EXPECT_TRUE(m1.Matches("hi")); - EXPECT_FALSE(m1.Matches("hello")); - - Matcher m2 = "hi"; - EXPECT_TRUE(m2.Matches("hi")); - EXPECT_FALSE(m2.Matches("hello")); -} - -// Tests that a string object can be implicitly converted to a -// Matcher or Matcher. 
-TEST(StringMatcherTest, CanBeImplicitlyConstructedFromString) { - Matcher m1 = std::string("hi"); - EXPECT_TRUE(m1.Matches("hi")); - EXPECT_FALSE(m1.Matches("hello")); - - Matcher m2 = std::string("hi"); - EXPECT_TRUE(m2.Matches("hi")); - EXPECT_FALSE(m2.Matches("hello")); -} - -#if GTEST_INTERNAL_HAS_STRING_VIEW -// Tests that a C-string literal can be implicitly converted to a -// Matcher or Matcher. -TEST(StringViewMatcherTest, CanBeImplicitlyConstructedFromCStringLiteral) { - Matcher m1 = "cats"; - EXPECT_TRUE(m1.Matches("cats")); - EXPECT_FALSE(m1.Matches("dogs")); - - Matcher m2 = "cats"; - EXPECT_TRUE(m2.Matches("cats")); - EXPECT_FALSE(m2.Matches("dogs")); -} - -// Tests that a std::string object can be implicitly converted to a -// Matcher or Matcher. -TEST(StringViewMatcherTest, CanBeImplicitlyConstructedFromString) { - Matcher m1 = std::string("cats"); - EXPECT_TRUE(m1.Matches("cats")); - EXPECT_FALSE(m1.Matches("dogs")); - - Matcher m2 = std::string("cats"); - EXPECT_TRUE(m2.Matches("cats")); - EXPECT_FALSE(m2.Matches("dogs")); -} - -// Tests that a StringView object can be implicitly converted to a -// Matcher or Matcher. -TEST(StringViewMatcherTest, CanBeImplicitlyConstructedFromStringView) { - Matcher m1 = internal::StringView("cats"); - EXPECT_TRUE(m1.Matches("cats")); - EXPECT_FALSE(m1.Matches("dogs")); - - Matcher m2 = internal::StringView("cats"); - EXPECT_TRUE(m2.Matches("cats")); - EXPECT_FALSE(m2.Matches("dogs")); -} -#endif // GTEST_INTERNAL_HAS_STRING_VIEW - -// Tests that a std::reference_wrapper object can be implicitly -// converted to a Matcher or Matcher via Eq(). 
-TEST(StringMatcherTest, - CanBeImplicitlyConstructedFromEqReferenceWrapperString) { - std::string value = "cats"; - Matcher m1 = Eq(std::ref(value)); - EXPECT_TRUE(m1.Matches("cats")); - EXPECT_FALSE(m1.Matches("dogs")); - - Matcher m2 = Eq(std::ref(value)); - EXPECT_TRUE(m2.Matches("cats")); - EXPECT_FALSE(m2.Matches("dogs")); -} - -// Tests that MakeMatcher() constructs a Matcher from a -// MatcherInterface* without requiring the user to explicitly -// write the type. -TEST(MakeMatcherTest, ConstructsMatcherFromMatcherInterface) { - const MatcherInterface* dummy_impl = new EvenMatcherImpl; - Matcher m = MakeMatcher(dummy_impl); -} - -// Tests that MakePolymorphicMatcher() can construct a polymorphic -// matcher from its implementation using the old API. -const int g_bar = 1; -class ReferencesBarOrIsZeroImpl { - public: - template - bool MatchAndExplain(const T& x, - MatchResultListener* /* listener */) const { - const void* p = &x; - return p == &g_bar || x == 0; - } - - void DescribeTo(ostream* os) const { *os << "g_bar or zero"; } - - void DescribeNegationTo(ostream* os) const { - *os << "doesn't reference g_bar and is not zero"; - } -}; - -// This function verifies that MakePolymorphicMatcher() returns a -// PolymorphicMatcher where T is the argument's type. -PolymorphicMatcher ReferencesBarOrIsZero() { - return MakePolymorphicMatcher(ReferencesBarOrIsZeroImpl()); -} - -TEST(MakePolymorphicMatcherTest, ConstructsMatcherUsingOldAPI) { - // Using a polymorphic matcher to match a reference type. - Matcher m1 = ReferencesBarOrIsZero(); - EXPECT_TRUE(m1.Matches(0)); - // Verifies that the identity of a by-reference argument is preserved. - EXPECT_TRUE(m1.Matches(g_bar)); - EXPECT_FALSE(m1.Matches(1)); - EXPECT_EQ("g_bar or zero", Describe(m1)); - - // Using a polymorphic matcher to match a value type. 
- Matcher m2 = ReferencesBarOrIsZero(); - EXPECT_TRUE(m2.Matches(0.0)); - EXPECT_FALSE(m2.Matches(0.1)); - EXPECT_EQ("g_bar or zero", Describe(m2)); -} - -// Tests implementing a polymorphic matcher using MatchAndExplain(). - -class PolymorphicIsEvenImpl { - public: - void DescribeTo(ostream* os) const { *os << "is even"; } - - void DescribeNegationTo(ostream* os) const { - *os << "is odd"; - } - - template - bool MatchAndExplain(const T& x, MatchResultListener* listener) const { - // Verifies that we can stream to the listener directly. - *listener << "% " << 2; - if (listener->stream() != nullptr) { - // Verifies that we can stream to the listener's underlying stream - // too. - *listener->stream() << " == " << (x % 2); - } - return (x % 2) == 0; - } -}; - -PolymorphicMatcher PolymorphicIsEven() { - return MakePolymorphicMatcher(PolymorphicIsEvenImpl()); -} - -TEST(MakePolymorphicMatcherTest, ConstructsMatcherUsingNewAPI) { - // Using PolymorphicIsEven() as a Matcher. - const Matcher m1 = PolymorphicIsEven(); - EXPECT_TRUE(m1.Matches(42)); - EXPECT_FALSE(m1.Matches(43)); - EXPECT_EQ("is even", Describe(m1)); - - const Matcher not_m1 = Not(m1); - EXPECT_EQ("is odd", Describe(not_m1)); - - EXPECT_EQ("% 2 == 0", Explain(m1, 42)); - - // Using PolymorphicIsEven() as a Matcher. - const Matcher m2 = PolymorphicIsEven(); - EXPECT_TRUE(m2.Matches('\x42')); - EXPECT_FALSE(m2.Matches('\x43')); - EXPECT_EQ("is even", Describe(m2)); - - const Matcher not_m2 = Not(m2); - EXPECT_EQ("is odd", Describe(not_m2)); - - EXPECT_EQ("% 2 == 0", Explain(m2, '\x42')); -} - -// Tests that MatcherCast(m) works when m is a polymorphic matcher. -TEST(MatcherCastTest, FromPolymorphicMatcher) { - Matcher m = MatcherCast(Eq(5)); - EXPECT_TRUE(m.Matches(5)); - EXPECT_FALSE(m.Matches(6)); -} - -// For testing casting matchers between compatible types. -class IntValue { - public: - // An int can be statically (although not implicitly) cast to a - // IntValue. 
- explicit IntValue(int a_value) : value_(a_value) {} - - int value() const { return value_; } - private: - int value_; -}; - -// For testing casting matchers between compatible types. -bool IsPositiveIntValue(const IntValue& foo) { - return foo.value() > 0; -} - -// Tests that MatcherCast(m) works when m is a Matcher where T -// can be statically converted to U. -TEST(MatcherCastTest, FromCompatibleType) { - Matcher m1 = Eq(2.0); - Matcher m2 = MatcherCast(m1); - EXPECT_TRUE(m2.Matches(2)); - EXPECT_FALSE(m2.Matches(3)); - - Matcher m3 = Truly(IsPositiveIntValue); - Matcher m4 = MatcherCast(m3); - // In the following, the arguments 1 and 0 are statically converted - // to IntValue objects, and then tested by the IsPositiveIntValue() - // predicate. - EXPECT_TRUE(m4.Matches(1)); - EXPECT_FALSE(m4.Matches(0)); -} - -// Tests that MatcherCast(m) works when m is a Matcher. -TEST(MatcherCastTest, FromConstReferenceToNonReference) { - Matcher m1 = Eq(0); - Matcher m2 = MatcherCast(m1); - EXPECT_TRUE(m2.Matches(0)); - EXPECT_FALSE(m2.Matches(1)); -} - -// Tests that MatcherCast(m) works when m is a Matcher. -TEST(MatcherCastTest, FromReferenceToNonReference) { - Matcher m1 = Eq(0); - Matcher m2 = MatcherCast(m1); - EXPECT_TRUE(m2.Matches(0)); - EXPECT_FALSE(m2.Matches(1)); -} - -// Tests that MatcherCast(m) works when m is a Matcher. -TEST(MatcherCastTest, FromNonReferenceToConstReference) { - Matcher m1 = Eq(0); - Matcher m2 = MatcherCast(m1); - EXPECT_TRUE(m2.Matches(0)); - EXPECT_FALSE(m2.Matches(1)); -} - -// Tests that MatcherCast(m) works when m is a Matcher. -TEST(MatcherCastTest, FromNonReferenceToReference) { - Matcher m1 = Eq(0); - Matcher m2 = MatcherCast(m1); - int n = 0; - EXPECT_TRUE(m2.Matches(n)); - n = 1; - EXPECT_FALSE(m2.Matches(n)); -} - -// Tests that MatcherCast(m) works when m is a Matcher. 
-TEST(MatcherCastTest, FromSameType) { - Matcher m1 = Eq(0); - Matcher m2 = MatcherCast(m1); - EXPECT_TRUE(m2.Matches(0)); - EXPECT_FALSE(m2.Matches(1)); -} - -// Tests that MatcherCast(m) works when m is a value of the same type as the -// value type of the Matcher. -TEST(MatcherCastTest, FromAValue) { - Matcher m = MatcherCast(42); - EXPECT_TRUE(m.Matches(42)); - EXPECT_FALSE(m.Matches(239)); -} - -// Tests that MatcherCast(m) works when m is a value of the type implicitly -// convertible to the value type of the Matcher. -TEST(MatcherCastTest, FromAnImplicitlyConvertibleValue) { - const int kExpected = 'c'; - Matcher m = MatcherCast('c'); - EXPECT_TRUE(m.Matches(kExpected)); - EXPECT_FALSE(m.Matches(kExpected + 1)); -} - -struct NonImplicitlyConstructibleTypeWithOperatorEq { - friend bool operator==( - const NonImplicitlyConstructibleTypeWithOperatorEq& /* ignored */, - int rhs) { - return 42 == rhs; - } - friend bool operator==( - int lhs, - const NonImplicitlyConstructibleTypeWithOperatorEq& /* ignored */) { - return lhs == 42; - } -}; - -// Tests that MatcherCast(m) works when m is a neither a matcher nor -// implicitly convertible to the value type of the Matcher, but the value type -// of the matcher has operator==() overload accepting m. -TEST(MatcherCastTest, NonImplicitlyConstructibleTypeWithOperatorEq) { - Matcher m1 = - MatcherCast(42); - EXPECT_TRUE(m1.Matches(NonImplicitlyConstructibleTypeWithOperatorEq())); - - Matcher m2 = - MatcherCast(239); - EXPECT_FALSE(m2.Matches(NonImplicitlyConstructibleTypeWithOperatorEq())); - - // When updating the following lines please also change the comment to - // namespace convertible_from_any. - Matcher m3 = - MatcherCast(NonImplicitlyConstructibleTypeWithOperatorEq()); - EXPECT_TRUE(m3.Matches(42)); - EXPECT_FALSE(m3.Matches(239)); -} - -// ConvertibleFromAny does not work with MSVC. 
resulting in -// error C2440: 'initializing': cannot convert from 'Eq' to 'M' -// No constructor could take the source type, or constructor overload -// resolution was ambiguous - -#if !defined _MSC_VER - -// The below ConvertibleFromAny struct is implicitly constructible from anything -// and when in the same namespace can interact with other tests. In particular, -// if it is in the same namespace as other tests and one removes -// NonImplicitlyConstructibleTypeWithOperatorEq::operator==(int lhs, ...); -// then the corresponding test still compiles (and it should not!) by implicitly -// converting NonImplicitlyConstructibleTypeWithOperatorEq to ConvertibleFromAny -// in m3.Matcher(). -namespace convertible_from_any { -// Implicitly convertible from any type. -struct ConvertibleFromAny { - ConvertibleFromAny(int a_value) : value(a_value) {} - template - ConvertibleFromAny(const T& /*a_value*/) : value(-1) { - ADD_FAILURE() << "Conversion constructor called"; - } - int value; -}; - -bool operator==(const ConvertibleFromAny& a, const ConvertibleFromAny& b) { - return a.value == b.value; -} - -ostream& operator<<(ostream& os, const ConvertibleFromAny& a) { - return os << a.value; -} - -TEST(MatcherCastTest, ConversionConstructorIsUsed) { - Matcher m = MatcherCast(1); - EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); - EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); -} - -TEST(MatcherCastTest, FromConvertibleFromAny) { - Matcher m = - MatcherCast(Eq(ConvertibleFromAny(1))); - EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); - EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); -} -} // namespace convertible_from_any - -#endif // !defined _MSC_VER - -struct IntReferenceWrapper { - IntReferenceWrapper(const int& a_value) : value(&a_value) {} - const int* value; -}; - -bool operator==(const IntReferenceWrapper& a, const IntReferenceWrapper& b) { - return a.value == b.value; -} - -TEST(MatcherCastTest, ValueIsNotCopied) { - int n = 42; - Matcher m = MatcherCast(n); - // Verify 
that the matcher holds a reference to n, not to its temporary copy. - EXPECT_TRUE(m.Matches(n)); -} - -class Base { - public: - virtual ~Base() {} - Base() {} - private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(Base); -}; - -class Derived : public Base { - public: - Derived() : Base() {} - int i; -}; - -class OtherDerived : public Base {}; - -// Tests that SafeMatcherCast(m) works when m is a polymorphic matcher. -TEST(SafeMatcherCastTest, FromPolymorphicMatcher) { - Matcher m2 = SafeMatcherCast(Eq(32)); - EXPECT_TRUE(m2.Matches(' ')); - EXPECT_FALSE(m2.Matches('\n')); -} - -// Tests that SafeMatcherCast(m) works when m is a Matcher where -// T and U are arithmetic types and T can be losslessly converted to -// U. -TEST(SafeMatcherCastTest, FromLosslesslyConvertibleArithmeticType) { - Matcher m1 = DoubleEq(1.0); - Matcher m2 = SafeMatcherCast(m1); - EXPECT_TRUE(m2.Matches(1.0f)); - EXPECT_FALSE(m2.Matches(2.0f)); - - Matcher m3 = SafeMatcherCast(TypedEq('a')); - EXPECT_TRUE(m3.Matches('a')); - EXPECT_FALSE(m3.Matches('b')); -} - -// Tests that SafeMatcherCast(m) works when m is a Matcher where T and U -// are pointers or references to a derived and a base class, correspondingly. -TEST(SafeMatcherCastTest, FromBaseClass) { - Derived d, d2; - Matcher m1 = Eq(&d); - Matcher m2 = SafeMatcherCast(m1); - EXPECT_TRUE(m2.Matches(&d)); - EXPECT_FALSE(m2.Matches(&d2)); - - Matcher m3 = Ref(d); - Matcher m4 = SafeMatcherCast(m3); - EXPECT_TRUE(m4.Matches(d)); - EXPECT_FALSE(m4.Matches(d2)); -} - -// Tests that SafeMatcherCast(m) works when m is a Matcher. -TEST(SafeMatcherCastTest, FromConstReferenceToReference) { - int n = 0; - Matcher m1 = Ref(n); - Matcher m2 = SafeMatcherCast(m1); - int n1 = 0; - EXPECT_TRUE(m2.Matches(n)); - EXPECT_FALSE(m2.Matches(n1)); -} - -// Tests that MatcherCast(m) works when m is a Matcher. 
-TEST(SafeMatcherCastTest, FromNonReferenceToConstReference) { - Matcher> m1 = IsNull(); - Matcher&> m2 = - SafeMatcherCast&>(m1); - EXPECT_TRUE(m2.Matches(std::unique_ptr())); - EXPECT_FALSE(m2.Matches(std::unique_ptr(new int))); -} - -// Tests that SafeMatcherCast(m) works when m is a Matcher. -TEST(SafeMatcherCastTest, FromNonReferenceToReference) { - Matcher m1 = Eq(0); - Matcher m2 = SafeMatcherCast(m1); - int n = 0; - EXPECT_TRUE(m2.Matches(n)); - n = 1; - EXPECT_FALSE(m2.Matches(n)); -} - -// Tests that SafeMatcherCast(m) works when m is a Matcher. -TEST(SafeMatcherCastTest, FromSameType) { - Matcher m1 = Eq(0); - Matcher m2 = SafeMatcherCast(m1); - EXPECT_TRUE(m2.Matches(0)); - EXPECT_FALSE(m2.Matches(1)); -} - -#if !defined _MSC_VER - -namespace convertible_from_any { -TEST(SafeMatcherCastTest, ConversionConstructorIsUsed) { - Matcher m = SafeMatcherCast(1); - EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); - EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); -} - -TEST(SafeMatcherCastTest, FromConvertibleFromAny) { - Matcher m = - SafeMatcherCast(Eq(ConvertibleFromAny(1))); - EXPECT_TRUE(m.Matches(ConvertibleFromAny(1))); - EXPECT_FALSE(m.Matches(ConvertibleFromAny(2))); -} -} // namespace convertible_from_any - -#endif // !defined _MSC_VER - -TEST(SafeMatcherCastTest, ValueIsNotCopied) { - int n = 42; - Matcher m = SafeMatcherCast(n); - // Verify that the matcher holds a reference to n, not to its temporary copy. - EXPECT_TRUE(m.Matches(n)); -} - -TEST(ExpectThat, TakesLiterals) { - EXPECT_THAT(1, 1); - EXPECT_THAT(1.0, 1.0); - EXPECT_THAT(std::string(), ""); -} - -TEST(ExpectThat, TakesFunctions) { - struct Helper { - static void Func() {} - }; - void (*func)() = Helper::Func; - EXPECT_THAT(func, Helper::Func); - EXPECT_THAT(func, &Helper::Func); -} - -// Tests that A() matches any value of type T. -TEST(ATest, MatchesAnyValue) { - // Tests a matcher for a value type. 
- Matcher m1 = A(); - EXPECT_TRUE(m1.Matches(91.43)); - EXPECT_TRUE(m1.Matches(-15.32)); - - // Tests a matcher for a reference type. - int a = 2; - int b = -6; - Matcher m2 = A(); - EXPECT_TRUE(m2.Matches(a)); - EXPECT_TRUE(m2.Matches(b)); -} - -TEST(ATest, WorksForDerivedClass) { - Base base; - Derived derived; - EXPECT_THAT(&base, A()); - // This shouldn't compile: EXPECT_THAT(&base, A()); - EXPECT_THAT(&derived, A()); - EXPECT_THAT(&derived, A()); -} - -// Tests that A() describes itself properly. -TEST(ATest, CanDescribeSelf) { - EXPECT_EQ("is anything", Describe(A())); -} - -// Tests that An() matches any value of type T. -TEST(AnTest, MatchesAnyValue) { - // Tests a matcher for a value type. - Matcher m1 = An(); - EXPECT_TRUE(m1.Matches(9143)); - EXPECT_TRUE(m1.Matches(-1532)); - - // Tests a matcher for a reference type. - int a = 2; - int b = -6; - Matcher m2 = An(); - EXPECT_TRUE(m2.Matches(a)); - EXPECT_TRUE(m2.Matches(b)); -} - -// Tests that An() describes itself properly. -TEST(AnTest, CanDescribeSelf) { - EXPECT_EQ("is anything", Describe(An())); -} - -// Tests that _ can be used as a matcher for any type and matches any -// value of that type. -TEST(UnderscoreTest, MatchesAnyValue) { - // Uses _ as a matcher for a value type. - Matcher m1 = _; - EXPECT_TRUE(m1.Matches(123)); - EXPECT_TRUE(m1.Matches(-242)); - - // Uses _ as a matcher for a reference type. - bool a = false; - const bool b = true; - Matcher m2 = _; - EXPECT_TRUE(m2.Matches(a)); - EXPECT_TRUE(m2.Matches(b)); -} - -// Tests that _ describes itself properly. -TEST(UnderscoreTest, CanDescribeSelf) { - Matcher m = _; - EXPECT_EQ("is anything", Describe(m)); -} - -// Tests that Eq(x) matches any value equal to x. -TEST(EqTest, MatchesEqualValue) { - // 2 C-strings with same content but different addresses. 
- const char a1[] = "hi"; - const char a2[] = "hi"; - - Matcher m1 = Eq(a1); - EXPECT_TRUE(m1.Matches(a1)); - EXPECT_FALSE(m1.Matches(a2)); -} - -// Tests that Eq(v) describes itself properly. - -class Unprintable { - public: - Unprintable() : c_('a') {} - - bool operator==(const Unprintable& /* rhs */) const { return true; } - // -Wunused-private-field: dummy accessor for `c_`. - char dummy_c() { return c_; } - private: - char c_; -}; - -TEST(EqTest, CanDescribeSelf) { - Matcher m = Eq(Unprintable()); - EXPECT_EQ("is equal to 1-byte object <61>", Describe(m)); -} - -// Tests that Eq(v) can be used to match any type that supports -// comparing with type T, where T is v's type. -TEST(EqTest, IsPolymorphic) { - Matcher m1 = Eq(1); - EXPECT_TRUE(m1.Matches(1)); - EXPECT_FALSE(m1.Matches(2)); - - Matcher m2 = Eq(1); - EXPECT_TRUE(m2.Matches('\1')); - EXPECT_FALSE(m2.Matches('a')); -} - -// Tests that TypedEq(v) matches values of type T that's equal to v. -TEST(TypedEqTest, ChecksEqualityForGivenType) { - Matcher m1 = TypedEq('a'); - EXPECT_TRUE(m1.Matches('a')); - EXPECT_FALSE(m1.Matches('b')); - - Matcher m2 = TypedEq(6); - EXPECT_TRUE(m2.Matches(6)); - EXPECT_FALSE(m2.Matches(7)); -} - -// Tests that TypedEq(v) describes itself properly. -TEST(TypedEqTest, CanDescribeSelf) { - EXPECT_EQ("is equal to 2", Describe(TypedEq(2))); -} - -// Tests that TypedEq(v) has type Matcher. - -// Type::IsTypeOf(v) compiles if and only if the type of value v is T, where -// T is a "bare" type (i.e. not in the form of const U or U&). If v's type is -// not T, the compiler will generate a message about "undefined reference". -template -struct Type { - static bool IsTypeOf(const T& /* v */) { return true; } - - template - static void IsTypeOf(T2 v); -}; - -TEST(TypedEqTest, HasSpecifiedType) { - // Verfies that the type of TypedEq(v) is Matcher. - Type >::IsTypeOf(TypedEq(5)); - Type >::IsTypeOf(TypedEq(5)); -} - -// Tests that Ge(v) matches anything >= v. 
-TEST(GeTest, ImplementsGreaterThanOrEqual) { - Matcher m1 = Ge(0); - EXPECT_TRUE(m1.Matches(1)); - EXPECT_TRUE(m1.Matches(0)); - EXPECT_FALSE(m1.Matches(-1)); -} - -// Tests that Ge(v) describes itself properly. -TEST(GeTest, CanDescribeSelf) { - Matcher m = Ge(5); - EXPECT_EQ("is >= 5", Describe(m)); -} - -// Tests that Gt(v) matches anything > v. -TEST(GtTest, ImplementsGreaterThan) { - Matcher m1 = Gt(0); - EXPECT_TRUE(m1.Matches(1.0)); - EXPECT_FALSE(m1.Matches(0.0)); - EXPECT_FALSE(m1.Matches(-1.0)); -} - -// Tests that Gt(v) describes itself properly. -TEST(GtTest, CanDescribeSelf) { - Matcher m = Gt(5); - EXPECT_EQ("is > 5", Describe(m)); -} - -// Tests that Le(v) matches anything <= v. -TEST(LeTest, ImplementsLessThanOrEqual) { - Matcher m1 = Le('b'); - EXPECT_TRUE(m1.Matches('a')); - EXPECT_TRUE(m1.Matches('b')); - EXPECT_FALSE(m1.Matches('c')); -} - -// Tests that Le(v) describes itself properly. -TEST(LeTest, CanDescribeSelf) { - Matcher m = Le(5); - EXPECT_EQ("is <= 5", Describe(m)); -} - -// Tests that Lt(v) matches anything < v. -TEST(LtTest, ImplementsLessThan) { - Matcher m1 = Lt("Hello"); - EXPECT_TRUE(m1.Matches("Abc")); - EXPECT_FALSE(m1.Matches("Hello")); - EXPECT_FALSE(m1.Matches("Hello, world!")); -} - -// Tests that Lt(v) describes itself properly. -TEST(LtTest, CanDescribeSelf) { - Matcher m = Lt(5); - EXPECT_EQ("is < 5", Describe(m)); -} - -// Tests that Ne(v) matches anything != v. -TEST(NeTest, ImplementsNotEqual) { - Matcher m1 = Ne(0); - EXPECT_TRUE(m1.Matches(1)); - EXPECT_TRUE(m1.Matches(-1)); - EXPECT_FALSE(m1.Matches(0)); -} - -// Tests that Ne(v) describes itself properly. 
-TEST(NeTest, CanDescribeSelf) { - Matcher m = Ne(5); - EXPECT_EQ("isn't equal to 5", Describe(m)); -} - -class MoveOnly { - public: - explicit MoveOnly(int i) : i_(i) {} - MoveOnly(const MoveOnly&) = delete; - MoveOnly(MoveOnly&&) = default; - MoveOnly& operator=(const MoveOnly&) = delete; - MoveOnly& operator=(MoveOnly&&) = default; - - bool operator==(const MoveOnly& other) const { return i_ == other.i_; } - bool operator!=(const MoveOnly& other) const { return i_ != other.i_; } - bool operator<(const MoveOnly& other) const { return i_ < other.i_; } - bool operator<=(const MoveOnly& other) const { return i_ <= other.i_; } - bool operator>(const MoveOnly& other) const { return i_ > other.i_; } - bool operator>=(const MoveOnly& other) const { return i_ >= other.i_; } - - private: - int i_; -}; - -struct MoveHelper { - MOCK_METHOD1(Call, void(MoveOnly)); -}; - -// Disable this test in VS 2015 (version 14), where it fails when SEH is enabled -#if defined(_MSC_VER) && (_MSC_VER < 1910) -TEST(ComparisonBaseTest, DISABLED_WorksWithMoveOnly) { -#else -TEST(ComparisonBaseTest, WorksWithMoveOnly) { -#endif - MoveOnly m{0}; - MoveHelper helper; - - EXPECT_CALL(helper, Call(Eq(ByRef(m)))); - helper.Call(MoveOnly(0)); - EXPECT_CALL(helper, Call(Ne(ByRef(m)))); - helper.Call(MoveOnly(1)); - EXPECT_CALL(helper, Call(Le(ByRef(m)))); - helper.Call(MoveOnly(0)); - EXPECT_CALL(helper, Call(Lt(ByRef(m)))); - helper.Call(MoveOnly(-1)); - EXPECT_CALL(helper, Call(Ge(ByRef(m)))); - helper.Call(MoveOnly(0)); - EXPECT_CALL(helper, Call(Gt(ByRef(m)))); - helper.Call(MoveOnly(1)); -} - -// Tests that IsNull() matches any NULL pointer of any type. 
-TEST(IsNullTest, MatchesNullPointer) { - Matcher m1 = IsNull(); - int* p1 = nullptr; - int n = 0; - EXPECT_TRUE(m1.Matches(p1)); - EXPECT_FALSE(m1.Matches(&n)); - - Matcher m2 = IsNull(); - const char* p2 = nullptr; - EXPECT_TRUE(m2.Matches(p2)); - EXPECT_FALSE(m2.Matches("hi")); - - Matcher m3 = IsNull(); - void* p3 = nullptr; - EXPECT_TRUE(m3.Matches(p3)); - EXPECT_FALSE(m3.Matches(reinterpret_cast(0xbeef))); -} - -TEST(IsNullTest, StdFunction) { - const Matcher> m = IsNull(); - - EXPECT_TRUE(m.Matches(std::function())); - EXPECT_FALSE(m.Matches([]{})); -} - -// Tests that IsNull() describes itself properly. -TEST(IsNullTest, CanDescribeSelf) { - Matcher m = IsNull(); - EXPECT_EQ("is NULL", Describe(m)); - EXPECT_EQ("isn't NULL", DescribeNegation(m)); -} - -// Tests that NotNull() matches any non-NULL pointer of any type. -TEST(NotNullTest, MatchesNonNullPointer) { - Matcher m1 = NotNull(); - int* p1 = nullptr; - int n = 0; - EXPECT_FALSE(m1.Matches(p1)); - EXPECT_TRUE(m1.Matches(&n)); - - Matcher m2 = NotNull(); - const char* p2 = nullptr; - EXPECT_FALSE(m2.Matches(p2)); - EXPECT_TRUE(m2.Matches("hi")); -} - -TEST(NotNullTest, LinkedPtr) { - const Matcher> m = NotNull(); - const std::shared_ptr null_p; - const std::shared_ptr non_null_p(new int); - - EXPECT_FALSE(m.Matches(null_p)); - EXPECT_TRUE(m.Matches(non_null_p)); -} - -TEST(NotNullTest, ReferenceToConstLinkedPtr) { - const Matcher&> m = NotNull(); - const std::shared_ptr null_p; - const std::shared_ptr non_null_p(new double); - - EXPECT_FALSE(m.Matches(null_p)); - EXPECT_TRUE(m.Matches(non_null_p)); -} - -TEST(NotNullTest, StdFunction) { - const Matcher> m = NotNull(); - - EXPECT_TRUE(m.Matches([]{})); - EXPECT_FALSE(m.Matches(std::function())); -} - -// Tests that NotNull() describes itself properly. -TEST(NotNullTest, CanDescribeSelf) { - Matcher m = NotNull(); - EXPECT_EQ("isn't NULL", Describe(m)); -} - -// Tests that Ref(variable) matches an argument that references -// 'variable'. 
-TEST(RefTest, MatchesSameVariable) { - int a = 0; - int b = 0; - Matcher m = Ref(a); - EXPECT_TRUE(m.Matches(a)); - EXPECT_FALSE(m.Matches(b)); -} - -// Tests that Ref(variable) describes itself properly. -TEST(RefTest, CanDescribeSelf) { - int n = 5; - Matcher m = Ref(n); - stringstream ss; - ss << "references the variable @" << &n << " 5"; - EXPECT_EQ(ss.str(), Describe(m)); -} - -// Test that Ref(non_const_varialbe) can be used as a matcher for a -// const reference. -TEST(RefTest, CanBeUsedAsMatcherForConstReference) { - int a = 0; - int b = 0; - Matcher m = Ref(a); - EXPECT_TRUE(m.Matches(a)); - EXPECT_FALSE(m.Matches(b)); -} - -// Tests that Ref(variable) is covariant, i.e. Ref(derived) can be -// used wherever Ref(base) can be used (Ref(derived) is a sub-type -// of Ref(base), but not vice versa. - -TEST(RefTest, IsCovariant) { - Base base, base2; - Derived derived; - Matcher m1 = Ref(base); - EXPECT_TRUE(m1.Matches(base)); - EXPECT_FALSE(m1.Matches(base2)); - EXPECT_FALSE(m1.Matches(derived)); - - m1 = Ref(derived); - EXPECT_TRUE(m1.Matches(derived)); - EXPECT_FALSE(m1.Matches(base)); - EXPECT_FALSE(m1.Matches(base2)); -} - -TEST(RefTest, ExplainsResult) { - int n = 0; - EXPECT_THAT(Explain(Matcher(Ref(n)), n), - StartsWith("which is located @")); - - int m = 0; - EXPECT_THAT(Explain(Matcher(Ref(n)), m), - StartsWith("which is located @")); -} - -// Tests string comparison matchers. - -template -std::string FromStringLike(internal::StringLike str) { - return std::string(str); -} - -TEST(StringLike, TestConversions) { - EXPECT_EQ("foo", FromStringLike("foo")); - EXPECT_EQ("foo", FromStringLike(std::string("foo"))); -#if GTEST_INTERNAL_HAS_STRING_VIEW - EXPECT_EQ("foo", FromStringLike(internal::StringView("foo"))); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW - - // Non deducible types. 
- EXPECT_EQ("", FromStringLike({})); - EXPECT_EQ("foo", FromStringLike({'f', 'o', 'o'})); - const char buf[] = "foo"; - EXPECT_EQ("foo", FromStringLike({buf, buf + 3})); -} - -TEST(StrEqTest, MatchesEqualString) { - Matcher m = StrEq(std::string("Hello")); - EXPECT_TRUE(m.Matches("Hello")); - EXPECT_FALSE(m.Matches("hello")); - EXPECT_FALSE(m.Matches(nullptr)); - - Matcher m2 = StrEq("Hello"); - EXPECT_TRUE(m2.Matches("Hello")); - EXPECT_FALSE(m2.Matches("Hi")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - Matcher m3 = - StrEq(internal::StringView("Hello")); - EXPECT_TRUE(m3.Matches(internal::StringView("Hello"))); - EXPECT_FALSE(m3.Matches(internal::StringView("hello"))); - EXPECT_FALSE(m3.Matches(internal::StringView())); - - Matcher m_empty = StrEq(""); - EXPECT_TRUE(m_empty.Matches(internal::StringView(""))); - EXPECT_TRUE(m_empty.Matches(internal::StringView())); - EXPECT_FALSE(m_empty.Matches(internal::StringView("hello"))); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(StrEqTest, CanDescribeSelf) { - Matcher m = StrEq("Hi-\'\"?\\\a\b\f\n\r\t\v\xD3"); - EXPECT_EQ("is equal to \"Hi-\'\\\"?\\\\\\a\\b\\f\\n\\r\\t\\v\\xD3\"", - Describe(m)); - - std::string str("01204500800"); - str[3] = '\0'; - Matcher m2 = StrEq(str); - EXPECT_EQ("is equal to \"012\\04500800\"", Describe(m2)); - str[0] = str[6] = str[7] = str[9] = str[10] = '\0'; - Matcher m3 = StrEq(str); - EXPECT_EQ("is equal to \"\\012\\045\\0\\08\\0\\0\"", Describe(m3)); -} - -TEST(StrNeTest, MatchesUnequalString) { - Matcher m = StrNe("Hello"); - EXPECT_TRUE(m.Matches("")); - EXPECT_TRUE(m.Matches(nullptr)); - EXPECT_FALSE(m.Matches("Hello")); - - Matcher m2 = StrNe(std::string("Hello")); - EXPECT_TRUE(m2.Matches("hello")); - EXPECT_FALSE(m2.Matches("Hello")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - Matcher m3 = StrNe(internal::StringView("Hello")); - EXPECT_TRUE(m3.Matches(internal::StringView(""))); - EXPECT_TRUE(m3.Matches(internal::StringView())); - 
EXPECT_FALSE(m3.Matches(internal::StringView("Hello"))); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(StrNeTest, CanDescribeSelf) { - Matcher m = StrNe("Hi"); - EXPECT_EQ("isn't equal to \"Hi\"", Describe(m)); -} - -TEST(StrCaseEqTest, MatchesEqualStringIgnoringCase) { - Matcher m = StrCaseEq(std::string("Hello")); - EXPECT_TRUE(m.Matches("Hello")); - EXPECT_TRUE(m.Matches("hello")); - EXPECT_FALSE(m.Matches("Hi")); - EXPECT_FALSE(m.Matches(nullptr)); - - Matcher m2 = StrCaseEq("Hello"); - EXPECT_TRUE(m2.Matches("hello")); - EXPECT_FALSE(m2.Matches("Hi")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - Matcher m3 = - StrCaseEq(internal::StringView("Hello")); - EXPECT_TRUE(m3.Matches(internal::StringView("Hello"))); - EXPECT_TRUE(m3.Matches(internal::StringView("hello"))); - EXPECT_FALSE(m3.Matches(internal::StringView("Hi"))); - EXPECT_FALSE(m3.Matches(internal::StringView())); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(StrCaseEqTest, MatchesEqualStringWith0IgnoringCase) { - std::string str1("oabocdooeoo"); - std::string str2("OABOCDOOEOO"); - Matcher m0 = StrCaseEq(str1); - EXPECT_FALSE(m0.Matches(str2 + std::string(1, '\0'))); - - str1[3] = str2[3] = '\0'; - Matcher m1 = StrCaseEq(str1); - EXPECT_TRUE(m1.Matches(str2)); - - str1[0] = str1[6] = str1[7] = str1[10] = '\0'; - str2[0] = str2[6] = str2[7] = str2[10] = '\0'; - Matcher m2 = StrCaseEq(str1); - str1[9] = str2[9] = '\0'; - EXPECT_FALSE(m2.Matches(str2)); - - Matcher m3 = StrCaseEq(str1); - EXPECT_TRUE(m3.Matches(str2)); - - EXPECT_FALSE(m3.Matches(str2 + "x")); - str2.append(1, '\0'); - EXPECT_FALSE(m3.Matches(str2)); - EXPECT_FALSE(m3.Matches(std::string(str2, 0, 9))); -} - -TEST(StrCaseEqTest, CanDescribeSelf) { - Matcher m = StrCaseEq("Hi"); - EXPECT_EQ("is equal to (ignoring case) \"Hi\"", Describe(m)); -} - -TEST(StrCaseNeTest, MatchesUnequalStringIgnoringCase) { - Matcher m = StrCaseNe("Hello"); - EXPECT_TRUE(m.Matches("Hi")); - EXPECT_TRUE(m.Matches(nullptr)); - 
EXPECT_FALSE(m.Matches("Hello")); - EXPECT_FALSE(m.Matches("hello")); - - Matcher m2 = StrCaseNe(std::string("Hello")); - EXPECT_TRUE(m2.Matches("")); - EXPECT_FALSE(m2.Matches("Hello")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - Matcher m3 = - StrCaseNe(internal::StringView("Hello")); - EXPECT_TRUE(m3.Matches(internal::StringView("Hi"))); - EXPECT_TRUE(m3.Matches(internal::StringView())); - EXPECT_FALSE(m3.Matches(internal::StringView("Hello"))); - EXPECT_FALSE(m3.Matches(internal::StringView("hello"))); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(StrCaseNeTest, CanDescribeSelf) { - Matcher m = StrCaseNe("Hi"); - EXPECT_EQ("isn't equal to (ignoring case) \"Hi\"", Describe(m)); -} - -// Tests that HasSubstr() works for matching string-typed values. -TEST(HasSubstrTest, WorksForStringClasses) { - const Matcher m1 = HasSubstr("foo"); - EXPECT_TRUE(m1.Matches(std::string("I love food."))); - EXPECT_FALSE(m1.Matches(std::string("tofo"))); - - const Matcher m2 = HasSubstr("foo"); - EXPECT_TRUE(m2.Matches(std::string("I love food."))); - EXPECT_FALSE(m2.Matches(std::string("tofo"))); - - const Matcher m_empty = HasSubstr(""); - EXPECT_TRUE(m_empty.Matches(std::string())); - EXPECT_TRUE(m_empty.Matches(std::string("not empty"))); -} - -// Tests that HasSubstr() works for matching C-string-typed values. -TEST(HasSubstrTest, WorksForCStrings) { - const Matcher m1 = HasSubstr("foo"); - EXPECT_TRUE(m1.Matches(const_cast("I love food."))); - EXPECT_FALSE(m1.Matches(const_cast("tofo"))); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = HasSubstr("foo"); - EXPECT_TRUE(m2.Matches("I love food.")); - EXPECT_FALSE(m2.Matches("tofo")); - EXPECT_FALSE(m2.Matches(nullptr)); - - const Matcher m_empty = HasSubstr(""); - EXPECT_TRUE(m_empty.Matches("not empty")); - EXPECT_TRUE(m_empty.Matches("")); - EXPECT_FALSE(m_empty.Matches(nullptr)); -} - -#if GTEST_INTERNAL_HAS_STRING_VIEW -// Tests that HasSubstr() works for matching StringView-typed values. 
-TEST(HasSubstrTest, WorksForStringViewClasses) { - const Matcher m1 = - HasSubstr(internal::StringView("foo")); - EXPECT_TRUE(m1.Matches(internal::StringView("I love food."))); - EXPECT_FALSE(m1.Matches(internal::StringView("tofo"))); - EXPECT_FALSE(m1.Matches(internal::StringView())); - - const Matcher m2 = HasSubstr("foo"); - EXPECT_TRUE(m2.Matches(internal::StringView("I love food."))); - EXPECT_FALSE(m2.Matches(internal::StringView("tofo"))); - EXPECT_FALSE(m2.Matches(internal::StringView())); - - const Matcher m3 = HasSubstr(""); - EXPECT_TRUE(m3.Matches(internal::StringView("foo"))); - EXPECT_TRUE(m3.Matches(internal::StringView(""))); - EXPECT_TRUE(m3.Matches(internal::StringView())); -} -#endif // GTEST_INTERNAL_HAS_STRING_VIEW - -// Tests that HasSubstr(s) describes itself properly. -TEST(HasSubstrTest, CanDescribeSelf) { - Matcher m = HasSubstr("foo\n\""); - EXPECT_EQ("has substring \"foo\\n\\\"\"", Describe(m)); -} - -TEST(KeyTest, CanDescribeSelf) { - Matcher&> m = Key("foo"); - EXPECT_EQ("has a key that is equal to \"foo\"", Describe(m)); - EXPECT_EQ("doesn't have a key that is equal to \"foo\"", DescribeNegation(m)); -} - -TEST(KeyTest, ExplainsResult) { - Matcher > m = Key(GreaterThan(10)); - EXPECT_EQ("whose first field is a value which is 5 less than 10", - Explain(m, make_pair(5, true))); - EXPECT_EQ("whose first field is a value which is 5 more than 10", - Explain(m, make_pair(15, true))); -} - -TEST(KeyTest, MatchesCorrectly) { - pair p(25, "foo"); - EXPECT_THAT(p, Key(25)); - EXPECT_THAT(p, Not(Key(42))); - EXPECT_THAT(p, Key(Ge(20))); - EXPECT_THAT(p, Not(Key(Lt(25)))); -} - -TEST(KeyTest, WorksWithMoveOnly) { - pair, std::unique_ptr> p; - EXPECT_THAT(p, Key(Eq(nullptr))); -} - -template -struct Tag {}; - -struct PairWithGet { - int member_1; - std::string member_2; - using first_type = int; - using second_type = std::string; - - const int& GetImpl(Tag<0>) const { return member_1; } - const std::string& GetImpl(Tag<1>) const { return 
member_2; } -}; -template -auto get(const PairWithGet& value) -> decltype(value.GetImpl(Tag())) { - return value.GetImpl(Tag()); -} -TEST(PairTest, MatchesPairWithGetCorrectly) { - PairWithGet p{25, "foo"}; - EXPECT_THAT(p, Key(25)); - EXPECT_THAT(p, Not(Key(42))); - EXPECT_THAT(p, Key(Ge(20))); - EXPECT_THAT(p, Not(Key(Lt(25)))); - - std::vector v = {{11, "Foo"}, {29, "gMockIsBestMock"}}; - EXPECT_THAT(v, Contains(Key(29))); -} - -TEST(KeyTest, SafelyCastsInnerMatcher) { - Matcher is_positive = Gt(0); - Matcher is_negative = Lt(0); - pair p('a', true); - EXPECT_THAT(p, Key(is_positive)); - EXPECT_THAT(p, Not(Key(is_negative))); -} - -TEST(KeyTest, InsideContainsUsingMap) { - map container; - container.insert(make_pair(1, 'a')); - container.insert(make_pair(2, 'b')); - container.insert(make_pair(4, 'c')); - EXPECT_THAT(container, Contains(Key(1))); - EXPECT_THAT(container, Not(Contains(Key(3)))); -} - -TEST(KeyTest, InsideContainsUsingMultimap) { - multimap container; - container.insert(make_pair(1, 'a')); - container.insert(make_pair(2, 'b')); - container.insert(make_pair(4, 'c')); - - EXPECT_THAT(container, Not(Contains(Key(25)))); - container.insert(make_pair(25, 'd')); - EXPECT_THAT(container, Contains(Key(25))); - container.insert(make_pair(25, 'e')); - EXPECT_THAT(container, Contains(Key(25))); - - EXPECT_THAT(container, Contains(Key(1))); - EXPECT_THAT(container, Not(Contains(Key(3)))); -} - -TEST(PairTest, Typing) { - // Test verifies the following type conversions can be compiled. 
- Matcher&> m1 = Pair("foo", 42); - Matcher > m2 = Pair("foo", 42); - Matcher > m3 = Pair("foo", 42); - - Matcher > m4 = Pair(25, "42"); - Matcher > m5 = Pair("25", 42); -} - -TEST(PairTest, CanDescribeSelf) { - Matcher&> m1 = Pair("foo", 42); - EXPECT_EQ("has a first field that is equal to \"foo\"" - ", and has a second field that is equal to 42", - Describe(m1)); - EXPECT_EQ("has a first field that isn't equal to \"foo\"" - ", or has a second field that isn't equal to 42", - DescribeNegation(m1)); - // Double and triple negation (1 or 2 times not and description of negation). - Matcher&> m2 = Not(Pair(Not(13), 42)); - EXPECT_EQ("has a first field that isn't equal to 13" - ", and has a second field that is equal to 42", - DescribeNegation(m2)); -} - -TEST(PairTest, CanExplainMatchResultTo) { - // If neither field matches, Pair() should explain about the first - // field. - const Matcher > m = Pair(GreaterThan(0), GreaterThan(0)); - EXPECT_EQ("whose first field does not match, which is 1 less than 0", - Explain(m, make_pair(-1, -2))); - - // If the first field matches but the second doesn't, Pair() should - // explain about the second field. - EXPECT_EQ("whose second field does not match, which is 2 less than 0", - Explain(m, make_pair(1, -2))); - - // If the first field doesn't match but the second does, Pair() - // should explain about the first field. - EXPECT_EQ("whose first field does not match, which is 1 less than 0", - Explain(m, make_pair(-1, 2))); - - // If both fields match, Pair() should explain about them both. - EXPECT_EQ("whose both fields match, where the first field is a value " - "which is 1 more than 0, and the second field is a value " - "which is 2 more than 0", - Explain(m, make_pair(1, 2))); - - // If only the first match has an explanation, only this explanation should - // be printed. 
- const Matcher > explain_first = Pair(GreaterThan(0), 0); - EXPECT_EQ("whose both fields match, where the first field is a value " - "which is 1 more than 0", - Explain(explain_first, make_pair(1, 0))); - - // If only the second match has an explanation, only this explanation should - // be printed. - const Matcher > explain_second = Pair(0, GreaterThan(0)); - EXPECT_EQ("whose both fields match, where the second field is a value " - "which is 1 more than 0", - Explain(explain_second, make_pair(0, 1))); -} - -TEST(PairTest, MatchesCorrectly) { - pair p(25, "foo"); - - // Both fields match. - EXPECT_THAT(p, Pair(25, "foo")); - EXPECT_THAT(p, Pair(Ge(20), HasSubstr("o"))); - - // 'first' doesnt' match, but 'second' matches. - EXPECT_THAT(p, Not(Pair(42, "foo"))); - EXPECT_THAT(p, Not(Pair(Lt(25), "foo"))); - - // 'first' matches, but 'second' doesn't match. - EXPECT_THAT(p, Not(Pair(25, "bar"))); - EXPECT_THAT(p, Not(Pair(25, Not("foo")))); - - // Neither field matches. - EXPECT_THAT(p, Not(Pair(13, "bar"))); - EXPECT_THAT(p, Not(Pair(Lt(13), HasSubstr("a")))); -} - -TEST(PairTest, WorksWithMoveOnly) { - pair, std::unique_ptr> p; - p.second.reset(new int(7)); - EXPECT_THAT(p, Pair(Eq(nullptr), Ne(nullptr))); -} - -TEST(PairTest, SafelyCastsInnerMatchers) { - Matcher is_positive = Gt(0); - Matcher is_negative = Lt(0); - pair p('a', true); - EXPECT_THAT(p, Pair(is_positive, _)); - EXPECT_THAT(p, Not(Pair(is_negative, _))); - EXPECT_THAT(p, Pair(_, is_positive)); - EXPECT_THAT(p, Not(Pair(_, is_negative))); -} - -TEST(PairTest, InsideContainsUsingMap) { - map container; - container.insert(make_pair(1, 'a')); - container.insert(make_pair(2, 'b')); - container.insert(make_pair(4, 'c')); - EXPECT_THAT(container, Contains(Pair(1, 'a'))); - EXPECT_THAT(container, Contains(Pair(1, _))); - EXPECT_THAT(container, Contains(Pair(_, 'a'))); - EXPECT_THAT(container, Not(Contains(Pair(3, _)))); -} - -TEST(FieldsAreTest, MatchesCorrectly) { - std::tuple p(25, "foo", .5); - - // All 
fields match. - EXPECT_THAT(p, FieldsAre(25, "foo", .5)); - EXPECT_THAT(p, FieldsAre(Ge(20), HasSubstr("o"), DoubleEq(.5))); - - // Some don't match. - EXPECT_THAT(p, Not(FieldsAre(26, "foo", .5))); - EXPECT_THAT(p, Not(FieldsAre(25, "fo", .5))); - EXPECT_THAT(p, Not(FieldsAre(25, "foo", .6))); -} - -TEST(FieldsAreTest, CanDescribeSelf) { - Matcher&> m1 = FieldsAre("foo", 42); - EXPECT_EQ( - "has field #0 that is equal to \"foo\"" - ", and has field #1 that is equal to 42", - Describe(m1)); - EXPECT_EQ( - "has field #0 that isn't equal to \"foo\"" - ", or has field #1 that isn't equal to 42", - DescribeNegation(m1)); -} - -TEST(FieldsAreTest, CanExplainMatchResultTo) { - // The first one that fails is the one that gives the error. - Matcher> m = - FieldsAre(GreaterThan(0), GreaterThan(0), GreaterThan(0)); - - EXPECT_EQ("whose field #0 does not match, which is 1 less than 0", - Explain(m, std::make_tuple(-1, -2, -3))); - EXPECT_EQ("whose field #1 does not match, which is 2 less than 0", - Explain(m, std::make_tuple(1, -2, -3))); - EXPECT_EQ("whose field #2 does not match, which is 3 less than 0", - Explain(m, std::make_tuple(1, 2, -3))); - - // If they all match, we get a long explanation of success. - EXPECT_EQ( - "whose all elements match, " - "where field #0 is a value which is 1 more than 0" - ", and field #1 is a value which is 2 more than 0" - ", and field #2 is a value which is 3 more than 0", - Explain(m, std::make_tuple(1, 2, 3))); - - // Only print those that have an explanation. - m = FieldsAre(GreaterThan(0), 0, GreaterThan(0)); - EXPECT_EQ( - "whose all elements match, " - "where field #0 is a value which is 1 more than 0" - ", and field #2 is a value which is 3 more than 0", - Explain(m, std::make_tuple(1, 0, 3))); - - // If only one has an explanation, then print that one. 
- m = FieldsAre(0, GreaterThan(0), 0); - EXPECT_EQ( - "whose all elements match, " - "where field #1 is a value which is 1 more than 0", - Explain(m, std::make_tuple(0, 1, 0))); -} - -#if defined(__cpp_structured_bindings) && __cpp_structured_bindings >= 201606 -TEST(FieldsAreTest, StructuredBindings) { - // testing::FieldsAre can also match aggregates and such with C++17 and up. - struct MyType { - int i; - std::string str; - }; - EXPECT_THAT((MyType{17, "foo"}), FieldsAre(Eq(17), HasSubstr("oo"))); - - // Test all the supported arities. - struct MyVarType1 { - int a; - }; - EXPECT_THAT(MyVarType1{}, FieldsAre(0)); - struct MyVarType2 { - int a, b; - }; - EXPECT_THAT(MyVarType2{}, FieldsAre(0, 0)); - struct MyVarType3 { - int a, b, c; - }; - EXPECT_THAT(MyVarType3{}, FieldsAre(0, 0, 0)); - struct MyVarType4 { - int a, b, c, d; - }; - EXPECT_THAT(MyVarType4{}, FieldsAre(0, 0, 0, 0)); - struct MyVarType5 { - int a, b, c, d, e; - }; - EXPECT_THAT(MyVarType5{}, FieldsAre(0, 0, 0, 0, 0)); - struct MyVarType6 { - int a, b, c, d, e, f; - }; - EXPECT_THAT(MyVarType6{}, FieldsAre(0, 0, 0, 0, 0, 0)); - struct MyVarType7 { - int a, b, c, d, e, f, g; - }; - EXPECT_THAT(MyVarType7{}, FieldsAre(0, 0, 0, 0, 0, 0, 0)); - struct MyVarType8 { - int a, b, c, d, e, f, g, h; - }; - EXPECT_THAT(MyVarType8{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType9 { - int a, b, c, d, e, f, g, h, i; - }; - EXPECT_THAT(MyVarType9{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType10 { - int a, b, c, d, e, f, g, h, i, j; - }; - EXPECT_THAT(MyVarType10{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType11 { - int a, b, c, d, e, f, g, h, i, j, k; - }; - EXPECT_THAT(MyVarType11{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType12 { - int a, b, c, d, e, f, g, h, i, j, k, l; - }; - EXPECT_THAT(MyVarType12{}, FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType13 { - int a, b, c, d, e, f, g, h, i, j, k, l, m; - }; - EXPECT_THAT(MyVarType13{}, 
FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType14 { - int a, b, c, d, e, f, g, h, i, j, k, l, m, n; - }; - EXPECT_THAT(MyVarType14{}, - FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType15 { - int a, b, c, d, e, f, g, h, i, j, k, l, m, n, o; - }; - EXPECT_THAT(MyVarType15{}, - FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - struct MyVarType16 { - int a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p; - }; - EXPECT_THAT(MyVarType16{}, - FieldsAre(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); -} -#endif - -TEST(ContainsTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(Contains(Pointee(2)))); - helper.Call(MakeUniquePtrs({1, 2})); -} - -TEST(PairTest, UseGetInsteadOfMembers) { - PairWithGet pair{7, "ABC"}; - EXPECT_THAT(pair, Pair(7, "ABC")); - EXPECT_THAT(pair, Pair(Ge(7), HasSubstr("AB"))); - EXPECT_THAT(pair, Not(Pair(Lt(7), "ABC"))); - - std::vector v = {{11, "Foo"}, {29, "gMockIsBestMock"}}; - EXPECT_THAT(v, - ElementsAre(Pair(11, std::string("Foo")), Pair(Ge(10), Not("")))); -} - -// Tests StartsWith(s). 
- -TEST(StartsWithTest, MatchesStringWithGivenPrefix) { - const Matcher m1 = StartsWith(std::string("")); - EXPECT_TRUE(m1.Matches("Hi")); - EXPECT_TRUE(m1.Matches("")); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = StartsWith("Hi"); - EXPECT_TRUE(m2.Matches("Hi")); - EXPECT_TRUE(m2.Matches("Hi Hi!")); - EXPECT_TRUE(m2.Matches("High")); - EXPECT_FALSE(m2.Matches("H")); - EXPECT_FALSE(m2.Matches(" Hi")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - const Matcher m_empty = - StartsWith(internal::StringView("")); - EXPECT_TRUE(m_empty.Matches(internal::StringView())); - EXPECT_TRUE(m_empty.Matches(internal::StringView(""))); - EXPECT_TRUE(m_empty.Matches(internal::StringView("not empty"))); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(StartsWithTest, CanDescribeSelf) { - Matcher m = StartsWith("Hi"); - EXPECT_EQ("starts with \"Hi\"", Describe(m)); -} - -// Tests EndsWith(s). - -TEST(EndsWithTest, MatchesStringWithGivenSuffix) { - const Matcher m1 = EndsWith(""); - EXPECT_TRUE(m1.Matches("Hi")); - EXPECT_TRUE(m1.Matches("")); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = EndsWith(std::string("Hi")); - EXPECT_TRUE(m2.Matches("Hi")); - EXPECT_TRUE(m2.Matches("Wow Hi Hi")); - EXPECT_TRUE(m2.Matches("Super Hi")); - EXPECT_FALSE(m2.Matches("i")); - EXPECT_FALSE(m2.Matches("Hi ")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - const Matcher m4 = - EndsWith(internal::StringView("")); - EXPECT_TRUE(m4.Matches("Hi")); - EXPECT_TRUE(m4.Matches("")); - EXPECT_TRUE(m4.Matches(internal::StringView())); - EXPECT_TRUE(m4.Matches(internal::StringView(""))); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(EndsWithTest, CanDescribeSelf) { - Matcher m = EndsWith("Hi"); - EXPECT_EQ("ends with \"Hi\"", Describe(m)); -} - -// Tests MatchesRegex(). 
- -TEST(MatchesRegexTest, MatchesStringMatchingGivenRegex) { - const Matcher m1 = MatchesRegex("a.*z"); - EXPECT_TRUE(m1.Matches("az")); - EXPECT_TRUE(m1.Matches("abcz")); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = MatchesRegex(new RE("a.*z")); - EXPECT_TRUE(m2.Matches("azbz")); - EXPECT_FALSE(m2.Matches("az1")); - EXPECT_FALSE(m2.Matches("1az")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - const Matcher m3 = MatchesRegex("a.*z"); - EXPECT_TRUE(m3.Matches(internal::StringView("az"))); - EXPECT_TRUE(m3.Matches(internal::StringView("abcz"))); - EXPECT_FALSE(m3.Matches(internal::StringView("1az"))); - EXPECT_FALSE(m3.Matches(internal::StringView())); - const Matcher m4 = - MatchesRegex(internal::StringView("")); - EXPECT_TRUE(m4.Matches(internal::StringView(""))); - EXPECT_TRUE(m4.Matches(internal::StringView())); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(MatchesRegexTest, CanDescribeSelf) { - Matcher m1 = MatchesRegex(std::string("Hi.*")); - EXPECT_EQ("matches regular expression \"Hi.*\"", Describe(m1)); - - Matcher m2 = MatchesRegex(new RE("a.*")); - EXPECT_EQ("matches regular expression \"a.*\"", Describe(m2)); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - Matcher m3 = MatchesRegex(new RE("0.*")); - EXPECT_EQ("matches regular expression \"0.*\"", Describe(m3)); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -// Tests ContainsRegex(). 
- -TEST(ContainsRegexTest, MatchesStringContainingGivenRegex) { - const Matcher m1 = ContainsRegex(std::string("a.*z")); - EXPECT_TRUE(m1.Matches("az")); - EXPECT_TRUE(m1.Matches("0abcz1")); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = ContainsRegex(new RE("a.*z")); - EXPECT_TRUE(m2.Matches("azbz")); - EXPECT_TRUE(m2.Matches("az1")); - EXPECT_FALSE(m2.Matches("1a")); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - const Matcher m3 = - ContainsRegex(new RE("a.*z")); - EXPECT_TRUE(m3.Matches(internal::StringView("azbz"))); - EXPECT_TRUE(m3.Matches(internal::StringView("az1"))); - EXPECT_FALSE(m3.Matches(internal::StringView("1a"))); - EXPECT_FALSE(m3.Matches(internal::StringView())); - const Matcher m4 = - ContainsRegex(internal::StringView("")); - EXPECT_TRUE(m4.Matches(internal::StringView(""))); - EXPECT_TRUE(m4.Matches(internal::StringView())); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -TEST(ContainsRegexTest, CanDescribeSelf) { - Matcher m1 = ContainsRegex("Hi.*"); - EXPECT_EQ("contains regular expression \"Hi.*\"", Describe(m1)); - - Matcher m2 = ContainsRegex(new RE("a.*")); - EXPECT_EQ("contains regular expression \"a.*\"", Describe(m2)); - -#if GTEST_INTERNAL_HAS_STRING_VIEW - Matcher m3 = ContainsRegex(new RE("0.*")); - EXPECT_EQ("contains regular expression \"0.*\"", Describe(m3)); -#endif // GTEST_INTERNAL_HAS_STRING_VIEW -} - -// Tests for wide strings. 
-#if GTEST_HAS_STD_WSTRING -TEST(StdWideStrEqTest, MatchesEqual) { - Matcher m = StrEq(::std::wstring(L"Hello")); - EXPECT_TRUE(m.Matches(L"Hello")); - EXPECT_FALSE(m.Matches(L"hello")); - EXPECT_FALSE(m.Matches(nullptr)); - - Matcher m2 = StrEq(L"Hello"); - EXPECT_TRUE(m2.Matches(L"Hello")); - EXPECT_FALSE(m2.Matches(L"Hi")); - - Matcher m3 = StrEq(L"\xD3\x576\x8D3\xC74D"); - EXPECT_TRUE(m3.Matches(L"\xD3\x576\x8D3\xC74D")); - EXPECT_FALSE(m3.Matches(L"\xD3\x576\x8D3\xC74E")); - - ::std::wstring str(L"01204500800"); - str[3] = L'\0'; - Matcher m4 = StrEq(str); - EXPECT_TRUE(m4.Matches(str)); - str[0] = str[6] = str[7] = str[9] = str[10] = L'\0'; - Matcher m5 = StrEq(str); - EXPECT_TRUE(m5.Matches(str)); -} - -TEST(StdWideStrEqTest, CanDescribeSelf) { - Matcher< ::std::wstring> m = StrEq(L"Hi-\'\"?\\\a\b\f\n\r\t\v"); - EXPECT_EQ("is equal to L\"Hi-\'\\\"?\\\\\\a\\b\\f\\n\\r\\t\\v\"", - Describe(m)); - - Matcher< ::std::wstring> m2 = StrEq(L"\xD3\x576\x8D3\xC74D"); - EXPECT_EQ("is equal to L\"\\xD3\\x576\\x8D3\\xC74D\"", - Describe(m2)); - - ::std::wstring str(L"01204500800"); - str[3] = L'\0'; - Matcher m4 = StrEq(str); - EXPECT_EQ("is equal to L\"012\\04500800\"", Describe(m4)); - str[0] = str[6] = str[7] = str[9] = str[10] = L'\0'; - Matcher m5 = StrEq(str); - EXPECT_EQ("is equal to L\"\\012\\045\\0\\08\\0\\0\"", Describe(m5)); -} - -TEST(StdWideStrNeTest, MatchesUnequalString) { - Matcher m = StrNe(L"Hello"); - EXPECT_TRUE(m.Matches(L"")); - EXPECT_TRUE(m.Matches(nullptr)); - EXPECT_FALSE(m.Matches(L"Hello")); - - Matcher< ::std::wstring> m2 = StrNe(::std::wstring(L"Hello")); - EXPECT_TRUE(m2.Matches(L"hello")); - EXPECT_FALSE(m2.Matches(L"Hello")); -} - -TEST(StdWideStrNeTest, CanDescribeSelf) { - Matcher m = StrNe(L"Hi"); - EXPECT_EQ("isn't equal to L\"Hi\"", Describe(m)); -} - -TEST(StdWideStrCaseEqTest, MatchesEqualStringIgnoringCase) { - Matcher m = StrCaseEq(::std::wstring(L"Hello")); - EXPECT_TRUE(m.Matches(L"Hello")); - EXPECT_TRUE(m.Matches(L"hello")); 
- EXPECT_FALSE(m.Matches(L"Hi")); - EXPECT_FALSE(m.Matches(nullptr)); - - Matcher m2 = StrCaseEq(L"Hello"); - EXPECT_TRUE(m2.Matches(L"hello")); - EXPECT_FALSE(m2.Matches(L"Hi")); -} - -TEST(StdWideStrCaseEqTest, MatchesEqualStringWith0IgnoringCase) { - ::std::wstring str1(L"oabocdooeoo"); - ::std::wstring str2(L"OABOCDOOEOO"); - Matcher m0 = StrCaseEq(str1); - EXPECT_FALSE(m0.Matches(str2 + ::std::wstring(1, L'\0'))); - - str1[3] = str2[3] = L'\0'; - Matcher m1 = StrCaseEq(str1); - EXPECT_TRUE(m1.Matches(str2)); - - str1[0] = str1[6] = str1[7] = str1[10] = L'\0'; - str2[0] = str2[6] = str2[7] = str2[10] = L'\0'; - Matcher m2 = StrCaseEq(str1); - str1[9] = str2[9] = L'\0'; - EXPECT_FALSE(m2.Matches(str2)); - - Matcher m3 = StrCaseEq(str1); - EXPECT_TRUE(m3.Matches(str2)); - - EXPECT_FALSE(m3.Matches(str2 + L"x")); - str2.append(1, L'\0'); - EXPECT_FALSE(m3.Matches(str2)); - EXPECT_FALSE(m3.Matches(::std::wstring(str2, 0, 9))); -} - -TEST(StdWideStrCaseEqTest, CanDescribeSelf) { - Matcher< ::std::wstring> m = StrCaseEq(L"Hi"); - EXPECT_EQ("is equal to (ignoring case) L\"Hi\"", Describe(m)); -} - -TEST(StdWideStrCaseNeTest, MatchesUnequalStringIgnoringCase) { - Matcher m = StrCaseNe(L"Hello"); - EXPECT_TRUE(m.Matches(L"Hi")); - EXPECT_TRUE(m.Matches(nullptr)); - EXPECT_FALSE(m.Matches(L"Hello")); - EXPECT_FALSE(m.Matches(L"hello")); - - Matcher< ::std::wstring> m2 = StrCaseNe(::std::wstring(L"Hello")); - EXPECT_TRUE(m2.Matches(L"")); - EXPECT_FALSE(m2.Matches(L"Hello")); -} - -TEST(StdWideStrCaseNeTest, CanDescribeSelf) { - Matcher m = StrCaseNe(L"Hi"); - EXPECT_EQ("isn't equal to (ignoring case) L\"Hi\"", Describe(m)); -} - -// Tests that HasSubstr() works for matching wstring-typed values. 
-TEST(StdWideHasSubstrTest, WorksForStringClasses) { - const Matcher< ::std::wstring> m1 = HasSubstr(L"foo"); - EXPECT_TRUE(m1.Matches(::std::wstring(L"I love food."))); - EXPECT_FALSE(m1.Matches(::std::wstring(L"tofo"))); - - const Matcher m2 = HasSubstr(L"foo"); - EXPECT_TRUE(m2.Matches(::std::wstring(L"I love food."))); - EXPECT_FALSE(m2.Matches(::std::wstring(L"tofo"))); -} - -// Tests that HasSubstr() works for matching C-wide-string-typed values. -TEST(StdWideHasSubstrTest, WorksForCStrings) { - const Matcher m1 = HasSubstr(L"foo"); - EXPECT_TRUE(m1.Matches(const_cast(L"I love food."))); - EXPECT_FALSE(m1.Matches(const_cast(L"tofo"))); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = HasSubstr(L"foo"); - EXPECT_TRUE(m2.Matches(L"I love food.")); - EXPECT_FALSE(m2.Matches(L"tofo")); - EXPECT_FALSE(m2.Matches(nullptr)); -} - -// Tests that HasSubstr(s) describes itself properly. -TEST(StdWideHasSubstrTest, CanDescribeSelf) { - Matcher< ::std::wstring> m = HasSubstr(L"foo\n\""); - EXPECT_EQ("has substring L\"foo\\n\\\"\"", Describe(m)); -} - -// Tests StartsWith(s). - -TEST(StdWideStartsWithTest, MatchesStringWithGivenPrefix) { - const Matcher m1 = StartsWith(::std::wstring(L"")); - EXPECT_TRUE(m1.Matches(L"Hi")); - EXPECT_TRUE(m1.Matches(L"")); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = StartsWith(L"Hi"); - EXPECT_TRUE(m2.Matches(L"Hi")); - EXPECT_TRUE(m2.Matches(L"Hi Hi!")); - EXPECT_TRUE(m2.Matches(L"High")); - EXPECT_FALSE(m2.Matches(L"H")); - EXPECT_FALSE(m2.Matches(L" Hi")); -} - -TEST(StdWideStartsWithTest, CanDescribeSelf) { - Matcher m = StartsWith(L"Hi"); - EXPECT_EQ("starts with L\"Hi\"", Describe(m)); -} - -// Tests EndsWith(s). 
- -TEST(StdWideEndsWithTest, MatchesStringWithGivenSuffix) { - const Matcher m1 = EndsWith(L""); - EXPECT_TRUE(m1.Matches(L"Hi")); - EXPECT_TRUE(m1.Matches(L"")); - EXPECT_FALSE(m1.Matches(nullptr)); - - const Matcher m2 = EndsWith(::std::wstring(L"Hi")); - EXPECT_TRUE(m2.Matches(L"Hi")); - EXPECT_TRUE(m2.Matches(L"Wow Hi Hi")); - EXPECT_TRUE(m2.Matches(L"Super Hi")); - EXPECT_FALSE(m2.Matches(L"i")); - EXPECT_FALSE(m2.Matches(L"Hi ")); -} - -TEST(StdWideEndsWithTest, CanDescribeSelf) { - Matcher m = EndsWith(L"Hi"); - EXPECT_EQ("ends with L\"Hi\"", Describe(m)); -} - -#endif // GTEST_HAS_STD_WSTRING - -typedef ::std::tuple Tuple2; // NOLINT - -// Tests that Eq() matches a 2-tuple where the first field == the -// second field. -TEST(Eq2Test, MatchesEqualArguments) { - Matcher m = Eq(); - EXPECT_TRUE(m.Matches(Tuple2(5L, 5))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 6))); -} - -// Tests that Eq() describes itself properly. -TEST(Eq2Test, CanDescribeSelf) { - Matcher m = Eq(); - EXPECT_EQ("are an equal pair", Describe(m)); -} - -// Tests that Ge() matches a 2-tuple where the first field >= the -// second field. -TEST(Ge2Test, MatchesGreaterThanOrEqualArguments) { - Matcher m = Ge(); - EXPECT_TRUE(m.Matches(Tuple2(5L, 4))); - EXPECT_TRUE(m.Matches(Tuple2(5L, 5))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 6))); -} - -// Tests that Ge() describes itself properly. -TEST(Ge2Test, CanDescribeSelf) { - Matcher m = Ge(); - EXPECT_EQ("are a pair where the first >= the second", Describe(m)); -} - -// Tests that Gt() matches a 2-tuple where the first field > the -// second field. -TEST(Gt2Test, MatchesGreaterThanArguments) { - Matcher m = Gt(); - EXPECT_TRUE(m.Matches(Tuple2(5L, 4))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 5))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 6))); -} - -// Tests that Gt() describes itself properly. 
-TEST(Gt2Test, CanDescribeSelf) { - Matcher m = Gt(); - EXPECT_EQ("are a pair where the first > the second", Describe(m)); -} - -// Tests that Le() matches a 2-tuple where the first field <= the -// second field. -TEST(Le2Test, MatchesLessThanOrEqualArguments) { - Matcher m = Le(); - EXPECT_TRUE(m.Matches(Tuple2(5L, 6))); - EXPECT_TRUE(m.Matches(Tuple2(5L, 5))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 4))); -} - -// Tests that Le() describes itself properly. -TEST(Le2Test, CanDescribeSelf) { - Matcher m = Le(); - EXPECT_EQ("are a pair where the first <= the second", Describe(m)); -} - -// Tests that Lt() matches a 2-tuple where the first field < the -// second field. -TEST(Lt2Test, MatchesLessThanArguments) { - Matcher m = Lt(); - EXPECT_TRUE(m.Matches(Tuple2(5L, 6))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 5))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 4))); -} - -// Tests that Lt() describes itself properly. -TEST(Lt2Test, CanDescribeSelf) { - Matcher m = Lt(); - EXPECT_EQ("are a pair where the first < the second", Describe(m)); -} - -// Tests that Ne() matches a 2-tuple where the first field != the -// second field. -TEST(Ne2Test, MatchesUnequalArguments) { - Matcher m = Ne(); - EXPECT_TRUE(m.Matches(Tuple2(5L, 6))); - EXPECT_TRUE(m.Matches(Tuple2(5L, 4))); - EXPECT_FALSE(m.Matches(Tuple2(5L, 5))); -} - -// Tests that Ne() describes itself properly. -TEST(Ne2Test, CanDescribeSelf) { - Matcher m = Ne(); - EXPECT_EQ("are an unequal pair", Describe(m)); -} - -TEST(PairMatchBaseTest, WorksWithMoveOnly) { - using Pointers = std::tuple, std::unique_ptr>; - Matcher matcher = Eq(); - Pointers pointers; - // Tested values don't matter; the point is that matcher does not copy the - // matched values. - EXPECT_TRUE(matcher.Matches(pointers)); -} - -// Tests that IsNan() matches a NaN, with float. 
-TEST(IsNan, FloatMatchesNan) { - float quiet_nan = std::numeric_limits::quiet_NaN(); - float other_nan = std::nanf("1"); - float real_value = 1.0f; - - Matcher m = IsNan(); - EXPECT_TRUE(m.Matches(quiet_nan)); - EXPECT_TRUE(m.Matches(other_nan)); - EXPECT_FALSE(m.Matches(real_value)); - - Matcher m_ref = IsNan(); - EXPECT_TRUE(m_ref.Matches(quiet_nan)); - EXPECT_TRUE(m_ref.Matches(other_nan)); - EXPECT_FALSE(m_ref.Matches(real_value)); - - Matcher m_cref = IsNan(); - EXPECT_TRUE(m_cref.Matches(quiet_nan)); - EXPECT_TRUE(m_cref.Matches(other_nan)); - EXPECT_FALSE(m_cref.Matches(real_value)); -} - -// Tests that IsNan() matches a NaN, with double. -TEST(IsNan, DoubleMatchesNan) { - double quiet_nan = std::numeric_limits::quiet_NaN(); - double other_nan = std::nan("1"); - double real_value = 1.0; - - Matcher m = IsNan(); - EXPECT_TRUE(m.Matches(quiet_nan)); - EXPECT_TRUE(m.Matches(other_nan)); - EXPECT_FALSE(m.Matches(real_value)); - - Matcher m_ref = IsNan(); - EXPECT_TRUE(m_ref.Matches(quiet_nan)); - EXPECT_TRUE(m_ref.Matches(other_nan)); - EXPECT_FALSE(m_ref.Matches(real_value)); - - Matcher m_cref = IsNan(); - EXPECT_TRUE(m_cref.Matches(quiet_nan)); - EXPECT_TRUE(m_cref.Matches(other_nan)); - EXPECT_FALSE(m_cref.Matches(real_value)); -} - -// Tests that IsNan() matches a NaN, with long double. -TEST(IsNan, LongDoubleMatchesNan) { - long double quiet_nan = std::numeric_limits::quiet_NaN(); - long double other_nan = std::nan("1"); - long double real_value = 1.0; - - Matcher m = IsNan(); - EXPECT_TRUE(m.Matches(quiet_nan)); - EXPECT_TRUE(m.Matches(other_nan)); - EXPECT_FALSE(m.Matches(real_value)); - - Matcher m_ref = IsNan(); - EXPECT_TRUE(m_ref.Matches(quiet_nan)); - EXPECT_TRUE(m_ref.Matches(other_nan)); - EXPECT_FALSE(m_ref.Matches(real_value)); - - Matcher m_cref = IsNan(); - EXPECT_TRUE(m_cref.Matches(quiet_nan)); - EXPECT_TRUE(m_cref.Matches(other_nan)); - EXPECT_FALSE(m_cref.Matches(real_value)); -} - -// Tests that IsNan() works with Not. 
-TEST(IsNan, NotMatchesNan) { - Matcher mf = Not(IsNan()); - EXPECT_FALSE(mf.Matches(std::numeric_limits::quiet_NaN())); - EXPECT_FALSE(mf.Matches(std::nanf("1"))); - EXPECT_TRUE(mf.Matches(1.0)); - - Matcher md = Not(IsNan()); - EXPECT_FALSE(md.Matches(std::numeric_limits::quiet_NaN())); - EXPECT_FALSE(md.Matches(std::nan("1"))); - EXPECT_TRUE(md.Matches(1.0)); - - Matcher mld = Not(IsNan()); - EXPECT_FALSE(mld.Matches(std::numeric_limits::quiet_NaN())); - EXPECT_FALSE(mld.Matches(std::nanl("1"))); - EXPECT_TRUE(mld.Matches(1.0)); -} - -// Tests that IsNan() can describe itself. -TEST(IsNan, CanDescribeSelf) { - Matcher mf = IsNan(); - EXPECT_EQ("is NaN", Describe(mf)); - - Matcher md = IsNan(); - EXPECT_EQ("is NaN", Describe(md)); - - Matcher mld = IsNan(); - EXPECT_EQ("is NaN", Describe(mld)); -} - -// Tests that IsNan() can describe itself with Not. -TEST(IsNan, CanDescribeSelfWithNot) { - Matcher mf = Not(IsNan()); - EXPECT_EQ("isn't NaN", Describe(mf)); - - Matcher md = Not(IsNan()); - EXPECT_EQ("isn't NaN", Describe(md)); - - Matcher mld = Not(IsNan()); - EXPECT_EQ("isn't NaN", Describe(mld)); -} - -// Tests that FloatEq() matches a 2-tuple where -// FloatEq(first field) matches the second field. -TEST(FloatEq2Test, MatchesEqualArguments) { - typedef ::std::tuple Tpl; - Matcher m = FloatEq(); - EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(0.3f, 0.1f + 0.1f + 0.1f))); - EXPECT_FALSE(m.Matches(Tpl(1.1f, 1.0f))); -} - -// Tests that FloatEq() describes itself properly. -TEST(FloatEq2Test, CanDescribeSelf) { - Matcher&> m = FloatEq(); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that NanSensitiveFloatEq() matches a 2-tuple where -// NanSensitiveFloatEq(first field) matches the second field. 
-TEST(NanSensitiveFloatEqTest, MatchesEqualArgumentsWithNaN) { - typedef ::std::tuple Tpl; - Matcher m = NanSensitiveFloatEq(); - EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), - std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(1.1f, 1.0f))); - EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); -} - -// Tests that NanSensitiveFloatEq() describes itself properly. -TEST(NanSensitiveFloatEqTest, CanDescribeSelfWithNaNs) { - Matcher&> m = NanSensitiveFloatEq(); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that DoubleEq() matches a 2-tuple where -// DoubleEq(first field) matches the second field. -TEST(DoubleEq2Test, MatchesEqualArguments) { - typedef ::std::tuple Tpl; - Matcher m = DoubleEq(); - EXPECT_TRUE(m.Matches(Tpl(1.0, 1.0))); - EXPECT_TRUE(m.Matches(Tpl(0.3, 0.1 + 0.1 + 0.1))); - EXPECT_FALSE(m.Matches(Tpl(1.1, 1.0))); -} - -// Tests that DoubleEq() describes itself properly. -TEST(DoubleEq2Test, CanDescribeSelf) { - Matcher&> m = DoubleEq(); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that NanSensitiveDoubleEq() matches a 2-tuple where -// NanSensitiveDoubleEq(first field) matches the second field. -TEST(NanSensitiveDoubleEqTest, MatchesEqualArgumentsWithNaN) { - typedef ::std::tuple Tpl; - Matcher m = NanSensitiveDoubleEq(); - EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), - std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(1.1f, 1.0f))); - EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); -} - -// Tests that DoubleEq() describes itself properly. 
-TEST(NanSensitiveDoubleEqTest, CanDescribeSelfWithNaNs) { - Matcher&> m = NanSensitiveDoubleEq(); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that FloatEq() matches a 2-tuple where -// FloatNear(first field, max_abs_error) matches the second field. -TEST(FloatNear2Test, MatchesEqualArguments) { - typedef ::std::tuple Tpl; - Matcher m = FloatNear(0.5f); - EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(1.3f, 1.0f))); - EXPECT_FALSE(m.Matches(Tpl(1.8f, 1.0f))); -} - -// Tests that FloatNear() describes itself properly. -TEST(FloatNear2Test, CanDescribeSelf) { - Matcher&> m = FloatNear(0.5f); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that NanSensitiveFloatNear() matches a 2-tuple where -// NanSensitiveFloatNear(first field) matches the second field. -TEST(NanSensitiveFloatNearTest, MatchesNearbyArgumentsWithNaN) { - typedef ::std::tuple Tpl; - Matcher m = NanSensitiveFloatNear(0.5f); - EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(1.1f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), - std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(1.6f, 1.0f))); - EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); -} - -// Tests that NanSensitiveFloatNear() describes itself properly. -TEST(NanSensitiveFloatNearTest, CanDescribeSelfWithNaNs) { - Matcher&> m = NanSensitiveFloatNear(0.5f); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that FloatEq() matches a 2-tuple where -// DoubleNear(first field, max_abs_error) matches the second field. -TEST(DoubleNear2Test, MatchesEqualArguments) { - typedef ::std::tuple Tpl; - Matcher m = DoubleNear(0.5); - EXPECT_TRUE(m.Matches(Tpl(1.0, 1.0))); - EXPECT_TRUE(m.Matches(Tpl(1.3, 1.0))); - EXPECT_FALSE(m.Matches(Tpl(1.8, 1.0))); -} - -// Tests that DoubleNear() describes itself properly. 
-TEST(DoubleNear2Test, CanDescribeSelf) { - Matcher&> m = DoubleNear(0.5); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that NanSensitiveDoubleNear() matches a 2-tuple where -// NanSensitiveDoubleNear(first field) matches the second field. -TEST(NanSensitiveDoubleNearTest, MatchesNearbyArgumentsWithNaN) { - typedef ::std::tuple Tpl; - Matcher m = NanSensitiveDoubleNear(0.5f); - EXPECT_TRUE(m.Matches(Tpl(1.0f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(1.1f, 1.0f))); - EXPECT_TRUE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), - std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(1.6f, 1.0f))); - EXPECT_FALSE(m.Matches(Tpl(1.0f, std::numeric_limits::quiet_NaN()))); - EXPECT_FALSE(m.Matches(Tpl(std::numeric_limits::quiet_NaN(), 1.0f))); -} - -// Tests that NanSensitiveDoubleNear() describes itself properly. -TEST(NanSensitiveDoubleNearTest, CanDescribeSelfWithNaNs) { - Matcher&> m = NanSensitiveDoubleNear(0.5f); - EXPECT_EQ("are an almost-equal pair", Describe(m)); -} - -// Tests that Not(m) matches any value that doesn't match m. -TEST(NotTest, NegatesMatcher) { - Matcher m; - m = Not(Eq(2)); - EXPECT_TRUE(m.Matches(3)); - EXPECT_FALSE(m.Matches(2)); -} - -// Tests that Not(m) describes itself properly. -TEST(NotTest, CanDescribeSelf) { - Matcher m = Not(Eq(5)); - EXPECT_EQ("isn't equal to 5", Describe(m)); -} - -// Tests that monomorphic matchers are safely cast by the Not matcher. -TEST(NotTest, NotMatcherSafelyCastsMonomorphicMatchers) { - // greater_than_5 is a monomorphic matcher. - Matcher greater_than_5 = Gt(5); - - Matcher m = Not(greater_than_5); - Matcher m2 = Not(greater_than_5); - Matcher m3 = Not(m); -} - -// Helper to allow easy testing of AllOf matchers with num parameters. 
-void AllOfMatches(int num, const Matcher& m) { - SCOPED_TRACE(Describe(m)); - EXPECT_TRUE(m.Matches(0)); - for (int i = 1; i <= num; ++i) { - EXPECT_FALSE(m.Matches(i)); - } - EXPECT_TRUE(m.Matches(num + 1)); -} - -// Tests that AllOf(m1, ..., mn) matches any value that matches all of -// the given matchers. -TEST(AllOfTest, MatchesWhenAllMatch) { - Matcher m; - m = AllOf(Le(2), Ge(1)); - EXPECT_TRUE(m.Matches(1)); - EXPECT_TRUE(m.Matches(2)); - EXPECT_FALSE(m.Matches(0)); - EXPECT_FALSE(m.Matches(3)); - - m = AllOf(Gt(0), Ne(1), Ne(2)); - EXPECT_TRUE(m.Matches(3)); - EXPECT_FALSE(m.Matches(2)); - EXPECT_FALSE(m.Matches(1)); - EXPECT_FALSE(m.Matches(0)); - - m = AllOf(Gt(0), Ne(1), Ne(2), Ne(3)); - EXPECT_TRUE(m.Matches(4)); - EXPECT_FALSE(m.Matches(3)); - EXPECT_FALSE(m.Matches(2)); - EXPECT_FALSE(m.Matches(1)); - EXPECT_FALSE(m.Matches(0)); - - m = AllOf(Ge(0), Lt(10), Ne(3), Ne(5), Ne(7)); - EXPECT_TRUE(m.Matches(0)); - EXPECT_TRUE(m.Matches(1)); - EXPECT_FALSE(m.Matches(3)); - - // The following tests for varying number of sub-matchers. Due to the way - // the sub-matchers are handled it is enough to test every sub-matcher once - // with sub-matchers using the same matcher type. Varying matcher types are - // checked for above. 
- AllOfMatches(2, AllOf(Ne(1), Ne(2))); - AllOfMatches(3, AllOf(Ne(1), Ne(2), Ne(3))); - AllOfMatches(4, AllOf(Ne(1), Ne(2), Ne(3), Ne(4))); - AllOfMatches(5, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5))); - AllOfMatches(6, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6))); - AllOfMatches(7, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7))); - AllOfMatches(8, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), - Ne(8))); - AllOfMatches(9, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), - Ne(8), Ne(9))); - AllOfMatches(10, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), - Ne(9), Ne(10))); - AllOfMatches( - 50, AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), Ne(9), - Ne(10), Ne(11), Ne(12), Ne(13), Ne(14), Ne(15), Ne(16), Ne(17), - Ne(18), Ne(19), Ne(20), Ne(21), Ne(22), Ne(23), Ne(24), Ne(25), - Ne(26), Ne(27), Ne(28), Ne(29), Ne(30), Ne(31), Ne(32), Ne(33), - Ne(34), Ne(35), Ne(36), Ne(37), Ne(38), Ne(39), Ne(40), Ne(41), - Ne(42), Ne(43), Ne(44), Ne(45), Ne(46), Ne(47), Ne(48), Ne(49), - Ne(50))); -} - - -// Tests that AllOf(m1, ..., mn) describes itself properly. -TEST(AllOfTest, CanDescribeSelf) { - Matcher m; - m = AllOf(Le(2), Ge(1)); - EXPECT_EQ("(is <= 2) and (is >= 1)", Describe(m)); - - m = AllOf(Gt(0), Ne(1), Ne(2)); - std::string expected_descr1 = - "(is > 0) and (isn't equal to 1) and (isn't equal to 2)"; - EXPECT_EQ(expected_descr1, Describe(m)); - - m = AllOf(Gt(0), Ne(1), Ne(2), Ne(3)); - std::string expected_descr2 = - "(is > 0) and (isn't equal to 1) and (isn't equal to 2) and (isn't equal " - "to 3)"; - EXPECT_EQ(expected_descr2, Describe(m)); - - m = AllOf(Ge(0), Lt(10), Ne(3), Ne(5), Ne(7)); - std::string expected_descr3 = - "(is >= 0) and (is < 10) and (isn't equal to 3) and (isn't equal to 5) " - "and (isn't equal to 7)"; - EXPECT_EQ(expected_descr3, Describe(m)); -} - -// Tests that AllOf(m1, ..., mn) describes its negation properly. 
-TEST(AllOfTest, CanDescribeNegation) { - Matcher m; - m = AllOf(Le(2), Ge(1)); - std::string expected_descr4 = "(isn't <= 2) or (isn't >= 1)"; - EXPECT_EQ(expected_descr4, DescribeNegation(m)); - - m = AllOf(Gt(0), Ne(1), Ne(2)); - std::string expected_descr5 = - "(isn't > 0) or (is equal to 1) or (is equal to 2)"; - EXPECT_EQ(expected_descr5, DescribeNegation(m)); - - m = AllOf(Gt(0), Ne(1), Ne(2), Ne(3)); - std::string expected_descr6 = - "(isn't > 0) or (is equal to 1) or (is equal to 2) or (is equal to 3)"; - EXPECT_EQ(expected_descr6, DescribeNegation(m)); - - m = AllOf(Ge(0), Lt(10), Ne(3), Ne(5), Ne(7)); - std::string expected_desr7 = - "(isn't >= 0) or (isn't < 10) or (is equal to 3) or (is equal to 5) or " - "(is equal to 7)"; - EXPECT_EQ(expected_desr7, DescribeNegation(m)); - - m = AllOf(Ne(1), Ne(2), Ne(3), Ne(4), Ne(5), Ne(6), Ne(7), Ne(8), Ne(9), - Ne(10), Ne(11)); - AllOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); - EXPECT_THAT(Describe(m), EndsWith("and (isn't equal to 11)")); - AllOfMatches(11, m); -} - -// Tests that monomorphic matchers are safely cast by the AllOf matcher. -TEST(AllOfTest, AllOfMatcherSafelyCastsMonomorphicMatchers) { - // greater_than_5 and less_than_10 are monomorphic matchers. - Matcher greater_than_5 = Gt(5); - Matcher less_than_10 = Lt(10); - - Matcher m = AllOf(greater_than_5, less_than_10); - Matcher m2 = AllOf(greater_than_5, less_than_10); - Matcher m3 = AllOf(greater_than_5, m2); - - // Tests that BothOf works when composing itself. - Matcher m4 = AllOf(greater_than_5, less_than_10, less_than_10); - Matcher m5 = AllOf(greater_than_5, less_than_10, less_than_10); -} - -TEST(AllOfTest, ExplainsResult) { - Matcher m; - - // Successful match. Both matchers need to explain. The second - // matcher doesn't give an explanation, so only the first matcher's - // explanation is printed. - m = AllOf(GreaterThan(10), Lt(30)); - EXPECT_EQ("which is 15 more than 10", Explain(m, 25)); - - // Successful match. Both matchers need to explain. 
- m = AllOf(GreaterThan(10), GreaterThan(20)); - EXPECT_EQ("which is 20 more than 10, and which is 10 more than 20", - Explain(m, 30)); - - // Successful match. All matchers need to explain. The second - // matcher doesn't given an explanation. - m = AllOf(GreaterThan(10), Lt(30), GreaterThan(20)); - EXPECT_EQ("which is 15 more than 10, and which is 5 more than 20", - Explain(m, 25)); - - // Successful match. All matchers need to explain. - m = AllOf(GreaterThan(10), GreaterThan(20), GreaterThan(30)); - EXPECT_EQ("which is 30 more than 10, and which is 20 more than 20, " - "and which is 10 more than 30", - Explain(m, 40)); - - // Failed match. The first matcher, which failed, needs to - // explain. - m = AllOf(GreaterThan(10), GreaterThan(20)); - EXPECT_EQ("which is 5 less than 10", Explain(m, 5)); - - // Failed match. The second matcher, which failed, needs to - // explain. Since it doesn't given an explanation, nothing is - // printed. - m = AllOf(GreaterThan(10), Lt(30)); - EXPECT_EQ("", Explain(m, 40)); - - // Failed match. The second matcher, which failed, needs to - // explain. - m = AllOf(GreaterThan(10), GreaterThan(20)); - EXPECT_EQ("which is 5 less than 20", Explain(m, 15)); -} - -// Helper to allow easy testing of AnyOf matchers with num parameters. -static void AnyOfMatches(int num, const Matcher& m) { - SCOPED_TRACE(Describe(m)); - EXPECT_FALSE(m.Matches(0)); - for (int i = 1; i <= num; ++i) { - EXPECT_TRUE(m.Matches(i)); - } - EXPECT_FALSE(m.Matches(num + 1)); -} - -static void AnyOfStringMatches(int num, const Matcher& m) { - SCOPED_TRACE(Describe(m)); - EXPECT_FALSE(m.Matches(std::to_string(0))); - - for (int i = 1; i <= num; ++i) { - EXPECT_TRUE(m.Matches(std::to_string(i))); - } - EXPECT_FALSE(m.Matches(std::to_string(num + 1))); -} - -// Tests that AnyOf(m1, ..., mn) matches any value that matches at -// least one of the given matchers. 
-TEST(AnyOfTest, MatchesWhenAnyMatches) { - Matcher m; - m = AnyOf(Le(1), Ge(3)); - EXPECT_TRUE(m.Matches(1)); - EXPECT_TRUE(m.Matches(4)); - EXPECT_FALSE(m.Matches(2)); - - m = AnyOf(Lt(0), Eq(1), Eq(2)); - EXPECT_TRUE(m.Matches(-1)); - EXPECT_TRUE(m.Matches(1)); - EXPECT_TRUE(m.Matches(2)); - EXPECT_FALSE(m.Matches(0)); - - m = AnyOf(Lt(0), Eq(1), Eq(2), Eq(3)); - EXPECT_TRUE(m.Matches(-1)); - EXPECT_TRUE(m.Matches(1)); - EXPECT_TRUE(m.Matches(2)); - EXPECT_TRUE(m.Matches(3)); - EXPECT_FALSE(m.Matches(0)); - - m = AnyOf(Le(0), Gt(10), 3, 5, 7); - EXPECT_TRUE(m.Matches(0)); - EXPECT_TRUE(m.Matches(11)); - EXPECT_TRUE(m.Matches(3)); - EXPECT_FALSE(m.Matches(2)); - - // The following tests for varying number of sub-matchers. Due to the way - // the sub-matchers are handled it is enough to test every sub-matcher once - // with sub-matchers using the same matcher type. Varying matcher types are - // checked for above. - AnyOfMatches(2, AnyOf(1, 2)); - AnyOfMatches(3, AnyOf(1, 2, 3)); - AnyOfMatches(4, AnyOf(1, 2, 3, 4)); - AnyOfMatches(5, AnyOf(1, 2, 3, 4, 5)); - AnyOfMatches(6, AnyOf(1, 2, 3, 4, 5, 6)); - AnyOfMatches(7, AnyOf(1, 2, 3, 4, 5, 6, 7)); - AnyOfMatches(8, AnyOf(1, 2, 3, 4, 5, 6, 7, 8)); - AnyOfMatches(9, AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9)); - AnyOfMatches(10, AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); -} - -// Tests the variadic version of the AnyOfMatcher. -TEST(AnyOfTest, VariadicMatchesWhenAnyMatches) { - // Also make sure AnyOf is defined in the right namespace and does not depend - // on ADL. 
- Matcher m = ::testing::AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); - - EXPECT_THAT(Describe(m), EndsWith("or (is equal to 11)")); - AnyOfMatches(11, m); - AnyOfMatches(50, AnyOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)); - AnyOfStringMatches( - 50, AnyOf("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", - "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", - "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", - "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", - "43", "44", "45", "46", "47", "48", "49", "50")); -} - -TEST(ConditionalTest, MatchesFirstIfCondition) { - Matcher eq_red = Eq("red"); - Matcher ne_red = Ne("red"); - Matcher m = Conditional(true, eq_red, ne_red); - EXPECT_TRUE(m.Matches("red")); - EXPECT_FALSE(m.Matches("green")); - - StringMatchResultListener listener; - StringMatchResultListener expected; - EXPECT_FALSE(m.MatchAndExplain("green", &listener)); - EXPECT_FALSE(eq_red.MatchAndExplain("green", &expected)); - EXPECT_THAT(listener.str(), Eq(expected.str())); -} - -TEST(ConditionalTest, MatchesSecondIfCondition) { - Matcher eq_red = Eq("red"); - Matcher ne_red = Ne("red"); - Matcher m = Conditional(false, eq_red, ne_red); - EXPECT_FALSE(m.Matches("red")); - EXPECT_TRUE(m.Matches("green")); - - StringMatchResultListener listener; - StringMatchResultListener expected; - EXPECT_FALSE(m.MatchAndExplain("red", &listener)); - EXPECT_FALSE(ne_red.MatchAndExplain("red", &expected)); - EXPECT_THAT(listener.str(), Eq(expected.str())); -} - -// Tests the variadic version of the ElementsAreMatcher -TEST(ElementsAreTest, HugeMatcher) { - vector test_vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; - - EXPECT_THAT(test_vector, - ElementsAre(Eq(1), Eq(2), Lt(13), Eq(4), Eq(5), Eq(6), Eq(7), - Eq(8), Eq(9), Eq(10), Gt(1), Eq(12))); -} - -// Tests the variadic version of 
the UnorderedElementsAreMatcher -TEST(ElementsAreTest, HugeMatcherStr) { - vector test_vector{ - "literal_string", "", "", "", "", "", "", "", "", "", "", ""}; - - EXPECT_THAT(test_vector, UnorderedElementsAre("literal_string", _, _, _, _, _, - _, _, _, _, _, _)); -} - -// Tests the variadic version of the UnorderedElementsAreMatcher -TEST(ElementsAreTest, HugeMatcherUnordered) { - vector test_vector{2, 1, 8, 5, 4, 6, 7, 3, 9, 12, 11, 10}; - - EXPECT_THAT(test_vector, UnorderedElementsAre( - Eq(2), Eq(1), Gt(7), Eq(5), Eq(4), Eq(6), Eq(7), - Eq(3), Eq(9), Eq(12), Eq(11), Ne(122))); -} - - -// Tests that AnyOf(m1, ..., mn) describes itself properly. -TEST(AnyOfTest, CanDescribeSelf) { - Matcher m; - m = AnyOf(Le(1), Ge(3)); - - EXPECT_EQ("(is <= 1) or (is >= 3)", - Describe(m)); - - m = AnyOf(Lt(0), Eq(1), Eq(2)); - EXPECT_EQ("(is < 0) or (is equal to 1) or (is equal to 2)", Describe(m)); - - m = AnyOf(Lt(0), Eq(1), Eq(2), Eq(3)); - EXPECT_EQ("(is < 0) or (is equal to 1) or (is equal to 2) or (is equal to 3)", - Describe(m)); - - m = AnyOf(Le(0), Gt(10), 3, 5, 7); - EXPECT_EQ( - "(is <= 0) or (is > 10) or (is equal to 3) or (is equal to 5) or (is " - "equal to 7)", - Describe(m)); -} - -// Tests that AnyOf(m1, ..., mn) describes its negation properly. -TEST(AnyOfTest, CanDescribeNegation) { - Matcher m; - m = AnyOf(Le(1), Ge(3)); - EXPECT_EQ("(isn't <= 1) and (isn't >= 3)", - DescribeNegation(m)); - - m = AnyOf(Lt(0), Eq(1), Eq(2)); - EXPECT_EQ("(isn't < 0) and (isn't equal to 1) and (isn't equal to 2)", - DescribeNegation(m)); - - m = AnyOf(Lt(0), Eq(1), Eq(2), Eq(3)); - EXPECT_EQ( - "(isn't < 0) and (isn't equal to 1) and (isn't equal to 2) and (isn't " - "equal to 3)", - DescribeNegation(m)); - - m = AnyOf(Le(0), Gt(10), 3, 5, 7); - EXPECT_EQ( - "(isn't <= 0) and (isn't > 10) and (isn't equal to 3) and (isn't equal " - "to 5) and (isn't equal to 7)", - DescribeNegation(m)); -} - -// Tests that monomorphic matchers are safely cast by the AnyOf matcher. 
-TEST(AnyOfTest, AnyOfMatcherSafelyCastsMonomorphicMatchers) { - // greater_than_5 and less_than_10 are monomorphic matchers. - Matcher greater_than_5 = Gt(5); - Matcher less_than_10 = Lt(10); - - Matcher m = AnyOf(greater_than_5, less_than_10); - Matcher m2 = AnyOf(greater_than_5, less_than_10); - Matcher m3 = AnyOf(greater_than_5, m2); - - // Tests that EitherOf works when composing itself. - Matcher m4 = AnyOf(greater_than_5, less_than_10, less_than_10); - Matcher m5 = AnyOf(greater_than_5, less_than_10, less_than_10); -} - -TEST(AnyOfTest, ExplainsResult) { - Matcher m; - - // Failed match. Both matchers need to explain. The second - // matcher doesn't give an explanation, so only the first matcher's - // explanation is printed. - m = AnyOf(GreaterThan(10), Lt(0)); - EXPECT_EQ("which is 5 less than 10", Explain(m, 5)); - - // Failed match. Both matchers need to explain. - m = AnyOf(GreaterThan(10), GreaterThan(20)); - EXPECT_EQ("which is 5 less than 10, and which is 15 less than 20", - Explain(m, 5)); - - // Failed match. All matchers need to explain. The second - // matcher doesn't given an explanation. - m = AnyOf(GreaterThan(10), Gt(20), GreaterThan(30)); - EXPECT_EQ("which is 5 less than 10, and which is 25 less than 30", - Explain(m, 5)); - - // Failed match. All matchers need to explain. - m = AnyOf(GreaterThan(10), GreaterThan(20), GreaterThan(30)); - EXPECT_EQ("which is 5 less than 10, and which is 15 less than 20, " - "and which is 25 less than 30", - Explain(m, 5)); - - // Successful match. The first matcher, which succeeded, needs to - // explain. - m = AnyOf(GreaterThan(10), GreaterThan(20)); - EXPECT_EQ("which is 5 more than 10", Explain(m, 15)); - - // Successful match. The second matcher, which succeeded, needs to - // explain. Since it doesn't given an explanation, nothing is - // printed. - m = AnyOf(GreaterThan(10), Lt(30)); - EXPECT_EQ("", Explain(m, 0)); - - // Successful match. The second matcher, which succeeded, needs to - // explain. 
- m = AnyOf(GreaterThan(30), GreaterThan(20)); - EXPECT_EQ("which is 5 more than 20", Explain(m, 25)); -} - -// The following predicate function and predicate functor are for -// testing the Truly(predicate) matcher. - -// Returns non-zero if the input is positive. Note that the return -// type of this function is not bool. It's OK as Truly() accepts any -// unary function or functor whose return type can be implicitly -// converted to bool. -int IsPositive(double x) { - return x > 0 ? 1 : 0; -} - -// This functor returns true if the input is greater than the given -// number. -class IsGreaterThan { - public: - explicit IsGreaterThan(int threshold) : threshold_(threshold) {} - - bool operator()(int n) const { return n > threshold_; } - - private: - int threshold_; -}; - -// For testing Truly(). -const int foo = 0; - -// This predicate returns true if and only if the argument references foo and -// has a zero value. -bool ReferencesFooAndIsZero(const int& n) { - return (&n == &foo) && (n == 0); -} - -// Tests that Truly(predicate) matches what satisfies the given -// predicate. -TEST(TrulyTest, MatchesWhatSatisfiesThePredicate) { - Matcher m = Truly(IsPositive); - EXPECT_TRUE(m.Matches(2.0)); - EXPECT_FALSE(m.Matches(-1.5)); -} - -// Tests that Truly(predicate_functor) works too. -TEST(TrulyTest, CanBeUsedWithFunctor) { - Matcher m = Truly(IsGreaterThan(5)); - EXPECT_TRUE(m.Matches(6)); - EXPECT_FALSE(m.Matches(4)); -} - -// A class that can be implicitly converted to bool. -class ConvertibleToBool { - public: - explicit ConvertibleToBool(int number) : number_(number) {} - operator bool() const { return number_ != 0; } - - private: - int number_; -}; - -ConvertibleToBool IsNotZero(int number) { - return ConvertibleToBool(number); -} - -// Tests that the predicate used in Truly() may return a class that's -// implicitly convertible to bool, even when the class has no -// operator!(). 
-TEST(TrulyTest, PredicateCanReturnAClassConvertibleToBool) { - Matcher m = Truly(IsNotZero); - EXPECT_TRUE(m.Matches(1)); - EXPECT_FALSE(m.Matches(0)); -} - -// Tests that Truly(predicate) can describe itself properly. -TEST(TrulyTest, CanDescribeSelf) { - Matcher m = Truly(IsPositive); - EXPECT_EQ("satisfies the given predicate", - Describe(m)); -} - -// Tests that Truly(predicate) works when the matcher takes its -// argument by reference. -TEST(TrulyTest, WorksForByRefArguments) { - Matcher m = Truly(ReferencesFooAndIsZero); - EXPECT_TRUE(m.Matches(foo)); - int n = 0; - EXPECT_FALSE(m.Matches(n)); -} - -// Tests that Truly(predicate) provides a helpful reason when it fails. -TEST(TrulyTest, ExplainsFailures) { - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult(Truly(IsPositive), -1, &listener)); - EXPECT_EQ(listener.str(), "didn't satisfy the given predicate"); -} - -// Tests that Matches(m) is a predicate satisfied by whatever that -// matches matcher m. -TEST(MatchesTest, IsSatisfiedByWhatMatchesTheMatcher) { - EXPECT_TRUE(Matches(Ge(0))(1)); - EXPECT_FALSE(Matches(Eq('a'))('b')); -} - -// Tests that Matches(m) works when the matcher takes its argument by -// reference. -TEST(MatchesTest, WorksOnByRefArguments) { - int m = 0, n = 0; - EXPECT_TRUE(Matches(AllOf(Ref(n), Eq(0)))(n)); - EXPECT_FALSE(Matches(Ref(m))(n)); -} - -// Tests that a Matcher on non-reference type can be used in -// Matches(). -TEST(MatchesTest, WorksWithMatcherOnNonRefType) { - Matcher eq5 = Eq(5); - EXPECT_TRUE(Matches(eq5)(5)); - EXPECT_FALSE(Matches(eq5)(2)); -} - -// Tests Value(value, matcher). Since Value() is a simple wrapper for -// Matches(), which has been tested already, we don't spend a lot of -// effort on testing Value(). 
-TEST(ValueTest, WorksWithPolymorphicMatcher) { - EXPECT_TRUE(Value("hi", StartsWith("h"))); - EXPECT_FALSE(Value(5, Gt(10))); -} - -TEST(ValueTest, WorksWithMonomorphicMatcher) { - const Matcher is_zero = Eq(0); - EXPECT_TRUE(Value(0, is_zero)); - EXPECT_FALSE(Value('a', is_zero)); - - int n = 0; - const Matcher ref_n = Ref(n); - EXPECT_TRUE(Value(n, ref_n)); - EXPECT_FALSE(Value(1, ref_n)); -} - -TEST(ExplainMatchResultTest, WorksWithPolymorphicMatcher) { - StringMatchResultListener listener1; - EXPECT_TRUE(ExplainMatchResult(PolymorphicIsEven(), 42, &listener1)); - EXPECT_EQ("% 2 == 0", listener1.str()); - - StringMatchResultListener listener2; - EXPECT_FALSE(ExplainMatchResult(Ge(42), 1.5, &listener2)); - EXPECT_EQ("", listener2.str()); -} - -TEST(ExplainMatchResultTest, WorksWithMonomorphicMatcher) { - const Matcher is_even = PolymorphicIsEven(); - StringMatchResultListener listener1; - EXPECT_TRUE(ExplainMatchResult(is_even, 42, &listener1)); - EXPECT_EQ("% 2 == 0", listener1.str()); - - const Matcher is_zero = Eq(0); - StringMatchResultListener listener2; - EXPECT_FALSE(ExplainMatchResult(is_zero, 1.5, &listener2)); - EXPECT_EQ("", listener2.str()); -} - -MATCHER(ConstructNoArg, "") { return true; } -MATCHER_P(Construct1Arg, arg1, "") { return true; } -MATCHER_P2(Construct2Args, arg1, arg2, "") { return true; } - -TEST(MatcherConstruct, ExplicitVsImplicit) { - { - // No arg constructor can be constructed with empty brace. - ConstructNoArgMatcher m = {}; - (void)m; - // And with no args - ConstructNoArgMatcher m2; - (void)m2; - } - { - // The one arg constructor has an explicit constructor. - // This is to prevent the implicit conversion. - using M = Construct1ArgMatcherP; - EXPECT_TRUE((std::is_constructible::value)); - EXPECT_FALSE((std::is_convertible::value)); - } - { - // Multiple arg matchers can be constructed with an implicit construction. 
- Construct2ArgsMatcherP2 m = {1, 2.2}; - (void)m; - } -} - -MATCHER_P(Really, inner_matcher, "") { - return ExplainMatchResult(inner_matcher, arg, result_listener); -} - -TEST(ExplainMatchResultTest, WorksInsideMATCHER) { - EXPECT_THAT(0, Really(Eq(0))); -} - -TEST(DescribeMatcherTest, WorksWithValue) { - EXPECT_EQ("is equal to 42", DescribeMatcher(42)); - EXPECT_EQ("isn't equal to 42", DescribeMatcher(42, true)); -} - -TEST(DescribeMatcherTest, WorksWithMonomorphicMatcher) { - const Matcher monomorphic = Le(0); - EXPECT_EQ("is <= 0", DescribeMatcher(monomorphic)); - EXPECT_EQ("isn't <= 0", DescribeMatcher(monomorphic, true)); -} - -TEST(DescribeMatcherTest, WorksWithPolymorphicMatcher) { - EXPECT_EQ("is even", DescribeMatcher(PolymorphicIsEven())); - EXPECT_EQ("is odd", DescribeMatcher(PolymorphicIsEven(), true)); -} - -TEST(AllArgsTest, WorksForTuple) { - EXPECT_THAT(std::make_tuple(1, 2L), AllArgs(Lt())); - EXPECT_THAT(std::make_tuple(2L, 1), Not(AllArgs(Lt()))); -} - -TEST(AllArgsTest, WorksForNonTuple) { - EXPECT_THAT(42, AllArgs(Gt(0))); - EXPECT_THAT('a', Not(AllArgs(Eq('b')))); -} - -class AllArgsHelper { - public: - AllArgsHelper() {} - - MOCK_METHOD2(Helper, int(char x, int y)); - - private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(AllArgsHelper); -}; - -TEST(AllArgsTest, WorksInWithClause) { - AllArgsHelper helper; - ON_CALL(helper, Helper(_, _)) - .With(AllArgs(Lt())) - .WillByDefault(Return(1)); - EXPECT_CALL(helper, Helper(_, _)); - EXPECT_CALL(helper, Helper(_, _)) - .With(AllArgs(Gt())) - .WillOnce(Return(2)); - - EXPECT_EQ(1, helper.Helper('\1', 2)); - EXPECT_EQ(2, helper.Helper('a', 1)); -} - -class OptionalMatchersHelper { - public: - OptionalMatchersHelper() {} - - MOCK_METHOD0(NoArgs, int()); - - MOCK_METHOD1(OneArg, int(int y)); - - MOCK_METHOD2(TwoArgs, int(char x, int y)); - - MOCK_METHOD1(Overloaded, int(char x)); - MOCK_METHOD2(Overloaded, int(char x, int y)); - - private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(OptionalMatchersHelper); -}; - 
-TEST(AllArgsTest, WorksWithoutMatchers) { - OptionalMatchersHelper helper; - - ON_CALL(helper, NoArgs).WillByDefault(Return(10)); - ON_CALL(helper, OneArg).WillByDefault(Return(20)); - ON_CALL(helper, TwoArgs).WillByDefault(Return(30)); - - EXPECT_EQ(10, helper.NoArgs()); - EXPECT_EQ(20, helper.OneArg(1)); - EXPECT_EQ(30, helper.TwoArgs('\1', 2)); - - EXPECT_CALL(helper, NoArgs).Times(1); - EXPECT_CALL(helper, OneArg).WillOnce(Return(100)); - EXPECT_CALL(helper, OneArg(17)).WillOnce(Return(200)); - EXPECT_CALL(helper, TwoArgs).Times(0); - - EXPECT_EQ(10, helper.NoArgs()); - EXPECT_EQ(100, helper.OneArg(1)); - EXPECT_EQ(200, helper.OneArg(17)); -} - -// Tests that ASSERT_THAT() and EXPECT_THAT() work when the value -// matches the matcher. -TEST(MatcherAssertionTest, WorksWhenMatcherIsSatisfied) { - ASSERT_THAT(5, Ge(2)) << "This should succeed."; - ASSERT_THAT("Foo", EndsWith("oo")); - EXPECT_THAT(2, AllOf(Le(7), Ge(0))) << "This should succeed too."; - EXPECT_THAT("Hello", StartsWith("Hell")); -} - -// Tests that ASSERT_THAT() and EXPECT_THAT() work when the value -// doesn't match the matcher. -TEST(MatcherAssertionTest, WorksWhenMatcherIsNotSatisfied) { - // 'n' must be static as it is used in an EXPECT_FATAL_FAILURE(), - // which cannot reference auto variables. - static unsigned short n; // NOLINT - n = 5; - - EXPECT_FATAL_FAILURE(ASSERT_THAT(n, Gt(10)), - "Value of: n\n" - "Expected: is > 10\n" - " Actual: 5" + OfType("unsigned short")); - n = 0; - EXPECT_NONFATAL_FAILURE( - EXPECT_THAT(n, AllOf(Le(7), Ge(5))), - "Value of: n\n" - "Expected: (is <= 7) and (is >= 5)\n" - " Actual: 0" + OfType("unsigned short")); -} - -// Tests that ASSERT_THAT() and EXPECT_THAT() work when the argument -// has a reference type. -TEST(MatcherAssertionTest, WorksForByRefArguments) { - // We use a static variable here as EXPECT_FATAL_FAILURE() cannot - // reference auto variables. 
- static int n; - n = 0; - EXPECT_THAT(n, AllOf(Le(7), Ref(n))); - EXPECT_FATAL_FAILURE(ASSERT_THAT(n, Not(Ref(n))), - "Value of: n\n" - "Expected: does not reference the variable @"); - // Tests the "Actual" part. - EXPECT_FATAL_FAILURE(ASSERT_THAT(n, Not(Ref(n))), - "Actual: 0" + OfType("int") + ", which is located @"); -} - -// Tests that ASSERT_THAT() and EXPECT_THAT() work when the matcher is -// monomorphic. -TEST(MatcherAssertionTest, WorksForMonomorphicMatcher) { - Matcher starts_with_he = StartsWith("he"); - ASSERT_THAT("hello", starts_with_he); - - Matcher ends_with_ok = EndsWith("ok"); - ASSERT_THAT("book", ends_with_ok); - const std::string bad = "bad"; - EXPECT_NONFATAL_FAILURE(EXPECT_THAT(bad, ends_with_ok), - "Value of: bad\n" - "Expected: ends with \"ok\"\n" - " Actual: \"bad\""); - Matcher is_greater_than_5 = Gt(5); - EXPECT_NONFATAL_FAILURE(EXPECT_THAT(5, is_greater_than_5), - "Value of: 5\n" - "Expected: is > 5\n" - " Actual: 5" + OfType("int")); -} - -// Tests floating-point matchers. 
-template -class FloatingPointTest : public testing::Test { - protected: - typedef testing::internal::FloatingPoint Floating; - typedef typename Floating::Bits Bits; - - FloatingPointTest() - : max_ulps_(Floating::kMaxUlps), - zero_bits_(Floating(0).bits()), - one_bits_(Floating(1).bits()), - infinity_bits_(Floating(Floating::Infinity()).bits()), - close_to_positive_zero_( - Floating::ReinterpretBits(zero_bits_ + max_ulps_/2)), - close_to_negative_zero_( - -Floating::ReinterpretBits(zero_bits_ + max_ulps_ - max_ulps_/2)), - further_from_negative_zero_(-Floating::ReinterpretBits( - zero_bits_ + max_ulps_ + 1 - max_ulps_/2)), - close_to_one_(Floating::ReinterpretBits(one_bits_ + max_ulps_)), - further_from_one_(Floating::ReinterpretBits(one_bits_ + max_ulps_ + 1)), - infinity_(Floating::Infinity()), - close_to_infinity_( - Floating::ReinterpretBits(infinity_bits_ - max_ulps_)), - further_from_infinity_( - Floating::ReinterpretBits(infinity_bits_ - max_ulps_ - 1)), - max_(Floating::Max()), - nan1_(Floating::ReinterpretBits(Floating::kExponentBitMask | 1)), - nan2_(Floating::ReinterpretBits(Floating::kExponentBitMask | 200)) { - } - - void TestSize() { - EXPECT_EQ(sizeof(RawType), sizeof(Bits)); - } - - // A battery of tests for FloatingEqMatcher::Matches. - // matcher_maker is a pointer to a function which creates a FloatingEqMatcher. - void TestMatches( - testing::internal::FloatingEqMatcher (*matcher_maker)(RawType)) { - Matcher m1 = matcher_maker(0.0); - EXPECT_TRUE(m1.Matches(-0.0)); - EXPECT_TRUE(m1.Matches(close_to_positive_zero_)); - EXPECT_TRUE(m1.Matches(close_to_negative_zero_)); - EXPECT_FALSE(m1.Matches(1.0)); - - Matcher m2 = matcher_maker(close_to_positive_zero_); - EXPECT_FALSE(m2.Matches(further_from_negative_zero_)); - - Matcher m3 = matcher_maker(1.0); - EXPECT_TRUE(m3.Matches(close_to_one_)); - EXPECT_FALSE(m3.Matches(further_from_one_)); - - // Test commutativity: matcher_maker(0.0).Matches(1.0) was tested above. 
- EXPECT_FALSE(m3.Matches(0.0)); - - Matcher m4 = matcher_maker(-infinity_); - EXPECT_TRUE(m4.Matches(-close_to_infinity_)); - - Matcher m5 = matcher_maker(infinity_); - EXPECT_TRUE(m5.Matches(close_to_infinity_)); - - // This is interesting as the representations of infinity_ and nan1_ - // are only 1 DLP apart. - EXPECT_FALSE(m5.Matches(nan1_)); - - // matcher_maker can produce a Matcher, which is needed in - // some cases. - Matcher m6 = matcher_maker(0.0); - EXPECT_TRUE(m6.Matches(-0.0)); - EXPECT_TRUE(m6.Matches(close_to_positive_zero_)); - EXPECT_FALSE(m6.Matches(1.0)); - - // matcher_maker can produce a Matcher, which is needed in some - // cases. - Matcher m7 = matcher_maker(0.0); - RawType x = 0.0; - EXPECT_TRUE(m7.Matches(x)); - x = 0.01f; - EXPECT_FALSE(m7.Matches(x)); - } - - // Pre-calculated numbers to be used by the tests. - - const Bits max_ulps_; - - const Bits zero_bits_; // The bits that represent 0.0. - const Bits one_bits_; // The bits that represent 1.0. - const Bits infinity_bits_; // The bits that represent +infinity. - - // Some numbers close to 0.0. - const RawType close_to_positive_zero_; - const RawType close_to_negative_zero_; - const RawType further_from_negative_zero_; - - // Some numbers close to 1.0. - const RawType close_to_one_; - const RawType further_from_one_; - - // Some numbers close to +infinity. - const RawType infinity_; - const RawType close_to_infinity_; - const RawType further_from_infinity_; - - // Maximum representable value that's not infinity. - const RawType max_; - - // Some NaNs. - const RawType nan1_; - const RawType nan2_; -}; - -// Tests floating-point matchers with fixed epsilons. -template -class FloatingPointNearTest : public FloatingPointTest { - protected: - typedef FloatingPointTest ParentType; - - // A battery of tests for FloatingEqMatcher::Matches with a fixed epsilon. - // matcher_maker is a pointer to a function which creates a FloatingEqMatcher. 
- void TestNearMatches( - testing::internal::FloatingEqMatcher - (*matcher_maker)(RawType, RawType)) { - Matcher m1 = matcher_maker(0.0, 0.0); - EXPECT_TRUE(m1.Matches(0.0)); - EXPECT_TRUE(m1.Matches(-0.0)); - EXPECT_FALSE(m1.Matches(ParentType::close_to_positive_zero_)); - EXPECT_FALSE(m1.Matches(ParentType::close_to_negative_zero_)); - EXPECT_FALSE(m1.Matches(1.0)); - - Matcher m2 = matcher_maker(0.0, 1.0); - EXPECT_TRUE(m2.Matches(0.0)); - EXPECT_TRUE(m2.Matches(-0.0)); - EXPECT_TRUE(m2.Matches(1.0)); - EXPECT_TRUE(m2.Matches(-1.0)); - EXPECT_FALSE(m2.Matches(ParentType::close_to_one_)); - EXPECT_FALSE(m2.Matches(-ParentType::close_to_one_)); - - // Check that inf matches inf, regardless of the of the specified max - // absolute error. - Matcher m3 = matcher_maker(ParentType::infinity_, 0.0); - EXPECT_TRUE(m3.Matches(ParentType::infinity_)); - EXPECT_FALSE(m3.Matches(ParentType::close_to_infinity_)); - EXPECT_FALSE(m3.Matches(-ParentType::infinity_)); - - Matcher m4 = matcher_maker(-ParentType::infinity_, 0.0); - EXPECT_TRUE(m4.Matches(-ParentType::infinity_)); - EXPECT_FALSE(m4.Matches(-ParentType::close_to_infinity_)); - EXPECT_FALSE(m4.Matches(ParentType::infinity_)); - - // Test various overflow scenarios. - Matcher m5 = matcher_maker(ParentType::max_, ParentType::max_); - EXPECT_TRUE(m5.Matches(ParentType::max_)); - EXPECT_FALSE(m5.Matches(-ParentType::max_)); - - Matcher m6 = matcher_maker(-ParentType::max_, ParentType::max_); - EXPECT_FALSE(m6.Matches(ParentType::max_)); - EXPECT_TRUE(m6.Matches(-ParentType::max_)); - - Matcher m7 = matcher_maker(ParentType::max_, 0); - EXPECT_TRUE(m7.Matches(ParentType::max_)); - EXPECT_FALSE(m7.Matches(-ParentType::max_)); - - Matcher m8 = matcher_maker(-ParentType::max_, 0); - EXPECT_FALSE(m8.Matches(ParentType::max_)); - EXPECT_TRUE(m8.Matches(-ParentType::max_)); - - // The difference between max() and -max() normally overflows to infinity, - // but it should still match if the max_abs_error is also infinity. 
- Matcher m9 = matcher_maker( - ParentType::max_, ParentType::infinity_); - EXPECT_TRUE(m8.Matches(-ParentType::max_)); - - // matcher_maker can produce a Matcher, which is needed in - // some cases. - Matcher m10 = matcher_maker(0.0, 1.0); - EXPECT_TRUE(m10.Matches(-0.0)); - EXPECT_TRUE(m10.Matches(ParentType::close_to_positive_zero_)); - EXPECT_FALSE(m10.Matches(ParentType::close_to_one_)); - - // matcher_maker can produce a Matcher, which is needed in some - // cases. - Matcher m11 = matcher_maker(0.0, 1.0); - RawType x = 0.0; - EXPECT_TRUE(m11.Matches(x)); - x = 1.0f; - EXPECT_TRUE(m11.Matches(x)); - x = -1.0f; - EXPECT_TRUE(m11.Matches(x)); - x = 1.1f; - EXPECT_FALSE(m11.Matches(x)); - x = -1.1f; - EXPECT_FALSE(m11.Matches(x)); - } -}; - -// Instantiate FloatingPointTest for testing floats. -typedef FloatingPointTest FloatTest; - -TEST_F(FloatTest, FloatEqApproximatelyMatchesFloats) { - TestMatches(&FloatEq); -} - -TEST_F(FloatTest, NanSensitiveFloatEqApproximatelyMatchesFloats) { - TestMatches(&NanSensitiveFloatEq); -} - -TEST_F(FloatTest, FloatEqCannotMatchNaN) { - // FloatEq never matches NaN. - Matcher m = FloatEq(nan1_); - EXPECT_FALSE(m.Matches(nan1_)); - EXPECT_FALSE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST_F(FloatTest, NanSensitiveFloatEqCanMatchNaN) { - // NanSensitiveFloatEq will match NaN. 
- Matcher m = NanSensitiveFloatEq(nan1_); - EXPECT_TRUE(m.Matches(nan1_)); - EXPECT_TRUE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST_F(FloatTest, FloatEqCanDescribeSelf) { - Matcher m1 = FloatEq(2.0f); - EXPECT_EQ("is approximately 2", Describe(m1)); - EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); - - Matcher m2 = FloatEq(0.5f); - EXPECT_EQ("is approximately 0.5", Describe(m2)); - EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); - - Matcher m3 = FloatEq(nan1_); - EXPECT_EQ("never matches", Describe(m3)); - EXPECT_EQ("is anything", DescribeNegation(m3)); -} - -TEST_F(FloatTest, NanSensitiveFloatEqCanDescribeSelf) { - Matcher m1 = NanSensitiveFloatEq(2.0f); - EXPECT_EQ("is approximately 2", Describe(m1)); - EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); - - Matcher m2 = NanSensitiveFloatEq(0.5f); - EXPECT_EQ("is approximately 0.5", Describe(m2)); - EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); - - Matcher m3 = NanSensitiveFloatEq(nan1_); - EXPECT_EQ("is NaN", Describe(m3)); - EXPECT_EQ("isn't NaN", DescribeNegation(m3)); -} - -// Instantiate FloatingPointTest for testing floats with a user-specified -// max absolute error. 
-typedef FloatingPointNearTest FloatNearTest; - -TEST_F(FloatNearTest, FloatNearMatches) { - TestNearMatches(&FloatNear); -} - -TEST_F(FloatNearTest, NanSensitiveFloatNearApproximatelyMatchesFloats) { - TestNearMatches(&NanSensitiveFloatNear); -} - -TEST_F(FloatNearTest, FloatNearCanDescribeSelf) { - Matcher m1 = FloatNear(2.0f, 0.5f); - EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); - EXPECT_EQ( - "isn't approximately 2 (absolute error > 0.5)", DescribeNegation(m1)); - - Matcher m2 = FloatNear(0.5f, 0.5f); - EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); - EXPECT_EQ( - "isn't approximately 0.5 (absolute error > 0.5)", DescribeNegation(m2)); - - Matcher m3 = FloatNear(nan1_, 0.0); - EXPECT_EQ("never matches", Describe(m3)); - EXPECT_EQ("is anything", DescribeNegation(m3)); -} - -TEST_F(FloatNearTest, NanSensitiveFloatNearCanDescribeSelf) { - Matcher m1 = NanSensitiveFloatNear(2.0f, 0.5f); - EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); - EXPECT_EQ( - "isn't approximately 2 (absolute error > 0.5)", DescribeNegation(m1)); - - Matcher m2 = NanSensitiveFloatNear(0.5f, 0.5f); - EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); - EXPECT_EQ( - "isn't approximately 0.5 (absolute error > 0.5)", DescribeNegation(m2)); - - Matcher m3 = NanSensitiveFloatNear(nan1_, 0.1f); - EXPECT_EQ("is NaN", Describe(m3)); - EXPECT_EQ("isn't NaN", DescribeNegation(m3)); -} - -TEST_F(FloatNearTest, FloatNearCannotMatchNaN) { - // FloatNear never matches NaN. - Matcher m = FloatNear(ParentType::nan1_, 0.1f); - EXPECT_FALSE(m.Matches(nan1_)); - EXPECT_FALSE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST_F(FloatNearTest, NanSensitiveFloatNearCanMatchNaN) { - // NanSensitiveFloatNear will match NaN. 
- Matcher m = NanSensitiveFloatNear(nan1_, 0.1f); - EXPECT_TRUE(m.Matches(nan1_)); - EXPECT_TRUE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -// Instantiate FloatingPointTest for testing doubles. -typedef FloatingPointTest DoubleTest; - -TEST_F(DoubleTest, DoubleEqApproximatelyMatchesDoubles) { - TestMatches(&DoubleEq); -} - -TEST_F(DoubleTest, NanSensitiveDoubleEqApproximatelyMatchesDoubles) { - TestMatches(&NanSensitiveDoubleEq); -} - -TEST_F(DoubleTest, DoubleEqCannotMatchNaN) { - // DoubleEq never matches NaN. - Matcher m = DoubleEq(nan1_); - EXPECT_FALSE(m.Matches(nan1_)); - EXPECT_FALSE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST_F(DoubleTest, NanSensitiveDoubleEqCanMatchNaN) { - // NanSensitiveDoubleEq will match NaN. - Matcher m = NanSensitiveDoubleEq(nan1_); - EXPECT_TRUE(m.Matches(nan1_)); - EXPECT_TRUE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST_F(DoubleTest, DoubleEqCanDescribeSelf) { - Matcher m1 = DoubleEq(2.0); - EXPECT_EQ("is approximately 2", Describe(m1)); - EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); - - Matcher m2 = DoubleEq(0.5); - EXPECT_EQ("is approximately 0.5", Describe(m2)); - EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); - - Matcher m3 = DoubleEq(nan1_); - EXPECT_EQ("never matches", Describe(m3)); - EXPECT_EQ("is anything", DescribeNegation(m3)); -} - -TEST_F(DoubleTest, NanSensitiveDoubleEqCanDescribeSelf) { - Matcher m1 = NanSensitiveDoubleEq(2.0); - EXPECT_EQ("is approximately 2", Describe(m1)); - EXPECT_EQ("isn't approximately 2", DescribeNegation(m1)); - - Matcher m2 = NanSensitiveDoubleEq(0.5); - EXPECT_EQ("is approximately 0.5", Describe(m2)); - EXPECT_EQ("isn't approximately 0.5", DescribeNegation(m2)); - - Matcher m3 = NanSensitiveDoubleEq(nan1_); - EXPECT_EQ("is NaN", Describe(m3)); - EXPECT_EQ("isn't NaN", DescribeNegation(m3)); -} - -// Instantiate FloatingPointTest for testing floats with a user-specified -// max absolute error. 
-typedef FloatingPointNearTest DoubleNearTest; - -TEST_F(DoubleNearTest, DoubleNearMatches) { - TestNearMatches(&DoubleNear); -} - -TEST_F(DoubleNearTest, NanSensitiveDoubleNearApproximatelyMatchesDoubles) { - TestNearMatches(&NanSensitiveDoubleNear); -} - -TEST_F(DoubleNearTest, DoubleNearCanDescribeSelf) { - Matcher m1 = DoubleNear(2.0, 0.5); - EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); - EXPECT_EQ( - "isn't approximately 2 (absolute error > 0.5)", DescribeNegation(m1)); - - Matcher m2 = DoubleNear(0.5, 0.5); - EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); - EXPECT_EQ( - "isn't approximately 0.5 (absolute error > 0.5)", DescribeNegation(m2)); - - Matcher m3 = DoubleNear(nan1_, 0.0); - EXPECT_EQ("never matches", Describe(m3)); - EXPECT_EQ("is anything", DescribeNegation(m3)); -} - -TEST_F(DoubleNearTest, ExplainsResultWhenMatchFails) { - EXPECT_EQ("", Explain(DoubleNear(2.0, 0.1), 2.05)); - EXPECT_EQ("which is 0.2 from 2", Explain(DoubleNear(2.0, 0.1), 2.2)); - EXPECT_EQ("which is -0.3 from 2", Explain(DoubleNear(2.0, 0.1), 1.7)); - - const std::string explanation = - Explain(DoubleNear(2.1, 1e-10), 2.1 + 1.2e-10); - // Different C++ implementations may print floating-point numbers - // slightly differently. 
- EXPECT_TRUE(explanation == "which is 1.2e-10 from 2.1" || // GCC - explanation == "which is 1.2e-010 from 2.1") // MSVC - << " where explanation is \"" << explanation << "\"."; -} - -TEST_F(DoubleNearTest, NanSensitiveDoubleNearCanDescribeSelf) { - Matcher m1 = NanSensitiveDoubleNear(2.0, 0.5); - EXPECT_EQ("is approximately 2 (absolute error <= 0.5)", Describe(m1)); - EXPECT_EQ( - "isn't approximately 2 (absolute error > 0.5)", DescribeNegation(m1)); - - Matcher m2 = NanSensitiveDoubleNear(0.5, 0.5); - EXPECT_EQ("is approximately 0.5 (absolute error <= 0.5)", Describe(m2)); - EXPECT_EQ( - "isn't approximately 0.5 (absolute error > 0.5)", DescribeNegation(m2)); - - Matcher m3 = NanSensitiveDoubleNear(nan1_, 0.1); - EXPECT_EQ("is NaN", Describe(m3)); - EXPECT_EQ("isn't NaN", DescribeNegation(m3)); -} - -TEST_F(DoubleNearTest, DoubleNearCannotMatchNaN) { - // DoubleNear never matches NaN. - Matcher m = DoubleNear(ParentType::nan1_, 0.1); - EXPECT_FALSE(m.Matches(nan1_)); - EXPECT_FALSE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST_F(DoubleNearTest, NanSensitiveDoubleNearCanMatchNaN) { - // NanSensitiveDoubleNear will match NaN. 
- Matcher m = NanSensitiveDoubleNear(nan1_, 0.1); - EXPECT_TRUE(m.Matches(nan1_)); - EXPECT_TRUE(m.Matches(nan2_)); - EXPECT_FALSE(m.Matches(1.0)); -} - -TEST(PointeeTest, RawPointer) { - const Matcher m = Pointee(Ge(0)); - - int n = 1; - EXPECT_TRUE(m.Matches(&n)); - n = -1; - EXPECT_FALSE(m.Matches(&n)); - EXPECT_FALSE(m.Matches(nullptr)); -} - -TEST(PointeeTest, RawPointerToConst) { - const Matcher m = Pointee(Ge(0)); - - double x = 1; - EXPECT_TRUE(m.Matches(&x)); - x = -1; - EXPECT_FALSE(m.Matches(&x)); - EXPECT_FALSE(m.Matches(nullptr)); -} - -TEST(PointeeTest, ReferenceToConstRawPointer) { - const Matcher m = Pointee(Ge(0)); - - int n = 1; - EXPECT_TRUE(m.Matches(&n)); - n = -1; - EXPECT_FALSE(m.Matches(&n)); - EXPECT_FALSE(m.Matches(nullptr)); -} - -TEST(PointeeTest, ReferenceToNonConstRawPointer) { - const Matcher m = Pointee(Ge(0)); - - double x = 1.0; - double* p = &x; - EXPECT_TRUE(m.Matches(p)); - x = -1; - EXPECT_FALSE(m.Matches(p)); - p = nullptr; - EXPECT_FALSE(m.Matches(p)); -} - -TEST(PointeeTest, SmartPointer) { - const Matcher> m = Pointee(Ge(0)); - - std::unique_ptr n(new int(1)); - EXPECT_TRUE(m.Matches(n)); -} - -TEST(PointeeTest, SmartPointerToConst) { - const Matcher> m = Pointee(Ge(0)); - - // There's no implicit conversion from unique_ptr to const - // unique_ptr, so we must pass a unique_ptr into the - // matcher. 
- std::unique_ptr n(new int(1)); - EXPECT_TRUE(m.Matches(n)); -} - -TEST(PointerTest, RawPointer) { - int n = 1; - const Matcher m = Pointer(Eq(&n)); - - EXPECT_TRUE(m.Matches(&n)); - - int* p = nullptr; - EXPECT_FALSE(m.Matches(p)); - EXPECT_FALSE(m.Matches(nullptr)); -} - -TEST(PointerTest, RawPointerToConst) { - int n = 1; - const Matcher m = Pointer(Eq(&n)); - - EXPECT_TRUE(m.Matches(&n)); - - int* p = nullptr; - EXPECT_FALSE(m.Matches(p)); - EXPECT_FALSE(m.Matches(nullptr)); -} - -TEST(PointerTest, SmartPointer) { - std::unique_ptr n(new int(10)); - int* raw_n = n.get(); - const Matcher> m = Pointer(Eq(raw_n)); - - EXPECT_TRUE(m.Matches(n)); -} - -TEST(PointerTest, SmartPointerToConst) { - std::unique_ptr n(new int(10)); - const int* raw_n = n.get(); - const Matcher> m = Pointer(Eq(raw_n)); - - // There's no implicit conversion from unique_ptr to const - // unique_ptr, so we must pass a unique_ptr into the - // matcher. - std::unique_ptr p(new int(10)); - EXPECT_FALSE(m.Matches(p)); -} - -TEST(AddressTest, NonConst) { - int n = 1; - const Matcher m = Address(Eq(&n)); - - EXPECT_TRUE(m.Matches(n)); - - int other = 5; - - EXPECT_FALSE(m.Matches(other)); - - int& n_ref = n; - - EXPECT_TRUE(m.Matches(n_ref)); -} - -TEST(AddressTest, Const) { - const int n = 1; - const Matcher m = Address(Eq(&n)); - - EXPECT_TRUE(m.Matches(n)); - - int other = 5; - - EXPECT_FALSE(m.Matches(other)); -} - -TEST(AddressTest, MatcherDoesntCopy) { - std::unique_ptr n(new int(1)); - const Matcher> m = Address(Eq(&n)); - - EXPECT_TRUE(m.Matches(n)); -} - -TEST(AddressTest, Describe) { - Matcher matcher = Address(_); - EXPECT_EQ("has address that is anything", Describe(matcher)); - EXPECT_EQ("does not have address that is anything", - DescribeNegation(matcher)); -} - -MATCHER_P(FieldIIs, inner_matcher, "") { - return ExplainMatchResult(inner_matcher, arg.i, result_listener); -} - -#if GTEST_HAS_RTTI -TEST(WhenDynamicCastToTest, SameType) { - Derived derived; - derived.i = 4; - - // Right 
type. A pointer is passed down. - Base* as_base_ptr = &derived; - EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(Not(IsNull()))); - EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(Pointee(FieldIIs(4)))); - EXPECT_THAT(as_base_ptr, - Not(WhenDynamicCastTo(Pointee(FieldIIs(5))))); -} - -TEST(WhenDynamicCastToTest, WrongTypes) { - Base base; - Derived derived; - OtherDerived other_derived; - - // Wrong types. NULL is passed. - EXPECT_THAT(&base, Not(WhenDynamicCastTo(Pointee(_)))); - EXPECT_THAT(&base, WhenDynamicCastTo(IsNull())); - Base* as_base_ptr = &derived; - EXPECT_THAT(as_base_ptr, Not(WhenDynamicCastTo(Pointee(_)))); - EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(IsNull())); - as_base_ptr = &other_derived; - EXPECT_THAT(as_base_ptr, Not(WhenDynamicCastTo(Pointee(_)))); - EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(IsNull())); -} - -TEST(WhenDynamicCastToTest, AlreadyNull) { - // Already NULL. - Base* as_base_ptr = nullptr; - EXPECT_THAT(as_base_ptr, WhenDynamicCastTo(IsNull())); -} - -struct AmbiguousCastTypes { - class VirtualDerived : public virtual Base {}; - class DerivedSub1 : public VirtualDerived {}; - class DerivedSub2 : public VirtualDerived {}; - class ManyDerivedInHierarchy : public DerivedSub1, public DerivedSub2 {}; -}; - -TEST(WhenDynamicCastToTest, AmbiguousCast) { - AmbiguousCastTypes::DerivedSub1 sub1; - AmbiguousCastTypes::ManyDerivedInHierarchy many_derived; - // Multiply derived from Base. dynamic_cast<> returns NULL. 
- Base* as_base_ptr = - static_cast(&many_derived); - EXPECT_THAT(as_base_ptr, - WhenDynamicCastTo(IsNull())); - as_base_ptr = &sub1; - EXPECT_THAT( - as_base_ptr, - WhenDynamicCastTo(Not(IsNull()))); -} - -TEST(WhenDynamicCastToTest, Describe) { - Matcher matcher = WhenDynamicCastTo(Pointee(_)); - const std::string prefix = - "when dynamic_cast to " + internal::GetTypeName() + ", "; - EXPECT_EQ(prefix + "points to a value that is anything", Describe(matcher)); - EXPECT_EQ(prefix + "does not point to a value that is anything", - DescribeNegation(matcher)); -} - -TEST(WhenDynamicCastToTest, Explain) { - Matcher matcher = WhenDynamicCastTo(Pointee(_)); - Base* null = nullptr; - EXPECT_THAT(Explain(matcher, null), HasSubstr("NULL")); - Derived derived; - EXPECT_TRUE(matcher.Matches(&derived)); - EXPECT_THAT(Explain(matcher, &derived), HasSubstr("which points to ")); - - // With references, the matcher itself can fail. Test for that one. - Matcher ref_matcher = WhenDynamicCastTo(_); - EXPECT_THAT(Explain(ref_matcher, derived), - HasSubstr("which cannot be dynamic_cast")); -} - -TEST(WhenDynamicCastToTest, GoodReference) { - Derived derived; - derived.i = 4; - Base& as_base_ref = derived; - EXPECT_THAT(as_base_ref, WhenDynamicCastTo(FieldIIs(4))); - EXPECT_THAT(as_base_ref, WhenDynamicCastTo(Not(FieldIIs(5)))); -} - -TEST(WhenDynamicCastToTest, BadReference) { - Derived derived; - Base& as_base_ref = derived; - EXPECT_THAT(as_base_ref, Not(WhenDynamicCastTo(_))); -} -#endif // GTEST_HAS_RTTI - -// Minimal const-propagating pointer. -template -class ConstPropagatingPtr { - public: - typedef T element_type; - - ConstPropagatingPtr() : val_() {} - explicit ConstPropagatingPtr(T* t) : val_(t) {} - ConstPropagatingPtr(const ConstPropagatingPtr& other) : val_(other.val_) {} - - T* get() { return val_; } - T& operator*() { return *val_; } - // Most smart pointers return non-const T* and T& from the next methods. 
- const T* get() const { return val_; } - const T& operator*() const { return *val_; } - - private: - T* val_; -}; - -TEST(PointeeTest, WorksWithConstPropagatingPointers) { - const Matcher< ConstPropagatingPtr > m = Pointee(Lt(5)); - int three = 3; - const ConstPropagatingPtr co(&three); - ConstPropagatingPtr o(&three); - EXPECT_TRUE(m.Matches(o)); - EXPECT_TRUE(m.Matches(co)); - *o = 6; - EXPECT_FALSE(m.Matches(o)); - EXPECT_FALSE(m.Matches(ConstPropagatingPtr())); -} - -TEST(PointeeTest, NeverMatchesNull) { - const Matcher m = Pointee(_); - EXPECT_FALSE(m.Matches(nullptr)); -} - -// Tests that we can write Pointee(value) instead of Pointee(Eq(value)). -TEST(PointeeTest, MatchesAgainstAValue) { - const Matcher m = Pointee(5); - - int n = 5; - EXPECT_TRUE(m.Matches(&n)); - n = -1; - EXPECT_FALSE(m.Matches(&n)); - EXPECT_FALSE(m.Matches(nullptr)); -} - -TEST(PointeeTest, CanDescribeSelf) { - const Matcher m = Pointee(Gt(3)); - EXPECT_EQ("points to a value that is > 3", Describe(m)); - EXPECT_EQ("does not point to a value that is > 3", - DescribeNegation(m)); -} - -TEST(PointeeTest, CanExplainMatchResult) { - const Matcher m = Pointee(StartsWith("Hi")); - - EXPECT_EQ("", Explain(m, static_cast(nullptr))); - - const Matcher m2 = Pointee(GreaterThan(1)); // NOLINT - long n = 3; // NOLINT - EXPECT_EQ("which points to 3" + OfType("long") + ", which is 2 more than 1", - Explain(m2, &n)); -} - -TEST(PointeeTest, AlwaysExplainsPointee) { - const Matcher m = Pointee(0); - int n = 42; - EXPECT_EQ("which points to 42" + OfType("int"), Explain(m, &n)); -} - -// An uncopyable class. -class Uncopyable { - public: - Uncopyable() : value_(-1) {} - explicit Uncopyable(int a_value) : value_(a_value) {} - - int value() const { return value_; } - void set_value(int i) { value_ = i; } - - private: - int value_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(Uncopyable); -}; - -// Returns true if and only if x.value() is positive. 
-bool ValueIsPositive(const Uncopyable& x) { return x.value() > 0; } - -MATCHER_P(UncopyableIs, inner_matcher, "") { - return ExplainMatchResult(inner_matcher, arg.value(), result_listener); -} - -// A user-defined struct for testing Field(). -struct AStruct { - AStruct() : x(0), y(1.0), z(5), p(nullptr) {} - AStruct(const AStruct& rhs) - : x(rhs.x), y(rhs.y), z(rhs.z.value()), p(rhs.p) {} - - int x; // A non-const field. - const double y; // A const field. - Uncopyable z; // An uncopyable field. - const char* p; // A pointer field. -}; - -// A derived struct for testing Field(). -struct DerivedStruct : public AStruct { - char ch; -}; - -// Tests that Field(&Foo::field, ...) works when field is non-const. -TEST(FieldTest, WorksForNonConstField) { - Matcher m = Field(&AStruct::x, Ge(0)); - Matcher m_with_name = Field("x", &AStruct::x, Ge(0)); - - AStruct a; - EXPECT_TRUE(m.Matches(a)); - EXPECT_TRUE(m_with_name.Matches(a)); - a.x = -1; - EXPECT_FALSE(m.Matches(a)); - EXPECT_FALSE(m_with_name.Matches(a)); -} - -// Tests that Field(&Foo::field, ...) works when field is const. -TEST(FieldTest, WorksForConstField) { - AStruct a; - - Matcher m = Field(&AStruct::y, Ge(0.0)); - Matcher m_with_name = Field("y", &AStruct::y, Ge(0.0)); - EXPECT_TRUE(m.Matches(a)); - EXPECT_TRUE(m_with_name.Matches(a)); - m = Field(&AStruct::y, Le(0.0)); - m_with_name = Field("y", &AStruct::y, Le(0.0)); - EXPECT_FALSE(m.Matches(a)); - EXPECT_FALSE(m_with_name.Matches(a)); -} - -// Tests that Field(&Foo::field, ...) works when field is not copyable. -TEST(FieldTest, WorksForUncopyableField) { - AStruct a; - - Matcher m = Field(&AStruct::z, Truly(ValueIsPositive)); - EXPECT_TRUE(m.Matches(a)); - m = Field(&AStruct::z, Not(Truly(ValueIsPositive))); - EXPECT_FALSE(m.Matches(a)); -} - -// Tests that Field(&Foo::field, ...) works when field is a pointer. -TEST(FieldTest, WorksForPointerField) { - // Matching against NULL. 
- Matcher m = Field(&AStruct::p, static_cast(nullptr)); - AStruct a; - EXPECT_TRUE(m.Matches(a)); - a.p = "hi"; - EXPECT_FALSE(m.Matches(a)); - - // Matching a pointer that is not NULL. - m = Field(&AStruct::p, StartsWith("hi")); - a.p = "hill"; - EXPECT_TRUE(m.Matches(a)); - a.p = "hole"; - EXPECT_FALSE(m.Matches(a)); -} - -// Tests that Field() works when the object is passed by reference. -TEST(FieldTest, WorksForByRefArgument) { - Matcher m = Field(&AStruct::x, Ge(0)); - - AStruct a; - EXPECT_TRUE(m.Matches(a)); - a.x = -1; - EXPECT_FALSE(m.Matches(a)); -} - -// Tests that Field(&Foo::field, ...) works when the argument's type -// is a sub-type of Foo. -TEST(FieldTest, WorksForArgumentOfSubType) { - // Note that the matcher expects DerivedStruct but we say AStruct - // inside Field(). - Matcher m = Field(&AStruct::x, Ge(0)); - - DerivedStruct d; - EXPECT_TRUE(m.Matches(d)); - d.x = -1; - EXPECT_FALSE(m.Matches(d)); -} - -// Tests that Field(&Foo::field, m) works when field's type and m's -// argument type are compatible but not the same. -TEST(FieldTest, WorksForCompatibleMatcherType) { - // The field is an int, but the inner matcher expects a signed char. - Matcher m = Field(&AStruct::x, - Matcher(Ge(0))); - - AStruct a; - EXPECT_TRUE(m.Matches(a)); - a.x = -1; - EXPECT_FALSE(m.Matches(a)); -} - -// Tests that Field() can describe itself. -TEST(FieldTest, CanDescribeSelf) { - Matcher m = Field(&AStruct::x, Ge(0)); - - EXPECT_EQ("is an object whose given field is >= 0", Describe(m)); - EXPECT_EQ("is an object whose given field isn't >= 0", DescribeNegation(m)); -} - -TEST(FieldTest, CanDescribeSelfWithFieldName) { - Matcher m = Field("field_name", &AStruct::x, Ge(0)); - - EXPECT_EQ("is an object whose field `field_name` is >= 0", Describe(m)); - EXPECT_EQ("is an object whose field `field_name` isn't >= 0", - DescribeNegation(m)); -} - -// Tests that Field() can explain the match result. 
-TEST(FieldTest, CanExplainMatchResult) { - Matcher m = Field(&AStruct::x, Ge(0)); - - AStruct a; - a.x = 1; - EXPECT_EQ("whose given field is 1" + OfType("int"), Explain(m, a)); - - m = Field(&AStruct::x, GreaterThan(0)); - EXPECT_EQ( - "whose given field is 1" + OfType("int") + ", which is 1 more than 0", - Explain(m, a)); -} - -TEST(FieldTest, CanExplainMatchResultWithFieldName) { - Matcher m = Field("field_name", &AStruct::x, Ge(0)); - - AStruct a; - a.x = 1; - EXPECT_EQ("whose field `field_name` is 1" + OfType("int"), Explain(m, a)); - - m = Field("field_name", &AStruct::x, GreaterThan(0)); - EXPECT_EQ("whose field `field_name` is 1" + OfType("int") + - ", which is 1 more than 0", - Explain(m, a)); -} - -// Tests that Field() works when the argument is a pointer to const. -TEST(FieldForPointerTest, WorksForPointerToConst) { - Matcher m = Field(&AStruct::x, Ge(0)); - - AStruct a; - EXPECT_TRUE(m.Matches(&a)); - a.x = -1; - EXPECT_FALSE(m.Matches(&a)); -} - -// Tests that Field() works when the argument is a pointer to non-const. -TEST(FieldForPointerTest, WorksForPointerToNonConst) { - Matcher m = Field(&AStruct::x, Ge(0)); - - AStruct a; - EXPECT_TRUE(m.Matches(&a)); - a.x = -1; - EXPECT_FALSE(m.Matches(&a)); -} - -// Tests that Field() works when the argument is a reference to a const pointer. -TEST(FieldForPointerTest, WorksForReferenceToConstPointer) { - Matcher m = Field(&AStruct::x, Ge(0)); - - AStruct a; - EXPECT_TRUE(m.Matches(&a)); - a.x = -1; - EXPECT_FALSE(m.Matches(&a)); -} - -// Tests that Field() does not match the NULL pointer. -TEST(FieldForPointerTest, DoesNotMatchNull) { - Matcher m = Field(&AStruct::x, _); - EXPECT_FALSE(m.Matches(nullptr)); -} - -// Tests that Field(&Foo::field, ...) works when the argument's type -// is a sub-type of const Foo*. -TEST(FieldForPointerTest, WorksForArgumentOfSubType) { - // Note that the matcher expects DerivedStruct but we say AStruct - // inside Field(). 
- Matcher m = Field(&AStruct::x, Ge(0)); - - DerivedStruct d; - EXPECT_TRUE(m.Matches(&d)); - d.x = -1; - EXPECT_FALSE(m.Matches(&d)); -} - -// Tests that Field() can describe itself when used to match a pointer. -TEST(FieldForPointerTest, CanDescribeSelf) { - Matcher m = Field(&AStruct::x, Ge(0)); - - EXPECT_EQ("is an object whose given field is >= 0", Describe(m)); - EXPECT_EQ("is an object whose given field isn't >= 0", DescribeNegation(m)); -} - -TEST(FieldForPointerTest, CanDescribeSelfWithFieldName) { - Matcher m = Field("field_name", &AStruct::x, Ge(0)); - - EXPECT_EQ("is an object whose field `field_name` is >= 0", Describe(m)); - EXPECT_EQ("is an object whose field `field_name` isn't >= 0", - DescribeNegation(m)); -} - -// Tests that Field() can explain the result of matching a pointer. -TEST(FieldForPointerTest, CanExplainMatchResult) { - Matcher m = Field(&AStruct::x, Ge(0)); - - AStruct a; - a.x = 1; - EXPECT_EQ("", Explain(m, static_cast(nullptr))); - EXPECT_EQ("which points to an object whose given field is 1" + OfType("int"), - Explain(m, &a)); - - m = Field(&AStruct::x, GreaterThan(0)); - EXPECT_EQ("which points to an object whose given field is 1" + OfType("int") + - ", which is 1 more than 0", Explain(m, &a)); -} - -TEST(FieldForPointerTest, CanExplainMatchResultWithFieldName) { - Matcher m = Field("field_name", &AStruct::x, Ge(0)); - - AStruct a; - a.x = 1; - EXPECT_EQ("", Explain(m, static_cast(nullptr))); - EXPECT_EQ( - "which points to an object whose field `field_name` is 1" + OfType("int"), - Explain(m, &a)); - - m = Field("field_name", &AStruct::x, GreaterThan(0)); - EXPECT_EQ("which points to an object whose field `field_name` is 1" + - OfType("int") + ", which is 1 more than 0", - Explain(m, &a)); -} - -// A user-defined class for testing Property(). -class AClass { - public: - AClass() : n_(0) {} - - // A getter that returns a non-reference. 
- int n() const { return n_; } - - void set_n(int new_n) { n_ = new_n; } - - // A getter that returns a reference to const. - const std::string& s() const { return s_; } - - const std::string& s_ref() const & { return s_; } - - void set_s(const std::string& new_s) { s_ = new_s; } - - // A getter that returns a reference to non-const. - double& x() const { return x_; } - - private: - int n_; - std::string s_; - - static double x_; -}; - -double AClass::x_ = 0.0; - -// A derived class for testing Property(). -class DerivedClass : public AClass { - public: - int k() const { return k_; } - private: - int k_; -}; - -// Tests that Property(&Foo::property, ...) works when property() -// returns a non-reference. -TEST(PropertyTest, WorksForNonReferenceProperty) { - Matcher m = Property(&AClass::n, Ge(0)); - Matcher m_with_name = Property("n", &AClass::n, Ge(0)); - - AClass a; - a.set_n(1); - EXPECT_TRUE(m.Matches(a)); - EXPECT_TRUE(m_with_name.Matches(a)); - - a.set_n(-1); - EXPECT_FALSE(m.Matches(a)); - EXPECT_FALSE(m_with_name.Matches(a)); -} - -// Tests that Property(&Foo::property, ...) works when property() -// returns a reference to const. -TEST(PropertyTest, WorksForReferenceToConstProperty) { - Matcher m = Property(&AClass::s, StartsWith("hi")); - Matcher m_with_name = - Property("s", &AClass::s, StartsWith("hi")); - - AClass a; - a.set_s("hill"); - EXPECT_TRUE(m.Matches(a)); - EXPECT_TRUE(m_with_name.Matches(a)); - - a.set_s("hole"); - EXPECT_FALSE(m.Matches(a)); - EXPECT_FALSE(m_with_name.Matches(a)); -} - -// Tests that Property(&Foo::property, ...) works when property() is -// ref-qualified. 
-TEST(PropertyTest, WorksForRefQualifiedProperty) { - Matcher m = Property(&AClass::s_ref, StartsWith("hi")); - Matcher m_with_name = - Property("s", &AClass::s_ref, StartsWith("hi")); - - AClass a; - a.set_s("hill"); - EXPECT_TRUE(m.Matches(a)); - EXPECT_TRUE(m_with_name.Matches(a)); - - a.set_s("hole"); - EXPECT_FALSE(m.Matches(a)); - EXPECT_FALSE(m_with_name.Matches(a)); -} - -// Tests that Property(&Foo::property, ...) works when property() -// returns a reference to non-const. -TEST(PropertyTest, WorksForReferenceToNonConstProperty) { - double x = 0.0; - AClass a; - - Matcher m = Property(&AClass::x, Ref(x)); - EXPECT_FALSE(m.Matches(a)); - - m = Property(&AClass::x, Not(Ref(x))); - EXPECT_TRUE(m.Matches(a)); -} - -// Tests that Property(&Foo::property, ...) works when the argument is -// passed by value. -TEST(PropertyTest, WorksForByValueArgument) { - Matcher m = Property(&AClass::s, StartsWith("hi")); - - AClass a; - a.set_s("hill"); - EXPECT_TRUE(m.Matches(a)); - - a.set_s("hole"); - EXPECT_FALSE(m.Matches(a)); -} - -// Tests that Property(&Foo::property, ...) works when the argument's -// type is a sub-type of Foo. -TEST(PropertyTest, WorksForArgumentOfSubType) { - // The matcher expects a DerivedClass, but inside the Property() we - // say AClass. - Matcher m = Property(&AClass::n, Ge(0)); - - DerivedClass d; - d.set_n(1); - EXPECT_TRUE(m.Matches(d)); - - d.set_n(-1); - EXPECT_FALSE(m.Matches(d)); -} - -// Tests that Property(&Foo::property, m) works when property()'s type -// and m's argument type are compatible but different. -TEST(PropertyTest, WorksForCompatibleMatcherType) { - // n() returns an int but the inner matcher expects a signed char. 
- Matcher m = Property(&AClass::n, - Matcher(Ge(0))); - - Matcher m_with_name = - Property("n", &AClass::n, Matcher(Ge(0))); - - AClass a; - EXPECT_TRUE(m.Matches(a)); - EXPECT_TRUE(m_with_name.Matches(a)); - a.set_n(-1); - EXPECT_FALSE(m.Matches(a)); - EXPECT_FALSE(m_with_name.Matches(a)); -} - -// Tests that Property() can describe itself. -TEST(PropertyTest, CanDescribeSelf) { - Matcher m = Property(&AClass::n, Ge(0)); - - EXPECT_EQ("is an object whose given property is >= 0", Describe(m)); - EXPECT_EQ("is an object whose given property isn't >= 0", - DescribeNegation(m)); -} - -TEST(PropertyTest, CanDescribeSelfWithPropertyName) { - Matcher m = Property("fancy_name", &AClass::n, Ge(0)); - - EXPECT_EQ("is an object whose property `fancy_name` is >= 0", Describe(m)); - EXPECT_EQ("is an object whose property `fancy_name` isn't >= 0", - DescribeNegation(m)); -} - -// Tests that Property() can explain the match result. -TEST(PropertyTest, CanExplainMatchResult) { - Matcher m = Property(&AClass::n, Ge(0)); - - AClass a; - a.set_n(1); - EXPECT_EQ("whose given property is 1" + OfType("int"), Explain(m, a)); - - m = Property(&AClass::n, GreaterThan(0)); - EXPECT_EQ( - "whose given property is 1" + OfType("int") + ", which is 1 more than 0", - Explain(m, a)); -} - -TEST(PropertyTest, CanExplainMatchResultWithPropertyName) { - Matcher m = Property("fancy_name", &AClass::n, Ge(0)); - - AClass a; - a.set_n(1); - EXPECT_EQ("whose property `fancy_name` is 1" + OfType("int"), Explain(m, a)); - - m = Property("fancy_name", &AClass::n, GreaterThan(0)); - EXPECT_EQ("whose property `fancy_name` is 1" + OfType("int") + - ", which is 1 more than 0", - Explain(m, a)); -} - -// Tests that Property() works when the argument is a pointer to const. 
-TEST(PropertyForPointerTest, WorksForPointerToConst) { - Matcher m = Property(&AClass::n, Ge(0)); - - AClass a; - a.set_n(1); - EXPECT_TRUE(m.Matches(&a)); - - a.set_n(-1); - EXPECT_FALSE(m.Matches(&a)); -} - -// Tests that Property() works when the argument is a pointer to non-const. -TEST(PropertyForPointerTest, WorksForPointerToNonConst) { - Matcher m = Property(&AClass::s, StartsWith("hi")); - - AClass a; - a.set_s("hill"); - EXPECT_TRUE(m.Matches(&a)); - - a.set_s("hole"); - EXPECT_FALSE(m.Matches(&a)); -} - -// Tests that Property() works when the argument is a reference to a -// const pointer. -TEST(PropertyForPointerTest, WorksForReferenceToConstPointer) { - Matcher m = Property(&AClass::s, StartsWith("hi")); - - AClass a; - a.set_s("hill"); - EXPECT_TRUE(m.Matches(&a)); - - a.set_s("hole"); - EXPECT_FALSE(m.Matches(&a)); -} - -// Tests that Property() does not match the NULL pointer. -TEST(PropertyForPointerTest, WorksForReferenceToNonConstProperty) { - Matcher m = Property(&AClass::x, _); - EXPECT_FALSE(m.Matches(nullptr)); -} - -// Tests that Property(&Foo::property, ...) works when the argument's -// type is a sub-type of const Foo*. -TEST(PropertyForPointerTest, WorksForArgumentOfSubType) { - // The matcher expects a DerivedClass, but inside the Property() we - // say AClass. - Matcher m = Property(&AClass::n, Ge(0)); - - DerivedClass d; - d.set_n(1); - EXPECT_TRUE(m.Matches(&d)); - - d.set_n(-1); - EXPECT_FALSE(m.Matches(&d)); -} - -// Tests that Property() can describe itself when used to match a pointer. 
-TEST(PropertyForPointerTest, CanDescribeSelf) { - Matcher m = Property(&AClass::n, Ge(0)); - - EXPECT_EQ("is an object whose given property is >= 0", Describe(m)); - EXPECT_EQ("is an object whose given property isn't >= 0", - DescribeNegation(m)); -} - -TEST(PropertyForPointerTest, CanDescribeSelfWithPropertyDescription) { - Matcher m = Property("fancy_name", &AClass::n, Ge(0)); - - EXPECT_EQ("is an object whose property `fancy_name` is >= 0", Describe(m)); - EXPECT_EQ("is an object whose property `fancy_name` isn't >= 0", - DescribeNegation(m)); -} - -// Tests that Property() can explain the result of matching a pointer. -TEST(PropertyForPointerTest, CanExplainMatchResult) { - Matcher m = Property(&AClass::n, Ge(0)); - - AClass a; - a.set_n(1); - EXPECT_EQ("", Explain(m, static_cast(nullptr))); - EXPECT_EQ( - "which points to an object whose given property is 1" + OfType("int"), - Explain(m, &a)); - - m = Property(&AClass::n, GreaterThan(0)); - EXPECT_EQ("which points to an object whose given property is 1" + - OfType("int") + ", which is 1 more than 0", - Explain(m, &a)); -} - -TEST(PropertyForPointerTest, CanExplainMatchResultWithPropertyName) { - Matcher m = Property("fancy_name", &AClass::n, Ge(0)); - - AClass a; - a.set_n(1); - EXPECT_EQ("", Explain(m, static_cast(nullptr))); - EXPECT_EQ("which points to an object whose property `fancy_name` is 1" + - OfType("int"), - Explain(m, &a)); - - m = Property("fancy_name", &AClass::n, GreaterThan(0)); - EXPECT_EQ("which points to an object whose property `fancy_name` is 1" + - OfType("int") + ", which is 1 more than 0", - Explain(m, &a)); -} - -// Tests ResultOf. - -// Tests that ResultOf(f, ...) compiles and works as expected when f is a -// function pointer. -std::string IntToStringFunction(int input) { - return input == 1 ? 
"foo" : "bar"; -} - -TEST(ResultOfTest, WorksForFunctionPointers) { - Matcher matcher = ResultOf(&IntToStringFunction, Eq(std::string("foo"))); - - EXPECT_TRUE(matcher.Matches(1)); - EXPECT_FALSE(matcher.Matches(2)); -} - -// Tests that ResultOf() can describe itself. -TEST(ResultOfTest, CanDescribeItself) { - Matcher matcher = ResultOf(&IntToStringFunction, StrEq("foo")); - - EXPECT_EQ("is mapped by the given callable to a value that " - "is equal to \"foo\"", Describe(matcher)); - EXPECT_EQ("is mapped by the given callable to a value that " - "isn't equal to \"foo\"", DescribeNegation(matcher)); -} - -// Tests that ResultOf() can explain the match result. -int IntFunction(int input) { return input == 42 ? 80 : 90; } - -TEST(ResultOfTest, CanExplainMatchResult) { - Matcher matcher = ResultOf(&IntFunction, Ge(85)); - EXPECT_EQ("which is mapped by the given callable to 90" + OfType("int"), - Explain(matcher, 36)); - - matcher = ResultOf(&IntFunction, GreaterThan(85)); - EXPECT_EQ("which is mapped by the given callable to 90" + OfType("int") + - ", which is 5 more than 85", Explain(matcher, 36)); -} - -// Tests that ResultOf(f, ...) compiles and works as expected when f(x) -// returns a non-reference. -TEST(ResultOfTest, WorksForNonReferenceResults) { - Matcher matcher = ResultOf(&IntFunction, Eq(80)); - - EXPECT_TRUE(matcher.Matches(42)); - EXPECT_FALSE(matcher.Matches(36)); -} - -// Tests that ResultOf(f, ...) compiles and works as expected when f(x) -// returns a reference to non-const. 
-double& DoubleFunction(double& input) { return input; } // NOLINT - -Uncopyable& RefUncopyableFunction(Uncopyable& obj) { // NOLINT - return obj; -} - -TEST(ResultOfTest, WorksForReferenceToNonConstResults) { - double x = 3.14; - double x2 = x; - Matcher matcher = ResultOf(&DoubleFunction, Ref(x)); - - EXPECT_TRUE(matcher.Matches(x)); - EXPECT_FALSE(matcher.Matches(x2)); - - // Test that ResultOf works with uncopyable objects - Uncopyable obj(0); - Uncopyable obj2(0); - Matcher matcher2 = - ResultOf(&RefUncopyableFunction, Ref(obj)); - - EXPECT_TRUE(matcher2.Matches(obj)); - EXPECT_FALSE(matcher2.Matches(obj2)); -} - -// Tests that ResultOf(f, ...) compiles and works as expected when f(x) -// returns a reference to const. -const std::string& StringFunction(const std::string& input) { return input; } - -TEST(ResultOfTest, WorksForReferenceToConstResults) { - std::string s = "foo"; - std::string s2 = s; - Matcher matcher = ResultOf(&StringFunction, Ref(s)); - - EXPECT_TRUE(matcher.Matches(s)); - EXPECT_FALSE(matcher.Matches(s2)); -} - -// Tests that ResultOf(f, m) works when f(x) and m's -// argument types are compatible but different. -TEST(ResultOfTest, WorksForCompatibleMatcherTypes) { - // IntFunction() returns int but the inner matcher expects a signed char. - Matcher matcher = ResultOf(IntFunction, Matcher(Ge(85))); - - EXPECT_TRUE(matcher.Matches(36)); - EXPECT_FALSE(matcher.Matches(42)); -} - -// Tests that the program aborts when ResultOf is passed -// a NULL function pointer. -TEST(ResultOfDeathTest, DiesOnNullFunctionPointers) { - EXPECT_DEATH_IF_SUPPORTED( - ResultOf(static_cast(nullptr), - Eq(std::string("foo"))), - "NULL function pointer is passed into ResultOf\\(\\)\\."); -} - -// Tests that ResultOf(f, ...) compiles and works as expected when f is a -// function reference. 
-TEST(ResultOfTest, WorksForFunctionReferences) { - Matcher matcher = ResultOf(IntToStringFunction, StrEq("foo")); - EXPECT_TRUE(matcher.Matches(1)); - EXPECT_FALSE(matcher.Matches(2)); -} - -// Tests that ResultOf(f, ...) compiles and works as expected when f is a -// function object. -struct Functor { - std::string operator()(int input) const { - return IntToStringFunction(input); - } -}; - -TEST(ResultOfTest, WorksForFunctors) { - Matcher matcher = ResultOf(Functor(), Eq(std::string("foo"))); - - EXPECT_TRUE(matcher.Matches(1)); - EXPECT_FALSE(matcher.Matches(2)); -} - -// Tests that ResultOf(f, ...) compiles and works as expected when f is a -// functor with more than one operator() defined. ResultOf() must work -// for each defined operator(). -struct PolymorphicFunctor { - typedef int result_type; - int operator()(int n) { return n; } - int operator()(const char* s) { return static_cast(strlen(s)); } - std::string operator()(int *p) { return p ? "good ptr" : "null"; } -}; - -TEST(ResultOfTest, WorksForPolymorphicFunctors) { - Matcher matcher_int = ResultOf(PolymorphicFunctor(), Ge(5)); - - EXPECT_TRUE(matcher_int.Matches(10)); - EXPECT_FALSE(matcher_int.Matches(2)); - - Matcher matcher_string = ResultOf(PolymorphicFunctor(), Ge(5)); - - EXPECT_TRUE(matcher_string.Matches("long string")); - EXPECT_FALSE(matcher_string.Matches("shrt")); -} - -TEST(ResultOfTest, WorksForPolymorphicFunctorsIgnoringResultType) { - Matcher matcher = ResultOf(PolymorphicFunctor(), "good ptr"); - - int n = 0; - EXPECT_TRUE(matcher.Matches(&n)); - EXPECT_FALSE(matcher.Matches(nullptr)); -} - -TEST(ResultOfTest, WorksForLambdas) { - Matcher matcher = ResultOf( - [](int str_len) { - return std::string(static_cast(str_len), 'x'); - }, - "xxx"); - EXPECT_TRUE(matcher.Matches(3)); - EXPECT_FALSE(matcher.Matches(1)); -} - -TEST(ResultOfTest, WorksForNonCopyableArguments) { - Matcher> matcher = ResultOf( - [](const std::unique_ptr& str_len) { - return std::string(static_cast(*str_len), 'x'); 
- }, - "xxx"); - EXPECT_TRUE(matcher.Matches(std::unique_ptr(new int(3)))); - EXPECT_FALSE(matcher.Matches(std::unique_ptr(new int(1)))); -} - -const int* ReferencingFunction(const int& n) { return &n; } - -struct ReferencingFunctor { - typedef const int* result_type; - result_type operator()(const int& n) { return &n; } -}; - -TEST(ResultOfTest, WorksForReferencingCallables) { - const int n = 1; - const int n2 = 1; - Matcher matcher2 = ResultOf(ReferencingFunction, Eq(&n)); - EXPECT_TRUE(matcher2.Matches(n)); - EXPECT_FALSE(matcher2.Matches(n2)); - - Matcher matcher3 = ResultOf(ReferencingFunctor(), Eq(&n)); - EXPECT_TRUE(matcher3.Matches(n)); - EXPECT_FALSE(matcher3.Matches(n2)); -} - -class DivisibleByImpl { - public: - explicit DivisibleByImpl(int a_divider) : divider_(a_divider) {} - - // For testing using ExplainMatchResultTo() with polymorphic matchers. - template - bool MatchAndExplain(const T& n, MatchResultListener* listener) const { - *listener << "which is " << (n % divider_) << " modulo " - << divider_; - return (n % divider_) == 0; - } - - void DescribeTo(ostream* os) const { - *os << "is divisible by " << divider_; - } - - void DescribeNegationTo(ostream* os) const { - *os << "is not divisible by " << divider_; - } - - void set_divider(int a_divider) { divider_ = a_divider; } - int divider() const { return divider_; } - - private: - int divider_; -}; - -PolymorphicMatcher DivisibleBy(int n) { - return MakePolymorphicMatcher(DivisibleByImpl(n)); -} - -// Tests that when AllOf() fails, only the first failing matcher is -// asked to explain why. -TEST(ExplainMatchResultTest, AllOf_False_False) { - const Matcher m = AllOf(DivisibleBy(4), DivisibleBy(3)); - EXPECT_EQ("which is 1 modulo 4", Explain(m, 5)); -} - -// Tests that when AllOf() fails, only the first failing matcher is -// asked to explain why. 
-TEST(ExplainMatchResultTest, AllOf_False_True) { - const Matcher m = AllOf(DivisibleBy(4), DivisibleBy(3)); - EXPECT_EQ("which is 2 modulo 4", Explain(m, 6)); -} - -// Tests that when AllOf() fails, only the first failing matcher is -// asked to explain why. -TEST(ExplainMatchResultTest, AllOf_True_False) { - const Matcher m = AllOf(Ge(1), DivisibleBy(3)); - EXPECT_EQ("which is 2 modulo 3", Explain(m, 5)); -} - -// Tests that when AllOf() succeeds, all matchers are asked to explain -// why. -TEST(ExplainMatchResultTest, AllOf_True_True) { - const Matcher m = AllOf(DivisibleBy(2), DivisibleBy(3)); - EXPECT_EQ("which is 0 modulo 2, and which is 0 modulo 3", Explain(m, 6)); -} - -TEST(ExplainMatchResultTest, AllOf_True_True_2) { - const Matcher m = AllOf(Ge(2), Le(3)); - EXPECT_EQ("", Explain(m, 2)); -} - -TEST(ExplainmatcherResultTest, MonomorphicMatcher) { - const Matcher m = GreaterThan(5); - EXPECT_EQ("which is 1 more than 5", Explain(m, 6)); -} - -// The following two tests verify that values without a public copy -// ctor can be used as arguments to matchers like Eq(), Ge(), and etc -// with the help of ByRef(). 
- -class NotCopyable { - public: - explicit NotCopyable(int a_value) : value_(a_value) {} - - int value() const { return value_; } - - bool operator==(const NotCopyable& rhs) const { - return value() == rhs.value(); - } - - bool operator>=(const NotCopyable& rhs) const { - return value() >= rhs.value(); - } - private: - int value_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(NotCopyable); -}; - -TEST(ByRefTest, AllowsNotCopyableConstValueInMatchers) { - const NotCopyable const_value1(1); - const Matcher m = Eq(ByRef(const_value1)); - - const NotCopyable n1(1), n2(2); - EXPECT_TRUE(m.Matches(n1)); - EXPECT_FALSE(m.Matches(n2)); -} - -TEST(ByRefTest, AllowsNotCopyableValueInMatchers) { - NotCopyable value2(2); - const Matcher m = Ge(ByRef(value2)); - - NotCopyable n1(1), n2(2); - EXPECT_FALSE(m.Matches(n1)); - EXPECT_TRUE(m.Matches(n2)); -} - -TEST(IsEmptyTest, ImplementsIsEmpty) { - vector container; - EXPECT_THAT(container, IsEmpty()); - container.push_back(0); - EXPECT_THAT(container, Not(IsEmpty())); - container.push_back(1); - EXPECT_THAT(container, Not(IsEmpty())); -} - -TEST(IsEmptyTest, WorksWithString) { - std::string text; - EXPECT_THAT(text, IsEmpty()); - text = "foo"; - EXPECT_THAT(text, Not(IsEmpty())); - text = std::string("\0", 1); - EXPECT_THAT(text, Not(IsEmpty())); -} - -TEST(IsEmptyTest, CanDescribeSelf) { - Matcher > m = IsEmpty(); - EXPECT_EQ("is empty", Describe(m)); - EXPECT_EQ("isn't empty", DescribeNegation(m)); -} - -TEST(IsEmptyTest, ExplainsResult) { - Matcher > m = IsEmpty(); - vector container; - EXPECT_EQ("", Explain(m, container)); - container.push_back(0); - EXPECT_EQ("whose size is 1", Explain(m, container)); -} - -TEST(IsEmptyTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(IsEmpty())); - helper.Call({}); -} - -TEST(IsTrueTest, IsTrueIsFalse) { - EXPECT_THAT(true, IsTrue()); - EXPECT_THAT(false, IsFalse()); - EXPECT_THAT(true, Not(IsFalse())); - EXPECT_THAT(false, Not(IsTrue())); - EXPECT_THAT(0, 
Not(IsTrue())); - EXPECT_THAT(0, IsFalse()); - EXPECT_THAT(nullptr, Not(IsTrue())); - EXPECT_THAT(nullptr, IsFalse()); - EXPECT_THAT(-1, IsTrue()); - EXPECT_THAT(-1, Not(IsFalse())); - EXPECT_THAT(1, IsTrue()); - EXPECT_THAT(1, Not(IsFalse())); - EXPECT_THAT(2, IsTrue()); - EXPECT_THAT(2, Not(IsFalse())); - int a = 42; - EXPECT_THAT(a, IsTrue()); - EXPECT_THAT(a, Not(IsFalse())); - EXPECT_THAT(&a, IsTrue()); - EXPECT_THAT(&a, Not(IsFalse())); - EXPECT_THAT(false, Not(IsTrue())); - EXPECT_THAT(true, Not(IsFalse())); - EXPECT_THAT(std::true_type(), IsTrue()); - EXPECT_THAT(std::true_type(), Not(IsFalse())); - EXPECT_THAT(std::false_type(), IsFalse()); - EXPECT_THAT(std::false_type(), Not(IsTrue())); - EXPECT_THAT(nullptr, Not(IsTrue())); - EXPECT_THAT(nullptr, IsFalse()); - std::unique_ptr null_unique; - std::unique_ptr nonnull_unique(new int(0)); - EXPECT_THAT(null_unique, Not(IsTrue())); - EXPECT_THAT(null_unique, IsFalse()); - EXPECT_THAT(nonnull_unique, IsTrue()); - EXPECT_THAT(nonnull_unique, Not(IsFalse())); -} - -TEST(SizeIsTest, ImplementsSizeIs) { - vector container; - EXPECT_THAT(container, SizeIs(0)); - EXPECT_THAT(container, Not(SizeIs(1))); - container.push_back(0); - EXPECT_THAT(container, Not(SizeIs(0))); - EXPECT_THAT(container, SizeIs(1)); - container.push_back(0); - EXPECT_THAT(container, Not(SizeIs(0))); - EXPECT_THAT(container, SizeIs(2)); -} - -TEST(SizeIsTest, WorksWithMap) { - map container; - EXPECT_THAT(container, SizeIs(0)); - EXPECT_THAT(container, Not(SizeIs(1))); - container.insert(make_pair("foo", 1)); - EXPECT_THAT(container, Not(SizeIs(0))); - EXPECT_THAT(container, SizeIs(1)); - container.insert(make_pair("bar", 2)); - EXPECT_THAT(container, Not(SizeIs(0))); - EXPECT_THAT(container, SizeIs(2)); -} - -TEST(SizeIsTest, WorksWithReferences) { - vector container; - Matcher&> m = SizeIs(1); - EXPECT_THAT(container, Not(m)); - container.push_back(0); - EXPECT_THAT(container, m); -} - -TEST(SizeIsTest, WorksWithMoveOnly) { - ContainerHelper 
helper; - EXPECT_CALL(helper, Call(SizeIs(3))); - helper.Call(MakeUniquePtrs({1, 2, 3})); -} - -// SizeIs should work for any type that provides a size() member function. -// For example, a size_type member type should not need to be provided. -struct MinimalistCustomType { - int size() const { return 1; } -}; -TEST(SizeIsTest, WorksWithMinimalistCustomType) { - MinimalistCustomType container; - EXPECT_THAT(container, SizeIs(1)); - EXPECT_THAT(container, Not(SizeIs(0))); -} - -TEST(SizeIsTest, CanDescribeSelf) { - Matcher > m = SizeIs(2); - EXPECT_EQ("size is equal to 2", Describe(m)); - EXPECT_EQ("size isn't equal to 2", DescribeNegation(m)); -} - -TEST(SizeIsTest, ExplainsResult) { - Matcher > m1 = SizeIs(2); - Matcher > m2 = SizeIs(Lt(2u)); - Matcher > m3 = SizeIs(AnyOf(0, 3)); - Matcher > m4 = SizeIs(Gt(1u)); - vector container; - EXPECT_EQ("whose size 0 doesn't match", Explain(m1, container)); - EXPECT_EQ("whose size 0 matches", Explain(m2, container)); - EXPECT_EQ("whose size 0 matches", Explain(m3, container)); - EXPECT_EQ("whose size 0 doesn't match", Explain(m4, container)); - container.push_back(0); - container.push_back(0); - EXPECT_EQ("whose size 2 matches", Explain(m1, container)); - EXPECT_EQ("whose size 2 doesn't match", Explain(m2, container)); - EXPECT_EQ("whose size 2 doesn't match", Explain(m3, container)); - EXPECT_EQ("whose size 2 matches", Explain(m4, container)); -} - -#if GTEST_HAS_TYPED_TEST -// Tests ContainerEq with different container types, and -// different element types. - -template -class ContainerEqTest : public testing::Test {}; - -typedef testing::Types< - set, - vector, - multiset, - list > - ContainerEqTestTypes; - -TYPED_TEST_SUITE(ContainerEqTest, ContainerEqTestTypes); - -// Tests that the filled container is equal to itself. 
-TYPED_TEST(ContainerEqTest, EqualsSelf) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - TypeParam my_set(vals, vals + 6); - const Matcher m = ContainerEq(my_set); - EXPECT_TRUE(m.Matches(my_set)); - EXPECT_EQ("", Explain(m, my_set)); -} - -// Tests that missing values are reported. -TYPED_TEST(ContainerEqTest, ValueMissing) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {2, 1, 8, 5}; - TypeParam my_set(vals, vals + 6); - TypeParam test_set(test_vals, test_vals + 4); - const Matcher m = ContainerEq(my_set); - EXPECT_FALSE(m.Matches(test_set)); - EXPECT_EQ("which doesn't have these expected elements: 3", - Explain(m, test_set)); -} - -// Tests that added values are reported. -TYPED_TEST(ContainerEqTest, ValueAdded) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {1, 2, 3, 5, 8, 46}; - TypeParam my_set(vals, vals + 6); - TypeParam test_set(test_vals, test_vals + 6); - const Matcher m = ContainerEq(my_set); - EXPECT_FALSE(m.Matches(test_set)); - EXPECT_EQ("which has these unexpected elements: 46", Explain(m, test_set)); -} - -// Tests that added and missing values are reported together. -TYPED_TEST(ContainerEqTest, ValueAddedAndRemoved) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {1, 2, 3, 8, 46}; - TypeParam my_set(vals, vals + 6); - TypeParam test_set(test_vals, test_vals + 5); - const Matcher m = ContainerEq(my_set); - EXPECT_FALSE(m.Matches(test_set)); - EXPECT_EQ("which has these unexpected elements: 46,\n" - "and doesn't have these expected elements: 5", - Explain(m, test_set)); -} - -// Tests duplicated value -- expect no explanation. 
-TYPED_TEST(ContainerEqTest, DuplicateDifference) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {1, 2, 3, 5, 8}; - TypeParam my_set(vals, vals + 6); - TypeParam test_set(test_vals, test_vals + 5); - const Matcher m = ContainerEq(my_set); - // Depending on the container, match may be true or false - // But in any case there should be no explanation. - EXPECT_EQ("", Explain(m, test_set)); -} -#endif // GTEST_HAS_TYPED_TEST - -// Tests that multiple missing values are reported. -// Using just vector here, so order is predictable. -TEST(ContainerEqExtraTest, MultipleValuesMissing) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {2, 1, 5}; - vector my_set(vals, vals + 6); - vector test_set(test_vals, test_vals + 3); - const Matcher > m = ContainerEq(my_set); - EXPECT_FALSE(m.Matches(test_set)); - EXPECT_EQ("which doesn't have these expected elements: 3, 8", - Explain(m, test_set)); -} - -// Tests that added values are reported. -// Using just vector here, so order is predictable. -TEST(ContainerEqExtraTest, MultipleValuesAdded) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {1, 2, 92, 3, 5, 8, 46}; - list my_set(vals, vals + 6); - list test_set(test_vals, test_vals + 7); - const Matcher&> m = ContainerEq(my_set); - EXPECT_FALSE(m.Matches(test_set)); - EXPECT_EQ("which has these unexpected elements: 92, 46", - Explain(m, test_set)); -} - -// Tests that added and missing values are reported together. 
-TEST(ContainerEqExtraTest, MultipleValuesAddedAndRemoved) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {1, 2, 3, 92, 46}; - list my_set(vals, vals + 6); - list test_set(test_vals, test_vals + 5); - const Matcher > m = ContainerEq(my_set); - EXPECT_FALSE(m.Matches(test_set)); - EXPECT_EQ("which has these unexpected elements: 92, 46,\n" - "and doesn't have these expected elements: 5, 8", - Explain(m, test_set)); -} - -// Tests to see that duplicate elements are detected, -// but (as above) not reported in the explanation. -TEST(ContainerEqExtraTest, MultiSetOfIntDuplicateDifference) { - static const int vals[] = {1, 1, 2, 3, 5, 8}; - static const int test_vals[] = {1, 2, 3, 5, 8}; - vector my_set(vals, vals + 6); - vector test_set(test_vals, test_vals + 5); - const Matcher > m = ContainerEq(my_set); - EXPECT_TRUE(m.Matches(my_set)); - EXPECT_FALSE(m.Matches(test_set)); - // There is nothing to report when both sets contain all the same values. - EXPECT_EQ("", Explain(m, test_set)); -} - -// Tests that ContainerEq works for non-trivial associative containers, -// like maps. -TEST(ContainerEqExtraTest, WorksForMaps) { - map my_map; - my_map[0] = "a"; - my_map[1] = "b"; - - map test_map; - test_map[0] = "aa"; - test_map[1] = "b"; - - const Matcher&> m = ContainerEq(my_map); - EXPECT_TRUE(m.Matches(my_map)); - EXPECT_FALSE(m.Matches(test_map)); - - EXPECT_EQ("which has these unexpected elements: (0, \"aa\"),\n" - "and doesn't have these expected elements: (0, \"a\")", - Explain(m, test_map)); -} - -TEST(ContainerEqExtraTest, WorksForNativeArray) { - int a1[] = {1, 2, 3}; - int a2[] = {1, 2, 3}; - int b[] = {1, 2, 4}; - - EXPECT_THAT(a1, ContainerEq(a2)); - EXPECT_THAT(a1, Not(ContainerEq(b))); -} - -TEST(ContainerEqExtraTest, WorksForTwoDimensionalNativeArray) { - const char a1[][3] = {"hi", "lo"}; - const char a2[][3] = {"hi", "lo"}; - const char b[][3] = {"lo", "hi"}; - - // Tests using ContainerEq() in the first dimension. 
- EXPECT_THAT(a1, ContainerEq(a2)); - EXPECT_THAT(a1, Not(ContainerEq(b))); - - // Tests using ContainerEq() in the second dimension. - EXPECT_THAT(a1, ElementsAre(ContainerEq(a2[0]), ContainerEq(a2[1]))); - EXPECT_THAT(a1, ElementsAre(Not(ContainerEq(b[0])), ContainerEq(a2[1]))); -} - -TEST(ContainerEqExtraTest, WorksForNativeArrayAsTuple) { - const int a1[] = {1, 2, 3}; - const int a2[] = {1, 2, 3}; - const int b[] = {1, 2, 3, 4}; - - const int* const p1 = a1; - EXPECT_THAT(std::make_tuple(p1, 3), ContainerEq(a2)); - EXPECT_THAT(std::make_tuple(p1, 3), Not(ContainerEq(b))); - - const int c[] = {1, 3, 2}; - EXPECT_THAT(std::make_tuple(p1, 3), Not(ContainerEq(c))); -} - -TEST(ContainerEqExtraTest, CopiesNativeArrayParameter) { - std::string a1[][3] = { - {"hi", "hello", "ciao"}, - {"bye", "see you", "ciao"} - }; - - std::string a2[][3] = { - {"hi", "hello", "ciao"}, - {"bye", "see you", "ciao"} - }; - - const Matcher m = ContainerEq(a2); - EXPECT_THAT(a1, m); - - a2[0][0] = "ha"; - EXPECT_THAT(a1, m); -} - -TEST(WhenSortedByTest, WorksForEmptyContainer) { - const vector numbers; - EXPECT_THAT(numbers, WhenSortedBy(less(), ElementsAre())); - EXPECT_THAT(numbers, Not(WhenSortedBy(less(), ElementsAre(1)))); -} - -TEST(WhenSortedByTest, WorksForNonEmptyContainer) { - vector numbers; - numbers.push_back(3); - numbers.push_back(1); - numbers.push_back(2); - numbers.push_back(2); - EXPECT_THAT(numbers, WhenSortedBy(greater(), - ElementsAre(3, 2, 2, 1))); - EXPECT_THAT(numbers, Not(WhenSortedBy(greater(), - ElementsAre(1, 2, 2, 3)))); -} - -TEST(WhenSortedByTest, WorksForNonVectorContainer) { - list words; - words.push_back("say"); - words.push_back("hello"); - words.push_back("world"); - EXPECT_THAT(words, WhenSortedBy(less(), - ElementsAre("hello", "say", "world"))); - EXPECT_THAT(words, Not(WhenSortedBy(less(), - ElementsAre("say", "hello", "world")))); -} - -TEST(WhenSortedByTest, WorksForNativeArray) { - const int numbers[] = {1, 3, 2, 4}; - const int sorted_numbers[] 
= {1, 2, 3, 4}; - EXPECT_THAT(numbers, WhenSortedBy(less(), ElementsAre(1, 2, 3, 4))); - EXPECT_THAT(numbers, WhenSortedBy(less(), - ElementsAreArray(sorted_numbers))); - EXPECT_THAT(numbers, Not(WhenSortedBy(less(), ElementsAre(1, 3, 2, 4)))); -} - -TEST(WhenSortedByTest, CanDescribeSelf) { - const Matcher > m = WhenSortedBy(less(), ElementsAre(1, 2)); - EXPECT_EQ("(when sorted) has 2 elements where\n" - "element #0 is equal to 1,\n" - "element #1 is equal to 2", - Describe(m)); - EXPECT_EQ("(when sorted) doesn't have 2 elements, or\n" - "element #0 isn't equal to 1, or\n" - "element #1 isn't equal to 2", - DescribeNegation(m)); -} - -TEST(WhenSortedByTest, ExplainsMatchResult) { - const int a[] = {2, 1}; - EXPECT_EQ("which is { 1, 2 } when sorted, whose element #0 doesn't match", - Explain(WhenSortedBy(less(), ElementsAre(2, 3)), a)); - EXPECT_EQ("which is { 1, 2 } when sorted", - Explain(WhenSortedBy(less(), ElementsAre(1, 2)), a)); -} - -// WhenSorted() is a simple wrapper on WhenSortedBy(). Hence we don't -// need to test it as exhaustively as we test the latter. 
- -TEST(WhenSortedTest, WorksForEmptyContainer) { - const vector numbers; - EXPECT_THAT(numbers, WhenSorted(ElementsAre())); - EXPECT_THAT(numbers, Not(WhenSorted(ElementsAre(1)))); -} - -TEST(WhenSortedTest, WorksForNonEmptyContainer) { - list words; - words.push_back("3"); - words.push_back("1"); - words.push_back("2"); - words.push_back("2"); - EXPECT_THAT(words, WhenSorted(ElementsAre("1", "2", "2", "3"))); - EXPECT_THAT(words, Not(WhenSorted(ElementsAre("3", "1", "2", "2")))); -} - -TEST(WhenSortedTest, WorksForMapTypes) { - map word_counts; - word_counts["and"] = 1; - word_counts["the"] = 1; - word_counts["buffalo"] = 2; - EXPECT_THAT(word_counts, - WhenSorted(ElementsAre(Pair("and", 1), Pair("buffalo", 2), - Pair("the", 1)))); - EXPECT_THAT(word_counts, - Not(WhenSorted(ElementsAre(Pair("and", 1), Pair("the", 1), - Pair("buffalo", 2))))); -} - -TEST(WhenSortedTest, WorksForMultiMapTypes) { - multimap ifib; - ifib.insert(make_pair(8, 6)); - ifib.insert(make_pair(2, 3)); - ifib.insert(make_pair(1, 1)); - ifib.insert(make_pair(3, 4)); - ifib.insert(make_pair(1, 2)); - ifib.insert(make_pair(5, 5)); - EXPECT_THAT(ifib, WhenSorted(ElementsAre(Pair(1, 1), - Pair(1, 2), - Pair(2, 3), - Pair(3, 4), - Pair(5, 5), - Pair(8, 6)))); - EXPECT_THAT(ifib, Not(WhenSorted(ElementsAre(Pair(8, 6), - Pair(2, 3), - Pair(1, 1), - Pair(3, 4), - Pair(1, 2), - Pair(5, 5))))); -} - -TEST(WhenSortedTest, WorksForPolymorphicMatcher) { - std::deque d; - d.push_back(2); - d.push_back(1); - EXPECT_THAT(d, WhenSorted(ElementsAre(1, 2))); - EXPECT_THAT(d, Not(WhenSorted(ElementsAre(2, 1)))); -} - -TEST(WhenSortedTest, WorksForVectorConstRefMatcher) { - std::deque d; - d.push_back(2); - d.push_back(1); - Matcher&> vector_match = ElementsAre(1, 2); - EXPECT_THAT(d, WhenSorted(vector_match)); - Matcher&> not_vector_match = ElementsAre(2, 1); - EXPECT_THAT(d, Not(WhenSorted(not_vector_match))); -} - -// Deliberately bare pseudo-container. 
-// Offers only begin() and end() accessors, yielding InputIterator. -template -class Streamlike { - private: - class ConstIter; - public: - typedef ConstIter const_iterator; - typedef T value_type; - - template - Streamlike(InIter first, InIter last) : remainder_(first, last) {} - - const_iterator begin() const { - return const_iterator(this, remainder_.begin()); - } - const_iterator end() const { - return const_iterator(this, remainder_.end()); - } - - private: - class ConstIter : public std::iterator { - public: - ConstIter(const Streamlike* s, - typename std::list::iterator pos) - : s_(s), pos_(pos) {} - - const value_type& operator*() const { return *pos_; } - const value_type* operator->() const { return &*pos_; } - ConstIter& operator++() { - s_->remainder_.erase(pos_++); - return *this; - } - - // *iter++ is required to work (see std::istreambuf_iterator). - // (void)iter++ is also required to work. - class PostIncrProxy { - public: - explicit PostIncrProxy(const value_type& value) : value_(value) {} - value_type operator*() const { return value_; } - private: - value_type value_; - }; - PostIncrProxy operator++(int) { - PostIncrProxy proxy(**this); - ++(*this); - return proxy; - } - - friend bool operator==(const ConstIter& a, const ConstIter& b) { - return a.s_ == b.s_ && a.pos_ == b.pos_; - } - friend bool operator!=(const ConstIter& a, const ConstIter& b) { - return !(a == b); - } - - private: - const Streamlike* s_; - typename std::list::iterator pos_; - }; - - friend std::ostream& operator<<(std::ostream& os, const Streamlike& s) { - os << "["; - typedef typename std::list::const_iterator Iter; - const char* sep = ""; - for (Iter it = s.remainder_.begin(); it != s.remainder_.end(); ++it) { - os << sep << *it; - sep = ","; - } - os << "]"; - return os; - } - - mutable std::list remainder_; // modified by iteration -}; - -TEST(StreamlikeTest, Iteration) { - const int a[5] = {2, 1, 4, 5, 3}; - Streamlike s(a, a + 5); - Streamlike::const_iterator it = 
s.begin(); - const int* ip = a; - while (it != s.end()) { - SCOPED_TRACE(ip - a); - EXPECT_EQ(*ip++, *it++); - } -} - -TEST(BeginEndDistanceIsTest, WorksWithForwardList) { - std::forward_list container; - EXPECT_THAT(container, BeginEndDistanceIs(0)); - EXPECT_THAT(container, Not(BeginEndDistanceIs(1))); - container.push_front(0); - EXPECT_THAT(container, Not(BeginEndDistanceIs(0))); - EXPECT_THAT(container, BeginEndDistanceIs(1)); - container.push_front(0); - EXPECT_THAT(container, Not(BeginEndDistanceIs(0))); - EXPECT_THAT(container, BeginEndDistanceIs(2)); -} - -TEST(BeginEndDistanceIsTest, WorksWithNonStdList) { - const int a[5] = {1, 2, 3, 4, 5}; - Streamlike s(a, a + 5); - EXPECT_THAT(s, BeginEndDistanceIs(5)); -} - -TEST(BeginEndDistanceIsTest, CanDescribeSelf) { - Matcher > m = BeginEndDistanceIs(2); - EXPECT_EQ("distance between begin() and end() is equal to 2", Describe(m)); - EXPECT_EQ("distance between begin() and end() isn't equal to 2", - DescribeNegation(m)); -} - -TEST(BeginEndDistanceIsTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(BeginEndDistanceIs(2))); - helper.Call(MakeUniquePtrs({1, 2})); -} - -TEST(BeginEndDistanceIsTest, ExplainsResult) { - Matcher > m1 = BeginEndDistanceIs(2); - Matcher > m2 = BeginEndDistanceIs(Lt(2)); - Matcher > m3 = BeginEndDistanceIs(AnyOf(0, 3)); - Matcher > m4 = BeginEndDistanceIs(GreaterThan(1)); - vector container; - EXPECT_EQ("whose distance between begin() and end() 0 doesn't match", - Explain(m1, container)); - EXPECT_EQ("whose distance between begin() and end() 0 matches", - Explain(m2, container)); - EXPECT_EQ("whose distance between begin() and end() 0 matches", - Explain(m3, container)); - EXPECT_EQ( - "whose distance between begin() and end() 0 doesn't match, which is 1 " - "less than 1", - Explain(m4, container)); - container.push_back(0); - container.push_back(0); - EXPECT_EQ("whose distance between begin() and end() 2 matches", - Explain(m1, container)); - 
EXPECT_EQ("whose distance between begin() and end() 2 doesn't match", - Explain(m2, container)); - EXPECT_EQ("whose distance between begin() and end() 2 doesn't match", - Explain(m3, container)); - EXPECT_EQ( - "whose distance between begin() and end() 2 matches, which is 1 more " - "than 1", - Explain(m4, container)); -} - -TEST(WhenSortedTest, WorksForStreamlike) { - // Streamlike 'container' provides only minimal iterator support. - // Its iterators are tagged with input_iterator_tag. - const int a[5] = {2, 1, 4, 5, 3}; - Streamlike s(std::begin(a), std::end(a)); - EXPECT_THAT(s, WhenSorted(ElementsAre(1, 2, 3, 4, 5))); - EXPECT_THAT(s, Not(WhenSorted(ElementsAre(2, 1, 4, 5, 3)))); -} - -TEST(WhenSortedTest, WorksForVectorConstRefMatcherOnStreamlike) { - const int a[] = {2, 1, 4, 5, 3}; - Streamlike s(std::begin(a), std::end(a)); - Matcher&> vector_match = ElementsAre(1, 2, 3, 4, 5); - EXPECT_THAT(s, WhenSorted(vector_match)); - EXPECT_THAT(s, Not(WhenSorted(ElementsAre(2, 1, 4, 5, 3)))); -} - -TEST(IsSupersetOfTest, WorksForNativeArray) { - const int subset[] = {1, 4}; - const int superset[] = {1, 2, 4}; - const int disjoint[] = {1, 0, 3}; - EXPECT_THAT(subset, IsSupersetOf(subset)); - EXPECT_THAT(subset, Not(IsSupersetOf(superset))); - EXPECT_THAT(superset, IsSupersetOf(subset)); - EXPECT_THAT(subset, Not(IsSupersetOf(disjoint))); - EXPECT_THAT(disjoint, Not(IsSupersetOf(subset))); -} - -TEST(IsSupersetOfTest, WorksWithDuplicates) { - const int not_enough[] = {1, 2}; - const int enough[] = {1, 1, 2}; - const int expected[] = {1, 1}; - EXPECT_THAT(not_enough, Not(IsSupersetOf(expected))); - EXPECT_THAT(enough, IsSupersetOf(expected)); -} - -TEST(IsSupersetOfTest, WorksForEmpty) { - vector numbers; - vector expected; - EXPECT_THAT(numbers, IsSupersetOf(expected)); - expected.push_back(1); - EXPECT_THAT(numbers, Not(IsSupersetOf(expected))); - expected.clear(); - numbers.push_back(1); - numbers.push_back(2); - EXPECT_THAT(numbers, IsSupersetOf(expected)); - 
expected.push_back(1); - EXPECT_THAT(numbers, IsSupersetOf(expected)); - expected.push_back(2); - EXPECT_THAT(numbers, IsSupersetOf(expected)); - expected.push_back(3); - EXPECT_THAT(numbers, Not(IsSupersetOf(expected))); -} - -TEST(IsSupersetOfTest, WorksForStreamlike) { - const int a[5] = {1, 2, 3, 4, 5}; - Streamlike s(std::begin(a), std::end(a)); - - vector expected; - expected.push_back(1); - expected.push_back(2); - expected.push_back(5); - EXPECT_THAT(s, IsSupersetOf(expected)); - - expected.push_back(0); - EXPECT_THAT(s, Not(IsSupersetOf(expected))); -} - -TEST(IsSupersetOfTest, TakesStlContainer) { - const int actual[] = {3, 1, 2}; - - ::std::list expected; - expected.push_back(1); - expected.push_back(3); - EXPECT_THAT(actual, IsSupersetOf(expected)); - - expected.push_back(4); - EXPECT_THAT(actual, Not(IsSupersetOf(expected))); -} - -TEST(IsSupersetOfTest, Describe) { - typedef std::vector IntVec; - IntVec expected; - expected.push_back(111); - expected.push_back(222); - expected.push_back(333); - EXPECT_THAT( - Describe(IsSupersetOf(expected)), - Eq("a surjection from elements to requirements exists such that:\n" - " - an element is equal to 111\n" - " - an element is equal to 222\n" - " - an element is equal to 333")); -} - -TEST(IsSupersetOfTest, DescribeNegation) { - typedef std::vector IntVec; - IntVec expected; - expected.push_back(111); - expected.push_back(222); - expected.push_back(333); - EXPECT_THAT( - DescribeNegation(IsSupersetOf(expected)), - Eq("no surjection from elements to requirements exists such that:\n" - " - an element is equal to 111\n" - " - an element is equal to 222\n" - " - an element is equal to 333")); -} - -TEST(IsSupersetOfTest, MatchAndExplain) { - std::vector v; - v.push_back(2); - v.push_back(3); - std::vector expected; - expected.push_back(1); - expected.push_back(2); - StringMatchResultListener listener; - ASSERT_FALSE(ExplainMatchResult(IsSupersetOf(expected), v, &listener)) - << listener.str(); - 
EXPECT_THAT(listener.str(), - Eq("where the following matchers don't match any elements:\n" - "matcher #0: is equal to 1")); - - v.push_back(1); - listener.Clear(); - ASSERT_TRUE(ExplainMatchResult(IsSupersetOf(expected), v, &listener)) - << listener.str(); - EXPECT_THAT(listener.str(), Eq("where:\n" - " - element #0 is matched by matcher #1,\n" - " - element #2 is matched by matcher #0")); -} - -TEST(IsSupersetOfTest, WorksForRhsInitializerList) { - const int numbers[] = {1, 3, 6, 2, 4, 5}; - EXPECT_THAT(numbers, IsSupersetOf({1, 2})); - EXPECT_THAT(numbers, Not(IsSupersetOf({3, 0}))); -} - -TEST(IsSupersetOfTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(IsSupersetOf({Pointee(1)}))); - helper.Call(MakeUniquePtrs({1, 2})); - EXPECT_CALL(helper, Call(Not(IsSupersetOf({Pointee(1), Pointee(2)})))); - helper.Call(MakeUniquePtrs({2})); -} - -TEST(IsSubsetOfTest, WorksForNativeArray) { - const int subset[] = {1, 4}; - const int superset[] = {1, 2, 4}; - const int disjoint[] = {1, 0, 3}; - EXPECT_THAT(subset, IsSubsetOf(subset)); - EXPECT_THAT(subset, IsSubsetOf(superset)); - EXPECT_THAT(superset, Not(IsSubsetOf(subset))); - EXPECT_THAT(subset, Not(IsSubsetOf(disjoint))); - EXPECT_THAT(disjoint, Not(IsSubsetOf(subset))); -} - -TEST(IsSubsetOfTest, WorksWithDuplicates) { - const int not_enough[] = {1, 2}; - const int enough[] = {1, 1, 2}; - const int actual[] = {1, 1}; - EXPECT_THAT(actual, Not(IsSubsetOf(not_enough))); - EXPECT_THAT(actual, IsSubsetOf(enough)); -} - -TEST(IsSubsetOfTest, WorksForEmpty) { - vector numbers; - vector expected; - EXPECT_THAT(numbers, IsSubsetOf(expected)); - expected.push_back(1); - EXPECT_THAT(numbers, IsSubsetOf(expected)); - expected.clear(); - numbers.push_back(1); - numbers.push_back(2); - EXPECT_THAT(numbers, Not(IsSubsetOf(expected))); - expected.push_back(1); - EXPECT_THAT(numbers, Not(IsSubsetOf(expected))); - expected.push_back(2); - EXPECT_THAT(numbers, IsSubsetOf(expected)); - 
expected.push_back(3); - EXPECT_THAT(numbers, IsSubsetOf(expected)); -} - -TEST(IsSubsetOfTest, WorksForStreamlike) { - const int a[5] = {1, 2}; - Streamlike s(std::begin(a), std::end(a)); - - vector expected; - expected.push_back(1); - EXPECT_THAT(s, Not(IsSubsetOf(expected))); - expected.push_back(2); - expected.push_back(5); - EXPECT_THAT(s, IsSubsetOf(expected)); -} - -TEST(IsSubsetOfTest, TakesStlContainer) { - const int actual[] = {3, 1, 2}; - - ::std::list expected; - expected.push_back(1); - expected.push_back(3); - EXPECT_THAT(actual, Not(IsSubsetOf(expected))); - - expected.push_back(2); - expected.push_back(4); - EXPECT_THAT(actual, IsSubsetOf(expected)); -} - -TEST(IsSubsetOfTest, Describe) { - typedef std::vector IntVec; - IntVec expected; - expected.push_back(111); - expected.push_back(222); - expected.push_back(333); - - EXPECT_THAT( - Describe(IsSubsetOf(expected)), - Eq("an injection from elements to requirements exists such that:\n" - " - an element is equal to 111\n" - " - an element is equal to 222\n" - " - an element is equal to 333")); -} - -TEST(IsSubsetOfTest, DescribeNegation) { - typedef std::vector IntVec; - IntVec expected; - expected.push_back(111); - expected.push_back(222); - expected.push_back(333); - EXPECT_THAT( - DescribeNegation(IsSubsetOf(expected)), - Eq("no injection from elements to requirements exists such that:\n" - " - an element is equal to 111\n" - " - an element is equal to 222\n" - " - an element is equal to 333")); -} - -TEST(IsSubsetOfTest, MatchAndExplain) { - std::vector v; - v.push_back(2); - v.push_back(3); - std::vector expected; - expected.push_back(1); - expected.push_back(2); - StringMatchResultListener listener; - ASSERT_FALSE(ExplainMatchResult(IsSubsetOf(expected), v, &listener)) - << listener.str(); - EXPECT_THAT(listener.str(), - Eq("where the following elements don't match any matchers:\n" - "element #1: 3")); - - expected.push_back(3); - listener.Clear(); - 
ASSERT_TRUE(ExplainMatchResult(IsSubsetOf(expected), v, &listener)) - << listener.str(); - EXPECT_THAT(listener.str(), Eq("where:\n" - " - element #0 is matched by matcher #1,\n" - " - element #1 is matched by matcher #2")); -} - -TEST(IsSubsetOfTest, WorksForRhsInitializerList) { - const int numbers[] = {1, 2, 3}; - EXPECT_THAT(numbers, IsSubsetOf({1, 2, 3, 4})); - EXPECT_THAT(numbers, Not(IsSubsetOf({1, 2}))); -} - -TEST(IsSubsetOfTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(IsSubsetOf({Pointee(1), Pointee(2)}))); - helper.Call(MakeUniquePtrs({1})); - EXPECT_CALL(helper, Call(Not(IsSubsetOf({Pointee(1)})))); - helper.Call(MakeUniquePtrs({2})); -} - -// Tests using ElementsAre() and ElementsAreArray() with stream-like -// "containers". - -TEST(ElemensAreStreamTest, WorksForStreamlike) { - const int a[5] = {1, 2, 3, 4, 5}; - Streamlike s(std::begin(a), std::end(a)); - EXPECT_THAT(s, ElementsAre(1, 2, 3, 4, 5)); - EXPECT_THAT(s, Not(ElementsAre(2, 1, 4, 5, 3))); -} - -TEST(ElemensAreArrayStreamTest, WorksForStreamlike) { - const int a[5] = {1, 2, 3, 4, 5}; - Streamlike s(std::begin(a), std::end(a)); - - vector expected; - expected.push_back(1); - expected.push_back(2); - expected.push_back(3); - expected.push_back(4); - expected.push_back(5); - EXPECT_THAT(s, ElementsAreArray(expected)); - - expected[3] = 0; - EXPECT_THAT(s, Not(ElementsAreArray(expected))); -} - -TEST(ElementsAreTest, WorksWithUncopyable) { - Uncopyable objs[2]; - objs[0].set_value(-3); - objs[1].set_value(1); - EXPECT_THAT(objs, ElementsAre(UncopyableIs(-3), Truly(ValueIsPositive))); -} - -TEST(ElementsAreTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(ElementsAre(Pointee(1), Pointee(2)))); - helper.Call(MakeUniquePtrs({1, 2})); - - EXPECT_CALL(helper, Call(ElementsAreArray({Pointee(3), Pointee(4)}))); - helper.Call(MakeUniquePtrs({3, 4})); -} - -TEST(ElementsAreTest, TakesStlContainer) { - const int actual[] = {3, 1, 2}; - - 
::std::list expected; - expected.push_back(3); - expected.push_back(1); - expected.push_back(2); - EXPECT_THAT(actual, ElementsAreArray(expected)); - - expected.push_back(4); - EXPECT_THAT(actual, Not(ElementsAreArray(expected))); -} - -// Tests for UnorderedElementsAreArray() - -TEST(UnorderedElementsAreArrayTest, SucceedsWhenExpected) { - const int a[] = {0, 1, 2, 3, 4}; - std::vector s(std::begin(a), std::end(a)); - do { - StringMatchResultListener listener; - EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(a), - s, &listener)) << listener.str(); - } while (std::next_permutation(s.begin(), s.end())); -} - -TEST(UnorderedElementsAreArrayTest, VectorBool) { - const bool a[] = {0, 1, 0, 1, 1}; - const bool b[] = {1, 0, 1, 1, 0}; - std::vector expected(std::begin(a), std::end(a)); - std::vector actual(std::begin(b), std::end(b)); - StringMatchResultListener listener; - EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(expected), - actual, &listener)) << listener.str(); -} - -TEST(UnorderedElementsAreArrayTest, WorksForStreamlike) { - // Streamlike 'container' provides only minimal iterator support. - // Its iterators are tagged with input_iterator_tag, and it has no - // size() or empty() methods. 
- const int a[5] = {2, 1, 4, 5, 3}; - Streamlike s(std::begin(a), std::end(a)); - - ::std::vector expected; - expected.push_back(1); - expected.push_back(2); - expected.push_back(3); - expected.push_back(4); - expected.push_back(5); - EXPECT_THAT(s, UnorderedElementsAreArray(expected)); - - expected.push_back(6); - EXPECT_THAT(s, Not(UnorderedElementsAreArray(expected))); -} - -TEST(UnorderedElementsAreArrayTest, TakesStlContainer) { - const int actual[] = {3, 1, 2}; - - ::std::list expected; - expected.push_back(1); - expected.push_back(2); - expected.push_back(3); - EXPECT_THAT(actual, UnorderedElementsAreArray(expected)); - - expected.push_back(4); - EXPECT_THAT(actual, Not(UnorderedElementsAreArray(expected))); -} - - -TEST(UnorderedElementsAreArrayTest, TakesInitializerList) { - const int a[5] = {2, 1, 4, 5, 3}; - EXPECT_THAT(a, UnorderedElementsAreArray({1, 2, 3, 4, 5})); - EXPECT_THAT(a, Not(UnorderedElementsAreArray({1, 2, 3, 4, 6}))); -} - -TEST(UnorderedElementsAreArrayTest, TakesInitializerListOfCStrings) { - const std::string a[5] = {"a", "b", "c", "d", "e"}; - EXPECT_THAT(a, UnorderedElementsAreArray({"a", "b", "c", "d", "e"})); - EXPECT_THAT(a, Not(UnorderedElementsAreArray({"a", "b", "c", "d", "ef"}))); -} - -TEST(UnorderedElementsAreArrayTest, TakesInitializerListOfSameTypedMatchers) { - const int a[5] = {2, 1, 4, 5, 3}; - EXPECT_THAT(a, UnorderedElementsAreArray( - {Eq(1), Eq(2), Eq(3), Eq(4), Eq(5)})); - EXPECT_THAT(a, Not(UnorderedElementsAreArray( - {Eq(1), Eq(2), Eq(3), Eq(4), Eq(6)}))); -} - -TEST(UnorderedElementsAreArrayTest, - TakesInitializerListOfDifferentTypedMatchers) { - const int a[5] = {2, 1, 4, 5, 3}; - // The compiler cannot infer the type of the initializer list if its - // elements have different types. We must explicitly specify the - // unified element type in this case. 
- EXPECT_THAT(a, UnorderedElementsAreArray >( - {Eq(1), Ne(-2), Ge(3), Le(4), Eq(5)})); - EXPECT_THAT(a, Not(UnorderedElementsAreArray >( - {Eq(1), Ne(-2), Ge(3), Le(4), Eq(6)}))); -} - - -TEST(UnorderedElementsAreArrayTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, - Call(UnorderedElementsAreArray({Pointee(1), Pointee(2)}))); - helper.Call(MakeUniquePtrs({2, 1})); -} - -class UnorderedElementsAreTest : public testing::Test { - protected: - typedef std::vector IntVec; -}; - -TEST_F(UnorderedElementsAreTest, WorksWithUncopyable) { - Uncopyable objs[2]; - objs[0].set_value(-3); - objs[1].set_value(1); - EXPECT_THAT(objs, - UnorderedElementsAre(Truly(ValueIsPositive), UncopyableIs(-3))); -} - -TEST_F(UnorderedElementsAreTest, SucceedsWhenExpected) { - const int a[] = {1, 2, 3}; - std::vector s(std::begin(a), std::end(a)); - do { - StringMatchResultListener listener; - EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAre(1, 2, 3), - s, &listener)) << listener.str(); - } while (std::next_permutation(s.begin(), s.end())); -} - -TEST_F(UnorderedElementsAreTest, FailsWhenAnElementMatchesNoMatcher) { - const int a[] = {1, 2, 3}; - std::vector s(std::begin(a), std::end(a)); - std::vector > mv; - mv.push_back(1); - mv.push_back(2); - mv.push_back(2); - // The element with value '3' matches nothing: fail fast. - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAreArray(mv), - s, &listener)) << listener.str(); -} - -TEST_F(UnorderedElementsAreTest, WorksForStreamlike) { - // Streamlike 'container' provides only minimal iterator support. - // Its iterators are tagged with input_iterator_tag, and it has no - // size() or empty() methods. 
- const int a[5] = {2, 1, 4, 5, 3}; - Streamlike s(std::begin(a), std::end(a)); - - EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); - EXPECT_THAT(s, Not(UnorderedElementsAre(2, 2, 3, 4, 5))); -} - -TEST_F(UnorderedElementsAreTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(UnorderedElementsAre(Pointee(1), Pointee(2)))); - helper.Call(MakeUniquePtrs({2, 1})); -} - -// One naive implementation of the matcher runs in O(N!) time, which is too -// slow for many real-world inputs. This test shows that our matcher can match -// 100 inputs very quickly (a few milliseconds). An O(100!) is 10^158 -// iterations and obviously effectively incomputable. -// [ RUN ] UnorderedElementsAreTest.Performance -// [ OK ] UnorderedElementsAreTest.Performance (4 ms) -TEST_F(UnorderedElementsAreTest, Performance) { - std::vector s; - std::vector > mv; - for (int i = 0; i < 100; ++i) { - s.push_back(i); - mv.push_back(_); - } - mv[50] = Eq(0); - StringMatchResultListener listener; - EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(mv), - s, &listener)) << listener.str(); -} - -// Another variant of 'Performance' with similar expectations. 
-// [ RUN ] UnorderedElementsAreTest.PerformanceHalfStrict -// [ OK ] UnorderedElementsAreTest.PerformanceHalfStrict (4 ms) -TEST_F(UnorderedElementsAreTest, PerformanceHalfStrict) { - std::vector s; - std::vector > mv; - for (int i = 0; i < 100; ++i) { - s.push_back(i); - if (i & 1) { - mv.push_back(_); - } else { - mv.push_back(i); - } - } - StringMatchResultListener listener; - EXPECT_TRUE(ExplainMatchResult(UnorderedElementsAreArray(mv), - s, &listener)) << listener.str(); -} - -TEST_F(UnorderedElementsAreTest, FailMessageCountWrong) { - std::vector v; - v.push_back(4); - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2, 3), - v, &listener)) << listener.str(); - EXPECT_THAT(listener.str(), Eq("which has 1 element")); -} - -TEST_F(UnorderedElementsAreTest, FailMessageCountWrongZero) { - std::vector v; - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2, 3), - v, &listener)) << listener.str(); - EXPECT_THAT(listener.str(), Eq("")); -} - -TEST_F(UnorderedElementsAreTest, FailMessageUnmatchedMatchers) { - std::vector v; - v.push_back(1); - v.push_back(1); - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2), - v, &listener)) << listener.str(); - EXPECT_THAT( - listener.str(), - Eq("where the following matchers don't match any elements:\n" - "matcher #1: is equal to 2")); -} - -TEST_F(UnorderedElementsAreTest, FailMessageUnmatchedElements) { - std::vector v; - v.push_back(1); - v.push_back(2); - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 1), - v, &listener)) << listener.str(); - EXPECT_THAT( - listener.str(), - Eq("where the following elements don't match any matchers:\n" - "element #1: 2")); -} - -TEST_F(UnorderedElementsAreTest, FailMessageUnmatchedMatcherAndElement) { - std::vector v; - v.push_back(2); - v.push_back(3); - StringMatchResultListener listener; - 
EXPECT_FALSE(ExplainMatchResult(UnorderedElementsAre(1, 2), - v, &listener)) << listener.str(); - EXPECT_THAT( - listener.str(), - Eq("where" - " the following matchers don't match any elements:\n" - "matcher #0: is equal to 1\n" - "and" - " where" - " the following elements don't match any matchers:\n" - "element #1: 3")); -} - -// Test helper for formatting element, matcher index pairs in expectations. -static std::string EMString(int element, int matcher) { - stringstream ss; - ss << "(element #" << element << ", matcher #" << matcher << ")"; - return ss.str(); -} - -TEST_F(UnorderedElementsAreTest, FailMessageImperfectMatchOnly) { - // A situation where all elements and matchers have a match - // associated with them, but the max matching is not perfect. - std::vector v; - v.push_back("a"); - v.push_back("b"); - v.push_back("c"); - StringMatchResultListener listener; - EXPECT_FALSE(ExplainMatchResult( - UnorderedElementsAre("a", "a", AnyOf("b", "c")), v, &listener)) - << listener.str(); - - std::string prefix = - "where no permutation of the elements can satisfy all matchers, " - "and the closest match is 2 of 3 matchers with the " - "pairings:\n"; - - // We have to be a bit loose here, because there are 4 valid max matches. 
- EXPECT_THAT( - listener.str(), - AnyOf(prefix + "{\n " + EMString(0, 0) + - ",\n " + EMString(1, 2) + "\n}", - prefix + "{\n " + EMString(0, 1) + - ",\n " + EMString(1, 2) + "\n}", - prefix + "{\n " + EMString(0, 0) + - ",\n " + EMString(2, 2) + "\n}", - prefix + "{\n " + EMString(0, 1) + - ",\n " + EMString(2, 2) + "\n}")); -} - -TEST_F(UnorderedElementsAreTest, Describe) { - EXPECT_THAT(Describe(UnorderedElementsAre()), - Eq("is empty")); - EXPECT_THAT( - Describe(UnorderedElementsAre(345)), - Eq("has 1 element and that element is equal to 345")); - EXPECT_THAT( - Describe(UnorderedElementsAre(111, 222, 333)), - Eq("has 3 elements and there exists some permutation " - "of elements such that:\n" - " - element #0 is equal to 111, and\n" - " - element #1 is equal to 222, and\n" - " - element #2 is equal to 333")); -} - -TEST_F(UnorderedElementsAreTest, DescribeNegation) { - EXPECT_THAT(DescribeNegation(UnorderedElementsAre()), - Eq("isn't empty")); - EXPECT_THAT( - DescribeNegation(UnorderedElementsAre(345)), - Eq("doesn't have 1 element, or has 1 element that isn't equal to 345")); - EXPECT_THAT( - DescribeNegation(UnorderedElementsAre(123, 234, 345)), - Eq("doesn't have 3 elements, or there exists no permutation " - "of elements such that:\n" - " - element #0 is equal to 123, and\n" - " - element #1 is equal to 234, and\n" - " - element #2 is equal to 345")); -} - -namespace { - -// Used as a check on the more complex max flow method used in the -// real testing::internal::FindMaxBipartiteMatching. This method is -// compatible but runs in worst-case factorial time, so we only -// use it in testing for small problem sizes. -template -class BacktrackingMaxBPMState { - public: - // Does not take ownership of 'g'. 
- explicit BacktrackingMaxBPMState(const Graph* g) : graph_(g) { } - - ElementMatcherPairs Compute() { - if (graph_->LhsSize() == 0 || graph_->RhsSize() == 0) { - return best_so_far_; - } - lhs_used_.assign(graph_->LhsSize(), kUnused); - rhs_used_.assign(graph_->RhsSize(), kUnused); - for (size_t irhs = 0; irhs < graph_->RhsSize(); ++irhs) { - matches_.clear(); - RecurseInto(irhs); - if (best_so_far_.size() == graph_->RhsSize()) - break; - } - return best_so_far_; - } - - private: - static const size_t kUnused = static_cast(-1); - - void PushMatch(size_t lhs, size_t rhs) { - matches_.push_back(ElementMatcherPair(lhs, rhs)); - lhs_used_[lhs] = rhs; - rhs_used_[rhs] = lhs; - if (matches_.size() > best_so_far_.size()) { - best_so_far_ = matches_; - } - } - - void PopMatch() { - const ElementMatcherPair& back = matches_.back(); - lhs_used_[back.first] = kUnused; - rhs_used_[back.second] = kUnused; - matches_.pop_back(); - } - - bool RecurseInto(size_t irhs) { - if (rhs_used_[irhs] != kUnused) { - return true; - } - for (size_t ilhs = 0; ilhs < graph_->LhsSize(); ++ilhs) { - if (lhs_used_[ilhs] != kUnused) { - continue; - } - if (!graph_->HasEdge(ilhs, irhs)) { - continue; - } - PushMatch(ilhs, irhs); - if (best_so_far_.size() == graph_->RhsSize()) { - return false; - } - for (size_t mi = irhs + 1; mi < graph_->RhsSize(); ++mi) { - if (!RecurseInto(mi)) return false; - } - PopMatch(); - } - return true; - } - - const Graph* graph_; // not owned - std::vector lhs_used_; - std::vector rhs_used_; - ElementMatcherPairs matches_; - ElementMatcherPairs best_so_far_; -}; - -template -const size_t BacktrackingMaxBPMState::kUnused; - -} // namespace - -// Implement a simple backtracking algorithm to determine if it is possible -// to find one element per matcher, without reusing elements. 
-template -ElementMatcherPairs -FindBacktrackingMaxBPM(const Graph& g) { - return BacktrackingMaxBPMState(&g).Compute(); -} - -class BacktrackingBPMTest : public ::testing::Test { }; - -// Tests the MaxBipartiteMatching algorithm with square matrices. -// The single int param is the # of nodes on each of the left and right sides. -class BipartiteTest : public ::testing::TestWithParam {}; - -// Verify all match graphs up to some moderate number of edges. -TEST_P(BipartiteTest, Exhaustive) { - size_t nodes = GetParam(); - MatchMatrix graph(nodes, nodes); - do { - ElementMatcherPairs matches = - internal::FindMaxBipartiteMatching(graph); - EXPECT_EQ(FindBacktrackingMaxBPM(graph).size(), matches.size()) - << "graph: " << graph.DebugString(); - // Check that all elements of matches are in the graph. - // Check that elements of first and second are unique. - std::vector seen_element(graph.LhsSize()); - std::vector seen_matcher(graph.RhsSize()); - SCOPED_TRACE(PrintToString(matches)); - for (size_t i = 0; i < matches.size(); ++i) { - size_t ilhs = matches[i].first; - size_t irhs = matches[i].second; - EXPECT_TRUE(graph.HasEdge(ilhs, irhs)); - EXPECT_FALSE(seen_element[ilhs]); - EXPECT_FALSE(seen_matcher[irhs]); - seen_element[ilhs] = true; - seen_matcher[irhs] = true; - } - } while (graph.NextGraph()); -} - -INSTANTIATE_TEST_SUITE_P(AllGraphs, BipartiteTest, - ::testing::Range(size_t{0}, size_t{5})); - -// Parameterized by a pair interpreted as (LhsSize, RhsSize). -class BipartiteNonSquareTest - : public ::testing::TestWithParam > { -}; - -TEST_F(BipartiteNonSquareTest, SimpleBacktracking) { - // ....... 
- // 0:-----\ : - // 1:---\ | : - // 2:---\ | : - // 3:-\ | | : - // :.......: - // 0 1 2 - MatchMatrix g(4, 3); - constexpr std::array, 4> kEdges = { - {{{0, 2}}, {{1, 1}}, {{2, 1}}, {{3, 0}}}}; - for (size_t i = 0; i < kEdges.size(); ++i) { - g.SetEdge(kEdges[i][0], kEdges[i][1], true); - } - EXPECT_THAT(FindBacktrackingMaxBPM(g), - ElementsAre(Pair(3, 0), - Pair(AnyOf(1, 2), 1), - Pair(0, 2))) << g.DebugString(); -} - -// Verify a few nonsquare matrices. -TEST_P(BipartiteNonSquareTest, Exhaustive) { - size_t nlhs = GetParam().first; - size_t nrhs = GetParam().second; - MatchMatrix graph(nlhs, nrhs); - do { - EXPECT_EQ(FindBacktrackingMaxBPM(graph).size(), - internal::FindMaxBipartiteMatching(graph).size()) - << "graph: " << graph.DebugString() - << "\nbacktracking: " - << PrintToString(FindBacktrackingMaxBPM(graph)) - << "\nmax flow: " - << PrintToString(internal::FindMaxBipartiteMatching(graph)); - } while (graph.NextGraph()); -} - -INSTANTIATE_TEST_SUITE_P(AllGraphs, BipartiteNonSquareTest, - testing::Values( - std::make_pair(1, 2), - std::make_pair(2, 1), - std::make_pair(3, 2), - std::make_pair(2, 3), - std::make_pair(4, 1), - std::make_pair(1, 4), - std::make_pair(4, 3), - std::make_pair(3, 4))); - -class BipartiteRandomTest - : public ::testing::TestWithParam > { -}; - -// Verifies a large sample of larger graphs. 
-TEST_P(BipartiteRandomTest, LargerNets) { - int nodes = GetParam().first; - int iters = GetParam().second; - MatchMatrix graph(static_cast(nodes), static_cast(nodes)); - - auto seed = static_cast(GTEST_FLAG_GET(random_seed)); - if (seed == 0) { - seed = static_cast(time(nullptr)); - } - - for (; iters > 0; --iters, ++seed) { - srand(static_cast(seed)); - graph.Randomize(); - EXPECT_EQ(FindBacktrackingMaxBPM(graph).size(), - internal::FindMaxBipartiteMatching(graph).size()) - << " graph: " << graph.DebugString() - << "\nTo reproduce the failure, rerun the test with the flag" - " --" << GTEST_FLAG_PREFIX_ << "random_seed=" << seed; - } -} - -// Test argument is a std::pair representing (nodes, iters). -INSTANTIATE_TEST_SUITE_P(Samples, BipartiteRandomTest, - testing::Values( - std::make_pair(5, 10000), - std::make_pair(6, 5000), - std::make_pair(7, 2000), - std::make_pair(8, 500), - std::make_pair(9, 100))); - -// Tests IsReadableTypeName(). - -TEST(IsReadableTypeNameTest, ReturnsTrueForShortNames) { - EXPECT_TRUE(IsReadableTypeName("int")); - EXPECT_TRUE(IsReadableTypeName("const unsigned char*")); - EXPECT_TRUE(IsReadableTypeName("MyMap")); - EXPECT_TRUE(IsReadableTypeName("void (*)(int, bool)")); -} - -TEST(IsReadableTypeNameTest, ReturnsTrueForLongNonTemplateNonFunctionNames) { - EXPECT_TRUE(IsReadableTypeName("my_long_namespace::MyClassName")); - EXPECT_TRUE(IsReadableTypeName("int [5][6][7][8][9][10][11]")); - EXPECT_TRUE(IsReadableTypeName("my_namespace::MyOuterClass::MyInnerClass")); -} - -TEST(IsReadableTypeNameTest, ReturnsFalseForLongTemplateNames) { - EXPECT_FALSE( - IsReadableTypeName("basic_string >")); - EXPECT_FALSE(IsReadableTypeName("std::vector >")); -} - -TEST(IsReadableTypeNameTest, ReturnsFalseForLongFunctionTypeNames) { - EXPECT_FALSE(IsReadableTypeName("void (&)(int, bool, char, float)")); -} - -// Tests FormatMatcherDescription(). 
- -TEST(FormatMatcherDescriptionTest, WorksForEmptyDescription) { - EXPECT_EQ("is even", - FormatMatcherDescription(false, "IsEven", Strings())); - EXPECT_EQ("not (is even)", - FormatMatcherDescription(true, "IsEven", Strings())); - - const char* params[] = {"5"}; - EXPECT_EQ("equals 5", - FormatMatcherDescription(false, "Equals", - Strings(params, params + 1))); - - const char* params2[] = {"5", "8"}; - EXPECT_EQ("is in range (5, 8)", - FormatMatcherDescription(false, "IsInRange", - Strings(params2, params2 + 2))); -} - -// Tests PolymorphicMatcher::mutable_impl(). -TEST(PolymorphicMatcherTest, CanAccessMutableImpl) { - PolymorphicMatcher m(DivisibleByImpl(42)); - DivisibleByImpl& impl = m.mutable_impl(); - EXPECT_EQ(42, impl.divider()); - - impl.set_divider(0); - EXPECT_EQ(0, m.mutable_impl().divider()); -} - -// Tests PolymorphicMatcher::impl(). -TEST(PolymorphicMatcherTest, CanAccessImpl) { - const PolymorphicMatcher m(DivisibleByImpl(42)); - const DivisibleByImpl& impl = m.impl(); - EXPECT_EQ(42, impl.divider()); -} - -TEST(MatcherTupleTest, ExplainsMatchFailure) { - stringstream ss1; - ExplainMatchFailureTupleTo( - std::make_tuple(Matcher(Eq('a')), GreaterThan(5)), - std::make_tuple('a', 10), &ss1); - EXPECT_EQ("", ss1.str()); // Successful match. - - stringstream ss2; - ExplainMatchFailureTupleTo( - std::make_tuple(GreaterThan(5), Matcher(Eq('a'))), - std::make_tuple(2, 'b'), &ss2); - EXPECT_EQ(" Expected arg #0: is > 5\n" - " Actual: 2, which is 3 less than 5\n" - " Expected arg #1: is equal to 'a' (97, 0x61)\n" - " Actual: 'b' (98, 0x62)\n", - ss2.str()); // Failed match where both arguments need explanation. - - stringstream ss3; - ExplainMatchFailureTupleTo( - std::make_tuple(GreaterThan(5), Matcher(Eq('a'))), - std::make_tuple(2, 'a'), &ss3); - EXPECT_EQ(" Expected arg #0: is > 5\n" - " Actual: 2, which is 3 less than 5\n", - ss3.str()); // Failed match where only one argument needs - // explanation. -} - -// Tests Each(). 
- -TEST(EachTest, ExplainsMatchResultCorrectly) { - set a; // empty - - Matcher > m = Each(2); - EXPECT_EQ("", Explain(m, a)); - - Matcher n = Each(1); // NOLINT - - const int b[1] = {1}; - EXPECT_EQ("", Explain(n, b)); - - n = Each(3); - EXPECT_EQ("whose element #0 doesn't match", Explain(n, b)); - - a.insert(1); - a.insert(2); - a.insert(3); - m = Each(GreaterThan(0)); - EXPECT_EQ("", Explain(m, a)); - - m = Each(GreaterThan(10)); - EXPECT_EQ("whose element #0 doesn't match, which is 9 less than 10", - Explain(m, a)); -} - -TEST(EachTest, DescribesItselfCorrectly) { - Matcher > m = Each(1); - EXPECT_EQ("only contains elements that is equal to 1", Describe(m)); - - Matcher > m2 = Not(m); - EXPECT_EQ("contains some element that isn't equal to 1", Describe(m2)); -} - -TEST(EachTest, MatchesVectorWhenAllElementsMatch) { - vector some_vector; - EXPECT_THAT(some_vector, Each(1)); - some_vector.push_back(3); - EXPECT_THAT(some_vector, Not(Each(1))); - EXPECT_THAT(some_vector, Each(3)); - some_vector.push_back(1); - some_vector.push_back(2); - EXPECT_THAT(some_vector, Not(Each(3))); - EXPECT_THAT(some_vector, Each(Lt(3.5))); - - vector another_vector; - another_vector.push_back("fee"); - EXPECT_THAT(another_vector, Each(std::string("fee"))); - another_vector.push_back("fie"); - another_vector.push_back("foe"); - another_vector.push_back("fum"); - EXPECT_THAT(another_vector, Not(Each(std::string("fee")))); -} - -TEST(EachTest, MatchesMapWhenAllElementsMatch) { - map my_map; - const char* bar = "a string"; - my_map[bar] = 2; - EXPECT_THAT(my_map, Each(make_pair(bar, 2))); - - map another_map; - EXPECT_THAT(another_map, Each(make_pair(std::string("fee"), 1))); - another_map["fee"] = 1; - EXPECT_THAT(another_map, Each(make_pair(std::string("fee"), 1))); - another_map["fie"] = 2; - another_map["foe"] = 3; - another_map["fum"] = 4; - EXPECT_THAT(another_map, Not(Each(make_pair(std::string("fee"), 1)))); - EXPECT_THAT(another_map, Not(Each(make_pair(std::string("fum"), 1)))); - 
EXPECT_THAT(another_map, Each(Pair(_, Gt(0)))); -} - -TEST(EachTest, AcceptsMatcher) { - const int a[] = {1, 2, 3}; - EXPECT_THAT(a, Each(Gt(0))); - EXPECT_THAT(a, Not(Each(Gt(1)))); -} - -TEST(EachTest, WorksForNativeArrayAsTuple) { - const int a[] = {1, 2}; - const int* const pointer = a; - EXPECT_THAT(std::make_tuple(pointer, 2), Each(Gt(0))); - EXPECT_THAT(std::make_tuple(pointer, 2), Not(Each(Gt(1)))); -} - -TEST(EachTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(Each(Pointee(Gt(0))))); - helper.Call(MakeUniquePtrs({1, 2})); -} - -// For testing Pointwise(). -class IsHalfOfMatcher { - public: - template - bool MatchAndExplain(const std::tuple& a_pair, - MatchResultListener* listener) const { - if (std::get<0>(a_pair) == std::get<1>(a_pair) / 2) { - *listener << "where the second is " << std::get<1>(a_pair); - return true; - } else { - *listener << "where the second/2 is " << std::get<1>(a_pair) / 2; - return false; - } - } - - void DescribeTo(ostream* os) const { - *os << "are a pair where the first is half of the second"; - } - - void DescribeNegationTo(ostream* os) const { - *os << "are a pair where the first isn't half of the second"; - } -}; - -PolymorphicMatcher IsHalfOf() { - return MakePolymorphicMatcher(IsHalfOfMatcher()); -} - -TEST(PointwiseTest, DescribesSelf) { - vector rhs; - rhs.push_back(1); - rhs.push_back(2); - rhs.push_back(3); - const Matcher&> m = Pointwise(IsHalfOf(), rhs); - EXPECT_EQ("contains 3 values, where each value and its corresponding value " - "in { 1, 2, 3 } are a pair where the first is half of the second", - Describe(m)); - EXPECT_EQ("doesn't contain exactly 3 values, or contains a value x at some " - "index i where x and the i-th value of { 1, 2, 3 } are a pair " - "where the first isn't half of the second", - DescribeNegation(m)); -} - -TEST(PointwiseTest, MakesCopyOfRhs) { - list rhs; - rhs.push_back(2); - rhs.push_back(4); - - int lhs[] = {1, 2}; - const Matcher m = Pointwise(IsHalfOf(), 
rhs); - EXPECT_THAT(lhs, m); - - // Changing rhs now shouldn't affect m, which made a copy of rhs. - rhs.push_back(6); - EXPECT_THAT(lhs, m); -} - -TEST(PointwiseTest, WorksForLhsNativeArray) { - const int lhs[] = {1, 2, 3}; - vector rhs; - rhs.push_back(2); - rhs.push_back(4); - rhs.push_back(6); - EXPECT_THAT(lhs, Pointwise(Lt(), rhs)); - EXPECT_THAT(lhs, Not(Pointwise(Gt(), rhs))); -} - -TEST(PointwiseTest, WorksForRhsNativeArray) { - const int rhs[] = {1, 2, 3}; - vector lhs; - lhs.push_back(2); - lhs.push_back(4); - lhs.push_back(6); - EXPECT_THAT(lhs, Pointwise(Gt(), rhs)); - EXPECT_THAT(lhs, Not(Pointwise(Lt(), rhs))); -} - -// Test is effective only with sanitizers. -TEST(PointwiseTest, WorksForVectorOfBool) { - vector rhs(3, false); - rhs[1] = true; - vector lhs = rhs; - EXPECT_THAT(lhs, Pointwise(Eq(), rhs)); - rhs[0] = true; - EXPECT_THAT(lhs, Not(Pointwise(Eq(), rhs))); -} - - -TEST(PointwiseTest, WorksForRhsInitializerList) { - const vector lhs{2, 4, 6}; - EXPECT_THAT(lhs, Pointwise(Gt(), {1, 2, 3})); - EXPECT_THAT(lhs, Not(Pointwise(Lt(), {3, 3, 7}))); -} - - -TEST(PointwiseTest, RejectsWrongSize) { - const double lhs[2] = {1, 2}; - const int rhs[1] = {0}; - EXPECT_THAT(lhs, Not(Pointwise(Gt(), rhs))); - EXPECT_EQ("which contains 2 values", - Explain(Pointwise(Gt(), rhs), lhs)); - - const int rhs2[3] = {0, 1, 2}; - EXPECT_THAT(lhs, Not(Pointwise(Gt(), rhs2))); -} - -TEST(PointwiseTest, RejectsWrongContent) { - const double lhs[3] = {1, 2, 3}; - const int rhs[3] = {2, 6, 4}; - EXPECT_THAT(lhs, Not(Pointwise(IsHalfOf(), rhs))); - EXPECT_EQ("where the value pair (2, 6) at index #1 don't match, " - "where the second/2 is 3", - Explain(Pointwise(IsHalfOf(), rhs), lhs)); -} - -TEST(PointwiseTest, AcceptsCorrectContent) { - const double lhs[3] = {1, 2, 3}; - const int rhs[3] = {2, 4, 6}; - EXPECT_THAT(lhs, Pointwise(IsHalfOf(), rhs)); - EXPECT_EQ("", Explain(Pointwise(IsHalfOf(), rhs), lhs)); -} - -TEST(PointwiseTest, AllowsMonomorphicInnerMatcher) { - const 
double lhs[3] = {1, 2, 3}; - const int rhs[3] = {2, 4, 6}; - const Matcher> m1 = IsHalfOf(); - EXPECT_THAT(lhs, Pointwise(m1, rhs)); - EXPECT_EQ("", Explain(Pointwise(m1, rhs), lhs)); - - // This type works as a std::tuple can be - // implicitly cast to std::tuple. - const Matcher> m2 = IsHalfOf(); - EXPECT_THAT(lhs, Pointwise(m2, rhs)); - EXPECT_EQ("", Explain(Pointwise(m2, rhs), lhs)); -} - -MATCHER(PointeeEquals, "Points to an equal value") { - return ExplainMatchResult(::testing::Pointee(::testing::get<1>(arg)), - ::testing::get<0>(arg), result_listener); -} - -TEST(PointwiseTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(Pointwise(PointeeEquals(), std::vector{1, 2}))); - helper.Call(MakeUniquePtrs({1, 2})); -} - -TEST(UnorderedPointwiseTest, DescribesSelf) { - vector rhs; - rhs.push_back(1); - rhs.push_back(2); - rhs.push_back(3); - const Matcher&> m = UnorderedPointwise(IsHalfOf(), rhs); - EXPECT_EQ( - "has 3 elements and there exists some permutation of elements such " - "that:\n" - " - element #0 and 1 are a pair where the first is half of the second, " - "and\n" - " - element #1 and 2 are a pair where the first is half of the second, " - "and\n" - " - element #2 and 3 are a pair where the first is half of the second", - Describe(m)); - EXPECT_EQ( - "doesn't have 3 elements, or there exists no permutation of elements " - "such that:\n" - " - element #0 and 1 are a pair where the first is half of the second, " - "and\n" - " - element #1 and 2 are a pair where the first is half of the second, " - "and\n" - " - element #2 and 3 are a pair where the first is half of the second", - DescribeNegation(m)); -} - -TEST(UnorderedPointwiseTest, MakesCopyOfRhs) { - list rhs; - rhs.push_back(2); - rhs.push_back(4); - - int lhs[] = {2, 1}; - const Matcher m = UnorderedPointwise(IsHalfOf(), rhs); - EXPECT_THAT(lhs, m); - - // Changing rhs now shouldn't affect m, which made a copy of rhs. 
- rhs.push_back(6); - EXPECT_THAT(lhs, m); -} - -TEST(UnorderedPointwiseTest, WorksForLhsNativeArray) { - const int lhs[] = {1, 2, 3}; - vector rhs; - rhs.push_back(4); - rhs.push_back(6); - rhs.push_back(2); - EXPECT_THAT(lhs, UnorderedPointwise(Lt(), rhs)); - EXPECT_THAT(lhs, Not(UnorderedPointwise(Gt(), rhs))); -} - -TEST(UnorderedPointwiseTest, WorksForRhsNativeArray) { - const int rhs[] = {1, 2, 3}; - vector lhs; - lhs.push_back(4); - lhs.push_back(2); - lhs.push_back(6); - EXPECT_THAT(lhs, UnorderedPointwise(Gt(), rhs)); - EXPECT_THAT(lhs, Not(UnorderedPointwise(Lt(), rhs))); -} - - -TEST(UnorderedPointwiseTest, WorksForRhsInitializerList) { - const vector lhs{2, 4, 6}; - EXPECT_THAT(lhs, UnorderedPointwise(Gt(), {5, 1, 3})); - EXPECT_THAT(lhs, Not(UnorderedPointwise(Lt(), {1, 1, 7}))); -} - - -TEST(UnorderedPointwiseTest, RejectsWrongSize) { - const double lhs[2] = {1, 2}; - const int rhs[1] = {0}; - EXPECT_THAT(lhs, Not(UnorderedPointwise(Gt(), rhs))); - EXPECT_EQ("which has 2 elements", - Explain(UnorderedPointwise(Gt(), rhs), lhs)); - - const int rhs2[3] = {0, 1, 2}; - EXPECT_THAT(lhs, Not(UnorderedPointwise(Gt(), rhs2))); -} - -TEST(UnorderedPointwiseTest, RejectsWrongContent) { - const double lhs[3] = {1, 2, 3}; - const int rhs[3] = {2, 6, 6}; - EXPECT_THAT(lhs, Not(UnorderedPointwise(IsHalfOf(), rhs))); - EXPECT_EQ("where the following elements don't match any matchers:\n" - "element #1: 2", - Explain(UnorderedPointwise(IsHalfOf(), rhs), lhs)); -} - -TEST(UnorderedPointwiseTest, AcceptsCorrectContentInSameOrder) { - const double lhs[3] = {1, 2, 3}; - const int rhs[3] = {2, 4, 6}; - EXPECT_THAT(lhs, UnorderedPointwise(IsHalfOf(), rhs)); -} - -TEST(UnorderedPointwiseTest, AcceptsCorrectContentInDifferentOrder) { - const double lhs[3] = {1, 2, 3}; - const int rhs[3] = {6, 4, 2}; - EXPECT_THAT(lhs, UnorderedPointwise(IsHalfOf(), rhs)); -} - -TEST(UnorderedPointwiseTest, AllowsMonomorphicInnerMatcher) { - const double lhs[3] = {1, 2, 3}; - const int rhs[3] 
= {4, 6, 2}; - const Matcher> m1 = IsHalfOf(); - EXPECT_THAT(lhs, UnorderedPointwise(m1, rhs)); - - // This type works as a std::tuple can be - // implicitly cast to std::tuple. - const Matcher> m2 = IsHalfOf(); - EXPECT_THAT(lhs, UnorderedPointwise(m2, rhs)); -} - -TEST(UnorderedPointwiseTest, WorksWithMoveOnly) { - ContainerHelper helper; - EXPECT_CALL(helper, Call(UnorderedPointwise(PointeeEquals(), - std::vector{1, 2}))); - helper.Call(MakeUniquePtrs({2, 1})); -} - -// Sample optional type implementation with minimal requirements for use with -// Optional matcher. -template -class SampleOptional { - public: - using value_type = T; - explicit SampleOptional(T value) - : value_(std::move(value)), has_value_(true) {} - SampleOptional() : value_(), has_value_(false) {} - operator bool() const { return has_value_; } - const T& operator*() const { return value_; } - - private: - T value_; - bool has_value_; -}; - -TEST(OptionalTest, DescribesSelf) { - const Matcher> m = Optional(Eq(1)); - EXPECT_EQ("value is equal to 1", Describe(m)); -} - -TEST(OptionalTest, ExplainsSelf) { - const Matcher> m = Optional(Eq(1)); - EXPECT_EQ("whose value 1 matches", Explain(m, SampleOptional(1))); - EXPECT_EQ("whose value 2 doesn't match", Explain(m, SampleOptional(2))); -} - -TEST(OptionalTest, MatchesNonEmptyOptional) { - const Matcher> m1 = Optional(1); - const Matcher> m2 = Optional(Eq(2)); - const Matcher> m3 = Optional(Lt(3)); - SampleOptional opt(1); - EXPECT_TRUE(m1.Matches(opt)); - EXPECT_FALSE(m2.Matches(opt)); - EXPECT_TRUE(m3.Matches(opt)); -} - -TEST(OptionalTest, DoesNotMatchNullopt) { - const Matcher> m = Optional(1); - SampleOptional empty; - EXPECT_FALSE(m.Matches(empty)); -} - -TEST(OptionalTest, WorksWithMoveOnly) { - Matcher>> m = Optional(Eq(nullptr)); - EXPECT_TRUE(m.Matches(SampleOptional>(nullptr))); -} - -class SampleVariantIntString { - public: - SampleVariantIntString(int i) : i_(i), has_int_(true) {} - SampleVariantIntString(const std::string& s) : s_(s), 
has_int_(false) {} - - template - friend bool holds_alternative(const SampleVariantIntString& value) { - return value.has_int_ == std::is_same::value; - } - - template - friend const T& get(const SampleVariantIntString& value) { - return value.get_impl(static_cast(nullptr)); - } - - private: - const int& get_impl(int*) const { return i_; } - const std::string& get_impl(std::string*) const { return s_; } - - int i_; - std::string s_; - bool has_int_; -}; - -TEST(VariantTest, DescribesSelf) { - const Matcher m = VariantWith(Eq(1)); - EXPECT_THAT(Describe(m), ContainsRegex("is a variant<> with value of type " - "'.*' and the value is equal to 1")); -} - -TEST(VariantTest, ExplainsSelf) { - const Matcher m = VariantWith(Eq(1)); - EXPECT_THAT(Explain(m, SampleVariantIntString(1)), - ContainsRegex("whose value 1")); - EXPECT_THAT(Explain(m, SampleVariantIntString("A")), - HasSubstr("whose value is not of type '")); - EXPECT_THAT(Explain(m, SampleVariantIntString(2)), - "whose value 2 doesn't match"); -} - -TEST(VariantTest, FullMatch) { - Matcher m = VariantWith(Eq(1)); - EXPECT_TRUE(m.Matches(SampleVariantIntString(1))); - - m = VariantWith(Eq("1")); - EXPECT_TRUE(m.Matches(SampleVariantIntString("1"))); -} - -TEST(VariantTest, TypeDoesNotMatch) { - Matcher m = VariantWith(Eq(1)); - EXPECT_FALSE(m.Matches(SampleVariantIntString("1"))); - - m = VariantWith(Eq("1")); - EXPECT_FALSE(m.Matches(SampleVariantIntString(1))); -} - -TEST(VariantTest, InnerDoesNotMatch) { - Matcher m = VariantWith(Eq(1)); - EXPECT_FALSE(m.Matches(SampleVariantIntString(2))); - - m = VariantWith(Eq("1")); - EXPECT_FALSE(m.Matches(SampleVariantIntString("2"))); -} - -class SampleAnyType { - public: - explicit SampleAnyType(int i) : index_(0), i_(i) {} - explicit SampleAnyType(const std::string& s) : index_(1), s_(s) {} - - template - friend const T* any_cast(const SampleAnyType* any) { - return any->get_impl(static_cast(nullptr)); - } - - private: - int index_; - int i_; - std::string s_; - - const 
int* get_impl(int*) const { return index_ == 0 ? &i_ : nullptr; } - const std::string* get_impl(std::string*) const { - return index_ == 1 ? &s_ : nullptr; - } -}; - -TEST(AnyWithTest, FullMatch) { - Matcher m = AnyWith(Eq(1)); - EXPECT_TRUE(m.Matches(SampleAnyType(1))); -} - -TEST(AnyWithTest, TestBadCastType) { - Matcher m = AnyWith(Eq("fail")); - EXPECT_FALSE(m.Matches(SampleAnyType(1))); -} - -TEST(AnyWithTest, TestUseInContainers) { - std::vector a; - a.emplace_back(1); - a.emplace_back(2); - a.emplace_back(3); - EXPECT_THAT( - a, ElementsAreArray({AnyWith(1), AnyWith(2), AnyWith(3)})); - - std::vector b; - b.emplace_back("hello"); - b.emplace_back("merhaba"); - b.emplace_back("salut"); - EXPECT_THAT(b, ElementsAreArray({AnyWith("hello"), - AnyWith("merhaba"), - AnyWith("salut")})); -} -TEST(AnyWithTest, TestCompare) { - EXPECT_THAT(SampleAnyType(1), AnyWith(Gt(0))); -} - -TEST(AnyWithTest, DescribesSelf) { - const Matcher m = AnyWith(Eq(1)); - EXPECT_THAT(Describe(m), ContainsRegex("is an 'any' type with value of type " - "'.*' and the value is equal to 1")); -} - -TEST(AnyWithTest, ExplainsSelf) { - const Matcher m = AnyWith(Eq(1)); - - EXPECT_THAT(Explain(m, SampleAnyType(1)), ContainsRegex("whose value 1")); - EXPECT_THAT(Explain(m, SampleAnyType("A")), - HasSubstr("whose value is not of type '")); - EXPECT_THAT(Explain(m, SampleAnyType(2)), "whose value 2 doesn't match"); -} - -TEST(PointeeTest, WorksOnMoveOnlyType) { - std::unique_ptr p(new int(3)); - EXPECT_THAT(p, Pointee(Eq(3))); - EXPECT_THAT(p, Not(Pointee(Eq(2)))); -} - -TEST(NotTest, WorksOnMoveOnlyType) { - std::unique_ptr p(new int(3)); - EXPECT_THAT(p, Pointee(Eq(3))); - EXPECT_THAT(p, Not(Pointee(Eq(2)))); -} - -// Tests Args(m). 
- -TEST(ArgsTest, AcceptsZeroTemplateArg) { - const std::tuple t(5, true); - EXPECT_THAT(t, Args<>(Eq(std::tuple<>()))); - EXPECT_THAT(t, Not(Args<>(Ne(std::tuple<>())))); -} - -TEST(ArgsTest, AcceptsOneTemplateArg) { - const std::tuple t(5, true); - EXPECT_THAT(t, Args<0>(Eq(std::make_tuple(5)))); - EXPECT_THAT(t, Args<1>(Eq(std::make_tuple(true)))); - EXPECT_THAT(t, Not(Args<1>(Eq(std::make_tuple(false))))); -} - -TEST(ArgsTest, AcceptsTwoTemplateArgs) { - const std::tuple t(4, 5, 6L); // NOLINT - - EXPECT_THAT(t, (Args<0, 1>(Lt()))); - EXPECT_THAT(t, (Args<1, 2>(Lt()))); - EXPECT_THAT(t, Not(Args<0, 2>(Gt()))); -} - -TEST(ArgsTest, AcceptsRepeatedTemplateArgs) { - const std::tuple t(4, 5, 6L); // NOLINT - EXPECT_THAT(t, (Args<0, 0>(Eq()))); - EXPECT_THAT(t, Not(Args<1, 1>(Ne()))); -} - -TEST(ArgsTest, AcceptsDecreasingTemplateArgs) { - const std::tuple t(4, 5, 6L); // NOLINT - EXPECT_THAT(t, (Args<2, 0>(Gt()))); - EXPECT_THAT(t, Not(Args<2, 1>(Lt()))); -} - -MATCHER(SumIsZero, "") { - return std::get<0>(arg) + std::get<1>(arg) + std::get<2>(arg) == 0; -} - -TEST(ArgsTest, AcceptsMoreTemplateArgsThanArityOfOriginalTuple) { - EXPECT_THAT(std::make_tuple(-1, 2), (Args<0, 0, 1>(SumIsZero()))); - EXPECT_THAT(std::make_tuple(1, 2), Not(Args<0, 0, 1>(SumIsZero()))); -} - -TEST(ArgsTest, CanBeNested) { - const std::tuple t(4, 5, 6L, 6); // NOLINT - EXPECT_THAT(t, (Args<1, 2, 3>(Args<1, 2>(Eq())))); - EXPECT_THAT(t, (Args<0, 1, 3>(Args<0, 2>(Lt())))); -} - -TEST(ArgsTest, CanMatchTupleByValue) { - typedef std::tuple Tuple3; - const Matcher m = Args<1, 2>(Lt()); - EXPECT_TRUE(m.Matches(Tuple3('a', 1, 2))); - EXPECT_FALSE(m.Matches(Tuple3('b', 2, 2))); -} - -TEST(ArgsTest, CanMatchTupleByReference) { - typedef std::tuple Tuple3; - const Matcher m = Args<0, 1>(Lt()); - EXPECT_TRUE(m.Matches(Tuple3('a', 'b', 2))); - EXPECT_FALSE(m.Matches(Tuple3('b', 'b', 2))); -} - -// Validates that arg is printed as str. 
-MATCHER_P(PrintsAs, str, "") { - return testing::PrintToString(arg) == str; -} - -TEST(ArgsTest, AcceptsTenTemplateArgs) { - EXPECT_THAT(std::make_tuple(0, 1L, 2, 3L, 4, 5, 6, 7, 8, 9), - (Args<9, 8, 7, 6, 5, 4, 3, 2, 1, 0>( - PrintsAs("(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)")))); - EXPECT_THAT(std::make_tuple(0, 1L, 2, 3L, 4, 5, 6, 7, 8, 9), - Not(Args<9, 8, 7, 6, 5, 4, 3, 2, 1, 0>( - PrintsAs("(0, 8, 7, 6, 5, 4, 3, 2, 1, 0)")))); -} - -TEST(ArgsTest, DescirbesSelfCorrectly) { - const Matcher > m = Args<2, 0>(Lt()); - EXPECT_EQ("are a tuple whose fields (#2, #0) are a pair where " - "the first < the second", - Describe(m)); -} - -TEST(ArgsTest, DescirbesNestedArgsCorrectly) { - const Matcher&> m = - Args<0, 2, 3>(Args<2, 0>(Lt())); - EXPECT_EQ("are a tuple whose fields (#0, #2, #3) are a tuple " - "whose fields (#2, #0) are a pair where the first < the second", - Describe(m)); -} - -TEST(ArgsTest, DescribesNegationCorrectly) { - const Matcher > m = Args<1, 0>(Gt()); - EXPECT_EQ("are a tuple whose fields (#1, #0) aren't a pair " - "where the first > the second", - DescribeNegation(m)); -} - -TEST(ArgsTest, ExplainsMatchResultWithoutInnerExplanation) { - const Matcher > m = Args<1, 2>(Eq()); - EXPECT_EQ("whose fields (#1, #2) are (42, 42)", - Explain(m, std::make_tuple(false, 42, 42))); - EXPECT_EQ("whose fields (#1, #2) are (42, 43)", - Explain(m, std::make_tuple(false, 42, 43))); -} - -// For testing Args<>'s explanation. 
-class LessThanMatcher : public MatcherInterface > { - public: - void DescribeTo(::std::ostream* /*os*/) const override {} - - bool MatchAndExplain(std::tuple value, - MatchResultListener* listener) const override { - const int diff = std::get<0>(value) - std::get<1>(value); - if (diff > 0) { - *listener << "where the first value is " << diff - << " more than the second"; - } - return diff < 0; - } -}; - -Matcher > LessThan() { - return MakeMatcher(new LessThanMatcher); -} - -TEST(ArgsTest, ExplainsMatchResultWithInnerExplanation) { - const Matcher > m = Args<0, 2>(LessThan()); - EXPECT_EQ( - "whose fields (#0, #2) are ('a' (97, 0x61), 42), " - "where the first value is 55 more than the second", - Explain(m, std::make_tuple('a', 42, 42))); - EXPECT_EQ("whose fields (#0, #2) are ('\\0', 43)", - Explain(m, std::make_tuple('\0', 42, 43))); -} - -class PredicateFormatterFromMatcherTest : public ::testing::Test { - protected: - enum Behavior { kInitialSuccess, kAlwaysFail, kFlaky }; - - // A matcher that can return different results when used multiple times on the - // same input. No real matcher should do this; but this lets us test that we - // detect such behavior and fail appropriately. - class MockMatcher : public MatcherInterface { - public: - bool MatchAndExplain(Behavior behavior, - MatchResultListener* listener) const override { - *listener << "[MatchAndExplain]"; - switch (behavior) { - case kInitialSuccess: - // The first call to MatchAndExplain should use a "not interested" - // listener; so this is expected to return |true|. There should be no - // subsequent calls. - return !listener->IsInterested(); - - case kAlwaysFail: - return false; - - case kFlaky: - // The first call to MatchAndExplain should use a "not interested" - // listener; so this will return |false|. Subsequent calls should have - // an "interested" listener; so this will return |true|, thus - // simulating a flaky matcher. 
- return listener->IsInterested(); - } - - GTEST_LOG_(FATAL) << "This should never be reached"; - return false; - } - - void DescribeTo(ostream* os) const override { *os << "[DescribeTo]"; } - - void DescribeNegationTo(ostream* os) const override { - *os << "[DescribeNegationTo]"; - } - }; - - AssertionResult RunPredicateFormatter(Behavior behavior) { - auto matcher = MakeMatcher(new MockMatcher); - PredicateFormatterFromMatcher> predicate_formatter( - matcher); - return predicate_formatter("dummy-name", behavior); - } -}; - -TEST_F(PredicateFormatterFromMatcherTest, ShortCircuitOnSuccess) { - AssertionResult result = RunPredicateFormatter(kInitialSuccess); - EXPECT_TRUE(result); // Implicit cast to bool. - std::string expect; - EXPECT_EQ(expect, result.message()); -} - -TEST_F(PredicateFormatterFromMatcherTest, NoShortCircuitOnFailure) { - AssertionResult result = RunPredicateFormatter(kAlwaysFail); - EXPECT_FALSE(result); // Implicit cast to bool. - std::string expect = - "Value of: dummy-name\nExpected: [DescribeTo]\n" - " Actual: 1" + - OfType(internal::GetTypeName()) + ", [MatchAndExplain]"; - EXPECT_EQ(expect, result.message()); -} - -TEST_F(PredicateFormatterFromMatcherTest, DetectsFlakyShortCircuit) { - AssertionResult result = RunPredicateFormatter(kFlaky); - EXPECT_FALSE(result); // Implicit cast to bool. - std::string expect = - "Value of: dummy-name\nExpected: [DescribeTo]\n" - " The matcher failed on the initial attempt; but passed when rerun to " - "generate the explanation.\n" - " Actual: 2" + - OfType(internal::GetTypeName()) + ", [MatchAndExplain]"; - EXPECT_EQ(expect, result.message()); -} - -// Tests for ElementsAre(). 
- -TEST(ElementsAreTest, CanDescribeExpectingNoElement) { - Matcher&> m = ElementsAre(); - EXPECT_EQ("is empty", Describe(m)); -} - -TEST(ElementsAreTest, CanDescribeExpectingOneElement) { - Matcher> m = ElementsAre(Gt(5)); - EXPECT_EQ("has 1 element that is > 5", Describe(m)); -} - -TEST(ElementsAreTest, CanDescribeExpectingManyElements) { - Matcher> m = ElementsAre(StrEq("one"), "two"); - EXPECT_EQ( - "has 2 elements where\n" - "element #0 is equal to \"one\",\n" - "element #1 is equal to \"two\"", - Describe(m)); -} - -TEST(ElementsAreTest, CanDescribeNegationOfExpectingNoElement) { - Matcher> m = ElementsAre(); - EXPECT_EQ("isn't empty", DescribeNegation(m)); -} - -TEST(ElementsAreTest, CanDescribeNegationOfExpectingOneElment) { - Matcher&> m = ElementsAre(Gt(5)); - EXPECT_EQ( - "doesn't have 1 element, or\n" - "element #0 isn't > 5", - DescribeNegation(m)); -} - -TEST(ElementsAreTest, CanDescribeNegationOfExpectingManyElements) { - Matcher&> m = ElementsAre("one", "two"); - EXPECT_EQ( - "doesn't have 2 elements, or\n" - "element #0 isn't equal to \"one\", or\n" - "element #1 isn't equal to \"two\"", - DescribeNegation(m)); -} - -TEST(ElementsAreTest, DoesNotExplainTrivialMatch) { - Matcher&> m = ElementsAre(1, Ne(2)); - - list test_list; - test_list.push_back(1); - test_list.push_back(3); - EXPECT_EQ("", Explain(m, test_list)); // No need to explain anything. -} - -TEST(ElementsAreTest, ExplainsNonTrivialMatch) { - Matcher&> m = - ElementsAre(GreaterThan(1), 0, GreaterThan(2)); - - const int a[] = {10, 0, 100}; - vector test_vector(std::begin(a), std::end(a)); - EXPECT_EQ( - "whose element #0 matches, which is 9 more than 1,\n" - "and whose element #2 matches, which is 98 more than 2", - Explain(m, test_vector)); -} - -TEST(ElementsAreTest, CanExplainMismatchWrongSize) { - Matcher&> m = ElementsAre(1, 3); - - list test_list; - // No need to explain when the container is empty. 
- EXPECT_EQ("", Explain(m, test_list)); - - test_list.push_back(1); - EXPECT_EQ("which has 1 element", Explain(m, test_list)); -} - -TEST(ElementsAreTest, CanExplainMismatchRightSize) { - Matcher&> m = ElementsAre(1, GreaterThan(5)); - - vector v; - v.push_back(2); - v.push_back(1); - EXPECT_EQ("whose element #0 doesn't match", Explain(m, v)); - - v[0] = 1; - EXPECT_EQ("whose element #1 doesn't match, which is 4 less than 5", - Explain(m, v)); -} - -TEST(ElementsAreTest, MatchesOneElementVector) { - vector test_vector; - test_vector.push_back("test string"); - - EXPECT_THAT(test_vector, ElementsAre(StrEq("test string"))); -} - -TEST(ElementsAreTest, MatchesOneElementList) { - list test_list; - test_list.push_back("test string"); - - EXPECT_THAT(test_list, ElementsAre("test string")); -} - -TEST(ElementsAreTest, MatchesThreeElementVector) { - vector test_vector; - test_vector.push_back("one"); - test_vector.push_back("two"); - test_vector.push_back("three"); - - EXPECT_THAT(test_vector, ElementsAre("one", StrEq("two"), _)); -} - -TEST(ElementsAreTest, MatchesOneElementEqMatcher) { - vector test_vector; - test_vector.push_back(4); - - EXPECT_THAT(test_vector, ElementsAre(Eq(4))); -} - -TEST(ElementsAreTest, MatchesOneElementAnyMatcher) { - vector test_vector; - test_vector.push_back(4); - - EXPECT_THAT(test_vector, ElementsAre(_)); -} - -TEST(ElementsAreTest, MatchesOneElementValue) { - vector test_vector; - test_vector.push_back(4); - - EXPECT_THAT(test_vector, ElementsAre(4)); -} - -TEST(ElementsAreTest, MatchesThreeElementsMixedMatchers) { - vector test_vector; - test_vector.push_back(1); - test_vector.push_back(2); - test_vector.push_back(3); - - EXPECT_THAT(test_vector, ElementsAre(1, Eq(2), _)); -} - -TEST(ElementsAreTest, MatchesTenElementVector) { - const int a[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; - vector test_vector(std::begin(a), std::end(a)); - - EXPECT_THAT(test_vector, - // The element list can contain values and/or matchers - // of different types. 
- ElementsAre(0, Ge(0), _, 3, 4, Ne(2), Eq(6), 7, 8, _)); -} - -TEST(ElementsAreTest, DoesNotMatchWrongSize) { - vector test_vector; - test_vector.push_back("test string"); - test_vector.push_back("test string"); - - Matcher> m = ElementsAre(StrEq("test string")); - EXPECT_FALSE(m.Matches(test_vector)); -} - -TEST(ElementsAreTest, DoesNotMatchWrongValue) { - vector test_vector; - test_vector.push_back("other string"); - - Matcher> m = ElementsAre(StrEq("test string")); - EXPECT_FALSE(m.Matches(test_vector)); -} - -TEST(ElementsAreTest, DoesNotMatchWrongOrder) { - vector test_vector; - test_vector.push_back("one"); - test_vector.push_back("three"); - test_vector.push_back("two"); - - Matcher> m = - ElementsAre(StrEq("one"), StrEq("two"), StrEq("three")); - EXPECT_FALSE(m.Matches(test_vector)); -} - -TEST(ElementsAreTest, WorksForNestedContainer) { - constexpr std::array strings = {{"Hi", "world"}}; - - vector> nested; - for (const auto& s : strings) { - nested.emplace_back(s, s + strlen(s)); - } - - EXPECT_THAT(nested, ElementsAre(ElementsAre('H', Ne('e')), - ElementsAre('w', 'o', _, _, 'd'))); - EXPECT_THAT(nested, Not(ElementsAre(ElementsAre('H', 'e'), - ElementsAre('w', 'o', _, _, 'd')))); -} - -TEST(ElementsAreTest, WorksWithByRefElementMatchers) { - int a[] = {0, 1, 2}; - vector v(std::begin(a), std::end(a)); - - EXPECT_THAT(v, ElementsAre(Ref(v[0]), Ref(v[1]), Ref(v[2]))); - EXPECT_THAT(v, Not(ElementsAre(Ref(v[0]), Ref(v[1]), Ref(a[2])))); -} - -TEST(ElementsAreTest, WorksWithContainerPointerUsingPointee) { - int a[] = {0, 1, 2}; - vector v(std::begin(a), std::end(a)); - - EXPECT_THAT(&v, Pointee(ElementsAre(0, 1, _))); - EXPECT_THAT(&v, Not(Pointee(ElementsAre(0, _, 3)))); -} - -TEST(ElementsAreTest, WorksWithNativeArrayPassedByReference) { - int array[] = {0, 1, 2}; - EXPECT_THAT(array, ElementsAre(0, 1, _)); - EXPECT_THAT(array, Not(ElementsAre(1, _, _))); - EXPECT_THAT(array, Not(ElementsAre(0, _))); -} - -class NativeArrayPassedAsPointerAndSize { - 
public: - NativeArrayPassedAsPointerAndSize() {} - - MOCK_METHOD(void, Helper, (int* array, int size)); - - private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(NativeArrayPassedAsPointerAndSize); -}; - -TEST(ElementsAreTest, WorksWithNativeArrayPassedAsPointerAndSize) { - int array[] = {0, 1}; - ::std::tuple array_as_tuple(array, 2); - EXPECT_THAT(array_as_tuple, ElementsAre(0, 1)); - EXPECT_THAT(array_as_tuple, Not(ElementsAre(0))); - - NativeArrayPassedAsPointerAndSize helper; - EXPECT_CALL(helper, Helper(_, _)).With(ElementsAre(0, 1)); - helper.Helper(array, 2); -} - -TEST(ElementsAreTest, WorksWithTwoDimensionalNativeArray) { - const char a2[][3] = {"hi", "lo"}; - EXPECT_THAT(a2, ElementsAre(ElementsAre('h', 'i', '\0'), - ElementsAre('l', 'o', '\0'))); - EXPECT_THAT(a2, ElementsAre(StrEq("hi"), StrEq("lo"))); - EXPECT_THAT(a2, ElementsAre(Not(ElementsAre('h', 'o', '\0')), - ElementsAre('l', 'o', '\0'))); -} - -TEST(ElementsAreTest, AcceptsStringLiteral) { - std::string array[] = {"hi", "one", "two"}; - EXPECT_THAT(array, ElementsAre("hi", "one", "two")); - EXPECT_THAT(array, Not(ElementsAre("hi", "one", "too"))); -} - -// Declared here with the size unknown. Defined AFTER the following test. -extern const char kHi[]; - -TEST(ElementsAreTest, AcceptsArrayWithUnknownSize) { - // The size of kHi is not known in this test, but ElementsAre() should - // still accept it. - - std::string array1[] = {"hi"}; - EXPECT_THAT(array1, ElementsAre(kHi)); - - std::string array2[] = {"ho"}; - EXPECT_THAT(array2, Not(ElementsAre(kHi))); -} - -const char kHi[] = "hi"; - -TEST(ElementsAreTest, MakesCopyOfArguments) { - int x = 1; - int y = 2; - // This should make a copy of x and y. - ::testing::internal::ElementsAreMatcher> - polymorphic_matcher = ElementsAre(x, y); - // Changing x and y now shouldn't affect the meaning of the above matcher. 
- x = y = 0; - const int array1[] = {1, 2}; - EXPECT_THAT(array1, polymorphic_matcher); - const int array2[] = {0, 0}; - EXPECT_THAT(array2, Not(polymorphic_matcher)); -} - -// Tests for ElementsAreArray(). Since ElementsAreArray() shares most -// of the implementation with ElementsAre(), we don't test it as -// thoroughly here. - -TEST(ElementsAreArrayTest, CanBeCreatedWithValueArray) { - const int a[] = {1, 2, 3}; - - vector test_vector(std::begin(a), std::end(a)); - EXPECT_THAT(test_vector, ElementsAreArray(a)); - - test_vector[2] = 0; - EXPECT_THAT(test_vector, Not(ElementsAreArray(a))); -} - -TEST(ElementsAreArrayTest, CanBeCreatedWithArraySize) { - std::array a = {{"one", "two", "three"}}; - - vector test_vector(std::begin(a), std::end(a)); - EXPECT_THAT(test_vector, ElementsAreArray(a.data(), a.size())); - - const char** p = a.data(); - test_vector[0] = "1"; - EXPECT_THAT(test_vector, Not(ElementsAreArray(p, a.size()))); -} - -TEST(ElementsAreArrayTest, CanBeCreatedWithoutArraySize) { - const char* a[] = {"one", "two", "three"}; - - vector test_vector(std::begin(a), std::end(a)); - EXPECT_THAT(test_vector, ElementsAreArray(a)); - - test_vector[0] = "1"; - EXPECT_THAT(test_vector, Not(ElementsAreArray(a))); -} - -TEST(ElementsAreArrayTest, CanBeCreatedWithMatcherArray) { - const Matcher kMatcherArray[] = {StrEq("one"), StrEq("two"), - StrEq("three")}; - - vector test_vector; - test_vector.push_back("one"); - test_vector.push_back("two"); - test_vector.push_back("three"); - EXPECT_THAT(test_vector, ElementsAreArray(kMatcherArray)); - - test_vector.push_back("three"); - EXPECT_THAT(test_vector, Not(ElementsAreArray(kMatcherArray))); -} - -TEST(ElementsAreArrayTest, CanBeCreatedWithVector) { - const int a[] = {1, 2, 3}; - vector test_vector(std::begin(a), std::end(a)); - const vector expected(std::begin(a), std::end(a)); - EXPECT_THAT(test_vector, ElementsAreArray(expected)); - test_vector.push_back(4); - EXPECT_THAT(test_vector, 
Not(ElementsAreArray(expected))); -} - -TEST(ElementsAreArrayTest, TakesInitializerList) { - const int a[5] = {1, 2, 3, 4, 5}; - EXPECT_THAT(a, ElementsAreArray({1, 2, 3, 4, 5})); - EXPECT_THAT(a, Not(ElementsAreArray({1, 2, 3, 5, 4}))); - EXPECT_THAT(a, Not(ElementsAreArray({1, 2, 3, 4, 6}))); -} - -TEST(ElementsAreArrayTest, TakesInitializerListOfCStrings) { - const std::string a[5] = {"a", "b", "c", "d", "e"}; - EXPECT_THAT(a, ElementsAreArray({"a", "b", "c", "d", "e"})); - EXPECT_THAT(a, Not(ElementsAreArray({"a", "b", "c", "e", "d"}))); - EXPECT_THAT(a, Not(ElementsAreArray({"a", "b", "c", "d", "ef"}))); -} - -TEST(ElementsAreArrayTest, TakesInitializerListOfSameTypedMatchers) { - const int a[5] = {1, 2, 3, 4, 5}; - EXPECT_THAT(a, ElementsAreArray({Eq(1), Eq(2), Eq(3), Eq(4), Eq(5)})); - EXPECT_THAT(a, Not(ElementsAreArray({Eq(1), Eq(2), Eq(3), Eq(4), Eq(6)}))); -} - -TEST(ElementsAreArrayTest, TakesInitializerListOfDifferentTypedMatchers) { - const int a[5] = {1, 2, 3, 4, 5}; - // The compiler cannot infer the type of the initializer list if its - // elements have different types. We must explicitly specify the - // unified element type in this case. 
- EXPECT_THAT( - a, ElementsAreArray>({Eq(1), Ne(-2), Ge(3), Le(4), Eq(5)})); - EXPECT_THAT(a, Not(ElementsAreArray>( - {Eq(1), Ne(-2), Ge(3), Le(4), Eq(6)}))); -} - -TEST(ElementsAreArrayTest, CanBeCreatedWithMatcherVector) { - const int a[] = {1, 2, 3}; - const Matcher kMatchers[] = {Eq(1), Eq(2), Eq(3)}; - vector test_vector(std::begin(a), std::end(a)); - const vector> expected(std::begin(kMatchers), - std::end(kMatchers)); - EXPECT_THAT(test_vector, ElementsAreArray(expected)); - test_vector.push_back(4); - EXPECT_THAT(test_vector, Not(ElementsAreArray(expected))); -} - -TEST(ElementsAreArrayTest, CanBeCreatedWithIteratorRange) { - const int a[] = {1, 2, 3}; - const vector test_vector(std::begin(a), std::end(a)); - const vector expected(std::begin(a), std::end(a)); - EXPECT_THAT(test_vector, ElementsAreArray(expected.begin(), expected.end())); - // Pointers are iterators, too. - EXPECT_THAT(test_vector, ElementsAreArray(std::begin(a), std::end(a))); - // The empty range of NULL pointers should also be okay. - int* const null_int = nullptr; - EXPECT_THAT(test_vector, Not(ElementsAreArray(null_int, null_int))); - EXPECT_THAT((vector()), ElementsAreArray(null_int, null_int)); -} - -// Since ElementsAre() and ElementsAreArray() share much of the -// implementation, we only do a sanity test for native arrays here. 
-TEST(ElementsAreArrayTest, WorksWithNativeArray) { - ::std::string a[] = {"hi", "ho"}; - ::std::string b[] = {"hi", "ho"}; - - EXPECT_THAT(a, ElementsAreArray(b)); - EXPECT_THAT(a, ElementsAreArray(b, 2)); - EXPECT_THAT(a, Not(ElementsAreArray(b, 1))); -} - -TEST(ElementsAreArrayTest, SourceLifeSpan) { - const int a[] = {1, 2, 3}; - vector test_vector(std::begin(a), std::end(a)); - vector expect(std::begin(a), std::end(a)); - ElementsAreArrayMatcher matcher_maker = - ElementsAreArray(expect.begin(), expect.end()); - EXPECT_THAT(test_vector, matcher_maker); - // Changing in place the values that initialized matcher_maker should not - // affect matcher_maker anymore. It should have made its own copy of them. - for (int& i : expect) { - i += 10; - } - EXPECT_THAT(test_vector, matcher_maker); - test_vector.push_back(3); - EXPECT_THAT(test_vector, Not(matcher_maker)); -} - -// Tests for the MATCHER*() macro family. - -// Tests that a simple MATCHER() definition works. - -MATCHER(IsEven, "") { return (arg % 2) == 0; } - -TEST(MatcherMacroTest, Works) { - const Matcher m = IsEven(); - EXPECT_TRUE(m.Matches(6)); - EXPECT_FALSE(m.Matches(7)); - - EXPECT_EQ("is even", Describe(m)); - EXPECT_EQ("not (is even)", DescribeNegation(m)); - EXPECT_EQ("", Explain(m, 6)); - EXPECT_EQ("", Explain(m, 7)); -} - -// This also tests that the description string can reference 'negation'. -MATCHER(IsEven2, negation ? "is odd" : "is even") { - if ((arg % 2) == 0) { - // Verifies that we can stream to result_listener, a listener - // supplied by the MATCHER macro implicitly. - *result_listener << "OK"; - return true; - } else { - *result_listener << "% 2 == " << (arg % 2); - return false; - } -} - -// This also tests that the description string can reference matcher -// parameters. -MATCHER_P2(EqSumOf, x, y, - std::string(negation ? 
"doesn't equal" : "equals") + " the sum of " + - PrintToString(x) + " and " + PrintToString(y)) { - if (arg == (x + y)) { - *result_listener << "OK"; - return true; - } else { - // Verifies that we can stream to the underlying stream of - // result_listener. - if (result_listener->stream() != nullptr) { - *result_listener->stream() << "diff == " << (x + y - arg); - } - return false; - } -} - -// Tests that the matcher description can reference 'negation' and the -// matcher parameters. -TEST(MatcherMacroTest, DescriptionCanReferenceNegationAndParameters) { - const Matcher m1 = IsEven2(); - EXPECT_EQ("is even", Describe(m1)); - EXPECT_EQ("is odd", DescribeNegation(m1)); - - const Matcher m2 = EqSumOf(5, 9); - EXPECT_EQ("equals the sum of 5 and 9", Describe(m2)); - EXPECT_EQ("doesn't equal the sum of 5 and 9", DescribeNegation(m2)); -} - -// Tests explaining match result in a MATCHER* macro. -TEST(MatcherMacroTest, CanExplainMatchResult) { - const Matcher m1 = IsEven2(); - EXPECT_EQ("OK", Explain(m1, 4)); - EXPECT_EQ("% 2 == 1", Explain(m1, 5)); - - const Matcher m2 = EqSumOf(1, 2); - EXPECT_EQ("OK", Explain(m2, 3)); - EXPECT_EQ("diff == -1", Explain(m2, 4)); -} - -// Tests that the body of MATCHER() can reference the type of the -// value being matched. - -MATCHER(IsEmptyString, "") { - StaticAssertTypeEq<::std::string, arg_type>(); - return arg.empty(); -} - -MATCHER(IsEmptyStringByRef, "") { - StaticAssertTypeEq(); - return arg.empty(); -} - -TEST(MatcherMacroTest, CanReferenceArgType) { - const Matcher<::std::string> m1 = IsEmptyString(); - EXPECT_TRUE(m1.Matches("")); - - const Matcher m2 = IsEmptyStringByRef(); - EXPECT_TRUE(m2.Matches("")); -} - -// Tests that MATCHER() can be used in a namespace. 
- -namespace matcher_test { -MATCHER(IsOdd, "") { return (arg % 2) != 0; } -} // namespace matcher_test - -TEST(MatcherMacroTest, WorksInNamespace) { - Matcher m = matcher_test::IsOdd(); - EXPECT_FALSE(m.Matches(4)); - EXPECT_TRUE(m.Matches(5)); -} - -// Tests that Value() can be used to compose matchers. -MATCHER(IsPositiveOdd, "") { - return Value(arg, matcher_test::IsOdd()) && arg > 0; -} - -TEST(MatcherMacroTest, CanBeComposedUsingValue) { - EXPECT_THAT(3, IsPositiveOdd()); - EXPECT_THAT(4, Not(IsPositiveOdd())); - EXPECT_THAT(-1, Not(IsPositiveOdd())); -} - -// Tests that a simple MATCHER_P() definition works. - -MATCHER_P(IsGreaterThan32And, n, "") { return arg > 32 && arg > n; } - -TEST(MatcherPMacroTest, Works) { - const Matcher m = IsGreaterThan32And(5); - EXPECT_TRUE(m.Matches(36)); - EXPECT_FALSE(m.Matches(5)); - - EXPECT_EQ("is greater than 32 and 5", Describe(m)); - EXPECT_EQ("not (is greater than 32 and 5)", DescribeNegation(m)); - EXPECT_EQ("", Explain(m, 36)); - EXPECT_EQ("", Explain(m, 5)); -} - -// Tests that the description is calculated correctly from the matcher name. -MATCHER_P(_is_Greater_Than32and_, n, "") { return arg > 32 && arg > n; } - -TEST(MatcherPMacroTest, GeneratesCorrectDescription) { - const Matcher m = _is_Greater_Than32and_(5); - - EXPECT_EQ("is greater than 32 and 5", Describe(m)); - EXPECT_EQ("not (is greater than 32 and 5)", DescribeNegation(m)); - EXPECT_EQ("", Explain(m, 36)); - EXPECT_EQ("", Explain(m, 5)); -} - -// Tests that a MATCHER_P matcher can be explicitly instantiated with -// a reference parameter type. 
- -class UncopyableFoo { - public: - explicit UncopyableFoo(char value) : value_(value) { (void)value_; } - - UncopyableFoo(const UncopyableFoo&) = delete; - void operator=(const UncopyableFoo&) = delete; - - private: - char value_; -}; - -MATCHER_P(ReferencesUncopyable, variable, "") { return &arg == &variable; } - -TEST(MatcherPMacroTest, WorksWhenExplicitlyInstantiatedWithReference) { - UncopyableFoo foo1('1'), foo2('2'); - const Matcher m = - ReferencesUncopyable(foo1); - - EXPECT_TRUE(m.Matches(foo1)); - EXPECT_FALSE(m.Matches(foo2)); - - // We don't want the address of the parameter printed, as most - // likely it will just annoy the user. If the address is - // interesting, the user should consider passing the parameter by - // pointer instead. - EXPECT_EQ("references uncopyable 1-byte object <31>", Describe(m)); -} - -// Tests that the body of MATCHER_Pn() can reference the parameter -// types. - -MATCHER_P3(ParamTypesAreIntLongAndChar, foo, bar, baz, "") { - StaticAssertTypeEq(); - StaticAssertTypeEq(); // NOLINT - StaticAssertTypeEq(); - return arg == 0; -} - -TEST(MatcherPnMacroTest, CanReferenceParamTypes) { - EXPECT_THAT(0, ParamTypesAreIntLongAndChar(10, 20L, 'a')); -} - -// Tests that a MATCHER_Pn matcher can be explicitly instantiated with -// reference parameter types. 
- -MATCHER_P2(ReferencesAnyOf, variable1, variable2, "") { - return &arg == &variable1 || &arg == &variable2; -} - -TEST(MatcherPnMacroTest, WorksWhenExplicitlyInstantiatedWithReferences) { - UncopyableFoo foo1('1'), foo2('2'), foo3('3'); - const Matcher const_m = - ReferencesAnyOf(foo1, foo2); - - EXPECT_TRUE(const_m.Matches(foo1)); - EXPECT_TRUE(const_m.Matches(foo2)); - EXPECT_FALSE(const_m.Matches(foo3)); - - const Matcher m = - ReferencesAnyOf(foo1, foo2); - - EXPECT_TRUE(m.Matches(foo1)); - EXPECT_TRUE(m.Matches(foo2)); - EXPECT_FALSE(m.Matches(foo3)); -} - -TEST(MatcherPnMacroTest, - GeneratesCorretDescriptionWhenExplicitlyInstantiatedWithReferences) { - UncopyableFoo foo1('1'), foo2('2'); - const Matcher m = - ReferencesAnyOf(foo1, foo2); - - // We don't want the addresses of the parameters printed, as most - // likely they will just annoy the user. If the addresses are - // interesting, the user should consider passing the parameters by - // pointers instead. - EXPECT_EQ("references any of (1-byte object <31>, 1-byte object <32>)", - Describe(m)); -} - -// Tests that a simple MATCHER_P2() definition works. - -MATCHER_P2(IsNotInClosedRange, low, hi, "") { return arg < low || arg > hi; } - -TEST(MatcherPnMacroTest, Works) { - const Matcher m = IsNotInClosedRange(10, 20); // NOLINT - EXPECT_TRUE(m.Matches(36L)); - EXPECT_FALSE(m.Matches(15L)); - - EXPECT_EQ("is not in closed range (10, 20)", Describe(m)); - EXPECT_EQ("not (is not in closed range (10, 20))", DescribeNegation(m)); - EXPECT_EQ("", Explain(m, 36L)); - EXPECT_EQ("", Explain(m, 15L)); -} - -// Tests that MATCHER*() definitions can be overloaded on the number -// of parameters; also tests MATCHER_Pn() where n >= 3. 
- -MATCHER(EqualsSumOf, "") { return arg == 0; } -MATCHER_P(EqualsSumOf, a, "") { return arg == a; } -MATCHER_P2(EqualsSumOf, a, b, "") { return arg == a + b; } -MATCHER_P3(EqualsSumOf, a, b, c, "") { return arg == a + b + c; } -MATCHER_P4(EqualsSumOf, a, b, c, d, "") { return arg == a + b + c + d; } -MATCHER_P5(EqualsSumOf, a, b, c, d, e, "") { return arg == a + b + c + d + e; } -MATCHER_P6(EqualsSumOf, a, b, c, d, e, f, "") { - return arg == a + b + c + d + e + f; -} -MATCHER_P7(EqualsSumOf, a, b, c, d, e, f, g, "") { - return arg == a + b + c + d + e + f + g; -} -MATCHER_P8(EqualsSumOf, a, b, c, d, e, f, g, h, "") { - return arg == a + b + c + d + e + f + g + h; -} -MATCHER_P9(EqualsSumOf, a, b, c, d, e, f, g, h, i, "") { - return arg == a + b + c + d + e + f + g + h + i; -} -MATCHER_P10(EqualsSumOf, a, b, c, d, e, f, g, h, i, j, "") { - return arg == a + b + c + d + e + f + g + h + i + j; -} - -TEST(MatcherPnMacroTest, CanBeOverloadedOnNumberOfParameters) { - EXPECT_THAT(0, EqualsSumOf()); - EXPECT_THAT(1, EqualsSumOf(1)); - EXPECT_THAT(12, EqualsSumOf(10, 2)); - EXPECT_THAT(123, EqualsSumOf(100, 20, 3)); - EXPECT_THAT(1234, EqualsSumOf(1000, 200, 30, 4)); - EXPECT_THAT(12345, EqualsSumOf(10000, 2000, 300, 40, 5)); - EXPECT_THAT("abcdef", - EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f')); - EXPECT_THAT("abcdefg", - EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f', 'g')); - EXPECT_THAT("abcdefgh", EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", - 'f', 'g', "h")); - EXPECT_THAT("abcdefghi", EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", - 'f', 'g', "h", 'i')); - EXPECT_THAT("abcdefghij", - EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f', 'g', "h", - 'i', ::std::string("j"))); - - EXPECT_THAT(1, Not(EqualsSumOf())); - EXPECT_THAT(-1, Not(EqualsSumOf(1))); - EXPECT_THAT(-12, Not(EqualsSumOf(10, 2))); - EXPECT_THAT(-123, Not(EqualsSumOf(100, 20, 3))); - EXPECT_THAT(-1234, Not(EqualsSumOf(1000, 200, 30, 4))); - EXPECT_THAT(-12345, 
Not(EqualsSumOf(10000, 2000, 300, 40, 5))); - EXPECT_THAT("abcdef ", - Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f'))); - EXPECT_THAT("abcdefg ", Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", - "e", 'f', 'g'))); - EXPECT_THAT("abcdefgh ", Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", - "e", 'f', 'g', "h"))); - EXPECT_THAT("abcdefghi ", Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", - "e", 'f', 'g', "h", 'i'))); - EXPECT_THAT("abcdefghij ", - Not(EqualsSumOf(::std::string("a"), 'b', 'c', "d", "e", 'f', 'g', - "h", 'i', ::std::string("j")))); -} - -// Tests that a MATCHER_Pn() definition can be instantiated with any -// compatible parameter types. -TEST(MatcherPnMacroTest, WorksForDifferentParameterTypes) { - EXPECT_THAT(123, EqualsSumOf(100L, 20, static_cast(3))); - EXPECT_THAT("abcd", EqualsSumOf(::std::string("a"), "b", 'c', "d")); - - EXPECT_THAT(124, Not(EqualsSumOf(100L, 20, static_cast(3)))); - EXPECT_THAT("abcde", Not(EqualsSumOf(::std::string("a"), "b", 'c', "d"))); -} - -// Tests that the matcher body can promote the parameter types. - -MATCHER_P2(EqConcat, prefix, suffix, "") { - // The following lines promote the two parameters to desired types. - std::string prefix_str(prefix); - char suffix_char = static_cast(suffix); - return arg == prefix_str + suffix_char; -} - -TEST(MatcherPnMacroTest, SimpleTypePromotion) { - Matcher no_promo = EqConcat(std::string("foo"), 't'); - Matcher promo = EqConcat("foo", static_cast('t')); - EXPECT_FALSE(no_promo.Matches("fool")); - EXPECT_FALSE(promo.Matches("fool")); - EXPECT_TRUE(no_promo.Matches("foot")); - EXPECT_TRUE(promo.Matches("foot")); -} - -// Verifies the type of a MATCHER*. - -TEST(MatcherPnMacroTest, TypesAreCorrect) { - // EqualsSumOf() must be assignable to a EqualsSumOfMatcher variable. - EqualsSumOfMatcher a0 = EqualsSumOf(); - - // EqualsSumOf(1) must be assignable to a EqualsSumOfMatcherP variable. 
- EqualsSumOfMatcherP a1 = EqualsSumOf(1); - - // EqualsSumOf(p1, ..., pk) must be assignable to a EqualsSumOfMatcherPk - // variable, and so on. - EqualsSumOfMatcherP2 a2 = EqualsSumOf(1, '2'); - EqualsSumOfMatcherP3 a3 = EqualsSumOf(1, 2, '3'); - EqualsSumOfMatcherP4 a4 = EqualsSumOf(1, 2, 3, '4'); - EqualsSumOfMatcherP5 a5 = - EqualsSumOf(1, 2, 3, 4, '5'); - EqualsSumOfMatcherP6 a6 = - EqualsSumOf(1, 2, 3, 4, 5, '6'); - EqualsSumOfMatcherP7 a7 = - EqualsSumOf(1, 2, 3, 4, 5, 6, '7'); - EqualsSumOfMatcherP8 a8 = - EqualsSumOf(1, 2, 3, 4, 5, 6, 7, '8'); - EqualsSumOfMatcherP9 a9 = - EqualsSumOf(1, 2, 3, 4, 5, 6, 7, 8, '9'); - EqualsSumOfMatcherP10 a10 = - EqualsSumOf(1, 2, 3, 4, 5, 6, 7, 8, 9, '0'); - - // Avoid "unused variable" warnings. - (void)a0; - (void)a1; - (void)a2; - (void)a3; - (void)a4; - (void)a5; - (void)a6; - (void)a7; - (void)a8; - (void)a9; - (void)a10; -} - -// Tests that matcher-typed parameters can be used in Value() inside a -// MATCHER_Pn definition. - -// Succeeds if arg matches exactly 2 of the 3 matchers. -MATCHER_P3(TwoOf, m1, m2, m3, "") { - const int count = static_cast(Value(arg, m1)) + - static_cast(Value(arg, m2)) + - static_cast(Value(arg, m3)); - return count == 2; -} - -TEST(MatcherPnMacroTest, CanUseMatcherTypedParameterInValue) { - EXPECT_THAT(42, TwoOf(Gt(0), Lt(50), Eq(10))); - EXPECT_THAT(0, Not(TwoOf(Gt(-1), Lt(1), Eq(0)))); -} - -// Tests Contains(). 
- -TEST(ContainsTest, ListMatchesWhenElementIsInContainer) { - list some_list; - some_list.push_back(3); - some_list.push_back(1); - some_list.push_back(2); - some_list.push_back(3); - EXPECT_THAT(some_list, Contains(1)); - EXPECT_THAT(some_list, Contains(Gt(2.5))); - EXPECT_THAT(some_list, Contains(Eq(2.0f))); - - list another_list; - another_list.push_back("fee"); - another_list.push_back("fie"); - another_list.push_back("foe"); - another_list.push_back("fum"); - EXPECT_THAT(another_list, Contains(std::string("fee"))); -} - -TEST(ContainsTest, ListDoesNotMatchWhenElementIsNotInContainer) { - list some_list; - some_list.push_back(3); - some_list.push_back(1); - EXPECT_THAT(some_list, Not(Contains(4))); -} - -TEST(ContainsTest, SetMatchesWhenElementIsInContainer) { - set some_set; - some_set.insert(3); - some_set.insert(1); - some_set.insert(2); - EXPECT_THAT(some_set, Contains(Eq(1.0))); - EXPECT_THAT(some_set, Contains(Eq(3.0f))); - EXPECT_THAT(some_set, Contains(2)); - - set another_set; - another_set.insert("fee"); - another_set.insert("fie"); - another_set.insert("foe"); - another_set.insert("fum"); - EXPECT_THAT(another_set, Contains(Eq(std::string("fum")))); -} - -TEST(ContainsTest, SetDoesNotMatchWhenElementIsNotInContainer) { - set some_set; - some_set.insert(3); - some_set.insert(1); - EXPECT_THAT(some_set, Not(Contains(4))); - - set c_string_set; - c_string_set.insert("hello"); - EXPECT_THAT(c_string_set, Not(Contains(std::string("goodbye")))); -} - -TEST(ContainsTest, ExplainsMatchResultCorrectly) { - const int a[2] = {1, 2}; - Matcher m = Contains(2); - EXPECT_EQ("whose element #1 matches", Explain(m, a)); - - m = Contains(3); - EXPECT_EQ("", Explain(m, a)); - - m = Contains(GreaterThan(0)); - EXPECT_EQ("whose element #0 matches, which is 1 more than 0", Explain(m, a)); - - m = Contains(GreaterThan(10)); - EXPECT_EQ("", Explain(m, a)); -} - -TEST(ContainsTest, DescribesItselfCorrectly) { - Matcher> m = Contains(1); - EXPECT_EQ("contains at least one 
element that is equal to 1", Describe(m)); - - Matcher> m2 = Not(m); - EXPECT_EQ("doesn't contain any element that is equal to 1", Describe(m2)); -} - -TEST(ContainsTest, MapMatchesWhenElementIsInContainer) { - map my_map; - const char* bar = "a string"; - my_map[bar] = 2; - EXPECT_THAT(my_map, Contains(pair(bar, 2))); - - map another_map; - another_map["fee"] = 1; - another_map["fie"] = 2; - another_map["foe"] = 3; - another_map["fum"] = 4; - EXPECT_THAT(another_map, - Contains(pair(std::string("fee"), 1))); - EXPECT_THAT(another_map, Contains(pair("fie", 2))); -} - -TEST(ContainsTest, MapDoesNotMatchWhenElementIsNotInContainer) { - map some_map; - some_map[1] = 11; - some_map[2] = 22; - EXPECT_THAT(some_map, Not(Contains(pair(2, 23)))); -} - -TEST(ContainsTest, ArrayMatchesWhenElementIsInContainer) { - const char* string_array[] = {"fee", "fie", "foe", "fum"}; - EXPECT_THAT(string_array, Contains(Eq(std::string("fum")))); -} - -TEST(ContainsTest, ArrayDoesNotMatchWhenElementIsNotInContainer) { - int int_array[] = {1, 2, 3, 4}; - EXPECT_THAT(int_array, Not(Contains(5))); -} - -TEST(ContainsTest, AcceptsMatcher) { - const int a[] = {1, 2, 3}; - EXPECT_THAT(a, Contains(Gt(2))); - EXPECT_THAT(a, Not(Contains(Gt(4)))); -} - -TEST(ContainsTest, WorksForNativeArrayAsTuple) { - const int a[] = {1, 2}; - const int* const pointer = a; - EXPECT_THAT(std::make_tuple(pointer, 2), Contains(1)); - EXPECT_THAT(std::make_tuple(pointer, 2), Not(Contains(Gt(3)))); -} - -TEST(ContainsTest, WorksForTwoDimensionalNativeArray) { - int a[][3] = {{1, 2, 3}, {4, 5, 6}}; - EXPECT_THAT(a, Contains(ElementsAre(4, 5, 6))); - EXPECT_THAT(a, Contains(Contains(5))); - EXPECT_THAT(a, Not(Contains(ElementsAre(3, 4, 5)))); - EXPECT_THAT(a, Contains(Not(Contains(5)))); -} - -// Tests Contains().Times(). 
- -TEST(ContainsTimes, ListMatchesWhenElementQuantityMatches) { - list some_list; - some_list.push_back(3); - some_list.push_back(1); - some_list.push_back(2); - some_list.push_back(3); - EXPECT_THAT(some_list, Contains(3).Times(2)); - EXPECT_THAT(some_list, Contains(2).Times(1)); - EXPECT_THAT(some_list, Contains(Ge(2)).Times(3)); - EXPECT_THAT(some_list, Contains(Ge(2)).Times(Gt(2))); - EXPECT_THAT(some_list, Contains(4).Times(0)); - EXPECT_THAT(some_list, Contains(_).Times(4)); - EXPECT_THAT(some_list, Not(Contains(5).Times(1))); - EXPECT_THAT(some_list, Contains(5).Times(_)); // Times(_) always matches - EXPECT_THAT(some_list, Not(Contains(3).Times(1))); - EXPECT_THAT(some_list, Contains(3).Times(Not(1))); - EXPECT_THAT(list{}, Not(Contains(_))); -} - -TEST(ContainsTimes, ExplainsMatchResultCorrectly) { - const int a[2] = {1, 2}; - Matcher m = Contains(2).Times(3); - EXPECT_EQ( - "whose element #1 matches but whose match quantity of 1 does not match", - Explain(m, a)); - - m = Contains(3).Times(0); - EXPECT_EQ("has no element that matches and whose match quantity of 0 matches", - Explain(m, a)); - - m = Contains(3).Times(4); - EXPECT_EQ( - "has no element that matches and whose match quantity of 0 does not " - "match", - Explain(m, a)); - - m = Contains(2).Times(4); - EXPECT_EQ( - "whose element #1 matches but whose match quantity of 1 does not " - "match", - Explain(m, a)); - - m = Contains(GreaterThan(0)).Times(2); - EXPECT_EQ("whose elements (0, 1) match and whose match quantity of 2 matches", - Explain(m, a)); - - m = Contains(GreaterThan(10)).Times(Gt(1)); - EXPECT_EQ( - "has no element that matches and whose match quantity of 0 does not " - "match", - Explain(m, a)); - - m = Contains(GreaterThan(0)).Times(GreaterThan(5)); - EXPECT_EQ( - "whose elements (0, 1) match but whose match quantity of 2 does not " - "match, which is 3 less than 5", - Explain(m, a)); -} - -TEST(ContainsTimes, DescribesItselfCorrectly) { - Matcher> m = Contains(1).Times(2); - 
EXPECT_EQ("quantity of elements that match is equal to 1 is equal to 2", - Describe(m)); - - Matcher> m2 = Not(m); - EXPECT_EQ("quantity of elements that match is equal to 1 isn't equal to 2", - Describe(m2)); -} - -// Tests AllOfArray() - -TEST(AllOfArrayTest, BasicForms) { - // Iterator - std::vector v0{}; - std::vector v1{1}; - std::vector v2{2, 3}; - std::vector v3{4, 4, 4}; - EXPECT_THAT(0, AllOfArray(v0.begin(), v0.end())); - EXPECT_THAT(1, AllOfArray(v1.begin(), v1.end())); - EXPECT_THAT(2, Not(AllOfArray(v1.begin(), v1.end()))); - EXPECT_THAT(3, Not(AllOfArray(v2.begin(), v2.end()))); - EXPECT_THAT(4, AllOfArray(v3.begin(), v3.end())); - // Pointer + size - int ar[6] = {1, 2, 3, 4, 4, 4}; - EXPECT_THAT(0, AllOfArray(ar, 0)); - EXPECT_THAT(1, AllOfArray(ar, 1)); - EXPECT_THAT(2, Not(AllOfArray(ar, 1))); - EXPECT_THAT(3, Not(AllOfArray(ar + 1, 3))); - EXPECT_THAT(4, AllOfArray(ar + 3, 3)); - // Array - // int ar0[0]; Not usable - int ar1[1] = {1}; - int ar2[2] = {2, 3}; - int ar3[3] = {4, 4, 4}; - // EXPECT_THAT(0, Not(AllOfArray(ar0))); // Cannot work - EXPECT_THAT(1, AllOfArray(ar1)); - EXPECT_THAT(2, Not(AllOfArray(ar1))); - EXPECT_THAT(3, Not(AllOfArray(ar2))); - EXPECT_THAT(4, AllOfArray(ar3)); - // Container - EXPECT_THAT(0, AllOfArray(v0)); - EXPECT_THAT(1, AllOfArray(v1)); - EXPECT_THAT(2, Not(AllOfArray(v1))); - EXPECT_THAT(3, Not(AllOfArray(v2))); - EXPECT_THAT(4, AllOfArray(v3)); - // Initializer - EXPECT_THAT(0, AllOfArray({})); // Requires template arg. 
- EXPECT_THAT(1, AllOfArray({1})); - EXPECT_THAT(2, Not(AllOfArray({1}))); - EXPECT_THAT(3, Not(AllOfArray({2, 3}))); - EXPECT_THAT(4, AllOfArray({4, 4, 4})); -} - -TEST(AllOfArrayTest, Matchers) { - // vector - std::vector> matchers{Ge(1), Lt(2)}; - EXPECT_THAT(0, Not(AllOfArray(matchers))); - EXPECT_THAT(1, AllOfArray(matchers)); - EXPECT_THAT(2, Not(AllOfArray(matchers))); - // initializer_list - EXPECT_THAT(0, Not(AllOfArray({Ge(0), Ge(1)}))); - EXPECT_THAT(1, AllOfArray({Ge(0), Ge(1)})); -} - -TEST(AnyOfArrayTest, BasicForms) { - // Iterator - std::vector v0{}; - std::vector v1{1}; - std::vector v2{2, 3}; - EXPECT_THAT(0, Not(AnyOfArray(v0.begin(), v0.end()))); - EXPECT_THAT(1, AnyOfArray(v1.begin(), v1.end())); - EXPECT_THAT(2, Not(AnyOfArray(v1.begin(), v1.end()))); - EXPECT_THAT(3, AnyOfArray(v2.begin(), v2.end())); - EXPECT_THAT(4, Not(AnyOfArray(v2.begin(), v2.end()))); - // Pointer + size - int ar[3] = {1, 2, 3}; - EXPECT_THAT(0, Not(AnyOfArray(ar, 0))); - EXPECT_THAT(1, AnyOfArray(ar, 1)); - EXPECT_THAT(2, Not(AnyOfArray(ar, 1))); - EXPECT_THAT(3, AnyOfArray(ar + 1, 2)); - EXPECT_THAT(4, Not(AnyOfArray(ar + 1, 2))); - // Array - // int ar0[0]; Not usable - int ar1[1] = {1}; - int ar2[2] = {2, 3}; - // EXPECT_THAT(0, Not(AnyOfArray(ar0))); // Cannot work - EXPECT_THAT(1, AnyOfArray(ar1)); - EXPECT_THAT(2, Not(AnyOfArray(ar1))); - EXPECT_THAT(3, AnyOfArray(ar2)); - EXPECT_THAT(4, Not(AnyOfArray(ar2))); - // Container - EXPECT_THAT(0, Not(AnyOfArray(v0))); - EXPECT_THAT(1, AnyOfArray(v1)); - EXPECT_THAT(2, Not(AnyOfArray(v1))); - EXPECT_THAT(3, AnyOfArray(v2)); - EXPECT_THAT(4, Not(AnyOfArray(v2))); - // Initializer - EXPECT_THAT(0, Not(AnyOfArray({}))); // Requires template arg. - EXPECT_THAT(1, AnyOfArray({1})); - EXPECT_THAT(2, Not(AnyOfArray({1}))); - EXPECT_THAT(3, AnyOfArray({2, 3})); - EXPECT_THAT(4, Not(AnyOfArray({2, 3}))); -} - -TEST(AnyOfArrayTest, Matchers) { - // We negate test AllOfArrayTest.Matchers. 
- // vector - std::vector> matchers{Lt(1), Ge(2)}; - EXPECT_THAT(0, AnyOfArray(matchers)); - EXPECT_THAT(1, Not(AnyOfArray(matchers))); - EXPECT_THAT(2, AnyOfArray(matchers)); - // initializer_list - EXPECT_THAT(0, AnyOfArray({Lt(0), Lt(1)})); - EXPECT_THAT(1, Not(AllOfArray({Lt(0), Lt(1)}))); -} - -TEST(AnyOfArrayTest, ExplainsMatchResultCorrectly) { - // AnyOfArray and AllOfArry use the same underlying template-template, - // thus it is sufficient to test one here. - const std::vector v0{}; - const std::vector v1{1}; - const std::vector v2{2, 3}; - const Matcher m0 = AnyOfArray(v0); - const Matcher m1 = AnyOfArray(v1); - const Matcher m2 = AnyOfArray(v2); - EXPECT_EQ("", Explain(m0, 0)); - EXPECT_EQ("", Explain(m1, 1)); - EXPECT_EQ("", Explain(m1, 2)); - EXPECT_EQ("", Explain(m2, 3)); - EXPECT_EQ("", Explain(m2, 4)); - EXPECT_EQ("()", Describe(m0)); - EXPECT_EQ("(is equal to 1)", Describe(m1)); - EXPECT_EQ("(is equal to 2) or (is equal to 3)", Describe(m2)); - EXPECT_EQ("()", DescribeNegation(m0)); - EXPECT_EQ("(isn't equal to 1)", DescribeNegation(m1)); - EXPECT_EQ("(isn't equal to 2) and (isn't equal to 3)", DescribeNegation(m2)); - // Explain with matchers - const Matcher g1 = AnyOfArray({GreaterThan(1)}); - const Matcher g2 = AnyOfArray({GreaterThan(1), GreaterThan(2)}); - // Explains the first positiv match and all prior negative matches... - EXPECT_EQ("which is 1 less than 1", Explain(g1, 0)); - EXPECT_EQ("which is the same as 1", Explain(g1, 1)); - EXPECT_EQ("which is 1 more than 1", Explain(g1, 2)); - EXPECT_EQ("which is 1 less than 1, and which is 2 less than 2", - Explain(g2, 0)); - EXPECT_EQ("which is the same as 1, and which is 1 less than 2", - Explain(g2, 1)); - EXPECT_EQ("which is 1 more than 1", // Only the first - Explain(g2, 2)); -} - -TEST(AllOfTest, HugeMatcher) { - // Verify that using AllOf with many arguments doesn't cause - // the compiler to exceed template instantiation depth limit. 
- EXPECT_THAT(0, testing::AllOf(_, _, _, _, _, _, _, _, _, - testing::AllOf(_, _, _, _, _, _, _, _, _, _))); -} - -TEST(AnyOfTest, HugeMatcher) { - // Verify that using AnyOf with many arguments doesn't cause - // the compiler to exceed template instantiation depth limit. - EXPECT_THAT(0, testing::AnyOf(_, _, _, _, _, _, _, _, _, - testing::AnyOf(_, _, _, _, _, _, _, _, _, _))); -} - -namespace adl_test { - -// Verifies that the implementation of ::testing::AllOf and ::testing::AnyOf -// don't issue unqualified recursive calls. If they do, the argument dependent -// name lookup will cause AllOf/AnyOf in the 'adl_test' namespace to be found -// as a candidate and the compilation will break due to an ambiguous overload. - -// The matcher must be in the same namespace as AllOf/AnyOf to make argument -// dependent lookup find those. -MATCHER(M, "") { - (void)arg; - return true; -} - -template -bool AllOf(const T1& /*t1*/, const T2& /*t2*/) { - return true; -} - -TEST(AllOfTest, DoesNotCallAllOfUnqualified) { - EXPECT_THAT(42, - testing::AllOf(M(), M(), M(), M(), M(), M(), M(), M(), M(), M())); -} - -template -bool AnyOf(const T1&, const T2&) { - return true; -} - -TEST(AnyOfTest, DoesNotCallAnyOfUnqualified) { - EXPECT_THAT(42, - testing::AnyOf(M(), M(), M(), M(), M(), M(), M(), M(), M(), M())); -} - -} // namespace adl_test - -TEST(AllOfTest, WorksOnMoveOnlyType) { - std::unique_ptr p(new int(3)); - EXPECT_THAT(p, AllOf(Pointee(Eq(3)), Pointee(Gt(0)), Pointee(Lt(5)))); - EXPECT_THAT(p, Not(AllOf(Pointee(Eq(3)), Pointee(Gt(0)), Pointee(Lt(3))))); -} - -TEST(AnyOfTest, WorksOnMoveOnlyType) { - std::unique_ptr p(new int(3)); - EXPECT_THAT(p, AnyOf(Pointee(Eq(5)), Pointee(Lt(0)), Pointee(Lt(5)))); - EXPECT_THAT(p, Not(AnyOf(Pointee(Eq(5)), Pointee(Lt(0)), Pointee(Gt(5))))); -} - -MATCHER(IsNotNull, "") { return arg != nullptr; } - -// Verifies that a matcher defined using MATCHER() can work on -// move-only types. 
-TEST(MatcherMacroTest, WorksOnMoveOnlyType) { - std::unique_ptr p(new int(3)); - EXPECT_THAT(p, IsNotNull()); - EXPECT_THAT(std::unique_ptr(), Not(IsNotNull())); -} - -MATCHER_P(UniquePointee, pointee, "") { return *arg == pointee; } - -// Verifies that a matcher defined using MATCHER_P*() can work on -// move-only types. -TEST(MatcherPMacroTest, WorksOnMoveOnlyType) { - std::unique_ptr p(new int(3)); - EXPECT_THAT(p, UniquePointee(3)); - EXPECT_THAT(p, Not(UniquePointee(2))); -} - -#if GTEST_HAS_EXCEPTIONS - -// std::function is used below for compatibility with older copies of -// GCC. Normally, a raw lambda is all that is needed. - -// Test that examples from documentation compile -TEST(ThrowsTest, Examples) { - EXPECT_THAT( - std::function([]() { throw std::runtime_error("message"); }), - Throws()); - - EXPECT_THAT( - std::function([]() { throw std::runtime_error("message"); }), - ThrowsMessage(HasSubstr("message"))); -} - -TEST(ThrowsTest, DoesNotGenerateDuplicateCatchClauseWarning) { - EXPECT_THAT(std::function([]() { throw std::exception(); }), - Throws()); -} - -TEST(ThrowsTest, CallableExecutedExactlyOnce) { - size_t a = 0; - - EXPECT_THAT(std::function([&a]() { - a++; - throw 10; - }), - Throws()); - EXPECT_EQ(a, 1u); - - EXPECT_THAT(std::function([&a]() { - a++; - throw std::runtime_error("message"); - }), - Throws()); - EXPECT_EQ(a, 2u); - - EXPECT_THAT(std::function([&a]() { - a++; - throw std::runtime_error("message"); - }), - ThrowsMessage(HasSubstr("message"))); - EXPECT_EQ(a, 3u); - - EXPECT_THAT(std::function([&a]() { - a++; - throw std::runtime_error("message"); - }), - Throws( - Property(&std::runtime_error::what, HasSubstr("message")))); - EXPECT_EQ(a, 4u); -} - -TEST(ThrowsTest, Describe) { - Matcher> matcher = Throws(); - std::stringstream ss; - matcher.DescribeTo(&ss); - auto explanation = ss.str(); - EXPECT_THAT(explanation, HasSubstr("std::runtime_error")); -} - -TEST(ThrowsTest, Success) { - Matcher> matcher = Throws(); - 
StringMatchResultListener listener; - EXPECT_TRUE(matcher.MatchAndExplain( - []() { throw std::runtime_error("error message"); }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("std::runtime_error")); -} - -TEST(ThrowsTest, FailWrongType) { - Matcher> matcher = Throws(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain( - []() { throw std::logic_error("error message"); }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("std::logic_error")); - EXPECT_THAT(listener.str(), HasSubstr("\"error message\"")); -} - -TEST(ThrowsTest, FailWrongTypeNonStd) { - Matcher> matcher = Throws(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain([]() { throw 10; }, &listener)); - EXPECT_THAT(listener.str(), - HasSubstr("throws an exception of an unknown type")); -} - -TEST(ThrowsTest, FailNoThrow) { - Matcher> matcher = Throws(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain([]() { (void)0; }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("does not throw any exception")); -} - -class ThrowsPredicateTest - : public TestWithParam>> {}; - -TEST_P(ThrowsPredicateTest, Describe) { - Matcher> matcher = GetParam(); - std::stringstream ss; - matcher.DescribeTo(&ss); - auto explanation = ss.str(); - EXPECT_THAT(explanation, HasSubstr("std::runtime_error")); - EXPECT_THAT(explanation, HasSubstr("error message")); -} - -TEST_P(ThrowsPredicateTest, Success) { - Matcher> matcher = GetParam(); - StringMatchResultListener listener; - EXPECT_TRUE(matcher.MatchAndExplain( - []() { throw std::runtime_error("error message"); }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("std::runtime_error")); -} - -TEST_P(ThrowsPredicateTest, FailWrongType) { - Matcher> matcher = GetParam(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain( - []() { throw std::logic_error("error message"); }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("std::logic_error")); - 
EXPECT_THAT(listener.str(), HasSubstr("\"error message\"")); -} - -TEST_P(ThrowsPredicateTest, FailWrongTypeNonStd) { - Matcher> matcher = GetParam(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain([]() { throw 10; }, &listener)); - EXPECT_THAT(listener.str(), - HasSubstr("throws an exception of an unknown type")); -} - -TEST_P(ThrowsPredicateTest, FailWrongMessage) { - Matcher> matcher = GetParam(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain( - []() { throw std::runtime_error("wrong message"); }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("std::runtime_error")); - EXPECT_THAT(listener.str(), Not(HasSubstr("wrong message"))); -} - -TEST_P(ThrowsPredicateTest, FailNoThrow) { - Matcher> matcher = GetParam(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain([]() {}, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("does not throw any exception")); -} - -INSTANTIATE_TEST_SUITE_P( - AllMessagePredicates, ThrowsPredicateTest, - Values(Matcher>( - ThrowsMessage(HasSubstr("error message"))))); - -// Tests that Throws(Matcher{}) compiles even when E2 != const E1&. -TEST(ThrowsPredicateCompilesTest, ExceptionMatcherAcceptsBroadType) { - { - Matcher> matcher = - ThrowsMessage(HasSubstr("error message")); - EXPECT_TRUE( - matcher.Matches([]() { throw std::runtime_error("error message"); })); - EXPECT_FALSE( - matcher.Matches([]() { throw std::runtime_error("wrong message"); })); - } - - { - Matcher inner = Eq(10); - Matcher> matcher = Throws(inner); - EXPECT_TRUE(matcher.Matches([]() { throw(uint32_t) 10; })); - EXPECT_FALSE(matcher.Matches([]() { throw(uint32_t) 11; })); - } -} - -// Tests that ThrowsMessage("message") is equivalent -// to ThrowsMessage(Eq("message")). 
-TEST(ThrowsPredicateCompilesTest, MessageMatcherAcceptsNonMatcher) { - Matcher> matcher = - ThrowsMessage("error message"); - EXPECT_TRUE( - matcher.Matches([]() { throw std::runtime_error("error message"); })); - EXPECT_FALSE(matcher.Matches( - []() { throw std::runtime_error("wrong error message"); })); -} - -#endif // GTEST_HAS_EXCEPTIONS - -} // namespace -} // namespace gmock_matchers_test -} // namespace testing - -#ifdef _MSC_VER -# pragma warning(pop) -#endif diff --git a/ext/googletest/googlemock/test/gmock-matchers_test.h b/ext/googletest/googlemock/test/gmock-matchers_test.h new file mode 100644 index 0000000000..6c986e9ff0 --- /dev/null +++ b/ext/googletest/googlemock/test/gmock-matchers_test.h @@ -0,0 +1,192 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file tests some commonly used argument matchers. + +#ifndef GOOGLEMOCK_TEST_GMOCK_MATCHERS_TEST_H_ +#define GOOGLEMOCK_TEST_GMOCK_MATCHERS_TEST_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock-matchers.h" +#include "gmock/gmock-more-matchers.h" +#include "gmock/gmock.h" +#include "gtest/gtest-spi.h" +#include "gtest/gtest.h" + +namespace testing { +namespace gmock_matchers_test { + +using std::greater; +using std::less; +using std::list; +using std::make_pair; +using std::map; +using std::multimap; +using std::multiset; +using std::ostream; +using std::pair; +using std::set; +using std::stringstream; +using std::vector; +using testing::internal::DummyMatchResultListener; +using testing::internal::ElementMatcherPair; +using testing::internal::ElementMatcherPairs; +using testing::internal::ElementsAreArrayMatcher; +using testing::internal::ExplainMatchFailureTupleTo; +using testing::internal::FloatingEqMatcher; +using testing::internal::FormatMatcherDescription; +using testing::internal::IsReadableTypeName; +using testing::internal::MatchMatrix; +using testing::internal::PredicateFormatterFromMatcher; +using testing::internal::RE; +using 
testing::internal::StreamMatchResultListener; +using testing::internal::Strings; + +// Helper for testing container-valued matchers in mock method context. It is +// important to test matchers in this context, since it requires additional type +// deduction beyond what EXPECT_THAT does, thus making it more restrictive. +struct ContainerHelper { + MOCK_METHOD1(Call, void(std::vector>)); +}; + +// For testing ExplainMatchResultTo(). +template +struct GtestGreaterThanMatcher { + using is_gtest_matcher = void; + + void DescribeTo(ostream* os) const { *os << "is > " << rhs; } + void DescribeNegationTo(ostream* os) const { *os << "is <= " << rhs; } + + bool MatchAndExplain(T lhs, MatchResultListener* listener) const { + if (lhs > rhs) { + *listener << "which is " << (lhs - rhs) << " more than " << rhs; + } else if (lhs == rhs) { + *listener << "which is the same as " << rhs; + } else { + *listener << "which is " << (rhs - lhs) << " less than " << rhs; + } + + return lhs > rhs; + } + + T rhs; +}; + +template +GtestGreaterThanMatcher::type> GtestGreaterThan( + T&& rhs) { + return {rhs}; +} + +// As the matcher above, but using the base class with virtual functions. +template +class GreaterThanMatcher : public MatcherInterface { + public: + explicit GreaterThanMatcher(T rhs) : impl_{rhs} {} + + void DescribeTo(ostream* os) const override { impl_.DescribeTo(os); } + void DescribeNegationTo(ostream* os) const override { + impl_.DescribeNegationTo(os); + } + + bool MatchAndExplain(T lhs, MatchResultListener* listener) const override { + return impl_.MatchAndExplain(lhs, listener); + } + + private: + const GtestGreaterThanMatcher impl_; +}; + +// Names and instantiates a new instance of GTestMatcherTestP. 
+#define INSTANTIATE_GTEST_MATCHER_TEST_P(TestSuite) \ + using TestSuite##P = GTestMatcherTestP; \ + INSTANTIATE_TEST_SUITE_P(MatcherInterface, TestSuite##P, Values(false)); \ + INSTANTIATE_TEST_SUITE_P(GtestMatcher, TestSuite##P, Values(true)) + +class GTestMatcherTestP : public testing::TestWithParam { + public: + template + Matcher GreaterThan(T n) { + if (use_gtest_matcher_) { + return GtestGreaterThan(n); + } else { + return MakeMatcher(new GreaterThanMatcher(n)); + } + } + const bool use_gtest_matcher_ = GetParam(); +}; + +// Returns the description of the given matcher. +template +std::string Describe(const Matcher& m) { + return DescribeMatcher(m); +} + +// Returns the description of the negation of the given matcher. +template +std::string DescribeNegation(const Matcher& m) { + return DescribeMatcher(m, true); +} + +// Returns the reason why x matches, or doesn't match, m. +template +std::string Explain(const MatcherType& m, const Value& x) { + StringMatchResultListener listener; + ExplainMatchResult(m, x, &listener); + return listener.str(); +} + +} // namespace gmock_matchers_test +} // namespace testing + +#endif // GOOGLEMOCK_TEST_GMOCK_MATCHERS_TEST_H_ diff --git a/ext/googletest/googlemock/test/gmock-more-actions_test.cc b/ext/googletest/googlemock/test/gmock-more-actions_test.cc index 53bb029f45..b9b66bf900 100644 --- a/ext/googletest/googlemock/test/gmock-more-actions_test.cc +++ b/ext/googletest/googlemock/test/gmock-more-actions_test.cc @@ -145,7 +145,7 @@ class Foo { std::string Binary(const std::string& str, char c) const { return str + c; } - int Ternary(int x, bool y, char z) { return value_ + x + y*z; } + int Ternary(int x, bool y, char z) { return value_ + x + y * z; } int SumOf4(int a, int b, int c, int d) const { return a + b + c + d + value_; @@ -291,8 +291,7 @@ TEST(InvokeTest, FunctionWithUnusedParameters) { std::make_tuple(10, 2, 5.6, std::string("hi")); EXPECT_EQ(12, a1.Perform(dummy)); - Action a2 = - Invoke(SumOfFirst2); + Action 
a2 = Invoke(SumOfFirst2); EXPECT_EQ( 23, a2.Perform(std::make_tuple(20, 3, true, static_cast(nullptr)))); } @@ -303,8 +302,7 @@ TEST(InvokeTest, MethodWithUnusedParameters) { Action a1 = Invoke(&foo, &Foo::SumOfLast2); EXPECT_EQ(12, a1.Perform(std::make_tuple(CharPtr("hi"), true, 10, 2))); - Action a2 = - Invoke(&foo, &Foo::SumOfLast2); + Action a2 = Invoke(&foo, &Foo::SumOfLast2); EXPECT_EQ(23, a2.Perform(std::make_tuple('a', 2.5, 20, 3))); } @@ -362,7 +360,8 @@ TEST(InvokeMethodTest, MethodThatTakes4Arguments) { // Tests using Invoke() with a 5-argument method. TEST(InvokeMethodTest, MethodThatTakes5Arguments) { Foo foo; - Action a = Invoke(&foo, &Foo::SumOf5); // NOLINT + Action a = + Invoke(&foo, &Foo::SumOf5); // NOLINT EXPECT_EQ(12345, a.Perform(std::make_tuple(10000, 2000, 300, 40, 5))); } @@ -462,6 +461,12 @@ TEST(ReturnArgActionTest, WorksForMultiArgStringArg2) { EXPECT_EQ("seven", a.Perform(std::make_tuple(5, 6, std::string("seven"), 8))); } +TEST(ReturnArgActionTest, WorksForNonConstRefArg0) { + const Action a = ReturnArg<0>(); + std::string s = "12345"; + EXPECT_EQ(&s, &a.Perform(std::forward_as_tuple(s))); +} + TEST(SaveArgActionTest, WorksForSameType) { int result = 0; const Action a1 = SaveArg<0>(&result); @@ -517,15 +522,12 @@ TEST(SetArgRefereeActionTest, WorksWithExtraArguments) { // the bool provided to the constructor to true when destroyed. class DeletionTester { public: - explicit DeletionTester(bool* is_deleted) - : is_deleted_(is_deleted) { + explicit DeletionTester(bool* is_deleted) : is_deleted_(is_deleted) { // Make sure the bit is set to false. 
*is_deleted_ = false; } - ~DeletionTester() { - *is_deleted_ = true; - } + ~DeletionTester() { *is_deleted_ = true; } private: bool* is_deleted_; @@ -534,7 +536,7 @@ class DeletionTester { TEST(DeleteArgActionTest, OneArg) { bool is_deleted = false; DeletionTester* t = new DeletionTester(&is_deleted); - const Action a1 = DeleteArg<0>(); // NOLINT + const Action a1 = DeleteArg<0>(); // NOLINT EXPECT_FALSE(is_deleted); a1.Perform(std::make_tuple(t)); EXPECT_TRUE(is_deleted); @@ -543,8 +545,9 @@ TEST(DeleteArgActionTest, OneArg) { TEST(DeleteArgActionTest, TenArgs) { bool is_deleted = false; DeletionTester* t = new DeletionTester(&is_deleted); - const Action a1 = DeleteArg<9>(); + const Action + a1 = DeleteArg<9>(); EXPECT_FALSE(is_deleted); a1.Perform(std::make_tuple(true, 5, 6, CharPtr("hi"), false, 7, 8, 9, 10, t)); EXPECT_TRUE(is_deleted); @@ -602,7 +605,7 @@ TEST(ThrowActionTest, Times0) { // pointed to by the N-th (0-based) argument to values in range [first, last). TEST(SetArrayArgumentTest, SetsTheNthArray) { using MyFunction = void(bool, int*, char*); - int numbers[] = { 1, 2, 3 }; + int numbers[] = {1, 2, 3}; Action a = SetArrayArgument<1>(numbers, numbers + 3); int n[4] = {}; @@ -638,7 +641,7 @@ TEST(SetArrayArgumentTest, SetsTheNthArray) { // Tests SetArrayArgument(first, last) where first == last. TEST(SetArrayArgumentTest, SetsTheNthArrayWithEmptyRange) { using MyFunction = void(bool, int*); - int numbers[] = { 1, 2, 3 }; + int numbers[] = {1, 2, 3}; Action a = SetArrayArgument<1>(numbers, numbers); int n[4] = {}; @@ -654,10 +657,10 @@ TEST(SetArrayArgumentTest, SetsTheNthArrayWithEmptyRange) { // (but not equal) to the argument type. 
TEST(SetArrayArgumentTest, SetsTheNthArrayWithConvertibleType) { using MyFunction = void(bool, int*); - char chars[] = { 97, 98, 99 }; + char chars[] = {97, 98, 99}; Action a = SetArrayArgument<1>(chars, chars + 3); - int codes[4] = { 111, 222, 333, 444 }; + int codes[4] = {111, 222, 333, 444}; int* pcodes = codes; a.Perform(std::make_tuple(true, pcodes)); EXPECT_EQ(97, codes[0]); diff --git a/ext/googletest/googlemock/test/gmock-nice-strict_test.cc b/ext/googletest/googlemock/test/gmock-nice-strict_test.cc index 25558ebffe..08254e1acd 100644 --- a/ext/googletest/googlemock/test/gmock-nice-strict_test.cc +++ b/ext/googletest/googlemock/test/gmock-nice-strict_test.cc @@ -31,6 +31,7 @@ #include #include + #include "gmock/gmock.h" #include "gtest/gtest-spi.h" #include "gtest/gtest.h" @@ -44,13 +45,13 @@ class Mock { MOCK_METHOD0(DoThis, void()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(Mock); + Mock(const Mock&) = delete; + Mock& operator=(const Mock&) = delete; }; namespace testing { namespace gmock_nice_strict_test { -using testing::GMOCK_FLAG(verbose); using testing::HasSubstr; using testing::NaggyMock; using testing::NiceMock; @@ -93,7 +94,8 @@ class MockFoo : public Foo { MOCK_METHOD0(ReturnNonDefaultConstructible, NotDefaultConstructible()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFoo); + MockFoo(const MockFoo&) = delete; + MockFoo& operator=(const MockFoo&) = delete; }; class MockBar { @@ -103,7 +105,8 @@ class MockBar { MockBar(char a1, char a2, std::string a3, std::string a4, int a5, int a6, const std::string& a7, const std::string& a8, bool a9, bool a10) { str_ = std::string() + a1 + a2 + a3 + a4 + static_cast(a5) + - static_cast(a6) + a7 + a8 + (a9 ? 'T' : 'F') + (a10 ? 'T' : 'F'); + static_cast(a6) + a7 + a8 + (a9 ? 'T' : 'F') + + (a10 ? 
'T' : 'F'); } virtual ~MockBar() {} @@ -116,10 +119,10 @@ class MockBar { private: std::string str_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockBar); + MockBar(const MockBar&) = delete; + MockBar& operator=(const MockBar&) = delete; }; - class MockBaz { public: class MoveOnly { @@ -140,8 +143,8 @@ class MockBaz { // Tests that a raw mock generates warnings for uninteresting calls. TEST(RawMockTest, WarningForUninterestingCall) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); MockFoo raw_foo; @@ -151,26 +154,25 @@ TEST(RawMockTest, WarningForUninterestingCall) { EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } // Tests that a raw mock generates warnings for uninteresting calls // that delete the mock object. TEST(RawMockTest, WarningForUninterestingCallAfterDeath) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); MockFoo* const raw_foo = new MockFoo; - ON_CALL(*raw_foo, DoThis()) - .WillByDefault(Invoke(raw_foo, &MockFoo::Delete)); + ON_CALL(*raw_foo, DoThis()).WillByDefault(Invoke(raw_foo, &MockFoo::Delete)); CaptureStdout(); raw_foo->DoThis(); EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } // Tests that a raw mock generates informational logs for @@ -178,14 +180,14 @@ TEST(RawMockTest, WarningForUninterestingCallAfterDeath) { TEST(RawMockTest, InfoForUninterestingCall) { MockFoo raw_foo; - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "info"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "info"); CaptureStdout(); 
raw_foo.DoThis(); EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } TEST(RawMockTest, IsNaggy_IsNice_IsStrict) { @@ -223,14 +225,14 @@ TEST(NiceMockTest, NoWarningForUninterestingCallAfterDeath) { TEST(NiceMockTest, InfoForUninterestingCall) { NiceMock nice_foo; - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "info"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "info"); CaptureStdout(); nice_foo.DoThis(); EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } #endif // GTEST_HAS_STREAM_REDIRECTION @@ -281,8 +283,8 @@ TEST(NiceMockTest, NonDefaultConstructor) { // Tests that NiceMock works with a mock class that has a 10-ary // non-default constructor. TEST(NiceMockTest, NonDefaultConstructor10) { - NiceMock nice_bar('a', 'b', "c", "d", 'e', 'f', - "g", "h", true, false); + NiceMock nice_bar('a', 'b', "c", "d", 'e', 'f', "g", "h", true, + false); EXPECT_EQ("abcdefghTF", nice_bar.str()); nice_bar.This(); @@ -326,8 +328,8 @@ TEST(NiceMockTest, IsNaggy_IsNice_IsStrict) { // Tests that a naggy mock generates warnings for uninteresting calls. TEST(NaggyMockTest, WarningForUninterestingCall) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); NaggyMock naggy_foo; @@ -337,14 +339,14 @@ TEST(NaggyMockTest, WarningForUninterestingCall) { EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } // Tests that a naggy mock generates a warning for an uninteresting call // that deletes the mock object. 
TEST(NaggyMockTest, WarningForUninterestingCallAfterDeath) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); NaggyMock* const naggy_foo = new NaggyMock; @@ -356,7 +358,7 @@ TEST(NaggyMockTest, WarningForUninterestingCallAfterDeath) { EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } #endif // GTEST_HAS_STREAM_REDIRECTION @@ -391,8 +393,8 @@ TEST(NaggyMockTest, NonDefaultConstructor) { // Tests that NaggyMock works with a mock class that has a 10-ary // non-default constructor. TEST(NaggyMockTest, NonDefaultConstructor10) { - NaggyMock naggy_bar('0', '1', "2", "3", '4', '5', - "6", "7", true, false); + NaggyMock naggy_bar('0', '1', "2", "3", '4', '5', "6", "7", true, + false); EXPECT_EQ("01234567TF", naggy_bar.str()); naggy_bar.This(); @@ -419,8 +421,8 @@ TEST(NaggyMockTest, AcceptsClassNamedMock) { } TEST(NaggyMockTest, IsNaggyInDestructor) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); CaptureStdout(); { @@ -431,7 +433,7 @@ TEST(NaggyMockTest, IsNaggyInDestructor) { EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } TEST(NaggyMockTest, IsNaggy_IsNice_IsStrict) { @@ -491,8 +493,8 @@ TEST(StrictMockTest, NonDefaultConstructor) { // Tests that StrictMock works with a mock class that has a 10-ary // non-default constructor. 
TEST(StrictMockTest, NonDefaultConstructor10) { - StrictMock strict_bar('a', 'b', "c", "d", 'e', 'f', - "g", "h", true, false); + StrictMock strict_bar('a', 'b', "c", "d", 'e', 'f', "g", "h", true, + false); EXPECT_EQ("abcdefghTF", strict_bar.str()); EXPECT_NONFATAL_FAILURE(strict_bar.That(5, true), diff --git a/ext/googletest/googlemock/test/gmock-port_test.cc b/ext/googletest/googlemock/test/gmock-port_test.cc index a2c2be2488..c31af82155 100644 --- a/ext/googletest/googlemock/test/gmock-port_test.cc +++ b/ext/googletest/googlemock/test/gmock-port_test.cc @@ -27,12 +27,12 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file tests the internal cross-platform support utilities. #include "gmock/internal/gmock-port.h" + #include "gtest/gtest.h" // NOTE: if this file is left without tests for some reason, put a dummy diff --git a/ext/googletest/googlemock/test/gmock-pp-string_test.cc b/ext/googletest/googlemock/test/gmock-pp-string_test.cc index 6f66cf156a..53c80f4e3d 100644 --- a/ext/googletest/googlemock/test/gmock-pp-string_test.cc +++ b/ext/googletest/googlemock/test/gmock-pp-string_test.cc @@ -30,11 +30,10 @@ // Google Mock - a framework for writing C++ mock classes. // // This file tests the internal preprocessor macro library. 
-#include "gmock/internal/gmock-pp.h" - #include #include "gmock/gmock.h" +#include "gmock/internal/gmock-pp.h" namespace testing { namespace { diff --git a/ext/googletest/googlemock/test/gmock-spec-builders_test.cc b/ext/googletest/googlemock/test/gmock-spec-builders_test.cc index fa97411332..122d5b94e4 100644 --- a/ext/googletest/googlemock/test/gmock-spec-builders_test.cc +++ b/ext/googletest/googlemock/test/gmock-spec-builders_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file tests the spec builder syntax. @@ -38,74 +37,28 @@ #include // NOLINT #include #include +#include #include "gmock/gmock.h" #include "gmock/internal/gmock-port.h" -#include "gtest/gtest.h" #include "gtest/gtest-spi.h" +#include "gtest/gtest.h" #include "gtest/internal/gtest-port.h" namespace testing { -namespace internal { - -// Helper class for testing the Expectation class template. -class ExpectationTester { - public: - // Sets the call count of the given expectation to the given number. 
- void SetCallCount(int n, ExpectationBase* exp) { - exp->call_count_ = n; - } -}; - -} // namespace internal -} // namespace testing - namespace { -using testing::_; -using testing::AnyNumber; -using testing::AtLeast; -using testing::AtMost; -using testing::Between; -using testing::Cardinality; -using testing::CardinalityInterface; -using testing::Const; -using testing::ContainsRegex; -using testing::DoAll; -using testing::DoDefault; -using testing::Eq; -using testing::Expectation; -using testing::ExpectationSet; -using testing::GMOCK_FLAG(verbose); -using testing::Gt; -using testing::IgnoreResult; -using testing::InSequence; -using testing::Invoke; -using testing::InvokeWithoutArgs; -using testing::IsNotSubstring; -using testing::IsSubstring; -using testing::Lt; -using testing::Message; -using testing::Mock; -using testing::NaggyMock; -using testing::Ne; -using testing::Return; -using testing::SaveArg; -using testing::Sequence; -using testing::SetArgPointee; -using testing::internal::ExpectationTester; -using testing::internal::FormatFileLocation; -using testing::internal::kAllow; -using testing::internal::kErrorVerbosity; -using testing::internal::kFail; -using testing::internal::kInfoVerbosity; -using testing::internal::kWarn; -using testing::internal::kWarningVerbosity; +using ::testing::internal::FormatFileLocation; +using ::testing::internal::kAllow; +using ::testing::internal::kErrorVerbosity; +using ::testing::internal::kFail; +using ::testing::internal::kInfoVerbosity; +using ::testing::internal::kWarn; +using ::testing::internal::kWarningVerbosity; #if GTEST_HAS_STREAM_REDIRECTION -using testing::HasSubstr; -using testing::internal::CaptureStdout; -using testing::internal::GetCapturedStdout; +using ::testing::internal::CaptureStdout; +using ::testing::internal::GetCapturedStdout; #endif class Incomplete; @@ -126,8 +79,7 @@ TEST(MockMethodTest, CanInstantiateWithIncompleteArgType) { // use the mock, as long as Google Mock knows how to print the // 
argument. MockIncomplete incomplete; - EXPECT_CALL(incomplete, ByRefFunc(_)) - .Times(AnyNumber()); + EXPECT_CALL(incomplete, ByRefFunc(_)).Times(AnyNumber()); } // The definition of the printer for the argument type doesn't have to @@ -155,7 +107,8 @@ class MockA { MOCK_METHOD2(ReturnInt, int(int x, int y)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockA); + MockA(const MockA&) = delete; + MockA& operator=(const MockA&) = delete; }; class MockB { @@ -163,10 +116,11 @@ class MockB { MockB() {} MOCK_CONST_METHOD0(DoB, int()); // NOLINT - MOCK_METHOD1(DoB, int(int n)); // NOLINT + MOCK_METHOD1(DoB, int(int n)); // NOLINT private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockB); + MockB(const MockB&) = delete; + MockB& operator=(const MockB&) = delete; }; class ReferenceHoldingMock { @@ -176,7 +130,8 @@ class ReferenceHoldingMock { MOCK_METHOD1(AcceptReference, void(std::shared_ptr*)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ReferenceHoldingMock); + ReferenceHoldingMock(const ReferenceHoldingMock&) = delete; + ReferenceHoldingMock& operator=(const ReferenceHoldingMock&) = delete; }; // Tests that EXPECT_CALL and ON_CALL compile in a presence of macro @@ -198,7 +153,8 @@ class MockCC : public CC { MOCK_METHOD0(Method, int()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockCC); + MockCC(const MockCC&) = delete; + MockCC& operator=(const MockCC&) = delete; }; // Tests that a method with expanded name compiles. 
@@ -254,41 +210,42 @@ TEST(OnCallSyntaxTest, EvaluatesSecondArgumentOnce) { TEST(OnCallSyntaxTest, WithIsOptional) { MockA a; - ON_CALL(a, DoA(5)) - .WillByDefault(Return()); - ON_CALL(a, DoA(_)) - .With(_) - .WillByDefault(Return()); + ON_CALL(a, DoA(5)).WillByDefault(Return()); + ON_CALL(a, DoA(_)).With(_).WillByDefault(Return()); } TEST(OnCallSyntaxTest, WithCanAppearAtMostOnce) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - ON_CALL(a, ReturnResult(_)) - .With(_) - .With(_) - .WillByDefault(Return(Result())); - }, ".With() cannot appear more than once in an ON_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + ON_CALL(a, ReturnResult(_)) + .With(_) + .With(_) + .WillByDefault(Return(Result())); + }, + ".With() cannot appear more than once in an ON_CALL()"); } TEST(OnCallSyntaxTest, WillByDefaultIsMandatory) { MockA a; - EXPECT_DEATH_IF_SUPPORTED({ - ON_CALL(a, DoA(5)); - a.DoA(5); - }, ""); + EXPECT_DEATH_IF_SUPPORTED( + { + ON_CALL(a, DoA(5)); + a.DoA(5); + }, + ""); } TEST(OnCallSyntaxTest, WillByDefaultCanAppearAtMostOnce) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - ON_CALL(a, DoA(5)) - .WillByDefault(Return()) - .WillByDefault(Return()); - }, ".WillByDefault() must appear exactly once in an ON_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + ON_CALL(a, DoA(5)).WillByDefault(Return()).WillByDefault(Return()); + }, + ".WillByDefault() must appear exactly once in an ON_CALL()"); } // Tests that EXPECT_CALL evaluates its arguments exactly once as @@ -316,21 +273,18 @@ TEST(ExpectCallSyntaxTest, EvaluatesSecondArgumentOnce) { TEST(ExpectCallSyntaxTest, WithIsOptional) { MockA a; - EXPECT_CALL(a, DoA(5)) - .Times(0); - EXPECT_CALL(a, DoA(6)) - .With(_) - .Times(0); + EXPECT_CALL(a, DoA(5)).Times(0); + EXPECT_CALL(a, DoA(6)).With(_).Times(0); } TEST(ExpectCallSyntaxTest, WithCanAppearAtMostOnce) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(6)) - .With(_) - .With(_); - }, ".With() cannot appear more than once in an 
EXPECT_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(6)).With(_).With(_); + }, + ".With() cannot appear more than once in an EXPECT_CALL()"); a.DoA(6); } @@ -338,19 +292,19 @@ TEST(ExpectCallSyntaxTest, WithCanAppearAtMostOnce) { TEST(ExpectCallSyntaxTest, WithMustBeFirstClause) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .Times(1) - .With(_); - }, ".With() must be the first clause in an EXPECT_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).Times(1).With(_); + }, + ".With() must be the first clause in an EXPECT_CALL()"); a.DoA(1); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(2)) - .WillOnce(Return()) - .With(_); - }, ".With() must be the first clause in an EXPECT_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(2)).WillOnce(Return()).With(_); + }, + ".With() must be the first clause in an EXPECT_CALL()"); a.DoA(2); } @@ -358,12 +312,9 @@ TEST(ExpectCallSyntaxTest, WithMustBeFirstClause) { TEST(ExpectCallSyntaxTest, TimesCanBeInferred) { MockA a; - EXPECT_CALL(a, DoA(1)) - .WillOnce(Return()); + EXPECT_CALL(a, DoA(1)).WillOnce(Return()); - EXPECT_CALL(a, DoA(2)) - .WillOnce(Return()) - .WillRepeatedly(Return()); + EXPECT_CALL(a, DoA(2)).WillOnce(Return()).WillRepeatedly(Return()); a.DoA(1); a.DoA(2); @@ -373,11 +324,11 @@ TEST(ExpectCallSyntaxTest, TimesCanBeInferred) { TEST(ExpectCallSyntaxTest, TimesCanAppearAtMostOnce) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .Times(1) - .Times(2); - }, ".Times() cannot appear more than once in an EXPECT_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).Times(1).Times(2); + }, + ".Times() cannot appear more than once in an EXPECT_CALL()"); a.DoA(1); a.DoA(1); @@ -387,11 +338,11 @@ TEST(ExpectCallSyntaxTest, TimesMustBeBeforeInSequence) { MockA a; Sequence s; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .InSequence(s) - 
.Times(1); - }, ".Times() cannot appear after "); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).InSequence(s).Times(1); + }, + ".Times() may only appear *before* "); a.DoA(1); } @@ -401,8 +352,7 @@ TEST(ExpectCallSyntaxTest, InSequenceIsOptional) { Sequence s; EXPECT_CALL(a, DoA(1)); - EXPECT_CALL(a, DoA(2)) - .InSequence(s); + EXPECT_CALL(a, DoA(2)).InSequence(s); a.DoA(1); a.DoA(2); @@ -412,9 +362,7 @@ TEST(ExpectCallSyntaxTest, InSequenceCanAppearMultipleTimes) { MockA a; Sequence s1, s2; - EXPECT_CALL(a, DoA(1)) - .InSequence(s1, s2) - .InSequence(s1); + EXPECT_CALL(a, DoA(1)).InSequence(s1, s2).InSequence(s1); a.DoA(1); } @@ -423,13 +371,12 @@ TEST(ExpectCallSyntaxTest, InSequenceMustBeBeforeAfter) { MockA a; Sequence s; - Expectation e = EXPECT_CALL(a, DoA(1)) - .Times(AnyNumber()); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(2)) - .After(e) - .InSequence(s); - }, ".InSequence() cannot appear after "); + Expectation e = EXPECT_CALL(a, DoA(1)).Times(AnyNumber()); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(2)).After(e).InSequence(s); + }, + ".InSequence() cannot appear after "); a.DoA(2); } @@ -438,11 +385,11 @@ TEST(ExpectCallSyntaxTest, InSequenceMustBeBeforeWillOnce) { MockA a; Sequence s; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .WillOnce(Return()) - .InSequence(s); - }, ".InSequence() cannot appear after "); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).WillOnce(Return()).InSequence(s); + }, + ".InSequence() cannot appear after "); a.DoA(1); } @@ -451,11 +398,9 @@ TEST(ExpectCallSyntaxTest, AfterMustBeBeforeWillOnce) { MockA a; Expectation e = EXPECT_CALL(a, DoA(1)); - EXPECT_NONFATAL_FAILURE({ - EXPECT_CALL(a, DoA(2)) - .WillOnce(Return()) - .After(e); - }, ".After() cannot appear after "); + EXPECT_NONFATAL_FAILURE( + { EXPECT_CALL(a, DoA(2)).WillOnce(Return()).After(e); }, + ".After() cannot appear after "); a.DoA(1); a.DoA(2); @@ -465,8 +410,7 @@ 
TEST(ExpectCallSyntaxTest, WillIsOptional) { MockA a; EXPECT_CALL(a, DoA(1)); - EXPECT_CALL(a, DoA(2)) - .WillOnce(Return()); + EXPECT_CALL(a, DoA(2)).WillOnce(Return()); a.DoA(1); a.DoA(2); @@ -485,11 +429,11 @@ TEST(ExpectCallSyntaxTest, WillCanAppearMultipleTimes) { TEST(ExpectCallSyntaxTest, WillMustBeBeforeWillRepeatedly) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .WillRepeatedly(Return()) - .WillOnce(Return()); - }, ".WillOnce() cannot appear after "); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).WillRepeatedly(Return()).WillOnce(Return()); + }, + ".WillOnce() cannot appear after "); a.DoA(1); } @@ -497,11 +441,8 @@ TEST(ExpectCallSyntaxTest, WillMustBeBeforeWillRepeatedly) { TEST(ExpectCallSyntaxTest, WillRepeatedlyIsOptional) { MockA a; - EXPECT_CALL(a, DoA(1)) - .WillOnce(Return()); - EXPECT_CALL(a, DoA(2)) - .WillOnce(Return()) - .WillRepeatedly(Return()); + EXPECT_CALL(a, DoA(1)).WillOnce(Return()); + EXPECT_CALL(a, DoA(2)).WillOnce(Return()).WillRepeatedly(Return()); a.DoA(1); a.DoA(2); @@ -511,30 +452,30 @@ TEST(ExpectCallSyntaxTest, WillRepeatedlyIsOptional) { TEST(ExpectCallSyntaxTest, WillRepeatedlyCannotAppearMultipleTimes) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .WillRepeatedly(Return()) - .WillRepeatedly(Return()); - }, ".WillRepeatedly() cannot appear more than once in an " - "EXPECT_CALL()"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).WillRepeatedly(Return()).WillRepeatedly( + Return()); + }, + ".WillRepeatedly() cannot appear more than once in an " + "EXPECT_CALL()"); } TEST(ExpectCallSyntaxTest, WillRepeatedlyMustBeBeforeRetiresOnSaturation) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .RetiresOnSaturation() - .WillRepeatedly(Return()); - }, ".WillRepeatedly() cannot appear after "); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).RetiresOnSaturation().WillRepeatedly(Return()); + }, 
+ ".WillRepeatedly() cannot appear after "); } TEST(ExpectCallSyntaxTest, RetiresOnSaturationIsOptional) { MockA a; EXPECT_CALL(a, DoA(1)); - EXPECT_CALL(a, DoA(1)) - .RetiresOnSaturation(); + EXPECT_CALL(a, DoA(1)).RetiresOnSaturation(); a.DoA(1); a.DoA(1); @@ -543,11 +484,11 @@ TEST(ExpectCallSyntaxTest, RetiresOnSaturationIsOptional) { TEST(ExpectCallSyntaxTest, RetiresOnSaturationCannotAppearMultipleTimes) { MockA a; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_CALL(a, DoA(1)) - .RetiresOnSaturation() - .RetiresOnSaturation(); - }, ".RetiresOnSaturation() cannot appear more than once"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_CALL(a, DoA(1)).RetiresOnSaturation().RetiresOnSaturation(); + }, + ".RetiresOnSaturation() cannot appear more than once"); a.DoA(1); } @@ -558,16 +499,20 @@ TEST(ExpectCallSyntaxTest, DefaultCardinalityIsOnce) { EXPECT_CALL(a, DoA(1)); a.DoA(1); } - EXPECT_NONFATAL_FAILURE({ // NOLINT - MockA a; - EXPECT_CALL(a, DoA(1)); - }, "to be called once"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - MockA a; - EXPECT_CALL(a, DoA(1)); - a.DoA(1); - a.DoA(1); - }, "to be called once"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + MockA a; + EXPECT_CALL(a, DoA(1)); + }, + "to be called once"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + MockA a; + EXPECT_CALL(a, DoA(1)); + a.DoA(1); + a.DoA(1); + }, + "to be called once"); } #if GTEST_HAS_STREAM_REDIRECTION @@ -580,13 +525,9 @@ TEST(ExpectCallSyntaxTest, DoesNotWarnOnAdequateActionCount) { MockB b; // It's always fine to omit WillOnce() entirely. - EXPECT_CALL(b, DoB()) - .Times(0); - EXPECT_CALL(b, DoB(1)) - .Times(AtMost(1)); - EXPECT_CALL(b, DoB(2)) - .Times(1) - .WillRepeatedly(Return(1)); + EXPECT_CALL(b, DoB()).Times(0); + EXPECT_CALL(b, DoB(1)).Times(AtMost(1)); + EXPECT_CALL(b, DoB(2)).Times(1).WillRepeatedly(Return(1)); // It's fine for the number of WillOnce()s to equal the upper bound. 
EXPECT_CALL(b, DoB(3)) @@ -596,10 +537,8 @@ TEST(ExpectCallSyntaxTest, DoesNotWarnOnAdequateActionCount) { // It's fine for the number of WillOnce()s to be smaller than the // upper bound when there is a WillRepeatedly(). - EXPECT_CALL(b, DoB(4)) - .Times(AtMost(3)) - .WillOnce(Return(1)) - .WillRepeatedly(Return(2)); + EXPECT_CALL(b, DoB(4)).Times(AtMost(3)).WillOnce(Return(1)).WillRepeatedly( + Return(2)); // Satisfies the above expectations. b.DoB(2); @@ -616,13 +555,9 @@ TEST(ExpectCallSyntaxTest, WarnsOnTooManyActions) { MockB b; // Warns when the number of WillOnce()s is larger than the upper bound. - EXPECT_CALL(b, DoB()) - .Times(0) - .WillOnce(Return(1)); // #1 - EXPECT_CALL(b, DoB()) - .Times(AtMost(1)) - .WillOnce(Return(1)) - .WillOnce(Return(2)); // #2 + EXPECT_CALL(b, DoB()).Times(0).WillOnce(Return(1)); // #1 + EXPECT_CALL(b, DoB()).Times(AtMost(1)).WillOnce(Return(1)).WillOnce( + Return(2)); // #2 EXPECT_CALL(b, DoB(1)) .Times(1) .WillOnce(Return(1)) @@ -631,41 +566,34 @@ TEST(ExpectCallSyntaxTest, WarnsOnTooManyActions) { // Warns when the number of WillOnce()s equals the upper bound and // there is a WillRepeatedly(). - EXPECT_CALL(b, DoB()) - .Times(0) - .WillRepeatedly(Return(1)); // #4 - EXPECT_CALL(b, DoB(2)) - .Times(1) - .WillOnce(Return(1)) - .WillRepeatedly(Return(2)); // #5 + EXPECT_CALL(b, DoB()).Times(0).WillRepeatedly(Return(1)); // #4 + EXPECT_CALL(b, DoB(2)).Times(1).WillOnce(Return(1)).WillRepeatedly( + Return(2)); // #5 // Satisfies the above expectations. 
b.DoB(1); b.DoB(2); } const std::string output = GetCapturedStdout(); - EXPECT_PRED_FORMAT2( - IsSubstring, - "Too many actions specified in EXPECT_CALL(b, DoB())...\n" - "Expected to be never called, but has 1 WillOnce().", - output); // #1 - EXPECT_PRED_FORMAT2( - IsSubstring, - "Too many actions specified in EXPECT_CALL(b, DoB())...\n" - "Expected to be called at most once, " - "but has 2 WillOnce()s.", - output); // #2 + EXPECT_PRED_FORMAT2(IsSubstring, + "Too many actions specified in EXPECT_CALL(b, DoB())...\n" + "Expected to be never called, but has 1 WillOnce().", + output); // #1 + EXPECT_PRED_FORMAT2(IsSubstring, + "Too many actions specified in EXPECT_CALL(b, DoB())...\n" + "Expected to be called at most once, " + "but has 2 WillOnce()s.", + output); // #2 EXPECT_PRED_FORMAT2( IsSubstring, "Too many actions specified in EXPECT_CALL(b, DoB(1))...\n" "Expected to be called once, but has 2 WillOnce()s.", output); // #3 - EXPECT_PRED_FORMAT2( - IsSubstring, - "Too many actions specified in EXPECT_CALL(b, DoB())...\n" - "Expected to be never called, but has 0 WillOnce()s " - "and a WillRepeatedly().", - output); // #4 + EXPECT_PRED_FORMAT2(IsSubstring, + "Too many actions specified in EXPECT_CALL(b, DoB())...\n" + "Expected to be never called, but has 0 WillOnce()s " + "and a WillRepeatedly().", + output); // #4 EXPECT_PRED_FORMAT2( IsSubstring, "Too many actions specified in EXPECT_CALL(b, DoB(2))...\n" @@ -679,26 +607,23 @@ TEST(ExpectCallSyntaxTest, WarnsOnTooManyActions) { TEST(ExpectCallSyntaxTest, WarnsOnTooFewActions) { MockB b; - EXPECT_CALL(b, DoB()) - .Times(Between(2, 3)) - .WillOnce(Return(1)); + EXPECT_CALL(b, DoB()).Times(Between(2, 3)).WillOnce(Return(1)); CaptureStdout(); b.DoB(); const std::string output = GetCapturedStdout(); - EXPECT_PRED_FORMAT2( - IsSubstring, - "Too few actions specified in EXPECT_CALL(b, DoB())...\n" - "Expected to be called between 2 and 3 times, " - "but has only 1 WillOnce().", - output); + 
EXPECT_PRED_FORMAT2(IsSubstring, + "Too few actions specified in EXPECT_CALL(b, DoB())...\n" + "Expected to be called between 2 and 3 times, " + "but has only 1 WillOnce().", + output); b.DoB(); } TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) { - int original_behavior = testing::GMOCK_FLAG(default_mock_behavior); + int original_behavior = GMOCK_FLAG_GET(default_mock_behavior); - testing::GMOCK_FLAG(default_mock_behavior) = kAllow; + GMOCK_FLAG_SET(default_mock_behavior, kAllow); CaptureStdout(); { MockA a; @@ -707,7 +632,7 @@ TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) { std::string output = GetCapturedStdout(); EXPECT_TRUE(output.empty()) << output; - testing::GMOCK_FLAG(default_mock_behavior) = kWarn; + GMOCK_FLAG_SET(default_mock_behavior, kWarn); CaptureStdout(); { MockA a; @@ -718,14 +643,16 @@ TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) { EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call", warning_output); - testing::GMOCK_FLAG(default_mock_behavior) = kFail; - EXPECT_NONFATAL_FAILURE({ - MockA a; - a.DoA(0); - }, "Uninteresting mock function call"); + GMOCK_FLAG_SET(default_mock_behavior, kFail); + EXPECT_NONFATAL_FAILURE( + { + MockA a; + a.DoA(0); + }, + "Uninteresting mock function call"); // Out of bounds values are converted to kWarn - testing::GMOCK_FLAG(default_mock_behavior) = -1; + GMOCK_FLAG_SET(default_mock_behavior, -1); CaptureStdout(); { MockA a; @@ -735,7 +662,7 @@ TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) { EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", warning_output); EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call", warning_output); - testing::GMOCK_FLAG(default_mock_behavior) = 3; + GMOCK_FLAG_SET(default_mock_behavior, 3); CaptureStdout(); { MockA a; @@ -746,7 +673,7 @@ TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) { EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call", warning_output); - testing::GMOCK_FLAG(default_mock_behavior) = original_behavior; 
+ GMOCK_FLAG_SET(default_mock_behavior, original_behavior); } #endif // GTEST_HAS_STREAM_REDIRECTION @@ -766,8 +693,7 @@ TEST(OnCallTest, TakesBuiltInDefaultActionWhenNoOnCall) { // matches the invocation. TEST(OnCallTest, TakesBuiltInDefaultActionWhenNoOnCallMatches) { MockB b; - ON_CALL(b, DoB(1)) - .WillByDefault(Return(1)); + ON_CALL(b, DoB(1)).WillByDefault(Return(1)); EXPECT_CALL(b, DoB(_)); EXPECT_EQ(0, b.DoB(2)); @@ -776,12 +702,9 @@ TEST(OnCallTest, TakesBuiltInDefaultActionWhenNoOnCallMatches) { // Tests that the last matching ON_CALL() action is taken. TEST(OnCallTest, PicksLastMatchingOnCall) { MockB b; - ON_CALL(b, DoB(_)) - .WillByDefault(Return(3)); - ON_CALL(b, DoB(2)) - .WillByDefault(Return(2)); - ON_CALL(b, DoB(1)) - .WillByDefault(Return(1)); + ON_CALL(b, DoB(_)).WillByDefault(Return(3)); + ON_CALL(b, DoB(2)).WillByDefault(Return(2)); + ON_CALL(b, DoB(1)).WillByDefault(Return(1)); EXPECT_CALL(b, DoB(_)); EXPECT_EQ(2, b.DoB(2)); @@ -805,25 +728,24 @@ TEST(ExpectCallTest, AllowsAnyCallWhenNoSpec) { // Tests that the last matching EXPECT_CALL() fires. TEST(ExpectCallTest, PicksLastMatchingExpectCall) { MockB b; - EXPECT_CALL(b, DoB(_)) - .WillRepeatedly(Return(2)); - EXPECT_CALL(b, DoB(1)) - .WillRepeatedly(Return(1)); + EXPECT_CALL(b, DoB(_)).WillRepeatedly(Return(2)); + EXPECT_CALL(b, DoB(1)).WillRepeatedly(Return(1)); EXPECT_EQ(1, b.DoB(1)); } // Tests lower-bound violation. 
TEST(ExpectCallTest, CatchesTooFewCalls) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - MockB b; - EXPECT_CALL(b, DoB(5)) - .Times(AtLeast(2)); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + MockB b; + EXPECT_CALL(b, DoB(5)).Times(AtLeast(2)); - b.DoB(5); - }, "Actual function call count doesn't match EXPECT_CALL(b, DoB(5))...\n" - " Expected: to be called at least twice\n" - " Actual: called once - unsatisfied and active"); + b.DoB(5); + }, + "Actual function call count doesn't match EXPECT_CALL(b, DoB(5))...\n" + " Expected: to be called at least twice\n" + " Actual: called once - unsatisfied and active"); } // Tests that the cardinality can be inferred when no Times(...) is @@ -831,28 +753,24 @@ TEST(ExpectCallTest, CatchesTooFewCalls) { TEST(ExpectCallTest, InfersCardinalityWhenThereIsNoWillRepeatedly) { { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillOnce(Return(2)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2)); EXPECT_EQ(1, b.DoB()); EXPECT_EQ(2, b.DoB()); } - EXPECT_NONFATAL_FAILURE({ // NOLINT - MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillOnce(Return(2)); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + MockB b; + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2)); - EXPECT_EQ(1, b.DoB()); - }, "to be called twice"); + EXPECT_EQ(1, b.DoB()); + }, + "to be called twice"); { // NOLINT MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillOnce(Return(2)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2)); EXPECT_EQ(1, b.DoB()); EXPECT_EQ(2, b.DoB()); @@ -863,40 +781,78 @@ TEST(ExpectCallTest, InfersCardinalityWhenThereIsNoWillRepeatedly) { TEST(ExpectCallTest, InfersCardinality1WhenThereIsWillRepeatedly) { { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillRepeatedly(Return(2)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2)); EXPECT_EQ(1, b.DoB()); } { // NOLINT MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillRepeatedly(Return(2)); + 
EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2)); EXPECT_EQ(1, b.DoB()); EXPECT_EQ(2, b.DoB()); EXPECT_EQ(2, b.DoB()); } - EXPECT_NONFATAL_FAILURE({ // NOLINT - MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillRepeatedly(Return(2)); - }, "to be called at least once"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + MockB b; + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2)); + }, + "to be called at least once"); } +#if defined(__cplusplus) && __cplusplus >= 201703L + +// It should be possible to return a non-moveable type from a mock action in +// C++17 and above, where it's guaranteed that such a type can be initialized +// from a prvalue returned from a function. +TEST(ExpectCallTest, NonMoveableType) { + // Define a non-moveable result type. + struct Result { + explicit Result(int x_in) : x(x_in) {} + Result(Result&&) = delete; + + int x; + }; + + static_assert(!std::is_move_constructible_v); + static_assert(!std::is_copy_constructible_v); + + static_assert(!std::is_move_assignable_v); + static_assert(!std::is_copy_assignable_v); + + // We should be able to use a callable that returns that result as both a + // OnceAction and an Action, whether the callable ignores arguments or not. + const auto return_17 = [] { return Result(17); }; + + static_cast(OnceAction{return_17}); + static_cast(Action{return_17}); + + static_cast(OnceAction{return_17}); + static_cast(Action{return_17}); + + // It should be possible to return the result end to end through an + // EXPECT_CALL statement, with both WillOnce and WillRepeatedly. + MockFunction mock; + EXPECT_CALL(mock, Call) // + .WillOnce(return_17) // + .WillRepeatedly(return_17); + + EXPECT_EQ(17, mock.AsStdFunction()().x); + EXPECT_EQ(17, mock.AsStdFunction()().x); + EXPECT_EQ(17, mock.AsStdFunction()().x); +} + +#endif // C++17 and above + // Tests that the n-th action is taken for the n-th matching // invocation. 
TEST(ExpectCallTest, NthMatchTakesNthAction) { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillOnce(Return(2)) - .WillOnce(Return(3)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillOnce(Return(2)).WillOnce( + Return(3)); EXPECT_EQ(1, b.DoB()); EXPECT_EQ(2, b.DoB()); @@ -907,9 +863,7 @@ TEST(ExpectCallTest, NthMatchTakesNthAction) { // list is exhausted. TEST(ExpectCallTest, TakesRepeatedActionWhenWillListIsExhausted) { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)) - .WillRepeatedly(Return(2)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)).WillRepeatedly(Return(2)); EXPECT_EQ(1, b.DoB()); EXPECT_EQ(2, b.DoB()); @@ -922,8 +876,7 @@ TEST(ExpectCallTest, TakesRepeatedActionWhenWillListIsExhausted) { // exhausted and there is no WillRepeatedly(). TEST(ExpectCallTest, TakesDefaultActionWhenWillListIsExhausted) { MockB b; - EXPECT_CALL(b, DoB(_)) - .Times(1); + EXPECT_CALL(b, DoB(_)).Times(1); EXPECT_CALL(b, DoB()) .Times(AnyNumber()) .WillOnce(Return(1)) @@ -985,8 +938,7 @@ TEST(UninterestingCallTest, DoesDefaultAction) { // When there is an ON_CALL() statement, the action specified by it // should be taken. MockA a; - ON_CALL(a, Binary(_, _)) - .WillByDefault(Return(true)); + ON_CALL(a, Binary(_, _)).WillByDefault(Return(true)); EXPECT_TRUE(a.Binary(1, 2)); // When there is no ON_CALL(), the default value for the return type @@ -1000,8 +952,7 @@ TEST(UnexpectedCallTest, DoesDefaultAction) { // When there is an ON_CALL() statement, the action specified by it // should be taken. MockA a; - ON_CALL(a, Binary(_, _)) - .WillByDefault(Return(true)); + ON_CALL(a, Binary(_, _)).WillByDefault(Return(true)); EXPECT_CALL(a, Binary(0, 0)); a.Binary(0, 0); bool result = false; @@ -1012,11 +963,9 @@ TEST(UnexpectedCallTest, DoesDefaultAction) { // When there is no ON_CALL(), the default value for the return type // should be returned. 
MockB b; - EXPECT_CALL(b, DoB(0)) - .Times(0); + EXPECT_CALL(b, DoB(0)).Times(0); int n = -1; - EXPECT_NONFATAL_FAILURE(n = b.DoB(1), - "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE(n = b.DoB(1), "Unexpected mock function call"); EXPECT_EQ(0, n); } @@ -1093,14 +1042,12 @@ TEST(UnexpectedCallTest, GeneartesFailureForNonVoidFunction) { // match the call. TEST(UnexpectedCallTest, RetiredExpectation) { MockB b; - EXPECT_CALL(b, DoB(1)) - .RetiresOnSaturation(); + EXPECT_CALL(b, DoB(1)).RetiresOnSaturation(); b.DoB(1); - EXPECT_NONFATAL_FAILURE( - b.DoB(1), - " Expected: the expectation is active\n" - " Actual: it is retired"); + EXPECT_NONFATAL_FAILURE(b.DoB(1), + " Expected: the expectation is active\n" + " Actual: it is retired"); } // Tests that Google Mock explains that an expectation that doesn't @@ -1109,10 +1056,9 @@ TEST(UnexpectedCallTest, UnmatchedArguments) { MockB b; EXPECT_CALL(b, DoB(1)); - EXPECT_NONFATAL_FAILURE( - b.DoB(2), - " Expected arg #0: is equal to 1\n" - " Actual: 2\n"); + EXPECT_NONFATAL_FAILURE(b.DoB(2), + " Expected arg #0: is equal to 1\n" + " Actual: 2\n"); b.DoB(1); } @@ -1121,15 +1067,10 @@ TEST(UnexpectedCallTest, UnmatchedArguments) { TEST(UnexpectedCallTest, UnsatisifiedPrerequisites) { Sequence s1, s2; MockB b; - EXPECT_CALL(b, DoB(1)) - .InSequence(s1); - EXPECT_CALL(b, DoB(2)) - .Times(AnyNumber()) - .InSequence(s1); - EXPECT_CALL(b, DoB(3)) - .InSequence(s2); - EXPECT_CALL(b, DoB(4)) - .InSequence(s1, s2); + EXPECT_CALL(b, DoB(1)).InSequence(s1); + EXPECT_CALL(b, DoB(2)).Times(AnyNumber()).InSequence(s1); + EXPECT_CALL(b, DoB(3)).InSequence(s2); + EXPECT_CALL(b, DoB(4)).InSequence(s1, s2); ::testing::TestPartResultArray failures; { @@ -1147,23 +1088,27 @@ TEST(UnexpectedCallTest, UnsatisifiedPrerequisites) { // Verifies that the failure message contains the two unsatisfied // pre-requisites but not the satisfied one. 
#if GTEST_USES_PCRE - EXPECT_THAT(r.message(), ContainsRegex( - // PCRE has trouble using (.|\n) to match any character, but - // supports the (?s) prefix for using . to match any character. - "(?s)the following immediate pre-requisites are not satisfied:\n" - ".*: pre-requisite #0\n" - ".*: pre-requisite #1")); + EXPECT_THAT( + r.message(), + ContainsRegex( + // PCRE has trouble using (.|\n) to match any character, but + // supports the (?s) prefix for using . to match any character. + "(?s)the following immediate pre-requisites are not satisfied:\n" + ".*: pre-requisite #0\n" + ".*: pre-requisite #1")); #elif GTEST_USES_POSIX_RE - EXPECT_THAT(r.message(), ContainsRegex( - // POSIX RE doesn't understand the (?s) prefix, but has no trouble - // with (.|\n). - "the following immediate pre-requisites are not satisfied:\n" - "(.|\n)*: pre-requisite #0\n" - "(.|\n)*: pre-requisite #1")); + EXPECT_THAT(r.message(), + ContainsRegex( + // POSIX RE doesn't understand the (?s) prefix, but has no + // trouble with (.|\n). + "the following immediate pre-requisites are not satisfied:\n" + "(.|\n)*: pre-requisite #0\n" + "(.|\n)*: pre-requisite #1")); #else // We can only use Google Test's own simple regex. - EXPECT_THAT(r.message(), ContainsRegex( - "the following immediate pre-requisites are not satisfied:")); + EXPECT_THAT(r.message(), + ContainsRegex( + "the following immediate pre-requisites are not satisfied:")); EXPECT_THAT(r.message(), ContainsRegex(": pre-requisite #0")); EXPECT_THAT(r.message(), ContainsRegex(": pre-requisite #1")); #endif // GTEST_USES_PCRE @@ -1192,8 +1137,7 @@ TEST(ExcessiveCallTest, DoesDefaultAction) { // When there is an ON_CALL() statement, the action specified by it // should be taken. 
MockA a; - ON_CALL(a, Binary(_, _)) - .WillByDefault(Return(true)); + ON_CALL(a, Binary(_, _)).WillByDefault(Return(true)); EXPECT_CALL(a, Binary(0, 0)); a.Binary(0, 0); bool result = false; @@ -1204,8 +1148,7 @@ TEST(ExcessiveCallTest, DoesDefaultAction) { // When there is no ON_CALL(), the default value for the return type // should be returned. MockB b; - EXPECT_CALL(b, DoB(0)) - .Times(0); + EXPECT_CALL(b, DoB(0)).Times(0); int n = -1; EXPECT_NONFATAL_FAILURE(n = b.DoB(0), "Mock function called more times than expected"); @@ -1216,8 +1159,7 @@ TEST(ExcessiveCallTest, DoesDefaultAction) { // the failure message contains the argument values. TEST(ExcessiveCallTest, GeneratesFailureForVoidFunction) { MockA a; - EXPECT_CALL(a, DoA(_)) - .Times(0); + EXPECT_CALL(a, DoA(_)).Times(0); EXPECT_NONFATAL_FAILURE( a.DoA(9), "Mock function called more times than expected - returning directly.\n" @@ -1253,9 +1195,11 @@ TEST(InSequenceTest, AllExpectationInScopeAreInSequence) { EXPECT_CALL(a, DoA(2)); } - EXPECT_NONFATAL_FAILURE({ // NOLINT - a.DoA(2); - }, "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + a.DoA(2); + }, + "Unexpected mock function call"); a.DoA(1); a.DoA(2); @@ -1275,10 +1219,12 @@ TEST(InSequenceTest, NestedInSequence) { } } - EXPECT_NONFATAL_FAILURE({ // NOLINT - a.DoA(1); - a.DoA(3); - }, "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + a.DoA(1); + a.DoA(3); + }, + "Unexpected mock function call"); a.DoA(2); a.DoA(3); @@ -1294,9 +1240,11 @@ TEST(InSequenceTest, ExpectationsOutOfScopeAreNotAffected) { } EXPECT_CALL(a, DoA(3)); - EXPECT_NONFATAL_FAILURE({ // NOLINT - a.DoA(2); - }, "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + a.DoA(2); + }, + "Unexpected mock function call"); a.DoA(3); a.DoA(1); @@ -1310,8 +1258,7 @@ TEST(SequenceTest, AnyOrderIsOkByDefault) { MockB b; EXPECT_CALL(a, DoA(1)); - EXPECT_CALL(b, DoB()) - .Times(AnyNumber()); + EXPECT_CALL(b, 
DoB()).Times(AnyNumber()); a.DoA(1); b.DoB(); @@ -1322,8 +1269,7 @@ TEST(SequenceTest, AnyOrderIsOkByDefault) { MockB b; EXPECT_CALL(a, DoA(1)); - EXPECT_CALL(b, DoB()) - .Times(AnyNumber()); + EXPECT_CALL(b, DoB()).Times(AnyNumber()); b.DoB(); a.DoA(1); @@ -1334,16 +1280,12 @@ TEST(SequenceTest, AnyOrderIsOkByDefault) { // is specified. TEST(SequenceTest, CallsMustBeInStrictOrderWhenSaidSo1) { MockA a; - ON_CALL(a, ReturnResult(_)) - .WillByDefault(Return(Result())); + ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result())); Sequence s; - EXPECT_CALL(a, ReturnResult(1)) - .InSequence(s); - EXPECT_CALL(a, ReturnResult(2)) - .InSequence(s); - EXPECT_CALL(a, ReturnResult(3)) - .InSequence(s); + EXPECT_CALL(a, ReturnResult(1)).InSequence(s); + EXPECT_CALL(a, ReturnResult(2)).InSequence(s); + EXPECT_CALL(a, ReturnResult(3)).InSequence(s); a.ReturnResult(1); @@ -1358,14 +1300,11 @@ TEST(SequenceTest, CallsMustBeInStrictOrderWhenSaidSo1) { // is specified. TEST(SequenceTest, CallsMustBeInStrictOrderWhenSaidSo2) { MockA a; - ON_CALL(a, ReturnResult(_)) - .WillByDefault(Return(Result())); + ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result())); Sequence s; - EXPECT_CALL(a, ReturnResult(1)) - .InSequence(s); - EXPECT_CALL(a, ReturnResult(2)) - .InSequence(s); + EXPECT_CALL(a, ReturnResult(1)).InSequence(s); + EXPECT_CALL(a, ReturnResult(2)).InSequence(s); // May only be called after a.ReturnResult(1). 
EXPECT_NONFATAL_FAILURE(a.ReturnResult(2), "Unexpected mock function call"); @@ -1378,8 +1317,7 @@ TEST(SequenceTest, CallsMustBeInStrictOrderWhenSaidSo2) { class PartialOrderTest : public testing::Test { protected: PartialOrderTest() { - ON_CALL(a_, ReturnResult(_)) - .WillByDefault(Return(Result())); + ON_CALL(a_, ReturnResult(_)).WillByDefault(Return(Result())); // Specifies this partial ordering: // @@ -1387,16 +1325,10 @@ class PartialOrderTest : public testing::Test { // a.ReturnResult(2) * n ==> a.ReturnResult(3) // b.DoB() * 2 ==> Sequence x, y; - EXPECT_CALL(a_, ReturnResult(1)) - .InSequence(x); - EXPECT_CALL(b_, DoB()) - .Times(2) - .InSequence(y); - EXPECT_CALL(a_, ReturnResult(2)) - .Times(AnyNumber()) - .InSequence(x, y); - EXPECT_CALL(a_, ReturnResult(3)) - .InSequence(x); + EXPECT_CALL(a_, ReturnResult(1)).InSequence(x); + EXPECT_CALL(b_, DoB()).Times(2).InSequence(y); + EXPECT_CALL(a_, ReturnResult(2)).Times(AnyNumber()).InSequence(x, y); + EXPECT_CALL(a_, ReturnResult(3)).InSequence(x); } MockA a_; @@ -1448,13 +1380,9 @@ TEST(SequenceTest, Retirement) { MockA a; Sequence s; - EXPECT_CALL(a, DoA(1)) - .InSequence(s); - EXPECT_CALL(a, DoA(_)) - .InSequence(s) - .RetiresOnSaturation(); - EXPECT_CALL(a, DoA(1)) - .InSequence(s); + EXPECT_CALL(a, DoA(1)).InSequence(s); + EXPECT_CALL(a, DoA(_)).InSequence(s).RetiresOnSaturation(); + EXPECT_CALL(a, DoA(1)).InSequence(s); a.DoA(1); a.DoA(2); @@ -1519,12 +1447,12 @@ TEST(ExpectationSetTest, ConstructorsWork) { Expectation e1; const Expectation e2; - ExpectationSet es1; // Default ctor. + ExpectationSet es1; // Default ctor. ExpectationSet es2 = EXPECT_CALL(a, DoA(1)); // Ctor from EXPECT_CALL. - ExpectationSet es3 = e1; // Ctor from Expectation. - ExpectationSet es4(e1); // Ctor from Expectation; alternative syntax. - ExpectationSet es5 = e2; // Ctor from const Expectation. - ExpectationSet es6(e2); // Ctor from const Expectation; alternative syntax. + ExpectationSet es3 = e1; // Ctor from Expectation. 
+ ExpectationSet es4(e1); // Ctor from Expectation; alternative syntax. + ExpectationSet es5 = e2; // Ctor from const Expectation. + ExpectationSet es6(e2); // Ctor from const Expectation; alternative syntax. ExpectationSet es7 = es2; // Copy ctor. EXPECT_EQ(0, es1.size()); @@ -1596,7 +1524,7 @@ TEST(ExpectationSetTest, IsEnumerable) { EXPECT_TRUE(it != es.end()); EXPECT_THAT(*it, Eq(Expectation())); ++it; - EXPECT_TRUE(it== es.end()); + EXPECT_TRUE(it == es.end()); } // Tests the .After() clause. @@ -1606,8 +1534,7 @@ TEST(AfterTest, SucceedsWhenPartialOrderIsSatisfied) { ExpectationSet es; es += EXPECT_CALL(a, DoA(1)); es += EXPECT_CALL(a, DoA(2)); - EXPECT_CALL(a, DoA(3)) - .After(es); + EXPECT_CALL(a, DoA(3)).After(es); a.DoA(1); a.DoA(2); @@ -1620,9 +1547,7 @@ TEST(AfterTest, SucceedsWhenTotalOrderIsSatisfied) { // The following also verifies that const Expectation objects work // too. Do not remove the const modifiers. const Expectation e1 = EXPECT_CALL(a, DoA(1)); - const Expectation e2 = EXPECT_CALL(b, DoB()) - .Times(2) - .After(e1); + const Expectation e2 = EXPECT_CALL(b, DoB()).Times(2).After(e1); EXPECT_CALL(a, DoA(2)).After(e2); a.DoA(1); @@ -1639,10 +1564,8 @@ TEST(AfterTest, CallsMustBeInStrictOrderWhenSpecifiedSo1) { // Define ordering: // a.DoA(1) ==> b.DoB() ==> a.DoA(2) Expectation e1 = EXPECT_CALL(a, DoA(1)); - Expectation e2 = EXPECT_CALL(b, DoB()) - .After(e1); - EXPECT_CALL(a, DoA(2)) - .After(e2); + Expectation e2 = EXPECT_CALL(b, DoB()).After(e1); + EXPECT_CALL(a, DoA(2)).After(e2); a.DoA(1); @@ -1661,11 +1584,8 @@ TEST(AfterTest, CallsMustBeInStrictOrderWhenSpecifiedSo2) { // Define ordering: // a.DoA(1) ==> b.DoB() * 2 ==> a.DoA(2) Expectation e1 = EXPECT_CALL(a, DoA(1)); - Expectation e2 = EXPECT_CALL(b, DoB()) - .Times(2) - .After(e1); - EXPECT_CALL(a, DoA(2)) - .After(e2); + Expectation e2 = EXPECT_CALL(b, DoB()).Times(2).After(e1); + EXPECT_CALL(a, DoA(2)).After(e2); a.DoA(1); b.DoB(); @@ -1680,16 +1600,14 @@ TEST(AfterTest, 
CallsMustBeInStrictOrderWhenSpecifiedSo2) { // Calls must satisfy the partial order when specified so. TEST(AfterTest, CallsMustSatisfyPartialOrderWhenSpecifiedSo) { MockA a; - ON_CALL(a, ReturnResult(_)) - .WillByDefault(Return(Result())); + ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result())); // Define ordering: // a.DoA(1) ==> // a.DoA(2) ==> a.ReturnResult(3) Expectation e = EXPECT_CALL(a, DoA(1)); const ExpectationSet es = EXPECT_CALL(a, DoA(2)); - EXPECT_CALL(a, ReturnResult(3)) - .After(e, es); + EXPECT_CALL(a, ReturnResult(3)).After(e, es); // May only be called last. EXPECT_NONFATAL_FAILURE(a.ReturnResult(3), "Unexpected mock function call"); @@ -1708,8 +1626,7 @@ TEST(AfterTest, CallsMustSatisfyPartialOrderWhenSpecifiedSo2) { // a.DoA(2) ==> a.DoA(3) Expectation e = EXPECT_CALL(a, DoA(1)); const ExpectationSet es = EXPECT_CALL(a, DoA(2)); - EXPECT_CALL(a, DoA(3)) - .After(e, es); + EXPECT_CALL(a, DoA(3)).After(e, es); a.DoA(2); @@ -1726,9 +1643,7 @@ TEST(AfterTest, CanBeUsedWithInSequence) { Sequence s; Expectation e = EXPECT_CALL(a, DoA(1)); EXPECT_CALL(a, DoA(2)).InSequence(s); - EXPECT_CALL(a, DoA(3)) - .InSequence(s) - .After(e); + EXPECT_CALL(a, DoA(3)).InSequence(s).After(e); a.DoA(1); @@ -1745,10 +1660,7 @@ TEST(AfterTest, CanBeCalledManyTimes) { Expectation e1 = EXPECT_CALL(a, DoA(1)); Expectation e2 = EXPECT_CALL(a, DoA(2)); Expectation e3 = EXPECT_CALL(a, DoA(3)); - EXPECT_CALL(a, DoA(4)) - .After(e1) - .After(e2) - .After(e3); + EXPECT_CALL(a, DoA(4)).After(e1).After(e2).After(e3); a.DoA(3); a.DoA(1); @@ -1764,8 +1676,7 @@ TEST(AfterTest, AcceptsUpToFiveArguments) { Expectation e3 = EXPECT_CALL(a, DoA(3)); ExpectationSet es1 = EXPECT_CALL(a, DoA(4)); ExpectationSet es2 = EXPECT_CALL(a, DoA(5)); - EXPECT_CALL(a, DoA(6)) - .After(e1, e2, e3, es1, es2); + EXPECT_CALL(a, DoA(6)).After(e1, e2, e3, es1, es2); a.DoA(5); a.DoA(2); @@ -1778,8 +1689,7 @@ TEST(AfterTest, AcceptsUpToFiveArguments) { // .After() allows input to contain duplicated 
Expectations. TEST(AfterTest, AcceptsDuplicatedInput) { MockA a; - ON_CALL(a, ReturnResult(_)) - .WillByDefault(Return(Result())); + ON_CALL(a, ReturnResult(_)).WillByDefault(Return(Result())); // Define ordering: // DoA(1) ==> @@ -1789,8 +1699,7 @@ TEST(AfterTest, AcceptsDuplicatedInput) { ExpectationSet es; es += e1; es += e2; - EXPECT_CALL(a, ReturnResult(3)) - .After(e1, e2, es, e1); + EXPECT_CALL(a, ReturnResult(3)).After(e1, e2, es, e1); a.DoA(1); @@ -1807,8 +1716,7 @@ TEST(AfterTest, ChangesToExpectationSetHaveNoEffectAfterwards) { MockA a; ExpectationSet es1 = EXPECT_CALL(a, DoA(1)); Expectation e2 = EXPECT_CALL(a, DoA(2)); - EXPECT_CALL(a, DoA(3)) - .After(es1); + EXPECT_CALL(a, DoA(3)).After(es1); es1 += e2; a.DoA(1); @@ -1827,14 +1735,11 @@ TEST(DeletingMockEarlyTest, Success1) { { InSequence dummy; - EXPECT_CALL(*b1, DoB(_)) - .WillOnce(Return(1)); + EXPECT_CALL(*b1, DoB(_)).WillOnce(Return(1)); EXPECT_CALL(*a, Binary(_, _)) .Times(AnyNumber()) .WillRepeatedly(Return(true)); - EXPECT_CALL(*b2, DoB(_)) - .Times(AnyNumber()) - .WillRepeatedly(Return(2)); + EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()).WillRepeatedly(Return(2)); } EXPECT_EQ(1, b1->DoB(1)); @@ -1855,13 +1760,9 @@ TEST(DeletingMockEarlyTest, Success2) { { InSequence dummy; - EXPECT_CALL(*b1, DoB(_)) - .WillOnce(Return(1)); - EXPECT_CALL(*a, Binary(_, _)) - .Times(AnyNumber()); - EXPECT_CALL(*b2, DoB(_)) - .Times(AnyNumber()) - .WillRepeatedly(Return(2)); + EXPECT_CALL(*b1, DoB(_)).WillOnce(Return(1)); + EXPECT_CALL(*a, Binary(_, _)).Times(AnyNumber()); + EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()).WillRepeatedly(Return(2)); } delete a; // a is trivially satisfied. @@ -1876,14 +1777,14 @@ TEST(DeletingMockEarlyTest, Success2) { // Suppresses warning on unreferenced formal parameter in MSVC with // -W4. 
#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #endif ACTION_P(Delete, ptr) { delete ptr; } #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif TEST(DeletingMockEarlyTest, CanDeleteSelfInActionReturningVoid) { @@ -1894,8 +1795,7 @@ TEST(DeletingMockEarlyTest, CanDeleteSelfInActionReturningVoid) { TEST(DeletingMockEarlyTest, CanDeleteSelfInActionReturningValue) { MockA* const a = new MockA; - EXPECT_CALL(*a, ReturnResult(_)) - .WillOnce(DoAll(Delete(a), Return(Result()))); + EXPECT_CALL(*a, ReturnResult(_)).WillOnce(DoAll(Delete(a), Return(Result()))); a->ReturnResult(42); // This will cause a to be deleted. } @@ -1907,19 +1807,13 @@ TEST(DeletingMockEarlyTest, Failure1) { { InSequence dummy; - EXPECT_CALL(*b1, DoB(_)) - .WillOnce(Return(1)); - EXPECT_CALL(*a, Binary(_, _)) - .Times(AnyNumber()); - EXPECT_CALL(*b2, DoB(_)) - .Times(AnyNumber()) - .WillRepeatedly(Return(2)); + EXPECT_CALL(*b1, DoB(_)).WillOnce(Return(1)); + EXPECT_CALL(*a, Binary(_, _)).Times(AnyNumber()); + EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()).WillRepeatedly(Return(2)); } delete a; // a is trivially satisfied. 
- EXPECT_NONFATAL_FAILURE({ - b2->DoB(2); - }, "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE({ b2->DoB(2); }, "Unexpected mock function call"); EXPECT_EQ(1, b1->DoB(1)); delete b1; delete b2; @@ -1934,18 +1828,13 @@ TEST(DeletingMockEarlyTest, Failure2) { { InSequence dummy; EXPECT_CALL(*b1, DoB(_)); - EXPECT_CALL(*a, Binary(_, _)) - .Times(AnyNumber()); - EXPECT_CALL(*b2, DoB(_)) - .Times(AnyNumber()); + EXPECT_CALL(*a, Binary(_, _)).Times(AnyNumber()); + EXPECT_CALL(*b2, DoB(_)).Times(AnyNumber()); } - EXPECT_NONFATAL_FAILURE(delete b1, - "Actual: never called"); - EXPECT_NONFATAL_FAILURE(a->Binary(0, 1), - "Unexpected mock function call"); - EXPECT_NONFATAL_FAILURE(b2->DoB(1), - "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE(delete b1, "Actual: never called"); + EXPECT_NONFATAL_FAILURE(a->Binary(0, 1), "Unexpected mock function call"); + EXPECT_NONFATAL_FAILURE(b2->DoB(1), "Unexpected mock function call"); delete a; delete b2; } @@ -1970,23 +1859,16 @@ class EvenNumberCardinality : public CardinalityInterface { } }; -Cardinality EvenNumber() { - return Cardinality(new EvenNumberCardinality); -} +Cardinality EvenNumber() { return Cardinality(new EvenNumberCardinality); } TEST(ExpectationBaseTest, AllPrerequisitesAreSatisfiedWorksForNonMonotonicCardinality) { MockA* a = new MockA; Sequence s; - EXPECT_CALL(*a, DoA(1)) - .Times(EvenNumber()) - .InSequence(s); - EXPECT_CALL(*a, DoA(2)) - .Times(AnyNumber()) - .InSequence(s); - EXPECT_CALL(*a, DoA(3)) - .Times(AnyNumber()); + EXPECT_CALL(*a, DoA(1)).Times(EvenNumber()).InSequence(s); + EXPECT_CALL(*a, DoA(2)).Times(AnyNumber()).InSequence(s); + EXPECT_CALL(*a, DoA(3)).Times(AnyNumber()); a->DoA(3); a->DoA(1); @@ -1997,8 +1879,7 @@ TEST(ExpectationBaseTest, // The following tests verify the message generated when a mock // function is called. 
-struct Printable { -}; +struct Printable {}; inline void operator<<(::std::ostream& os, const Printable&) { os << "Printable"; @@ -2018,22 +1899,25 @@ class MockC { MOCK_METHOD0(NonVoidMethod, int()); // NOLINT private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockC); + MockC(const MockC&) = delete; + MockC& operator=(const MockC&) = delete; }; class VerboseFlagPreservingFixture : public testing::Test { protected: VerboseFlagPreservingFixture() - : saved_verbose_flag_(GMOCK_FLAG(verbose)) {} + : saved_verbose_flag_(GMOCK_FLAG_GET(verbose)) {} ~VerboseFlagPreservingFixture() override { - GMOCK_FLAG(verbose) = saved_verbose_flag_; + GMOCK_FLAG_SET(verbose, saved_verbose_flag_); } private: const std::string saved_verbose_flag_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(VerboseFlagPreservingFixture); + VerboseFlagPreservingFixture(const VerboseFlagPreservingFixture&) = delete; + VerboseFlagPreservingFixture& operator=(const VerboseFlagPreservingFixture&) = + delete; }; #if GTEST_HAS_STREAM_REDIRECTION @@ -2043,7 +1927,7 @@ class VerboseFlagPreservingFixture : public testing::Test { // --gmock_verbose=warning is specified. TEST(FunctionCallMessageTest, UninterestingCallOnNaggyMockGeneratesNoStackTraceWhenVerboseWarning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); NaggyMock c; CaptureStdout(); c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable()); @@ -2057,7 +1941,7 @@ TEST(FunctionCallMessageTest, // --gmock_verbose=info is specified. 
TEST(FunctionCallMessageTest, UninterestingCallOnNaggyMockGeneratesFyiWithStackTraceWhenVerboseInfo) { - GMOCK_FLAG(verbose) = kInfoVerbosity; + GMOCK_FLAG_SET(verbose, kInfoVerbosity); NaggyMock c; CaptureStdout(); c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable()); @@ -2065,7 +1949,7 @@ TEST(FunctionCallMessageTest, EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", output); EXPECT_PRED_FORMAT2(IsSubstring, "Stack trace:", output); -# ifndef NDEBUG +#ifndef NDEBUG // We check the stack trace content in dbg-mode only, as opt-mode // may inline the call we are interested in seeing. @@ -2081,7 +1965,7 @@ TEST(FunctionCallMessageTest, const std::string output2 = GetCapturedStdout(); EXPECT_PRED_FORMAT2(IsSubstring, "NonVoidMethod(", output2); -# endif // NDEBUG +#endif // NDEBUG } // Tests that an uninteresting mock function call on a naggy mock @@ -2097,7 +1981,8 @@ TEST(FunctionCallMessageTest, IsSubstring, "Uninteresting mock function call - returning default value.\n" " Function call: DoB()\n" - " Returns: 0\n", output1.c_str()); + " Returns: 0\n", + output1.c_str()); // Makes sure the return value is printed. // A void mock function. @@ -2105,12 +1990,12 @@ TEST(FunctionCallMessageTest, CaptureStdout(); c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable()); const std::string output2 = GetCapturedStdout(); - EXPECT_THAT(output2.c_str(), - ContainsRegex( - "Uninteresting mock function call - returning directly\\.\n" - " Function call: VoidMethod" - "\\(false, 5, \"Hi\", NULL, @.+ " - "Printable, 4-byte object <00-00 00-00>\\)")); + EXPECT_THAT( + output2.c_str(), + ContainsRegex("Uninteresting mock function call - returning directly\\.\n" + " Function call: VoidMethod" + "\\(false, 5, \"Hi\", NULL, @.+ " + "Printable, 4-byte object <00-00 00-00>\\)")); // A void function has no return value to print. 
} @@ -2127,14 +2012,14 @@ class GMockVerboseFlagTest : public VerboseFlagPreservingFixture { const std::string& function_name) { if (should_print) { EXPECT_THAT(output.c_str(), HasSubstr(expected_substring)); -# ifndef NDEBUG +#ifndef NDEBUG // We check the stack trace content in dbg-mode only, as opt-mode // may inline the call we are interested in seeing. EXPECT_THAT(output.c_str(), HasSubstr(function_name)); -# else +#else // Suppresses 'unused function parameter' warnings. static_cast(function_name); -# endif // NDEBUG +#endif // NDEBUG } else { EXPECT_STREQ("", output.c_str()); } @@ -2144,31 +2029,26 @@ class GMockVerboseFlagTest : public VerboseFlagPreservingFixture { void TestExpectedCall(bool should_print) { MockA a; EXPECT_CALL(a, DoA(5)); - EXPECT_CALL(a, Binary(_, 1)) - .WillOnce(Return(true)); + EXPECT_CALL(a, Binary(_, 1)).WillOnce(Return(true)); // A void-returning function. CaptureStdout(); a.DoA(5); - VerifyOutput( - GetCapturedStdout(), - should_print, - "Mock function call matches EXPECT_CALL(a, DoA(5))...\n" - " Function call: DoA(5)\n" - "Stack trace:\n", - "DoA"); + VerifyOutput(GetCapturedStdout(), should_print, + "Mock function call matches EXPECT_CALL(a, DoA(5))...\n" + " Function call: DoA(5)\n" + "Stack trace:\n", + "DoA"); // A non-void-returning function. CaptureStdout(); a.Binary(2, 1); - VerifyOutput( - GetCapturedStdout(), - should_print, - "Mock function call matches EXPECT_CALL(a, Binary(_, 1))...\n" - " Function call: Binary(2, 1)\n" - " Returns: true\n" - "Stack trace:\n", - "Binary"); + VerifyOutput(GetCapturedStdout(), should_print, + "Mock function call matches EXPECT_CALL(a, Binary(_, 1))...\n" + " Function call: Binary(2, 1)\n" + " Returns: true\n" + "Stack trace:\n", + "Binary"); } // Tests how the flag affects uninteresting calls on a naggy mock. @@ -2186,34 +2066,30 @@ class GMockVerboseFlagTest : public VerboseFlagPreservingFixture { // A void-returning function. 
CaptureStdout(); a.DoA(5); - VerifyOutput( - GetCapturedStdout(), - should_print, - "\nGMOCK WARNING:\n" - "Uninteresting mock function call - returning directly.\n" - " Function call: DoA(5)\n" + - note, - "DoA"); + VerifyOutput(GetCapturedStdout(), should_print, + "\nGMOCK WARNING:\n" + "Uninteresting mock function call - returning directly.\n" + " Function call: DoA(5)\n" + + note, + "DoA"); // A non-void-returning function. CaptureStdout(); a.Binary(2, 1); - VerifyOutput( - GetCapturedStdout(), - should_print, - "\nGMOCK WARNING:\n" - "Uninteresting mock function call - returning default value.\n" - " Function call: Binary(2, 1)\n" - " Returns: false\n" + - note, - "Binary"); + VerifyOutput(GetCapturedStdout(), should_print, + "\nGMOCK WARNING:\n" + "Uninteresting mock function call - returning default value.\n" + " Function call: Binary(2, 1)\n" + " Returns: false\n" + + note, + "Binary"); } }; // Tests that --gmock_verbose=info causes both expected and // uninteresting calls to be reported. TEST_F(GMockVerboseFlagTest, Info) { - GMOCK_FLAG(verbose) = kInfoVerbosity; + GMOCK_FLAG_SET(verbose, kInfoVerbosity); TestExpectedCall(true); TestUninterestingCallOnNaggyMock(true); } @@ -2221,7 +2097,7 @@ TEST_F(GMockVerboseFlagTest, Info) { // Tests that --gmock_verbose=warning causes uninteresting calls to be // reported. TEST_F(GMockVerboseFlagTest, Warning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); TestExpectedCall(false); TestUninterestingCallOnNaggyMock(true); } @@ -2229,7 +2105,7 @@ TEST_F(GMockVerboseFlagTest, Warning) { // Tests that --gmock_verbose=warning causes neither expected nor // uninteresting calls to be reported. 
TEST_F(GMockVerboseFlagTest, Error) { - GMOCK_FLAG(verbose) = kErrorVerbosity; + GMOCK_FLAG_SET(verbose, kErrorVerbosity); TestExpectedCall(false); TestUninterestingCallOnNaggyMock(false); } @@ -2237,7 +2113,7 @@ TEST_F(GMockVerboseFlagTest, Error) { // Tests that --gmock_verbose=SOME_INVALID_VALUE has the same effect // as --gmock_verbose=warning. TEST_F(GMockVerboseFlagTest, InvalidFlagIsTreatedAsWarning) { - GMOCK_FLAG(verbose) = "invalid"; // Treated as "warning". + GMOCK_FLAG_SET(verbose, "invalid"); // Treated as "warning". TestExpectedCall(false); TestUninterestingCallOnNaggyMock(true); } @@ -2261,7 +2137,8 @@ class LogTestHelper { MOCK_METHOD1(Foo, PrintMeNot(PrintMeNot)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(LogTestHelper); + LogTestHelper(const LogTestHelper&) = delete; + LogTestHelper& operator=(const LogTestHelper&) = delete; }; class GMockLogTest : public VerboseFlagPreservingFixture { @@ -2270,23 +2147,20 @@ class GMockLogTest : public VerboseFlagPreservingFixture { }; TEST_F(GMockLogTest, DoesNotPrintGoodCallInternallyIfVerbosityIsWarning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; - EXPECT_CALL(helper_, Foo(_)) - .WillOnce(Return(PrintMeNot())); + GMOCK_FLAG_SET(verbose, kWarningVerbosity); + EXPECT_CALL(helper_, Foo(_)).WillOnce(Return(PrintMeNot())); helper_.Foo(PrintMeNot()); // This is an expected call. } TEST_F(GMockLogTest, DoesNotPrintGoodCallInternallyIfVerbosityIsError) { - GMOCK_FLAG(verbose) = kErrorVerbosity; - EXPECT_CALL(helper_, Foo(_)) - .WillOnce(Return(PrintMeNot())); + GMOCK_FLAG_SET(verbose, kErrorVerbosity); + EXPECT_CALL(helper_, Foo(_)).WillOnce(Return(PrintMeNot())); helper_.Foo(PrintMeNot()); // This is an expected call. 
} TEST_F(GMockLogTest, DoesNotPrintWarningInternallyIfVerbosityIsError) { - GMOCK_FLAG(verbose) = kErrorVerbosity; - ON_CALL(helper_, Foo(_)) - .WillByDefault(Return(PrintMeNot())); + GMOCK_FLAG_SET(verbose, kErrorVerbosity); + ON_CALL(helper_, Foo(_)).WillByDefault(Return(PrintMeNot())); helper_.Foo(PrintMeNot()); // This should generate a warning. } @@ -2347,8 +2221,7 @@ TEST(VerifyAndClearExpectationsTest, NoMethodHasExpectations) { // verification succeeds. TEST(VerifyAndClearExpectationsTest, SomeMethodsHaveExpectationsAndSucceed) { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)); b.DoB(); ASSERT_TRUE(Mock::VerifyAndClearExpectations(&b)); @@ -2363,8 +2236,7 @@ TEST(VerifyAndClearExpectationsTest, SomeMethodsHaveExpectationsAndSucceed) { // verification fails. TEST(VerifyAndClearExpectationsTest, SomeMethodsHaveExpectationsAndFail) { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)); bool result = true; EXPECT_NONFATAL_FAILURE(result = Mock::VerifyAndClearExpectations(&b), "Actual: never called"); @@ -2380,10 +2252,8 @@ TEST(VerifyAndClearExpectationsTest, SomeMethodsHaveExpectationsAndFail) { // when all of its methods have expectations. TEST(VerifyAndClearExpectationsTest, AllMethodsHaveExpectations) { MockB b; - EXPECT_CALL(b, DoB()) - .WillOnce(Return(1)); - EXPECT_CALL(b, DoB(_)) - .WillOnce(Return(2)); + EXPECT_CALL(b, DoB()).WillOnce(Return(1)); + EXPECT_CALL(b, DoB(_)).WillOnce(Return(2)); b.DoB(); b.DoB(1); ASSERT_TRUE(Mock::VerifyAndClearExpectations(&b)); @@ -2398,10 +2268,8 @@ TEST(VerifyAndClearExpectationsTest, AllMethodsHaveExpectations) { // when a method has more than one expectation. 
TEST(VerifyAndClearExpectationsTest, AMethodHasManyExpectations) { MockB b; - EXPECT_CALL(b, DoB(0)) - .WillOnce(Return(1)); - EXPECT_CALL(b, DoB(_)) - .WillOnce(Return(2)); + EXPECT_CALL(b, DoB(0)).WillOnce(Return(1)); + EXPECT_CALL(b, DoB(_)).WillOnce(Return(2)); b.DoB(1); bool result = true; EXPECT_NONFATAL_FAILURE(result = Mock::VerifyAndClearExpectations(&b), @@ -2422,8 +2290,7 @@ TEST(VerifyAndClearExpectationsTest, CanCallManyTimes) { b.DoB(); Mock::VerifyAndClearExpectations(&b); - EXPECT_CALL(b, DoB(_)) - .WillOnce(Return(1)); + EXPECT_CALL(b, DoB(_)).WillOnce(Return(1)); b.DoB(1); Mock::VerifyAndClearExpectations(&b); Mock::VerifyAndClearExpectations(&b); @@ -2447,8 +2314,7 @@ TEST(VerifyAndClearTest, NoMethodHasDefaultActions) { // but not all of its methods have default actions. TEST(VerifyAndClearTest, SomeMethodsHaveDefaultActions) { MockB b; - ON_CALL(b, DoB()) - .WillByDefault(Return(1)); + ON_CALL(b, DoB()).WillByDefault(Return(1)); Mock::VerifyAndClear(&b); @@ -2460,10 +2326,8 @@ TEST(VerifyAndClearTest, SomeMethodsHaveDefaultActions) { // its methods have default actions. TEST(VerifyAndClearTest, AllMethodsHaveDefaultActions) { MockB b; - ON_CALL(b, DoB()) - .WillByDefault(Return(1)); - ON_CALL(b, DoB(_)) - .WillByDefault(Return(2)); + ON_CALL(b, DoB()).WillByDefault(Return(1)); + ON_CALL(b, DoB(_)).WillByDefault(Return(2)); Mock::VerifyAndClear(&b); @@ -2478,10 +2342,8 @@ TEST(VerifyAndClearTest, AllMethodsHaveDefaultActions) { // method has more than one ON_CALL() set on it. TEST(VerifyAndClearTest, AMethodHasManyDefaultActions) { MockB b; - ON_CALL(b, DoB(0)) - .WillByDefault(Return(1)); - ON_CALL(b, DoB(_)) - .WillByDefault(Return(2)); + ON_CALL(b, DoB(0)).WillByDefault(Return(1)); + ON_CALL(b, DoB(_)).WillByDefault(Return(2)); Mock::VerifyAndClear(&b); @@ -2495,13 +2357,11 @@ TEST(VerifyAndClearTest, AMethodHasManyDefaultActions) { // times. 
TEST(VerifyAndClearTest, CanCallManyTimes) { MockB b; - ON_CALL(b, DoB()) - .WillByDefault(Return(1)); + ON_CALL(b, DoB()).WillByDefault(Return(1)); Mock::VerifyAndClear(&b); Mock::VerifyAndClear(&b); - ON_CALL(b, DoB(_)) - .WillByDefault(Return(1)); + ON_CALL(b, DoB(_)).WillByDefault(Return(1)); Mock::VerifyAndClear(&b); EXPECT_EQ(0, b.DoB()); @@ -2511,10 +2371,8 @@ TEST(VerifyAndClearTest, CanCallManyTimes) { // Tests that VerifyAndClear() works when the verification succeeds. TEST(VerifyAndClearTest, Success) { MockB b; - ON_CALL(b, DoB()) - .WillByDefault(Return(1)); - EXPECT_CALL(b, DoB(1)) - .WillOnce(Return(2)); + ON_CALL(b, DoB()).WillByDefault(Return(1)); + EXPECT_CALL(b, DoB(1)).WillOnce(Return(2)); b.DoB(); b.DoB(1); @@ -2529,10 +2387,8 @@ TEST(VerifyAndClearTest, Success) { // Tests that VerifyAndClear() works when the verification fails. TEST(VerifyAndClearTest, Failure) { MockB b; - ON_CALL(b, DoB(_)) - .WillByDefault(Return(1)); - EXPECT_CALL(b, DoB()) - .WillOnce(Return(2)); + ON_CALL(b, DoB(_)).WillByDefault(Return(1)); + EXPECT_CALL(b, DoB()).WillOnce(Return(2)); b.DoB(1); bool result = true; @@ -2550,12 +2406,9 @@ TEST(VerifyAndClearTest, Failure) { // expectations are set on a const mock object. TEST(VerifyAndClearTest, Const) { MockB b; - ON_CALL(Const(b), DoB()) - .WillByDefault(Return(1)); + ON_CALL(Const(b), DoB()).WillByDefault(Return(1)); - EXPECT_CALL(Const(b), DoB()) - .WillOnce(DoDefault()) - .WillOnce(Return(2)); + EXPECT_CALL(Const(b), DoB()).WillOnce(DoDefault()).WillOnce(Return(2)); b.DoB(); b.DoB(); @@ -2571,18 +2424,14 @@ TEST(VerifyAndClearTest, Const) { // object after VerifyAndClear() has been called on it. 
TEST(VerifyAndClearTest, CanSetDefaultActionsAndExpectationsAfterwards) { MockB b; - ON_CALL(b, DoB()) - .WillByDefault(Return(1)); - EXPECT_CALL(b, DoB(_)) - .WillOnce(Return(2)); + ON_CALL(b, DoB()).WillByDefault(Return(1)); + EXPECT_CALL(b, DoB(_)).WillOnce(Return(2)); b.DoB(1); Mock::VerifyAndClear(&b); - EXPECT_CALL(b, DoB()) - .WillOnce(Return(3)); - ON_CALL(b, DoB(_)) - .WillByDefault(Return(4)); + EXPECT_CALL(b, DoB()).WillOnce(Return(3)); + ON_CALL(b, DoB(_)).WillByDefault(Return(4)); EXPECT_EQ(3, b.DoB()); EXPECT_EQ(4, b.DoB(1)); @@ -2595,19 +2444,13 @@ TEST(VerifyAndClearTest, DoesNotAffectOtherMockObjects) { MockB b1; MockB b2; - ON_CALL(a, Binary(_, _)) - .WillByDefault(Return(true)); - EXPECT_CALL(a, Binary(_, _)) - .WillOnce(DoDefault()) - .WillOnce(Return(false)); + ON_CALL(a, Binary(_, _)).WillByDefault(Return(true)); + EXPECT_CALL(a, Binary(_, _)).WillOnce(DoDefault()).WillOnce(Return(false)); - ON_CALL(b1, DoB()) - .WillByDefault(Return(1)); - EXPECT_CALL(b1, DoB(_)) - .WillOnce(Return(2)); + ON_CALL(b1, DoB()).WillByDefault(Return(1)); + EXPECT_CALL(b1, DoB(_)).WillOnce(Return(2)); - ON_CALL(b2, DoB()) - .WillByDefault(Return(3)); + ON_CALL(b2, DoB()).WillByDefault(Return(3)); EXPECT_CALL(b2, DoB(_)); b2.DoB(0); @@ -2648,8 +2491,7 @@ TEST(VerifyAndClearTest, ReferenceHoldingMock test_mock; // ON_CALL stores a reference to a inside test_mock. - ON_CALL(test_mock, AcceptReference(_)) - .WillByDefault(SetArgPointee<0>(a)); + ON_CALL(test_mock, AcceptReference(_)).WillByDefault(SetArgPointee<0>(a)); // Throw away the reference to the mock that we have in a. After this, the // only reference to it is stored by test_mock. 
@@ -2670,9 +2512,8 @@ TEST(VerifyAndClearTest, TEST(SynchronizationTest, CanCallMockMethodInAction) { MockA a; MockC c; - ON_CALL(a, DoA(_)) - .WillByDefault(IgnoreResult(InvokeWithoutArgs(&c, - &MockC::NonVoidMethod))); + ON_CALL(a, DoA(_)).WillByDefault( + IgnoreResult(InvokeWithoutArgs(&c, &MockC::NonVoidMethod))); EXPECT_CALL(a, DoA(1)); EXPECT_CALL(a, DoA(1)) .WillOnce(Invoke(&a, &MockA::DoA)) @@ -2756,20 +2597,21 @@ TEST(ParameterlessExpectationsTest, } } // namespace +} // namespace testing // Allows the user to define their own main and then invoke gmock_main // from it. This might be necessary on some platforms which require // specific setup and teardown. #if GMOCK_RENAME_MAIN -int gmock_main(int argc, char **argv) { +int gmock_main(int argc, char** argv) { #else -int main(int argc, char **argv) { +int main(int argc, char** argv) { #endif // GMOCK_RENAME_MAIN testing::InitGoogleMock(&argc, argv); // Ensures that the tests pass no matter what value of // --gmock_catch_leaked_mocks and --gmock_verbose the user specifies. 
- testing::GMOCK_FLAG(catch_leaked_mocks) = true; - testing::GMOCK_FLAG(verbose) = testing::internal::kWarningVerbosity; + GMOCK_FLAG_SET(catch_leaked_mocks, true); + GMOCK_FLAG_SET(verbose, testing::internal::kWarningVerbosity); return RUN_ALL_TESTS(); } diff --git a/ext/googletest/googlemock/test/gmock_all_test.cc b/ext/googletest/googlemock/test/gmock_all_test.cc index fffbb8b66f..6db0086bb7 100644 --- a/ext/googletest/googlemock/test/gmock_all_test.cc +++ b/ext/googletest/googlemock/test/gmock_all_test.cc @@ -38,7 +38,10 @@ #include "test/gmock-actions_test.cc" #include "test/gmock-cardinalities_test.cc" #include "test/gmock-internal-utils_test.cc" -#include "test/gmock-matchers_test.cc" +#include "test/gmock-matchers-arithmetic_test.cc" +#include "test/gmock-matchers-comparisons_test.cc" +#include "test/gmock-matchers-containers_test.cc" +#include "test/gmock-matchers-misc_test.cc" #include "test/gmock-more-actions_test.cc" #include "test/gmock-nice-strict_test.cc" #include "test/gmock-port_test.cc" diff --git a/ext/googletest/googlemock/test/gmock_ex_test.cc b/ext/googletest/googlemock/test/gmock_ex_test.cc index 72eb43f74e..44e5e35f66 100644 --- a/ext/googletest/googlemock/test/gmock_ex_test.cc +++ b/ext/googletest/googlemock/test/gmock_ex_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests Google Mock's functionality that depends on exceptions. 
#include "gmock/gmock.h" @@ -75,6 +74,5 @@ TEST(DefaultValueTest, ThrowsRuntimeErrorWhenNoDefaultValue) { } } - } // unnamed namespace #endif diff --git a/ext/googletest/googlemock/test/gmock_leak_test.py b/ext/googletest/googlemock/test/gmock_leak_test.py index 7e4b1eea9a..4f41c7bbd0 100755 --- a/ext/googletest/googlemock/test/gmock_leak_test.py +++ b/ext/googletest/googlemock/test/gmock_leak_test.py @@ -31,7 +31,7 @@ """Tests that leaked mock objects can be caught be Google Mock.""" -import gmock_test_utils +from googlemock.test import gmock_test_utils PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_') TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*'] diff --git a/ext/googletest/googlemock/test/gmock_leak_test_.cc b/ext/googletest/googlemock/test/gmock_leak_test_.cc index 2e095abcf4..fa645916f8 100644 --- a/ext/googletest/googlemock/test/gmock_leak_test_.cc +++ b/ext/googletest/googlemock/test/gmock_leak_test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. 
// // This program is for verifying that a leaked mock object can be @@ -52,7 +51,8 @@ class MockFoo : public FooInterface { MOCK_METHOD0(DoThis, void()); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFoo); + MockFoo(const MockFoo&) = delete; + MockFoo& operator=(const MockFoo&) = delete; }; TEST(LeakTest, LeakedMockWithExpectCallCausesFailureWhenLeakCheckingIsEnabled) { diff --git a/ext/googletest/googlemock/test/gmock_link2_test.cc b/ext/googletest/googlemock/test/gmock_link2_test.cc index d27ce17688..cd3d690887 100644 --- a/ext/googletest/googlemock/test/gmock_link2_test.cc +++ b/ext/googletest/googlemock/test/gmock_link2_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file is for verifying that various Google Mock constructs do not diff --git a/ext/googletest/googlemock/test/gmock_link_test.cc b/ext/googletest/googlemock/test/gmock_link_test.cc index e7c54cc230..f51e3988df 100644 --- a/ext/googletest/googlemock/test/gmock_link_test.cc +++ b/ext/googletest/googlemock/test/gmock_link_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file is for verifying that various Google Mock constructs do not diff --git a/ext/googletest/googlemock/test/gmock_link_test.h b/ext/googletest/googlemock/test/gmock_link_test.h index 5734b2e11c..eaf18e9d8c 100644 --- a/ext/googletest/googlemock/test/gmock_link_test.h +++ b/ext/googletest/googlemock/test/gmock_link_test.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. 
// // This file tests that: @@ -118,7 +117,7 @@ #include "gmock/gmock.h" #if !GTEST_OS_WINDOWS_MOBILE -# include +#include #endif #include @@ -200,14 +199,14 @@ class Interface { virtual char* StringFromString(char* str) = 0; virtual int IntFromString(char* str) = 0; virtual int& IntRefFromString(char* str) = 0; - virtual void VoidFromFunc(void(*func)(char* str)) = 0; + virtual void VoidFromFunc(void (*func)(char* str)) = 0; virtual void VoidFromIntRef(int& n) = 0; // NOLINT virtual void VoidFromFloat(float n) = 0; virtual void VoidFromDouble(double n) = 0; virtual void VoidFromVector(const std::vector& v) = 0; }; -class Mock: public Interface { +class Mock : public Interface { public: Mock() {} @@ -215,14 +214,15 @@ class Mock: public Interface { MOCK_METHOD1(StringFromString, char*(char* str)); MOCK_METHOD1(IntFromString, int(char* str)); MOCK_METHOD1(IntRefFromString, int&(char* str)); - MOCK_METHOD1(VoidFromFunc, void(void(*func)(char* str))); + MOCK_METHOD1(VoidFromFunc, void(void (*func)(char* str))); MOCK_METHOD1(VoidFromIntRef, void(int& n)); // NOLINT MOCK_METHOD1(VoidFromFloat, void(float n)); MOCK_METHOD1(VoidFromDouble, void(double n)); MOCK_METHOD1(VoidFromVector, void(const std::vector& v)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(Mock); + Mock(const Mock&) = delete; + Mock& operator=(const Mock&) = delete; }; class InvokeHelper { @@ -301,8 +301,8 @@ TEST(LinkTest, TestSetArrayArgument) { char ch = 'x'; char ch2 = 'y'; - EXPECT_CALL(mock, VoidFromString(_)).WillOnce(SetArrayArgument<0>(&ch2, - &ch2 + 1)); + EXPECT_CALL(mock, VoidFromString(_)) + .WillOnce(SetArrayArgument<0>(&ch2, &ch2 + 1)); mock.VoidFromString(&ch); } @@ -339,8 +339,8 @@ TEST(LinkTest, TestInvokeWithoutArgs) { EXPECT_CALL(mock, VoidFromString(_)) .WillOnce(InvokeWithoutArgs(&InvokeHelper::StaticVoidFromVoid)) - .WillOnce(InvokeWithoutArgs(&test_invoke_helper, - &InvokeHelper::VoidFromVoid)); + .WillOnce( + InvokeWithoutArgs(&test_invoke_helper, &InvokeHelper::VoidFromVoid)); 
mock.VoidFromString(nullptr); mock.VoidFromString(nullptr); } @@ -424,14 +424,14 @@ TEST(LinkTest, TestThrow) { // is expanded and macro expansion cannot contain #pragma. Therefore // we suppress them here. #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #endif // Tests the linkage of actions created using ACTION macro. namespace { ACTION(Return1) { return 1; } -} +} // namespace TEST(LinkTest, TestActionMacro) { Mock mock; @@ -443,7 +443,7 @@ TEST(LinkTest, TestActionMacro) { // Tests the linkage of actions created using ACTION_P macro. namespace { ACTION_P(ReturnArgument, ret_value) { return ret_value; } -} +} // namespace TEST(LinkTest, TestActionPMacro) { Mock mock; @@ -457,10 +457,10 @@ namespace { ACTION_P2(ReturnEqualsEitherOf, first, second) { return arg0 == first || arg0 == second; } -} +} // namespace #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif TEST(LinkTest, TestActionP2Macro) { @@ -492,8 +492,7 @@ TEST(LinkTest, TestMatchersEq) { const char* p = "x"; ON_CALL(mock, VoidFromString(Eq(p))).WillByDefault(Return()); - ON_CALL(mock, VoidFromString(const_cast("y"))) - .WillByDefault(Return()); + ON_CALL(mock, VoidFromString(const_cast("y"))).WillByDefault(Return()); } // Tests the linkage of the Lt, Gt, Le, Ge, and Ne matchers. @@ -592,7 +591,7 @@ TEST(LinkTest, TestMatcherElementsAre) { // Tests the linkage of the ElementsAreArray matcher. 
TEST(LinkTest, TestMatcherElementsAreArray) { Mock mock; - char arr[] = { 'a', 'b' }; + char arr[] = {'a', 'b'}; ON_CALL(mock, VoidFromVector(ElementsAreArray(arr))).WillByDefault(Return()); } diff --git a/ext/googletest/googlemock/test/gmock_output_test.py b/ext/googletest/googlemock/test/gmock_output_test.py index 25f99f2b79..6b4ab9015d 100755 --- a/ext/googletest/googlemock/test/gmock_output_test.py +++ b/ext/googletest/googlemock/test/gmock_output_test.py @@ -43,7 +43,7 @@ from io import open # pylint: disable=redefined-builtin, g-importing-member import os import re import sys -import gmock_test_utils +from googlemock.test import gmock_test_utils # The flag for generating the golden file @@ -161,13 +161,13 @@ class GMockOutputTest(gmock_test_utils.TestCase): golden_file.close() # The normalized output should match the golden file. - self.assertEquals(golden, output) + self.assertEqual(golden, output) # The raw output should contain 2 leaked mock object errors for # test GMockOutputTest.CatchesLeakedMocks. - self.assertEquals(['GMockOutputTest.CatchesLeakedMocks', - 'GMockOutputTest.CatchesLeakedMocks'], - leaky_tests) + self.assertEqual(['GMockOutputTest.CatchesLeakedMocks', + 'GMockOutputTest.CatchesLeakedMocks'], + leaky_tests) if __name__ == '__main__': diff --git a/ext/googletest/googlemock/test/gmock_output_test_.cc b/ext/googletest/googlemock/test/gmock_output_test_.cc index 3955c7331a..a178691591 100644 --- a/ext/googletest/googlemock/test/gmock_output_test_.cc +++ b/ext/googletest/googlemock/test/gmock_output_test_.cc @@ -27,21 +27,20 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests Google Mock's output in various scenarios. This ensures that // Google Mock's messages are readable and useful. 
-#include "gmock/gmock.h" - #include + #include +#include "gmock/gmock.h" #include "gtest/gtest.h" // Silence C4100 (unreferenced formal parameter) #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #endif using testing::_; @@ -63,7 +62,8 @@ class MockFoo { MOCK_METHOD2(Bar3, void(int x, int y)); private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFoo); + MockFoo(const MockFoo&) = delete; + MockFoo& operator=(const MockFoo&) = delete; }; class GMockOutputTest : public testing::Test { @@ -72,27 +72,25 @@ class GMockOutputTest : public testing::Test { }; TEST_F(GMockOutputTest, ExpectedCall) { - testing::GMOCK_FLAG(verbose) = "info"; + GMOCK_FLAG_SET(verbose, "info"); EXPECT_CALL(foo_, Bar2(0, _)); foo_.Bar2(0, 0); // Expected call - testing::GMOCK_FLAG(verbose) = "warning"; + GMOCK_FLAG_SET(verbose, "warning"); } TEST_F(GMockOutputTest, ExpectedCallToVoidFunction) { - testing::GMOCK_FLAG(verbose) = "info"; + GMOCK_FLAG_SET(verbose, "info"); EXPECT_CALL(foo_, Bar3(0, _)); foo_.Bar3(0, 0); // Expected call - testing::GMOCK_FLAG(verbose) = "warning"; + GMOCK_FLAG_SET(verbose, "warning"); } TEST_F(GMockOutputTest, ExplicitActionsRunOut) { - EXPECT_CALL(foo_, Bar2(_, _)) - .Times(2) - .WillOnce(Return(false)); + EXPECT_CALL(foo_, Bar2(_, _)).Times(2).WillOnce(Return(false)); foo_.Bar2(2, 2); foo_.Bar2(1, 1); // Explicit actions in EXPECT_CALL run out. 
} @@ -134,8 +132,7 @@ TEST_F(GMockOutputTest, UninterestingCallToVoidFunction) { } TEST_F(GMockOutputTest, RetiredExpectation) { - EXPECT_CALL(foo_, Bar2(_, _)) - .RetiresOnSaturation(); + EXPECT_CALL(foo_, Bar2(_, _)).RetiresOnSaturation(); EXPECT_CALL(foo_, Bar2(0, 0)); foo_.Bar2(1, 1); @@ -160,12 +157,9 @@ TEST_F(GMockOutputTest, UnsatisfiedPrerequisite) { TEST_F(GMockOutputTest, UnsatisfiedPrerequisites) { Sequence s1, s2; - EXPECT_CALL(foo_, Bar(_, 0, _)) - .InSequence(s1); - EXPECT_CALL(foo_, Bar2(0, 0)) - .InSequence(s2); - EXPECT_CALL(foo_, Bar2(1, _)) - .InSequence(s1, s2); + EXPECT_CALL(foo_, Bar(_, 0, _)).InSequence(s1); + EXPECT_CALL(foo_, Bar2(0, 0)).InSequence(s2); + EXPECT_CALL(foo_, Bar2(1, _)).InSequence(s1, s2); foo_.Bar2(1, 0); // Has two immediate unsatisfied pre-requisites foo_.Bar("Hi", 0, 0); @@ -179,8 +173,7 @@ TEST_F(GMockOutputTest, UnsatisfiedWith) { TEST_F(GMockOutputTest, UnsatisfiedExpectation) { EXPECT_CALL(foo_, Bar(_, _, _)); - EXPECT_CALL(foo_, Bar2(0, _)) - .Times(2); + EXPECT_CALL(foo_, Bar2(0, _)).Times(2); foo_.Bar2(0, 1); } @@ -194,26 +187,22 @@ TEST_F(GMockOutputTest, MismatchArguments) { } TEST_F(GMockOutputTest, MismatchWith) { - EXPECT_CALL(foo_, Bar2(Ge(2), Ge(1))) - .With(Ge()); + EXPECT_CALL(foo_, Bar2(Ge(2), Ge(1))).With(Ge()); foo_.Bar2(2, 3); // Mismatch With() foo_.Bar2(2, 1); } TEST_F(GMockOutputTest, MismatchArgumentsAndWith) { - EXPECT_CALL(foo_, Bar2(Ge(2), Ge(1))) - .With(Ge()); + EXPECT_CALL(foo_, Bar2(Ge(2), Ge(1))).With(Ge()); foo_.Bar2(1, 3); // Mismatch arguments and mismatch With() foo_.Bar2(2, 1); } TEST_F(GMockOutputTest, UnexpectedCallWithDefaultAction) { - ON_CALL(foo_, Bar2(_, _)) - .WillByDefault(Return(true)); // Default action #1 - ON_CALL(foo_, Bar2(1, _)) - .WillByDefault(Return(false)); // Default action #2 + ON_CALL(foo_, Bar2(_, _)).WillByDefault(Return(true)); // Default action #1 + ON_CALL(foo_, Bar2(1, _)).WillByDefault(Return(false)); // Default action #2 EXPECT_CALL(foo_, Bar2(2, 2)); 
foo_.Bar2(1, 0); // Unexpected call, takes default action #2. @@ -222,10 +211,8 @@ TEST_F(GMockOutputTest, UnexpectedCallWithDefaultAction) { } TEST_F(GMockOutputTest, ExcessiveCallWithDefaultAction) { - ON_CALL(foo_, Bar2(_, _)) - .WillByDefault(Return(true)); // Default action #1 - ON_CALL(foo_, Bar2(1, _)) - .WillByDefault(Return(false)); // Default action #2 + ON_CALL(foo_, Bar2(_, _)).WillByDefault(Return(true)); // Default action #1 + ON_CALL(foo_, Bar2(1, _)).WillByDefault(Return(false)); // Default action #2 EXPECT_CALL(foo_, Bar2(2, 2)); EXPECT_CALL(foo_, Bar2(1, 1)); @@ -237,22 +224,17 @@ TEST_F(GMockOutputTest, ExcessiveCallWithDefaultAction) { } TEST_F(GMockOutputTest, UninterestingCallWithDefaultAction) { - ON_CALL(foo_, Bar2(_, _)) - .WillByDefault(Return(true)); // Default action #1 - ON_CALL(foo_, Bar2(1, _)) - .WillByDefault(Return(false)); // Default action #2 + ON_CALL(foo_, Bar2(_, _)).WillByDefault(Return(true)); // Default action #1 + ON_CALL(foo_, Bar2(1, _)).WillByDefault(Return(false)); // Default action #2 foo_.Bar2(2, 2); // Uninteresting call, takes default action #1. foo_.Bar2(1, 1); // Uninteresting call, takes default action #2. } TEST_F(GMockOutputTest, ExplicitActionsRunOutWithDefaultAction) { - ON_CALL(foo_, Bar2(_, _)) - .WillByDefault(Return(true)); // Default action #1 + ON_CALL(foo_, Bar2(_, _)).WillByDefault(Return(true)); // Default action #1 - EXPECT_CALL(foo_, Bar2(_, _)) - .Times(2) - .WillOnce(Return(false)); + EXPECT_CALL(foo_, Bar2(_, _)).Times(2).WillOnce(Return(false)); foo_.Bar2(2, 2); foo_.Bar2(1, 1); // Explicit actions in EXPECT_CALL run out. } @@ -293,17 +275,17 @@ void TestCatchesLeakedMocksInAdHocTests() { // foo is deliberately leaked. } -int main(int argc, char **argv) { +int main(int argc, char** argv) { testing::InitGoogleMock(&argc, argv); // Ensures that the tests pass no matter what value of // --gmock_catch_leaked_mocks and --gmock_verbose the user specifies. 
- testing::GMOCK_FLAG(catch_leaked_mocks) = true; - testing::GMOCK_FLAG(verbose) = "warning"; + GMOCK_FLAG_SET(catch_leaked_mocks, true); + GMOCK_FLAG_SET(verbose, "warning"); TestCatchesLeakedMocksInAdHocTests(); return RUN_ALL_TESTS(); } #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif diff --git a/ext/googletest/googlemock/test/gmock_output_test_golden.txt b/ext/googletest/googlemock/test/gmock_output_test_golden.txt index 4846c12430..fdf224fd0a 100644 --- a/ext/googletest/googlemock/test/gmock_output_test_golden.txt +++ b/ext/googletest/googlemock/test/gmock_output_test_golden.txt @@ -291,7 +291,7 @@ Stack trace: [ RUN ] GMockOutputTest.PrintsMatcher FILE:#: Failure Value of: (std::pair(42, true)) -Expected: is pair (is >= 48, true) +Expected: is pair (first: is >= 48, second: true) Actual: (42, true) (of type std::pair) [ FAILED ] GMockOutputTest.PrintsMatcher [ FAILED ] GMockOutputTest.UnexpectedCall diff --git a/ext/googletest/googlemock/test/gmock_stress_test.cc b/ext/googletest/googlemock/test/gmock_stress_test.cc index 20725d69b7..9e42cd9358 100644 --- a/ext/googletest/googlemock/test/gmock_stress_test.cc +++ b/ext/googletest/googlemock/test/gmock_stress_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests that Google Mock constructs can be used in a large number of // threads concurrently. @@ -49,7 +48,7 @@ const int kRepeat = 50; class MockFoo { public: - MOCK_METHOD1(Bar, int(int n)); // NOLINT + MOCK_METHOD1(Bar, int(int n)); // NOLINT MOCK_METHOD2(Baz, char(const char* s1, const std::string& s2)); // NOLINT }; @@ -62,21 +61,16 @@ void JoinAndDelete(ThreadWithParam* t) { struct Dummy {}; - // Tests that different mock objects can be used in their respective // threads. This should generate no Google Test failure. 
void TestConcurrentMockObjects(Dummy /* dummy */) { // Creates a mock and does some typical operations on it. MockFoo foo; - ON_CALL(foo, Bar(_)) - .WillByDefault(Return(1)); - ON_CALL(foo, Baz(_, _)) - .WillByDefault(Return('b')); - ON_CALL(foo, Baz(_, "you")) - .WillByDefault(Return('a')); + ON_CALL(foo, Bar(_)).WillByDefault(Return(1)); + ON_CALL(foo, Baz(_, _)).WillByDefault(Return('b')); + ON_CALL(foo, Baz(_, "you")).WillByDefault(Return('a')); - EXPECT_CALL(foo, Bar(0)) - .Times(AtMost(3)); + EXPECT_CALL(foo, Bar(0)).Times(AtMost(3)); EXPECT_CALL(foo, Baz(_, _)); EXPECT_CALL(foo, Baz("hi", "you")) .WillOnce(Return('z')) @@ -119,22 +113,19 @@ void Helper1(Helper1Param param) { void TestConcurrentCallsOnSameObject(Dummy /* dummy */) { MockFoo foo; - ON_CALL(foo, Bar(_)) - .WillByDefault(Return(1)); - EXPECT_CALL(foo, Baz(_, "b")) - .Times(kRepeat) - .WillRepeatedly(Return('a')); + ON_CALL(foo, Bar(_)).WillByDefault(Return(1)); + EXPECT_CALL(foo, Baz(_, "b")).Times(kRepeat).WillRepeatedly(Return('a')); EXPECT_CALL(foo, Baz(_, "c")); // Expected to be unsatisfied. // This chunk of code should generate kRepeat failures about // excessive calls, and 2*kRepeat failures about unexpected calls. 
int count1 = 0; - const Helper1Param param = { &foo, &count1 }; + const Helper1Param param = {&foo, &count1}; ThreadWithParam* const t = new ThreadWithParam(Helper1, param, nullptr); int count2 = 0; - const Helper1Param param2 = { &foo, &count2 }; + const Helper1Param param2 = {&foo, &count2}; Helper1(param2); JoinAndDelete(t); @@ -162,22 +153,18 @@ void TestPartiallyOrderedExpectationsWithThreads(Dummy /* dummy */) { { InSequence dummy; EXPECT_CALL(foo, Bar(0)); - EXPECT_CALL(foo, Bar(1)) - .InSequence(s1, s2); + EXPECT_CALL(foo, Bar(1)).InSequence(s1, s2); } EXPECT_CALL(foo, Bar(2)) - .Times(2*kRepeat) + .Times(2 * kRepeat) .InSequence(s1) .RetiresOnSaturation(); - EXPECT_CALL(foo, Bar(3)) - .Times(2*kRepeat) - .InSequence(s2); + EXPECT_CALL(foo, Bar(3)).Times(2 * kRepeat).InSequence(s2); { InSequence dummy; - EXPECT_CALL(foo, Bar(2)) - .InSequence(s1, s2); + EXPECT_CALL(foo, Bar(2)).InSequence(s1, s2); EXPECT_CALL(foo, Bar(4)); } @@ -196,12 +183,12 @@ void TestPartiallyOrderedExpectationsWithThreads(Dummy /* dummy */) { // Tests using Google Mock constructs in many threads concurrently. TEST(StressTest, CanUseGMockWithThreads) { void (*test_routines[])(Dummy dummy) = { - &TestConcurrentMockObjects, - &TestConcurrentCallsOnSameObject, - &TestPartiallyOrderedExpectationsWithThreads, + &TestConcurrentMockObjects, + &TestConcurrentCallsOnSameObject, + &TestPartiallyOrderedExpectationsWithThreads, }; - const int kRoutines = sizeof(test_routines)/sizeof(test_routines[0]); + const int kRoutines = sizeof(test_routines) / sizeof(test_routines[0]); const int kCopiesOfEachRoutine = kMaxTestThreads / kRoutines; const int kTestThreads = kCopiesOfEachRoutine * kRoutines; ThreadWithParam* threads[kTestThreads] = {}; @@ -220,7 +207,7 @@ TEST(StressTest, CanUseGMockWithThreads) { // Ensures that the correct number of failures have been reported. 
const TestInfo* const info = UnitTest::GetInstance()->current_test_info(); const TestResult& result = *info->result(); - const int kExpectedFailures = (3*kRepeat + 1)*kCopiesOfEachRoutine; + const int kExpectedFailures = (3 * kRepeat + 1) * kCopiesOfEachRoutine; GTEST_CHECK_(kExpectedFailures == result.total_part_count()) << "Expected " << kExpectedFailures << " failures, but got " << result.total_part_count(); @@ -229,7 +216,7 @@ TEST(StressTest, CanUseGMockWithThreads) { } // namespace } // namespace testing -int main(int argc, char **argv) { +int main(int argc, char** argv) { testing::InitGoogleMock(&argc, argv); const int exit_code = RUN_ALL_TESTS(); // Expected to fail. diff --git a/ext/googletest/googlemock/test/gmock_test.cc b/ext/googletest/googlemock/test/gmock_test.cc index e9840a337d..8f1bd5d03e 100644 --- a/ext/googletest/googlemock/test/gmock_test.cc +++ b/ext/googletest/googlemock/test/gmock_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file tests code in gmock.cc. 
@@ -35,13 +34,12 @@ #include "gmock/gmock.h" #include + #include "gtest/gtest.h" #include "gtest/internal/custom/gtest.h" #if !defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) -using testing::GMOCK_FLAG(default_mock_behavior); -using testing::GMOCK_FLAG(verbose); using testing::InitGoogleMock; // Verifies that calling InitGoogleMock() on argv results in new_argv, @@ -49,7 +47,7 @@ using testing::InitGoogleMock; template void TestInitGoogleMock(const Char* (&argv)[M], const Char* (&new_argv)[N], const ::std::string& expected_gmock_verbose) { - const ::std::string old_verbose = GMOCK_FLAG(verbose); + const ::std::string old_verbose = GMOCK_FLAG_GET(verbose); int argc = M - 1; InitGoogleMock(&argc, const_cast(argv)); @@ -59,8 +57,8 @@ void TestInitGoogleMock(const Char* (&argv)[M], const Char* (&new_argv)[N], EXPECT_STREQ(new_argv[i], argv[i]); } - EXPECT_EQ(expected_gmock_verbose, GMOCK_FLAG(verbose).c_str()); - GMOCK_FLAG(verbose) = old_verbose; // Restores the gmock_verbose flag. + EXPECT_EQ(expected_gmock_verbose, GMOCK_FLAG_GET(verbose)); + GMOCK_FLAG_SET(verbose, old_verbose); // Restores the gmock_verbose flag. 
} TEST(InitGoogleMockTest, ParsesInvalidCommandLine) { @@ -68,7 +66,7 @@ TEST(InitGoogleMockTest, ParsesInvalidCommandLine) { const char* new_argv[] = {nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesEmptyCommandLine) { @@ -76,7 +74,7 @@ TEST(InitGoogleMockTest, ParsesEmptyCommandLine) { const char* new_argv[] = {"foo.exe", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesSingleFlag) { @@ -88,16 +86,16 @@ TEST(InitGoogleMockTest, ParsesSingleFlag) { } TEST(InitGoogleMockTest, ParsesMultipleFlags) { - int old_default_behavior = GMOCK_FLAG(default_mock_behavior); + int old_default_behavior = GMOCK_FLAG_GET(default_mock_behavior); const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", L"--gmock_default_mock_behavior=2", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); - EXPECT_EQ(2, GMOCK_FLAG(default_mock_behavior)); + EXPECT_EQ(2, GMOCK_FLAG_GET(default_mock_behavior)); EXPECT_NE(2, old_default_behavior); - GMOCK_FLAG(default_mock_behavior) = old_default_behavior; + GMOCK_FLAG_SET(default_mock_behavior, old_default_behavior); } TEST(InitGoogleMockTest, ParsesUnrecognizedFlag) { @@ -105,7 +103,7 @@ TEST(InitGoogleMockTest, ParsesUnrecognizedFlag) { const char* new_argv[] = {"foo.exe", "--non_gmock_flag=blah", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { @@ -122,7 +120,7 @@ TEST(WideInitGoogleMockTest, ParsesInvalidCommandLine) { const wchar_t* new_argv[] = {nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, 
ParsesEmptyCommandLine) { @@ -130,7 +128,7 @@ TEST(WideInitGoogleMockTest, ParsesEmptyCommandLine) { const wchar_t* new_argv[] = {L"foo.exe", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesSingleFlag) { @@ -142,16 +140,16 @@ TEST(WideInitGoogleMockTest, ParsesSingleFlag) { } TEST(WideInitGoogleMockTest, ParsesMultipleFlags) { - int old_default_behavior = GMOCK_FLAG(default_mock_behavior); + int old_default_behavior = GMOCK_FLAG_GET(default_mock_behavior); const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", L"--gmock_default_mock_behavior=2", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); - EXPECT_EQ(2, GMOCK_FLAG(default_mock_behavior)); + EXPECT_EQ(2, GMOCK_FLAG_GET(default_mock_behavior)); EXPECT_NE(2, old_default_behavior); - GMOCK_FLAG(default_mock_behavior) = old_default_behavior; + GMOCK_FLAG_SET(default_mock_behavior, old_default_behavior); } TEST(WideInitGoogleMockTest, ParsesUnrecognizedFlag) { @@ -159,7 +157,7 @@ TEST(WideInitGoogleMockTest, ParsesUnrecognizedFlag) { const wchar_t* new_argv[] = {L"foo.exe", L"--non_gmock_flag=blah", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { @@ -175,7 +173,7 @@ TEST(WideInitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { // Makes sure Google Mock flags can be accessed in code. TEST(FlagTest, IsAccessibleInCode) { - bool dummy = testing::GMOCK_FLAG(catch_leaked_mocks) && - testing::GMOCK_FLAG(verbose) == ""; + bool dummy = + GMOCK_FLAG_GET(catch_leaked_mocks) && GMOCK_FLAG_GET(verbose) == ""; (void)dummy; // Avoids the "unused local variable" warning. 
} diff --git a/ext/googletest/googlemock/test/gmock_test_utils.py b/ext/googletest/googlemock/test/gmock_test_utils.py index 7dc4e119d3..d7bc0974a7 100755 --- a/ext/googletest/googlemock/test/gmock_test_utils.py +++ b/ext/googletest/googlemock/test/gmock_test_utils.py @@ -30,21 +30,9 @@ """Unit test utilities for Google C++ Mocking Framework.""" import os -import sys - -# Determines path to gtest_test_utils and imports it. -SCRIPT_DIR = os.path.dirname(__file__) or '.' - -# isdir resolves symbolic links. -gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../../googletest/test') -if os.path.isdir(gtest_tests_util_dir): - GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir -else: - GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../googletest/test') -sys.path.append(GTEST_TESTS_UTIL_DIR) # pylint: disable=C6204 -import gtest_test_utils +from googletest.test import gtest_test_utils def GetSourceDir(): diff --git a/ext/googletest/googletest/CMakeLists.txt b/ext/googletest/googletest/CMakeLists.txt index abdd98b79a..aa00a5f3d2 100644 --- a/ext/googletest/googletest/CMakeLists.txt +++ b/ext/googletest/googletest/CMakeLists.txt @@ -46,14 +46,9 @@ endif() # Project version: -if (CMAKE_VERSION VERSION_LESS 3.0) - project(gtest CXX C) - set(PROJECT_VERSION ${GOOGLETEST_VERSION}) -else() - cmake_policy(SET CMP0048 NEW) - project(gtest VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) -endif() -cmake_minimum_required(VERSION 2.8.12) +cmake_minimum_required(VERSION 3.5) +cmake_policy(SET CMP0048 NEW) +project(gtest VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) if (POLICY CMP0063) # Visibility cmake_policy(SET CMP0063 NEW) @@ -136,13 +131,17 @@ set_target_properties(gtest_main PROPERTIES VERSION ${GOOGLETEST_VERSION}) # to the targets for when we are part of a parent build (ie being pulled # in via add_subdirectory() rather than being a standalone build). 
if (DEFINED CMAKE_VERSION AND NOT "${CMAKE_VERSION}" VERSION_LESS "2.8.11") + string(REPLACE ";" "$" dirs "${gtest_build_include_dirs}") target_include_directories(gtest SYSTEM INTERFACE - "$" + "$" "$/${CMAKE_INSTALL_INCLUDEDIR}>") target_include_directories(gtest_main SYSTEM INTERFACE - "$" + "$" "$/${CMAKE_INSTALL_INCLUDEDIR}>") endif() +if(CMAKE_SYSTEM_NAME MATCHES "QNX") + target_link_libraries(gtest PUBLIC regex) +endif() target_link_libraries(gtest_main PUBLIC gtest) ######################################################################## diff --git a/ext/googletest/googletest/README.md b/ext/googletest/googletest/README.md index c8aedb7877..d26b309ed0 100644 --- a/ext/googletest/googletest/README.md +++ b/ext/googletest/googletest/README.md @@ -94,7 +94,7 @@ include(FetchContent) FetchContent_Declare( googletest # Specify the commit you depend on and update it regularly. - URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip + URL https://github.com/google/googletest/archive/e2239ee6043f73722e7aa812a459f54a28552929.zip ) # For Windows: Prevent overriding the parent project's compiler/linker settings set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) @@ -203,7 +203,9 @@ add -DGTEST_DONT_DEFINE_FOO=1 to the compiler flags to tell GoogleTest to change the macro's name from `FOO` -to `GTEST_FOO`. Currently `FOO` can be `FAIL`, `SUCCEED`, or `TEST`. For +to `GTEST_FOO`. Currently `FOO` can be `ASSERT_EQ`, `ASSERT_FALSE`, `ASSERT_GE`, +`ASSERT_GT`, `ASSERT_LE`, `ASSERT_LT`, `ASSERT_NE`, `ASSERT_TRUE`, +`EXPECT_FALSE`, `EXPECT_TRUE`, `FAIL`, `SUCCEED`, `TEST`, or `TEST_F`. For example, with `-DGTEST_DONT_DEFINE_TEST=1`, you'll need to write GTEST_TEST(SomeTest, DoesThis) { ... 
} diff --git a/ext/googletest/googletest/cmake/internal_utils.cmake b/ext/googletest/googletest/cmake/internal_utils.cmake index 58fc9bfbee..5a34c07a1b 100644 --- a/ext/googletest/googletest/cmake/internal_utils.cmake +++ b/ext/googletest/googletest/cmake/internal_utils.cmake @@ -154,10 +154,6 @@ function(cxx_library_with_type name type cxx_flags) set_target_properties(${name} PROPERTIES COMPILE_FLAGS "${cxx_flags}") - # Generate debug library name with a postfix. - set_target_properties(${name} - PROPERTIES - DEBUG_POSTFIX "d") # Set the output directory for build artifacts set_target_properties(${name} PROPERTIES @@ -304,6 +300,8 @@ function(py_test name) COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test/${name}.py --build_dir=${CMAKE_CURRENT_BINARY_DIR}/\${CTEST_CONFIGURATION_TYPE} ${ARGN}) endif() + # Make the Python import path consistent between Bazel and CMake. + set_tests_properties(${name} PROPERTIES ENVIRONMENT PYTHONPATH=${CMAKE_SOURCE_DIR}) endif(PYTHONINTERP_FOUND) endfunction() diff --git a/ext/googletest/googletest/include/gtest/gtest-assertion-result.h b/ext/googletest/googletest/include/gtest/gtest-assertion-result.h new file mode 100644 index 0000000000..addbb59c64 --- /dev/null +++ b/ext/googletest/googletest/include/gtest/gtest-assertion-result.h @@ -0,0 +1,237 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This file implements the AssertionResult type. + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* + +#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_ASSERTION_RESULT_H_ +#define GOOGLETEST_INCLUDE_GTEST_GTEST_ASSERTION_RESULT_H_ + +#include +#include +#include +#include + +#include "gtest/gtest-message.h" +#include "gtest/internal/gtest-port.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +namespace testing { + +// A class for indicating whether an assertion was successful. When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. 
Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. +// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. 
+// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + +// C4800 is a level 3 warning in Visual Studio 2015 and earlier. +// This warning is not emitted in Visual Studio 2017. +// This warning is off by default starting in Visual Studio 2019 but can be +// enabled with command-line options. +#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */) +#endif + + // Used in the EXPECT_TRUE/FALSE(bool_expression). + // + // T must be contextually convertible to bool. + // + // The second parameter prevents this overload from being considered if + // the argument is implicitly convertible to AssertionResult. In that case + // we want AssertionResult's copy constructor to be used. + template + explicit AssertionResult( + const T& success, + typename std::enable_if< + !std::is_convertible::value>::type* + /*enabler*/ + = nullptr) + : success_(success) {} + +#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) + GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif + + // Assignment operator. + AssertionResult& operator=(AssertionResult other) { + swap(other); + return *this; + } + + // Returns true if and only if the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. 
Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != nullptr ? message_->c_str() : ""; + } + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template + AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. + AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == nullptr) message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Swap the contents of this AssertionResult with other. + void swap(AssertionResult& other); + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + std::unique_ptr< ::std::string> message_; +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. 
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_ASSERTION_RESULT_H_ diff --git a/ext/googletest/googletest/include/gtest/gtest-death-test.h b/ext/googletest/googletest/include/gtest/gtest-death-test.h index 4df53d973d..84e5a5bbd3 100644 --- a/ext/googletest/googletest/include/gtest/gtest-death-test.h +++ b/ext/googletest/googletest/include/gtest/gtest-death-test.h @@ -27,13 +27,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the public API for death tests. It is // #included by gtest.h so a user doesn't need to include this // directly. -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ @@ -103,7 +105,6 @@ GTEST_API_ bool InDeathTestChild(); // // On the regular expressions used in death tests: // -// GOOGLETEST_CM0005 DO NOT DELETE // On POSIX-compliant systems (*nix), we use the library, // which uses the POSIX extended regex syntax. // @@ -169,24 +170,24 @@ GTEST_API_ bool InDeathTestChild(); // Asserts that a given `statement` causes the program to exit, with an // integer exit status that satisfies `predicate`, and emitting error output // that matches `matcher`. 
-# define ASSERT_EXIT(statement, predicate, matcher) \ - GTEST_DEATH_TEST_(statement, predicate, matcher, GTEST_FATAL_FAILURE_) +#define ASSERT_EXIT(statement, predicate, matcher) \ + GTEST_DEATH_TEST_(statement, predicate, matcher, GTEST_FATAL_FAILURE_) // Like `ASSERT_EXIT`, but continues on to successive tests in the // test suite, if any: -# define EXPECT_EXIT(statement, predicate, matcher) \ - GTEST_DEATH_TEST_(statement, predicate, matcher, GTEST_NONFATAL_FAILURE_) +#define EXPECT_EXIT(statement, predicate, matcher) \ + GTEST_DEATH_TEST_(statement, predicate, matcher, GTEST_NONFATAL_FAILURE_) // Asserts that a given `statement` causes the program to exit, either by // explicitly exiting with a nonzero exit code or being killed by a // signal, and emitting error output that matches `matcher`. -# define ASSERT_DEATH(statement, matcher) \ - ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, matcher) +#define ASSERT_DEATH(statement, matcher) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, matcher) // Like `ASSERT_DEATH`, but continues on to successive tests in the // test suite, if any: -# define EXPECT_DEATH(statement, matcher) \ - EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, matcher) +#define EXPECT_DEATH(statement, matcher) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, matcher) // Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: @@ -197,22 +198,23 @@ class GTEST_API_ ExitedWithCode { ExitedWithCode(const ExitedWithCode&) = default; void operator=(const ExitedWithCode& other) = delete; bool operator()(int exit_status) const; + private: const int exit_code_; }; -# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA // Tests that an exit code describes an exit due to termination by a // given signal. 
-// GOOGLETEST_CM0006 DO NOT DELETE class GTEST_API_ KilledBySignal { public: explicit KilledBySignal(int signum); bool operator()(int exit_status) const; + private: const int signum_; }; -# endif // !GTEST_OS_WINDOWS +#endif // !GTEST_OS_WINDOWS // EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. // The death testing framework causes this to have interesting semantics, @@ -257,23 +259,21 @@ class GTEST_API_ KilledBySignal { // EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); // }, "death"); // -# ifdef NDEBUG +#ifdef NDEBUG -# define EXPECT_DEBUG_DEATH(statement, regex) \ +#define EXPECT_DEBUG_DEATH(statement, regex) \ GTEST_EXECUTE_STATEMENT_(statement, regex) -# define ASSERT_DEBUG_DEATH(statement, regex) \ +#define ASSERT_DEBUG_DEATH(statement, regex) \ GTEST_EXECUTE_STATEMENT_(statement, regex) -# else +#else -# define EXPECT_DEBUG_DEATH(statement, regex) \ - EXPECT_DEATH(statement, regex) +#define EXPECT_DEBUG_DEATH(statement, regex) EXPECT_DEATH(statement, regex) -# define ASSERT_DEBUG_DEATH(statement, regex) \ - ASSERT_DEATH(statement, regex) +#define ASSERT_DEBUG_DEATH(statement, regex) ASSERT_DEATH(statement, regex) -# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // NDEBUG for EXPECT_DEBUG_DEATH #endif // GTEST_HAS_DEATH_TEST // This macro is used for implementing macros such as @@ -311,18 +311,17 @@ class GTEST_API_ KilledBySignal { // statement unconditionally returns or throws. The Message constructor at // the end allows the syntax of streaming additional messages into the // macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. 
-# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (::testing::internal::AlwaysTrue()) { \ - GTEST_LOG_(WARNING) \ - << "Death tests are not supported on this platform.\n" \ - << "Statement '" #statement "' cannot be verified."; \ - } else if (::testing::internal::AlwaysFalse()) { \ - ::testing::internal::RE::PartialMatch(".*", (regex)); \ - GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ - terminator; \ - } else \ - ::testing::Message() +#define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_LOG_(WARNING) << "Death tests are not supported on this platform.\n" \ + << "Statement '" #statement "' cannot be verified."; \ + } else if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::RE::PartialMatch(".*", (regex)); \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + terminator; \ + } else \ + ::testing::Message() // EXPECT_DEATH_IF_SUPPORTED(statement, regex) and // ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if @@ -330,15 +329,15 @@ class GTEST_API_ KilledBySignal { // useful when you are combining death test assertions with normal test // assertions in one test. 
#if GTEST_HAS_DEATH_TEST -# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ - EXPECT_DEATH(statement, regex) -# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ - ASSERT_DEATH(statement, regex) +#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) #else -# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ - GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, ) -# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ - GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return) +#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, ) +#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return) #endif } // namespace testing diff --git a/ext/googletest/googletest/include/gtest/gtest-matchers.h b/ext/googletest/googletest/include/gtest/gtest-matchers.h index 9fa34a05ba..bffa00c533 100644 --- a/ext/googletest/googletest/include/gtest/gtest-matchers.h +++ b/ext/googletest/googletest/include/gtest/gtest-matchers.h @@ -32,6 +32,10 @@ // This file implements just enough of the matcher interface to allow // EXPECT_DEATH and friends to accept a matcher argument. +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* + #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ @@ -98,11 +102,11 @@ class MatchResultListener { private: ::std::ostream* const stream_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener); + MatchResultListener(const MatchResultListener&) = delete; + MatchResultListener& operator=(const MatchResultListener&) = delete; }; -inline MatchResultListener::~MatchResultListener() { -} +inline MatchResultListener::~MatchResultListener() {} // An instance of a subclass of this knows how to describe itself as a // matcher. 
@@ -176,27 +180,39 @@ namespace internal { struct AnyEq { template - bool operator()(const A& a, const B& b) const { return a == b; } + bool operator()(const A& a, const B& b) const { + return a == b; + } }; struct AnyNe { template - bool operator()(const A& a, const B& b) const { return a != b; } + bool operator()(const A& a, const B& b) const { + return a != b; + } }; struct AnyLt { template - bool operator()(const A& a, const B& b) const { return a < b; } + bool operator()(const A& a, const B& b) const { + return a < b; + } }; struct AnyGt { template - bool operator()(const A& a, const B& b) const { return a > b; } + bool operator()(const A& a, const B& b) const { + return a > b; + } }; struct AnyLe { template - bool operator()(const A& a, const B& b) const { return a <= b; } + bool operator()(const A& a, const B& b) const { + return a <= b; + } }; struct AnyGe { template - bool operator()(const A& a, const B& b) const { return a >= b; } + bool operator()(const A& a, const B& b) const { + return a >= b; + } }; // A match result listener that ignores the explanation. 
@@ -205,7 +221,8 @@ class DummyMatchResultListener : public MatchResultListener { DummyMatchResultListener() : MatchResultListener(nullptr) {} private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener); + DummyMatchResultListener(const DummyMatchResultListener&) = delete; + DummyMatchResultListener& operator=(const DummyMatchResultListener&) = delete; }; // A match result listener that forwards the explanation to a given @@ -217,7 +234,9 @@ class StreamMatchResultListener : public MatchResultListener { : MatchResultListener(os) {} private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener); + StreamMatchResultListener(const StreamMatchResultListener&) = delete; + StreamMatchResultListener& operator=(const StreamMatchResultListener&) = + delete; }; struct SharedPayloadBase { @@ -284,17 +303,18 @@ class MatcherBase : private MatcherDescriberInterface { } protected: - MatcherBase() : vtable_(nullptr) {} + MatcherBase() : vtable_(nullptr), buffer_() {} // Constructs a matcher from its implementation. template - explicit MatcherBase(const MatcherInterface* impl) { + explicit MatcherBase(const MatcherInterface* impl) + : vtable_(nullptr), buffer_() { Init(impl); } template ::type::is_gtest_matcher> - MatcherBase(M&& m) { // NOLINT + MatcherBase(M&& m) : vtable_(nullptr), buffer_() { // NOLINT Init(std::forward(m)); } @@ -420,8 +440,8 @@ class MatcherBase : private MatcherDescriberInterface { static const M& Get(const MatcherBase& m) { // When inlined along with Init, need to be explicit to avoid violating // strict aliasing rules. 
- const M *ptr = static_cast( - static_cast(&m.buffer_)); + const M* ptr = + static_cast(static_cast(&m.buffer_)); return *ptr; } static void Init(MatcherBase& m, M impl) { @@ -741,7 +761,7 @@ template class EqMatcher : public ComparisonBase, Rhs, AnyEq> { public: explicit EqMatcher(const Rhs& rhs) - : ComparisonBase, Rhs, AnyEq>(rhs) { } + : ComparisonBase, Rhs, AnyEq>(rhs) {} static const char* Desc() { return "is equal to"; } static const char* NegatedDesc() { return "isn't equal to"; } }; @@ -749,7 +769,7 @@ template class NeMatcher : public ComparisonBase, Rhs, AnyNe> { public: explicit NeMatcher(const Rhs& rhs) - : ComparisonBase, Rhs, AnyNe>(rhs) { } + : ComparisonBase, Rhs, AnyNe>(rhs) {} static const char* Desc() { return "isn't equal to"; } static const char* NegatedDesc() { return "is equal to"; } }; @@ -757,7 +777,7 @@ template class LtMatcher : public ComparisonBase, Rhs, AnyLt> { public: explicit LtMatcher(const Rhs& rhs) - : ComparisonBase, Rhs, AnyLt>(rhs) { } + : ComparisonBase, Rhs, AnyLt>(rhs) {} static const char* Desc() { return "is <"; } static const char* NegatedDesc() { return "isn't <"; } }; @@ -765,7 +785,7 @@ template class GtMatcher : public ComparisonBase, Rhs, AnyGt> { public: explicit GtMatcher(const Rhs& rhs) - : ComparisonBase, Rhs, AnyGt>(rhs) { } + : ComparisonBase, Rhs, AnyGt>(rhs) {} static const char* Desc() { return "is >"; } static const char* NegatedDesc() { return "isn't >"; } }; @@ -773,7 +793,7 @@ template class LeMatcher : public ComparisonBase, Rhs, AnyLe> { public: explicit LeMatcher(const Rhs& rhs) - : ComparisonBase, Rhs, AnyLe>(rhs) { } + : ComparisonBase, Rhs, AnyLe>(rhs) {} static const char* Desc() { return "is <="; } static const char* NegatedDesc() { return "isn't <="; } }; @@ -781,7 +801,7 @@ template class GeMatcher : public ComparisonBase, Rhs, AnyGe> { public: explicit GeMatcher(const Rhs& rhs) - : ComparisonBase, Rhs, AnyGe>(rhs) { } + : ComparisonBase, Rhs, AnyGe>(rhs) {} static const char* Desc() { 
return "is >="; } static const char* NegatedDesc() { return "isn't >="; } }; @@ -872,12 +892,16 @@ PolymorphicMatcher ContainsRegex( // Note: if the parameter of Eq() were declared as const T&, Eq("foo") // wouldn't compile. template -inline internal::EqMatcher Eq(T x) { return internal::EqMatcher(x); } +inline internal::EqMatcher Eq(T x) { + return internal::EqMatcher(x); +} // Constructs a Matcher from a 'value' of type T. The constructed // matcher matches any value that's equal to 'value'. template -Matcher::Matcher(T value) { *this = Eq(value); } +Matcher::Matcher(T value) { + *this = Eq(value); +} // Creates a monomorphic matcher that matches anything with type Lhs // and equal to rhs. A user may need to use this instead of Eq(...) @@ -892,7 +916,9 @@ Matcher::Matcher(T value) { *this = Eq(value); } // can always write Matcher(Lt(5)) to be explicit about the type, // for example. template -inline Matcher TypedEq(const Rhs& rhs) { return Eq(rhs); } +inline Matcher TypedEq(const Rhs& rhs) { + return Eq(rhs); +} // Creates a polymorphic matcher that matches anything >= x. template diff --git a/ext/googletest/googletest/include/gtest/gtest-message.h b/ext/googletest/googletest/include/gtest/gtest-message.h index becfd49fcb..6c8bf90009 100644 --- a/ext/googletest/googletest/include/gtest/gtest-message.h +++ b/ext/googletest/googletest/include/gtest/gtest-message.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the Message class. @@ -42,7 +41,9 @@ // to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user // program! 
-// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ @@ -110,8 +111,8 @@ class GTEST_API_ Message { // Streams a non-pointer value to this object. template - inline Message& operator <<(const T& val) { - // Some libraries overload << for STL containers. These + inline Message& operator<<(const T& val) { + // Some libraries overload << for STL containers. These // overloads are defined in the global namespace instead of ::std. // // C++'s symbol lookup rule (i.e. Koenig lookup) says that these @@ -125,7 +126,7 @@ class GTEST_API_ Message { // from the global namespace. With this using declaration, // overloads of << defined in the global namespace and those // visible via Koenig lookup are both exposed in this function. - using ::operator <<; + using ::operator<<; *ss_ << val; return *this; } @@ -144,7 +145,7 @@ class GTEST_API_ Message { // ensure consistent result across compilers, we always treat NULL // as "(null)". template - inline Message& operator <<(T* const& pointer) { // NOLINT + inline Message& operator<<(T* const& pointer) { // NOLINT if (pointer == nullptr) { *ss_ << "(null)"; } else { @@ -159,25 +160,23 @@ class GTEST_API_ Message { // templatized version above. Without this definition, streaming // endl or other basic IO manipulators to Message will confuse the // compiler. - Message& operator <<(BasicNarrowIoManip val) { + Message& operator<<(BasicNarrowIoManip val) { *ss_ << val; return *this; } // Instead of 1/0, we want to see true/false for bool values. - Message& operator <<(bool b) { - return *this << (b ? "true" : "false"); - } + Message& operator<<(bool b) { return *this << (b ? "true" : "false"); } // These two overloads allow streaming a wide C string to a Message // using the UTF-8 encoding. 
- Message& operator <<(const wchar_t* wide_c_str); - Message& operator <<(wchar_t* wide_c_str); + Message& operator<<(const wchar_t* wide_c_str); + Message& operator<<(wchar_t* wide_c_str); #if GTEST_HAS_STD_WSTRING // Converts the given wide string to a narrow string using the UTF-8 // encoding, and streams the result to this Message object. - Message& operator <<(const ::std::wstring& wstr); + Message& operator<<(const ::std::wstring& wstr); #endif // GTEST_HAS_STD_WSTRING // Gets the text streamed to this object so far as an std::string. @@ -196,7 +195,7 @@ class GTEST_API_ Message { }; // Streams a Message to an ostream. -inline std::ostream& operator <<(std::ostream& os, const Message& sb) { +inline std::ostream& operator<<(std::ostream& os, const Message& sb) { return os << sb.GetString(); } diff --git a/ext/googletest/googletest/include/gtest/gtest-param-test.h b/ext/googletest/googletest/include/gtest/gtest-param-test.h index 804e702817..b55119ac62 100644 --- a/ext/googletest/googletest/include/gtest/gtest-param-test.h +++ b/ext/googletest/googletest/include/gtest/gtest-param-test.h @@ -26,11 +26,14 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Macros and functions for implementing parameterized tests // in Google C++ Testing and Mocking Framework (Google Test) -// -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* + #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ @@ -353,9 +356,7 @@ internal::ValueArray Values(T... 
v) { // } // INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool()); // -inline internal::ParamGenerator Bool() { - return Values(false, true); -} +inline internal::ParamGenerator Bool() { return Values(false, true); } // Combine() allows the user to combine two or more sequences to produce // values of a Cartesian product of those sequences' elements. @@ -428,8 +429,11 @@ internal::CartesianProductHolder Combine(const Generator&... g) { return 0; \ } \ static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \ - GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ - test_name)); \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + (const GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) &) = delete; \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) & operator=( \ + const GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name) &) = delete; /* NOLINT */ \ }; \ int GTEST_TEST_CLASS_NAME_(test_suite_name, \ test_name)::gtest_registering_dummy_ = \ @@ -453,43 +457,42 @@ internal::CartesianProductHolder Combine(const Generator&... g) { #define GTEST_GET_FIRST_(first, ...) first #define GTEST_GET_SECOND_(first, second, ...) second -#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) 
\ - static ::testing::internal::ParamGenerator \ - gtest_##prefix##test_suite_name##_EvalGenerator_() { \ - return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \ - } \ - static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \ - const ::testing::TestParamInfo& info) { \ - if (::testing::internal::AlwaysFalse()) { \ - ::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \ - __VA_ARGS__, \ - ::testing::internal::DefaultParamName, \ - DUMMY_PARAM_))); \ - auto t = std::make_tuple(__VA_ARGS__); \ - static_assert(std::tuple_size::value <= 2, \ - "Too Many Args!"); \ - } \ - return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \ - __VA_ARGS__, \ - ::testing::internal::DefaultParamName, \ - DUMMY_PARAM_))))(info); \ - } \ - static int gtest_##prefix##test_suite_name##_dummy_ \ - GTEST_ATTRIBUTE_UNUSED_ = \ - ::testing::UnitTest::GetInstance() \ - ->parameterized_test_registry() \ - .GetTestSuitePatternHolder( \ - GTEST_STRINGIFY_(test_suite_name), \ - ::testing::internal::CodeLocation(__FILE__, __LINE__)) \ - ->AddTestSuiteInstantiation( \ - GTEST_STRINGIFY_(prefix), \ - >est_##prefix##test_suite_name##_EvalGenerator_, \ - >est_##prefix##test_suite_name##_EvalGenerateName_, \ +#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) 
\ + static ::testing::internal::ParamGenerator \ + gtest_##prefix##test_suite_name##_EvalGenerator_() { \ + return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \ + } \ + static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \ + const ::testing::TestParamInfo& info) { \ + if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \ + __VA_ARGS__, \ + ::testing::internal::DefaultParamName, \ + DUMMY_PARAM_))); \ + auto t = std::make_tuple(__VA_ARGS__); \ + static_assert(std::tuple_size::value <= 2, \ + "Too Many Args!"); \ + } \ + return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \ + __VA_ARGS__, \ + ::testing::internal::DefaultParamName, \ + DUMMY_PARAM_))))(info); \ + } \ + static int gtest_##prefix##test_suite_name##_dummy_ \ + GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::UnitTest::GetInstance() \ + ->parameterized_test_registry() \ + .GetTestSuitePatternHolder( \ + GTEST_STRINGIFY_(test_suite_name), \ + ::testing::internal::CodeLocation(__FILE__, __LINE__)) \ + ->AddTestSuiteInstantiation( \ + GTEST_STRINGIFY_(prefix), \ + >est_##prefix##test_suite_name##_EvalGenerator_, \ + >est_##prefix##test_suite_name##_EvalGenerateName_, \ __FILE__, __LINE__) - // Allow Marking a Parameterized test class as not needing to be instantiated. 
-#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(T) \ +#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(T) \ namespace gtest_do_not_use_outside_namespace_scope {} \ static const ::testing::internal::MarkAsIgnored gtest_allow_ignore_##T( \ GTEST_STRINGIFY_(T)) diff --git a/ext/googletest/googletest/include/gtest/gtest-printers.h b/ext/googletest/googletest/include/gtest/gtest-printers.h index 8a3431d1b3..a91e8b8b10 100644 --- a/ext/googletest/googletest/include/gtest/gtest-printers.h +++ b/ext/googletest/googletest/include/gtest/gtest-printers.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Test - The Google C++ Testing and Mocking Framework // // This file implements a universal value printer that can print a @@ -95,7 +94,9 @@ // being defined as many user-defined container types don't have // value_type. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ @@ -257,12 +258,10 @@ struct ConvertibleToStringViewPrinter { #endif }; - // Prints the given number of bytes in the given object to the given // ostream. GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes, - size_t count, - ::std::ostream* os); + size_t count, ::std::ostream* os); struct RawBytesPrinter { // SFINAE on `sizeof` to make sure we have a complete type. template @@ -375,12 +374,12 @@ GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char32_t); // to point to a NUL-terminated string, and thus can print it as a string. 
#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \ - template <> \ - class FormatForComparison { \ - public: \ - static ::std::string Format(CharType* value) { \ - return ::testing::PrintToString(value); \ - } \ + template <> \ + class FormatForComparison { \ + public: \ + static ::std::string Format(CharType* value) { \ + return ::testing::PrintToString(value); \ + } \ } GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string); @@ -410,8 +409,8 @@ GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring); // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. template -std::string FormatForComparisonFailureMessage( - const T1& value, const T2& /* other_operand */) { +std::string FormatForComparisonFailureMessage(const T1& value, + const T2& /* other_operand */) { return FormatForComparison::Format(value); } @@ -479,6 +478,12 @@ inline void PrintTo(char8_t c, ::std::ostream* os) { } #endif +// gcc/clang __{u,}int128_t +#if defined(__SIZEOF_INT128__) +GTEST_API_ void PrintTo(__uint128_t v, ::std::ostream* os); +GTEST_API_ void PrintTo(__int128_t v, ::std::ostream* os); +#endif // __SIZEOF_INT128__ + // Overloads for C strings. GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); inline void PrintTo(char* s, ::std::ostream* os) { @@ -545,7 +550,7 @@ void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { } // Overloads for ::std::string. -GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); +GTEST_API_ void PrintStringTo(const ::std::string& s, ::std::ostream* os); inline void PrintTo(const ::std::string& s, ::std::ostream* os) { PrintStringTo(s, os); } @@ -572,7 +577,7 @@ inline void PrintTo(const ::std::u32string& s, ::std::ostream* os) { // Overloads for ::std::wstring. 
#if GTEST_HAS_STD_WSTRING -GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); +GTEST_API_ void PrintWideStringTo(const ::std::wstring& s, ::std::ostream* os); inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { PrintWideStringTo(s, os); } @@ -587,6 +592,12 @@ inline void PrintTo(internal::StringView sp, ::std::ostream* os) { inline void PrintTo(std::nullptr_t, ::std::ostream* os) { *os << "(nullptr)"; } +#if GTEST_HAS_RTTI +inline void PrintTo(const std::type_info& info, std::ostream* os) { + *os << internal::GetTypeName(info); +} +#endif // GTEST_HAS_RTTI + template void PrintTo(std::reference_wrapper ref, ::std::ostream* os) { UniversalPrinter::Print(ref.get(), os); @@ -744,6 +755,14 @@ class UniversalPrinter> { } }; +template <> +class UniversalPrinter { + public: + static void Print(decltype(Nullopt()), ::std::ostream* os) { + *os << "(nullopt)"; + } +}; + #endif // GTEST_INTERNAL_HAS_OPTIONAL #if GTEST_INTERNAL_HAS_VARIANT @@ -802,8 +821,8 @@ void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { } } // This overload prints a (const) char array compactly. -GTEST_API_ void UniversalPrintArray( - const char* begin, size_t len, ::std::ostream* os); +GTEST_API_ void UniversalPrintArray(const char* begin, size_t len, + ::std::ostream* os); #ifdef __cpp_char8_t // This overload prints a (const) char8_t array compactly. @@ -820,8 +839,8 @@ GTEST_API_ void UniversalPrintArray(const char32_t* begin, size_t len, ::std::ostream* os); // This overload prints a (const) wchar_t array compactly. -GTEST_API_ void UniversalPrintArray( - const wchar_t* begin, size_t len, ::std::ostream* os); +GTEST_API_ void UniversalPrintArray(const wchar_t* begin, size_t len, + ::std::ostream* os); // Implements printing an array type T[N]. 
template @@ -980,10 +999,10 @@ void UniversalPrint(const T& value, ::std::ostream* os) { UniversalPrinter::Print(value, os); } -typedef ::std::vector< ::std::string> Strings; +typedef ::std::vector<::std::string> Strings; - // Tersely prints the first N fields of a tuple to a string vector, - // one element for each field. +// Tersely prints the first N fields of a tuple to a string vector, +// one element for each field. template void TersePrintPrefixToStrings(const Tuple&, std::integral_constant, Strings*) {} diff --git a/ext/googletest/googletest/include/gtest/gtest-spi.h b/ext/googletest/googletest/include/gtest/gtest-spi.h index eacef44669..bec8c4810b 100644 --- a/ext/googletest/googletest/include/gtest/gtest-spi.h +++ b/ext/googletest/googletest/include/gtest/gtest-spi.h @@ -27,12 +27,9 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // Utilities for testing Google Test itself and code that uses Google Test // (e.g. frameworks built on top of Google Test). 
-// GOOGLETEST_CM0004 DO NOT DELETE - #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_ @@ -88,7 +85,10 @@ class GTEST_API_ ScopedFakeTestPartResultReporter TestPartResultReporterInterface* old_reporter_; TestPartResultArray* const result_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); + ScopedFakeTestPartResultReporter(const ScopedFakeTestPartResultReporter&) = + delete; + ScopedFakeTestPartResultReporter& operator=( + const ScopedFakeTestPartResultReporter&) = delete; }; namespace internal { @@ -104,12 +104,14 @@ class GTEST_API_ SingleFailureChecker { SingleFailureChecker(const TestPartResultArray* results, TestPartResult::Type type, const std::string& substr); ~SingleFailureChecker(); + private: const TestPartResultArray* const results_; const TestPartResult::Type type_; const std::string substr_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker); + SingleFailureChecker(const SingleFailureChecker&) = delete; + SingleFailureChecker& operator=(const SingleFailureChecker&) = delete; }; } // namespace internal @@ -119,7 +121,8 @@ class GTEST_API_ SingleFailureChecker { GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 // A set of macros for testing Google Test assertions or code that's expected -// to generate Google Test fatal failures. It verifies that the given +// to generate Google Test fatal failures (e.g. a failure from an ASSERT_EQ, but +// not a non-fatal failure, as from EXPECT_EQ). It verifies that the given // statement will cause exactly one fatal Google Test failure with 'substr' // being part of the failure message. // @@ -141,44 +144,46 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 // helper macro, due to some peculiarity in how the preprocessor // works. The AcceptsMacroThatExpandsToUnprotectedComma test in // gtest_unittest.cc will fail to compile if we do that. 
-#define EXPECT_FATAL_FAILURE(statement, substr) \ - do { \ - class GTestExpectFatalFailureHelper {\ - public:\ - static void Execute() { statement; }\ - };\ - ::testing::TestPartResultArray gtest_failures;\ - ::testing::internal::SingleFailureChecker gtest_checker(\ - >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ - {\ - ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ - ::testing::ScopedFakeTestPartResultReporter:: \ - INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ - GTestExpectFatalFailureHelper::Execute();\ - }\ +#define EXPECT_FATAL_FAILURE(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper { \ + public: \ + static void Execute() { statement; } \ + }; \ + ::testing::TestPartResultArray gtest_failures; \ + ::testing::internal::SingleFailureChecker gtest_checker( \ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr)); \ + { \ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, \ + >est_failures); \ + GTestExpectFatalFailureHelper::Execute(); \ + } \ } while (::testing::internal::AlwaysFalse()) -#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ - do { \ - class GTestExpectFatalFailureHelper {\ - public:\ - static void Execute() { statement; }\ - };\ - ::testing::TestPartResultArray gtest_failures;\ - ::testing::internal::SingleFailureChecker gtest_checker(\ - >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ - {\ - ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ - ::testing::ScopedFakeTestPartResultReporter:: \ - INTERCEPT_ALL_THREADS, >est_failures);\ - GTestExpectFatalFailureHelper::Execute();\ - }\ +#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper { \ + public: \ + static void Execute() { statement; } \ + }; \ + ::testing::TestPartResultArray gtest_failures; \ + 
::testing::internal::SingleFailureChecker gtest_checker( \ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr)); \ + { \ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \ + ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ + >est_failures); \ + GTestExpectFatalFailureHelper::Execute(); \ + } \ } while (::testing::internal::AlwaysFalse()) // A macro for testing Google Test assertions or code that's expected to -// generate Google Test non-fatal failures. It asserts that the given -// statement will cause exactly one non-fatal Google Test failure with 'substr' -// being part of the failure message. +// generate Google Test non-fatal failures (e.g. a failure from an EXPECT_EQ, +// but not from an ASSERT_EQ). It asserts that the given statement will cause +// exactly one non-fatal Google Test failure with 'substr' being part of the +// failure message. // // There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only // affects and considers failures generated in the current thread and @@ -207,32 +212,37 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 // instead of // GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) // to avoid an MSVC warning on unreachable code. 
-#define EXPECT_NONFATAL_FAILURE(statement, substr) \ - do {\ - ::testing::TestPartResultArray gtest_failures;\ - ::testing::internal::SingleFailureChecker gtest_checker(\ +#define EXPECT_NONFATAL_FAILURE(statement, substr) \ + do { \ + ::testing::TestPartResultArray gtest_failures; \ + ::testing::internal::SingleFailureChecker gtest_checker( \ >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ - (substr));\ - {\ - ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ - ::testing::ScopedFakeTestPartResultReporter:: \ - INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ - if (::testing::internal::AlwaysTrue()) { statement; }\ - }\ + (substr)); \ + { \ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, \ + >est_failures); \ + if (::testing::internal::AlwaysTrue()) { \ + statement; \ + } \ + } \ } while (::testing::internal::AlwaysFalse()) -#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ - do {\ - ::testing::TestPartResultArray gtest_failures;\ - ::testing::internal::SingleFailureChecker gtest_checker(\ - >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ - (substr));\ - {\ - ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ +#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do { \ + ::testing::TestPartResultArray gtest_failures; \ + ::testing::internal::SingleFailureChecker gtest_checker( \ + >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr)); \ + { \ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ - >est_failures);\ - if (::testing::internal::AlwaysTrue()) { statement; }\ - }\ + >est_failures); \ + if (::testing::internal::AlwaysTrue()) { \ + statement; \ + } \ + } \ } while (::testing::internal::AlwaysFalse()) #endif // GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_ diff --git 
a/ext/googletest/googletest/include/gtest/gtest-test-part.h b/ext/googletest/googletest/include/gtest/gtest-test-part.h index 203fdf98c6..09cc8c34f0 100644 --- a/ext/googletest/googletest/include/gtest/gtest-test-part.h +++ b/ext/googletest/googletest/include/gtest/gtest-test-part.h @@ -26,14 +26,17 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ #include #include + #include "gtest/internal/gtest-internal.h" #include "gtest/internal/gtest-string.h" @@ -142,7 +145,8 @@ class GTEST_API_ TestPartResultArray { private: std::vector array_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); + TestPartResultArray(const TestPartResultArray&) = delete; + TestPartResultArray& operator=(const TestPartResultArray&) = delete; }; // This interface knows how to report a test part result. 
@@ -168,11 +172,13 @@ class GTEST_API_ HasNewFatalFailureHelper ~HasNewFatalFailureHelper() override; void ReportTestPartResult(const TestPartResult& result) override; bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: bool has_new_fatal_failure_; TestPartResultReporterInterface* original_reporter_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); + HasNewFatalFailureHelper(const HasNewFatalFailureHelper&) = delete; + HasNewFatalFailureHelper& operator=(const HasNewFatalFailureHelper&) = delete; }; } // namespace internal diff --git a/ext/googletest/googletest/include/gtest/gtest-typed-test.h b/ext/googletest/googletest/include/gtest/gtest-typed-test.h index 9fdc6be10d..bd35a32660 100644 --- a/ext/googletest/googletest/include/gtest/gtest-typed-test.h +++ b/ext/googletest/googletest/include/gtest/gtest-typed-test.h @@ -27,7 +27,9 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ @@ -190,7 +192,7 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes); typedef ::testing::internal::GenerateTypeList::type \ GTEST_TYPE_PARAMS_(CaseName); \ typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type \ - GTEST_NAME_GENERATOR_(CaseName) + GTEST_NAME_GENERATOR_(CaseName) #define TYPED_TEST(CaseName, TestName) \ static_assert(sizeof(GTEST_STRINGIFY_(TestName)) > 1, \ @@ -256,7 +258,7 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes); // #included in multiple translation units linked together. 
#define TYPED_TEST_SUITE_P(SuiteName) \ static ::testing::internal::TypedTestSuitePState \ - GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName) + GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName) // Legacy API is deprecated but still available #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ @@ -301,21 +303,21 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes); REGISTER_TYPED_TEST_SUITE_P #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ -#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \ - static_assert(sizeof(GTEST_STRINGIFY_(Prefix)) > 1, \ - "test-suit-prefix must not be empty"); \ - static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \ - ::testing::internal::TypeParameterizedTestSuite< \ - SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \ - ::testing::internal::GenerateTypeList::type>:: \ - Register(GTEST_STRINGIFY_(Prefix), \ - ::testing::internal::CodeLocation(__FILE__, __LINE__), \ - >EST_TYPED_TEST_SUITE_P_STATE_(SuiteName), \ - GTEST_STRINGIFY_(SuiteName), \ - GTEST_REGISTERED_TEST_NAMES_(SuiteName), \ - ::testing::internal::GenerateNames< \ - ::testing::internal::NameGeneratorSelector< \ - __VA_ARGS__>::type, \ +#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) 
\ + static_assert(sizeof(GTEST_STRINGIFY_(Prefix)) > 1, \ + "test-suit-prefix must not be empty"); \ + static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTestSuite< \ + SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \ + ::testing::internal::GenerateTypeList::type>:: \ + Register(GTEST_STRINGIFY_(Prefix), \ + ::testing::internal::CodeLocation(__FILE__, __LINE__), \ + >EST_TYPED_TEST_SUITE_P_STATE_(SuiteName), \ + GTEST_STRINGIFY_(SuiteName), \ + GTEST_REGISTERED_TEST_NAMES_(SuiteName), \ + ::testing::internal::GenerateNames< \ + ::testing::internal::NameGeneratorSelector< \ + __VA_ARGS__>::type, \ ::testing::internal::GenerateTypeList::type>()) // Legacy API is deprecated but still available diff --git a/ext/googletest/googletest/include/gtest/gtest.h b/ext/googletest/googletest/include/gtest/gtest.h index 482228a6a4..d19a587a18 100644 --- a/ext/googletest/googletest/include/gtest/gtest.h +++ b/ext/googletest/googletest/include/gtest/gtest.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the public API for Google Test. It should be @@ -47,8 +46,6 @@ // registration from Barthelemy Dagenais' (barthelemy@prologique.com) // easyUnit framework. 
-// GOOGLETEST_CM0001 DO NOT DELETE - #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_H_ @@ -59,16 +56,18 @@ #include #include -#include "gtest/internal/gtest-internal.h" -#include "gtest/internal/gtest-string.h" +#include "gtest/gtest-assertion-result.h" #include "gtest/gtest-death-test.h" #include "gtest/gtest-matchers.h" #include "gtest/gtest-message.h" #include "gtest/gtest-param-test.h" #include "gtest/gtest-printers.h" -#include "gtest/gtest_prod.h" #include "gtest/gtest-test-part.h" #include "gtest/gtest-typed-test.h" +#include "gtest/gtest_pred_impl.h" +#include "gtest/gtest_prod.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-string.h" GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ /* class A needs to have dll-interface to be used by clients of class B */) @@ -206,193 +205,6 @@ using TestCase = TestSuite; class TestInfo; class UnitTest; -// A class for indicating whether an assertion was successful. When -// the assertion wasn't successful, the AssertionResult object -// remembers a non-empty message that describes how it failed. -// -// To create an instance of this class, use one of the factory functions -// (AssertionSuccess() and AssertionFailure()). -// -// This class is useful for two purposes: -// 1. Defining predicate functions to be used with Boolean test assertions -// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts -// 2. Defining predicate-format functions to be -// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). 
-// -// For example, if you define IsEven predicate: -// -// testing::AssertionResult IsEven(int n) { -// if ((n % 2) == 0) -// return testing::AssertionSuccess(); -// else -// return testing::AssertionFailure() << n << " is odd"; -// } -// -// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) -// will print the message -// -// Value of: IsEven(Fib(5)) -// Actual: false (5 is odd) -// Expected: true -// -// instead of a more opaque -// -// Value of: IsEven(Fib(5)) -// Actual: false -// Expected: true -// -// in case IsEven is a simple Boolean predicate. -// -// If you expect your predicate to be reused and want to support informative -// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up -// about half as often as positive ones in our tests), supply messages for -// both success and failure cases: -// -// testing::AssertionResult IsEven(int n) { -// if ((n % 2) == 0) -// return testing::AssertionSuccess() << n << " is even"; -// else -// return testing::AssertionFailure() << n << " is odd"; -// } -// -// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print -// -// Value of: IsEven(Fib(6)) -// Actual: true (8 is even) -// Expected: false -// -// NB: Predicates that support negative Boolean assertions have reduced -// performance in positive ones so be careful not to use them in tests -// that have lots (tens of thousands) of positive Boolean assertions. -// -// To use this class with EXPECT_PRED_FORMAT assertions such as: -// -// // Verifies that Foo() returns an even number. 
-// EXPECT_PRED_FORMAT1(IsEven, Foo()); -// -// you need to define: -// -// testing::AssertionResult IsEven(const char* expr, int n) { -// if ((n % 2) == 0) -// return testing::AssertionSuccess(); -// else -// return testing::AssertionFailure() -// << "Expected: " << expr << " is even\n Actual: it's " << n; -// } -// -// If Foo() returns 5, you will see the following message: -// -// Expected: Foo() is even -// Actual: it's 5 -// -class GTEST_API_ AssertionResult { - public: - // Copy constructor. - // Used in EXPECT_TRUE/FALSE(assertion_result). - AssertionResult(const AssertionResult& other); - -// C4800 is a level 3 warning in Visual Studio 2015 and earlier. -// This warning is not emitted in Visual Studio 2017. -// This warning is off by default starting in Visual Studio 2019 but can be -// enabled with command-line options. -#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) - GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */) -#endif - - // Used in the EXPECT_TRUE/FALSE(bool_expression). - // - // T must be contextually convertible to bool. - // - // The second parameter prevents this overload from being considered if - // the argument is implicitly convertible to AssertionResult. In that case - // we want AssertionResult's copy constructor to be used. - template - explicit AssertionResult( - const T& success, - typename std::enable_if< - !std::is_convertible::value>::type* - /*enabler*/ - = nullptr) - : success_(success) {} - -#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) - GTEST_DISABLE_MSC_WARNINGS_POP_() -#endif - - // Assignment operator. - AssertionResult& operator=(AssertionResult other) { - swap(other); - return *this; - } - - // Returns true if and only if the assertion succeeded. - operator bool() const { return success_; } // NOLINT - - // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. - AssertionResult operator!() const; - - // Returns the text streamed into this AssertionResult. 
Test assertions - // use it when they fail (i.e., the predicate's outcome doesn't match the - // assertion's expectation). When nothing has been streamed into the - // object, returns an empty string. - const char* message() const { - return message_.get() != nullptr ? message_->c_str() : ""; - } - // Deprecated; please use message() instead. - const char* failure_message() const { return message(); } - - // Streams a custom failure message into this object. - template AssertionResult& operator<<(const T& value) { - AppendMessage(Message() << value); - return *this; - } - - // Allows streaming basic output manipulators such as endl or flush into - // this object. - AssertionResult& operator<<( - ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { - AppendMessage(Message() << basic_manipulator); - return *this; - } - - private: - // Appends the contents of message to message_. - void AppendMessage(const Message& a_message) { - if (message_.get() == nullptr) message_.reset(new ::std::string); - message_->append(a_message.GetString().c_str()); - } - - // Swap the contents of this AssertionResult with other. - void swap(AssertionResult& other); - - // Stores result of the assertion predicate. - bool success_; - // Stores the message describing the condition in case the expectation - // construct is not satisfied with the predicate's outcome. - // Referenced via a pointer to avoid taking too much stack frame space - // with test assertions. - std::unique_ptr< ::std::string> message_; -}; - -// Makes a successful assertion result. -GTEST_API_ AssertionResult AssertionSuccess(); - -// Makes a failed assertion result. -GTEST_API_ AssertionResult AssertionFailure(); - -// Makes a failed assertion result with the given failure message. -// Deprecated; use AssertionFailure() << msg. 
-GTEST_API_ AssertionResult AssertionFailure(const Message& msg); - -} // namespace testing - -// Includes the auto-generated header that implements a family of generic -// predicate assertion macros. This include comes late because it relies on -// APIs declared above. -#include "gtest/gtest_pred_impl.h" - -namespace testing { - // The abstract class that all tests inherit from. // // In Google Test, a unit test program contains one or many TestSuites, and @@ -527,7 +339,8 @@ class GTEST_API_ Test { virtual Setup_should_be_spelled_SetUp* Setup() { return nullptr; } // We disallow copying Tests. - GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); + Test(const Test&) = delete; + Test& operator=(const Test&) = delete; }; typedef internal::TimeInMillis TimeInMillis; @@ -541,24 +354,17 @@ class TestProperty { // C'tor. TestProperty does NOT have a default constructor. // Always use this constructor (with parameters) to create a // TestProperty object. - TestProperty(const std::string& a_key, const std::string& a_value) : - key_(a_key), value_(a_value) { - } + TestProperty(const std::string& a_key, const std::string& a_value) + : key_(a_key), value_(a_value) {} // Gets the user supplied key. - const char* key() const { - return key_.c_str(); - } + const char* key() const { return key_.c_str(); } // Gets the user supplied value. - const char* value() const { - return value_.c_str(); - } + const char* value() const { return value_.c_str(); } // Sets a new value, overriding the one supplied in the constructor. - void SetValue(const std::string& new_value) { - value_ = new_value; - } + void SetValue(const std::string& new_value) { value_ = new_value; } private: // The key supplied by the user. @@ -692,7 +498,8 @@ class GTEST_API_ TestResult { TimeInMillis elapsed_time_; // We disallow copying TestResult. 
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); + TestResult(const TestResult&) = delete; + TestResult& operator=(const TestResult&) = delete; }; // class TestResult // A TestInfo object stores the following information about a test: @@ -816,8 +623,8 @@ class GTEST_API_ TestInfo { } // These fields are immutable properties of the test. - const std::string test_suite_name_; // test suite name - const std::string name_; // Test name + const std::string test_suite_name_; // test suite name + const std::string name_; // Test name // Name of the parameter type, or NULL if this is not a typed or a // type-parameterized test. const std::unique_ptr type_param_; @@ -838,7 +645,8 @@ class GTEST_API_ TestInfo { // test for the second time. TestResult result_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); + TestInfo(const TestInfo&) = delete; + TestInfo& operator=(const TestInfo&) = delete; }; // A test suite, which consists of a vector of TestInfos. @@ -946,7 +754,7 @@ class GTEST_API_ TestSuite { // Adds a TestInfo to this test suite. Will delete the TestInfo upon // destruction of the TestSuite object. - void AddTestInfo(TestInfo * test_info); + void AddTestInfo(TestInfo* test_info); // Clears the results of all tests in this test suite. void ClearResult(); @@ -1047,7 +855,8 @@ class GTEST_API_ TestSuite { TestResult ad_hoc_test_result_; // We disallow copying TestSuites. - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestSuite); + TestSuite(const TestSuite&) = delete; + TestSuite& operator=(const TestSuite&) = delete; }; // An Environment object is capable of setting up and tearing down an @@ -1074,6 +883,7 @@ class Environment { // Override this to define how to tear down the environment. virtual void TearDown() {} + private: // If you see an error about overriding the following function or // about it being private, you have mis-spelled SetUp() as Setup(). @@ -1125,6 +935,9 @@ class TestEventListener { // Fired before the test starts. 
virtual void OnTestStart(const TestInfo& test_info) = 0; + // Fired when a test is disabled + virtual void OnTestDisabled(const TestInfo& /*test_info*/) {} + // Fired after a failed assertion or a SUCCEED() invocation. // If you want to throw an exception from this function to skip to the next // TEST, it must be AssertionException defined above, or inherited from it. @@ -1148,8 +961,7 @@ class TestEventListener { virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; // Fired after each iteration of tests finishes. - virtual void OnTestIterationEnd(const UnitTest& unit_test, - int iteration) = 0; + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration) = 0; // Fired after all test activities have ended. virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; @@ -1174,6 +986,7 @@ class EmptyTestEventListener : public TestEventListener { #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ void OnTestStart(const TestInfo& /*test_info*/) override {} + void OnTestDisabled(const TestInfo& /*test_info*/) override {} void OnTestPartResult(const TestPartResult& /*test_part_result*/) override {} void OnTestEnd(const TestInfo& /*test_info*/) override {} void OnTestSuiteEnd(const TestSuite& /*test_suite*/) override {} @@ -1263,7 +1076,8 @@ class GTEST_API_ TestEventListeners { TestEventListener* default_xml_generator_; // We disallow copying TestEventListeners. - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners); + TestEventListeners(const TestEventListeners&) = delete; + TestEventListeners& operator=(const TestEventListeners&) = delete; }; // A UnitTest consists of a vector of TestSuites. @@ -1306,8 +1120,7 @@ class GTEST_API_ UnitTest { // Returns the TestInfo object for the test that's currently running, // or NULL if no test is running. 
- const TestInfo* current_test_info() const - GTEST_LOCK_EXCLUDED_(mutex_); + const TestInfo* current_test_info() const GTEST_LOCK_EXCLUDED_(mutex_); // Returns the random seed used at the start of the current test run. int random_seed() const; @@ -1413,8 +1226,7 @@ class GTEST_API_ UnitTest { // eventually call this to report their results. The user code // should use the assertion macros instead of calling this directly. void AddTestPartResult(TestPartResult::Type result_type, - const char* file_name, - int line_number, + const char* file_name, int line_number, const std::string& message, const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_); @@ -1445,8 +1257,7 @@ class GTEST_API_ UnitTest { friend std::set* internal::GetIgnoredParameterizedTestSuites(); friend internal::UnitTestImpl* internal::GetUnitTestImpl(); friend void internal::ReportFailureInUnknownLocation( - TestPartResult::Type result_type, - const std::string& message); + TestPartResult::Type result_type, const std::string& message); // Creates an empty UnitTest. UnitTest(); @@ -1460,8 +1271,7 @@ class GTEST_API_ UnitTest { GTEST_LOCK_EXCLUDED_(mutex_); // Pops a trace from the per-thread Google Test trace stack. - void PopGTestTrace() - GTEST_LOCK_EXCLUDED_(mutex_); + void PopGTestTrace() GTEST_LOCK_EXCLUDED_(mutex_); // Protects mutable state in *impl_. This is mutable as some const // methods need to lock it too. @@ -1474,7 +1284,8 @@ class GTEST_API_ UnitTest { internal::UnitTestImpl* impl_; // We disallow copying UnitTest. - GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest); + UnitTest(const UnitTest&) = delete; + UnitTest& operator=(const UnitTest&) = delete; }; // A convenient wrapper for adding an environment for the test @@ -1525,13 +1336,11 @@ namespace internal { // when calling EXPECT_* in a tight loop. 
template AssertionResult CmpHelperEQFailure(const char* lhs_expression, - const char* rhs_expression, - const T1& lhs, const T2& rhs) { - return EqFailure(lhs_expression, - rhs_expression, + const char* rhs_expression, const T1& lhs, + const T2& rhs) { + return EqFailure(lhs_expression, rhs_expression, FormatForComparisonFailureMessage(lhs, rhs), - FormatForComparisonFailureMessage(rhs, lhs), - false); + FormatForComparisonFailureMessage(rhs, lhs), false); } // This block of code defines operator==/!= @@ -1544,8 +1353,7 @@ inline bool operator!=(faketype, faketype) { return false; } // The helper function for {ASSERT|EXPECT}_EQ. template AssertionResult CmpHelperEQ(const char* lhs_expression, - const char* rhs_expression, - const T1& lhs, + const char* rhs_expression, const T1& lhs, const T2& rhs) { if (lhs == rhs) { return AssertionSuccess(); @@ -1576,8 +1384,7 @@ class EqHelper { // Even though its body looks the same as the above version, we // cannot merge the two, as it will make anonymous enums unhappy. static AssertionResult Compare(const char* lhs_expression, - const char* rhs_expression, - BiggestInt lhs, + const char* rhs_expression, BiggestInt lhs, BiggestInt rhs) { return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs); } @@ -1612,16 +1419,16 @@ AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2, // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
-#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ -template \ -AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ - const T1& val1, const T2& val2) {\ - if (val1 op val2) {\ - return AssertionSuccess();\ - } else {\ - return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\ - }\ -} +#define GTEST_IMPL_CMP_HELPER_(op_name, op) \ + template \ + AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + const T1& val1, const T2& val2) { \ + if (val1 op val2) { \ + return AssertionSuccess(); \ + } else { \ + return CmpHelperOpFailure(expr1, expr2, val1, val2, #op); \ + } \ + } // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. @@ -1643,49 +1450,42 @@ GTEST_IMPL_CMP_HELPER_(GT, >) // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression, const char* s2_expression, - const char* s1, - const char* s2); + const char* s1, const char* s2); // The helper function for {ASSERT|EXPECT}_STRCASEEQ. // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* s1_expression, const char* s2_expression, - const char* s1, - const char* s2); + const char* s1, const char* s2); // The helper function for {ASSERT|EXPECT}_STRNE. // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, const char* s2_expression, - const char* s1, - const char* s2); + const char* s1, const char* s2); // The helper function for {ASSERT|EXPECT}_STRCASENE. // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression, const char* s2_expression, - const char* s1, - const char* s2); - + const char* s1, const char* s2); // Helper function for *_STREQ on wide strings. // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression, const char* s2_expression, - const wchar_t* s1, - const wchar_t* s2); + const wchar_t* s1, const wchar_t* s2); // Helper function for *_STRNE on wide strings. // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, const char* s2_expression, - const wchar_t* s1, - const wchar_t* s2); + const wchar_t* s1, const wchar_t* s2); } // namespace internal @@ -1697,32 +1497,40 @@ GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, // // The {needle,haystack}_expr arguments are the stringified // expressions that generated the two real arguments. -GTEST_API_ AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const char* needle, const char* haystack); -GTEST_API_ AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const wchar_t* needle, const wchar_t* haystack); -GTEST_API_ AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const char* needle, const char* haystack); -GTEST_API_ AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const wchar_t* needle, const wchar_t* haystack); -GTEST_API_ AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::string& needle, const ::std::string& haystack); -GTEST_API_ AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::string& needle, const ::std::string& haystack); +GTEST_API_ AssertionResult IsSubstring(const char* needle_expr, + const char* haystack_expr, + const char* needle, + const char* haystack); +GTEST_API_ AssertionResult IsSubstring(const char* needle_expr, + const char* haystack_expr, + const wchar_t* needle, + const wchar_t* haystack); +GTEST_API_ AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, + const char* 
needle, + const char* haystack); +GTEST_API_ AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, + const wchar_t* needle, + const wchar_t* haystack); +GTEST_API_ AssertionResult IsSubstring(const char* needle_expr, + const char* haystack_expr, + const ::std::string& needle, + const ::std::string& haystack); +GTEST_API_ AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, + const ::std::string& needle, + const ::std::string& haystack); #if GTEST_HAS_STD_WSTRING -GTEST_API_ AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::wstring& needle, const ::std::wstring& haystack); -GTEST_API_ AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::wstring& needle, const ::std::wstring& haystack); +GTEST_API_ AssertionResult IsSubstring(const char* needle_expr, + const char* haystack_expr, + const ::std::wstring& needle, + const ::std::wstring& haystack); +GTEST_API_ AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, + const ::std::wstring& needle, + const ::std::wstring& haystack); #endif // GTEST_HAS_STD_WSTRING namespace internal { @@ -1737,8 +1545,7 @@ namespace internal { template AssertionResult CmpHelperFloatingPointEQ(const char* lhs_expression, const char* rhs_expression, - RawType lhs_value, - RawType rhs_value) { + RawType lhs_value, RawType rhs_value) { const FloatingPoint lhs(lhs_value), rhs(rhs_value); if (lhs.AlmostEquals(rhs)) { @@ -1753,10 +1560,8 @@ AssertionResult CmpHelperFloatingPointEQ(const char* lhs_expression, rhs_ss << std::setprecision(std::numeric_limits::digits10 + 2) << rhs_value; - return EqFailure(lhs_expression, - rhs_expression, - StringStreamToString(&lhs_ss), - StringStreamToString(&rhs_ss), + return EqFailure(lhs_expression, rhs_expression, + StringStreamToString(&lhs_ss), StringStreamToString(&rhs_ss), false); } @@ -1766,8 +1571,7 @@ AssertionResult 
CmpHelperFloatingPointEQ(const char* lhs_expression, GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1, const char* expr2, const char* abs_error_expr, - double val1, - double val2, + double val1, double val2, double abs_error); // INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. @@ -1775,9 +1579,7 @@ GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1, class GTEST_API_ AssertHelper { public: // Constructor. - AssertHelper(TestPartResult::Type type, - const char* file, - int line, + AssertHelper(TestPartResult::Type type, const char* file, int line, const char* message); ~AssertHelper(); @@ -1791,11 +1593,9 @@ class GTEST_API_ AssertHelper { // re-using stack space even for temporary variables, so every EXPECT_EQ // reserves stack space for another AssertHelper. struct AssertHelperData { - AssertHelperData(TestPartResult::Type t, - const char* srcfile, - int line_num, + AssertHelperData(TestPartResult::Type t, const char* srcfile, int line_num, const char* msg) - : type(t), file(srcfile), line(line_num), message(msg) { } + : type(t), file(srcfile), line(line_num), message(msg) {} TestPartResult::Type const type; const char* const file; @@ -1803,12 +1603,14 @@ class GTEST_API_ AssertHelper { std::string const message; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData); + AssertHelperData(const AssertHelperData&) = delete; + AssertHelperData& operator=(const AssertHelperData&) = delete; }; AssertHelperData* const data_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper); + AssertHelper(const AssertHelper&) = delete; + AssertHelper& operator=(const AssertHelper&) = delete; }; } // namespace internal @@ -1865,15 +1667,14 @@ class WithParamInterface { private: // Sets parameter value. The caller is responsible for making sure the value // remains alive and unchanged throughout the current test. 
- static void SetParam(const ParamType* parameter) { - parameter_ = parameter; - } + static void SetParam(const ParamType* parameter) { parameter_ = parameter; } // Static value used for accessing parameter during a test lifetime. static const ParamType* parameter_; // TestClass must be a subclass of WithParamInterface and Test. - template friend class internal::ParameterizedTestFactory; + template + friend class internal::ParameterizedTestFactory; }; template @@ -1883,8 +1684,7 @@ const T* WithParamInterface::parameter_ = nullptr; // WithParamInterface, and can just inherit from ::testing::TestWithParam. template -class TestWithParam : public Test, public WithParamInterface { -}; +class TestWithParam : public Test, public WithParamInterface {}; // Macros for indicating success/failure in test code. @@ -1915,7 +1715,7 @@ class TestWithParam : public Test, public WithParamInterface { // Generates a nonfatal failure at the given source file location with // a generic message. -#define ADD_FAILURE_AT(file, line) \ +#define ADD_FAILURE_AT(file, line) \ GTEST_MESSAGE_AT_(file, line, "Failed", \ ::testing::TestPartResult::kNonFatalFailure) @@ -1930,7 +1730,7 @@ class TestWithParam : public Test, public WithParamInterface { // Define this macro to 1 to omit the definition of FAIL(), which is a // generic name and clashes with some other libraries. #if !GTEST_DONT_DEFINE_FAIL -# define FAIL() GTEST_FAIL() +#define FAIL() GTEST_FAIL() #endif // Generates a success with a generic message. @@ -1939,7 +1739,7 @@ class TestWithParam : public Test, public WithParamInterface { // Define this macro to 1 to omit the definition of SUCCEED(), which // is a generic name and clashes with some other libraries. #if !GTEST_DONT_DEFINE_SUCCEED -# define SUCCEED() GTEST_SUCCEED() +#define SUCCEED() GTEST_SUCCEED() #endif // Macros for testing exceptions. @@ -1967,16 +1767,15 @@ class TestWithParam : public Test, public WithParamInterface { // Boolean assertions. 
Condition can be either a Boolean expression or an // AssertionResult. For more information on how to use AssertionResult with // these macros see comments on that class. -#define GTEST_EXPECT_TRUE(condition) \ +#define GTEST_EXPECT_TRUE(condition) \ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ GTEST_NONFATAL_FAILURE_) -#define GTEST_EXPECT_FALSE(condition) \ +#define GTEST_EXPECT_FALSE(condition) \ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ GTEST_NONFATAL_FAILURE_) #define GTEST_ASSERT_TRUE(condition) \ - GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ - GTEST_FATAL_FAILURE_) -#define GTEST_ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, GTEST_FATAL_FAILURE_) +#define GTEST_ASSERT_FALSE(condition) \ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ GTEST_FATAL_FAILURE_) @@ -2075,27 +1874,27 @@ class TestWithParam : public Test, public WithParamInterface { // ASSERT_XY(), which clashes with some users' own code. #if !GTEST_DONT_DEFINE_ASSERT_EQ -# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2) +#define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2) #endif #if !GTEST_DONT_DEFINE_ASSERT_NE -# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2) +#define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2) #endif #if !GTEST_DONT_DEFINE_ASSERT_LE -# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2) +#define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2) #endif #if !GTEST_DONT_DEFINE_ASSERT_LT -# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2) +#define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2) #endif #if !GTEST_DONT_DEFINE_ASSERT_GE -# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2) +#define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2) #endif #if !GTEST_DONT_DEFINE_ASSERT_GT -# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2) +#define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2) #endif // C-string Comparisons. 
All tests treat NULL and any non-NULL string @@ -2120,7 +1919,7 @@ class TestWithParam : public Test, public WithParamInterface { EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) #define EXPECT_STRCASEEQ(s1, s2) \ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2) -#define EXPECT_STRCASENE(s1, s2)\ +#define EXPECT_STRCASENE(s1, s2) \ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) #define ASSERT_STREQ(s1, s2) \ @@ -2129,7 +1928,7 @@ class TestWithParam : public Test, public WithParamInterface { ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) #define ASSERT_STRCASEEQ(s1, s2) \ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2) -#define ASSERT_STRCASENE(s1, s2)\ +#define ASSERT_STRCASENE(s1, s2) \ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) // Macros for comparing floating-point numbers. @@ -2146,29 +1945,29 @@ class TestWithParam : public Test, public WithParamInterface { // FloatingPoint template class in gtest-internal.h if you are // interested in the implementation details. 
-#define EXPECT_FLOAT_EQ(val1, val2)\ +#define EXPECT_FLOAT_EQ(val1, val2) \ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ val1, val2) -#define EXPECT_DOUBLE_EQ(val1, val2)\ +#define EXPECT_DOUBLE_EQ(val1, val2) \ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ val1, val2) -#define ASSERT_FLOAT_EQ(val1, val2)\ +#define ASSERT_FLOAT_EQ(val1, val2) \ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ val1, val2) -#define ASSERT_DOUBLE_EQ(val1, val2)\ +#define ASSERT_DOUBLE_EQ(val1, val2) \ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ val1, val2) -#define EXPECT_NEAR(val1, val2, abs_error)\ - EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ - val1, val2, abs_error) +#define EXPECT_NEAR(val1, val2, abs_error) \ + EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, val1, val2, \ + abs_error) -#define ASSERT_NEAR(val1, val2, abs_error)\ - ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ - val1, val2, abs_error) +#define ASSERT_NEAR(val1, val2, abs_error) \ + ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, val1, val2, \ + abs_error) // These predicate format functions work on floating-point values, and // can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g. @@ -2182,7 +1981,6 @@ GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2, GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2, double val1, double val2); - #if GTEST_OS_WINDOWS // Macros that test for HRESULT failure and success, these are only useful @@ -2194,17 +1992,17 @@ GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2, // expected result and the actual result with both a human-readable // string representation of the error, if available, as well as the // hex result code. 
-# define EXPECT_HRESULT_SUCCEEDED(expr) \ - EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) +#define EXPECT_HRESULT_SUCCEEDED(expr) \ + EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) -# define ASSERT_HRESULT_SUCCEEDED(expr) \ - ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) +#define ASSERT_HRESULT_SUCCEEDED(expr) \ + ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) -# define EXPECT_HRESULT_FAILED(expr) \ - EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) +#define EXPECT_HRESULT_FAILED(expr) \ + EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) -# define ASSERT_HRESULT_FAILED(expr) \ - ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) +#define ASSERT_HRESULT_FAILED(expr) \ + ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) #endif // GTEST_OS_WINDOWS @@ -2219,9 +2017,9 @@ GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2, // ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed"; // #define ASSERT_NO_FATAL_FAILURE(statement) \ - GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_) + GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_) #define EXPECT_NO_FATAL_FAILURE(statement) \ - GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_) + GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_) // Causes a trace (including the given source file path and line number, // and the given message) to be included in every test failure message generated @@ -2263,7 +2061,8 @@ class GTEST_API_ ScopedTrace { private: void PushTrace(const char* file, int line, std::string message); - GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); + ScopedTrace(const ScopedTrace&) = delete; + ScopedTrace& operator=(const ScopedTrace&) = delete; } GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its // c'tor and d'tor. Therefore it doesn't // need to be used otherwise. 
@@ -2283,9 +2082,9 @@ class GTEST_API_ ScopedTrace { // Assuming that each thread maintains its own stack of traces. // Therefore, a SCOPED_TRACE() would (correctly) only affect the // assertions in its own thread. -#define SCOPED_TRACE(message) \ - ::testing::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\ - __FILE__, __LINE__, (message)) +#define SCOPED_TRACE(message) \ + ::testing::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)( \ + __FILE__, __LINE__, (message)) // Compile-time assertion for type equality. // StaticAssertTypeEq() compiles if and only if type1 and type2 @@ -2383,20 +2182,19 @@ constexpr bool StaticAssertTypeEq() noexcept { // EXPECT_EQ(a_.size(), 0); // EXPECT_EQ(b_.size(), 1); // } -// -// GOOGLETEST_CM0011 DO NOT DELETE -#if !GTEST_DONT_DEFINE_TEST -#define TEST_F(test_fixture, test_name)\ +#define GTEST_TEST_F(test_fixture, test_name) \ GTEST_TEST_(test_fixture, test_name, test_fixture, \ ::testing::internal::GetTypeId()) -#endif // !GTEST_DONT_DEFINE_TEST +#if !GTEST_DONT_DEFINE_TEST_F +#define TEST_F(test_fixture, test_name) GTEST_TEST_F(test_fixture, test_name) +#endif // Returns a path to temporary directory. // Tries to determine an appropriate directory for the platform. GTEST_API_ std::string TempDir(); #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif // Dynamically registers a test with the framework. @@ -2450,6 +2248,7 @@ GTEST_API_ std::string TempDir(); // } // ... // int main(int argc, char** argv) { +// ::testing::InitGoogleTest(&argc, argv); // std::vector values_to_test = LoadValuesFromConfig(); // RegisterMyTests(values_to_test); // ... @@ -2491,9 +2290,7 @@ TestInfo* RegisterTest(const char* test_suite_name, const char* test_name, // namespace and has an all-caps name. 
int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_; -inline int RUN_ALL_TESTS() { - return ::testing::UnitTest::GetInstance()->Run(); -} +inline int RUN_ALL_TESTS() { return ::testing::UnitTest::GetInstance()->Run(); } GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 diff --git a/ext/googletest/googletest/include/gtest/gtest_pred_impl.h b/ext/googletest/googletest/include/gtest/gtest_pred_impl.h index 5029a9bb02..47a24aa687 100644 --- a/ext/googletest/googletest/include/gtest/gtest_pred_impl.h +++ b/ext/googletest/googletest/include/gtest/gtest_pred_impl.h @@ -26,17 +26,19 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is AUTOMATICALLY GENERATED on 01/02/2019 by command -// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! // // Implements a family of generic predicate assertion macros. -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ -#include "gtest/gtest.h" +#include "gtest/gtest-assertion-result.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" namespace testing { @@ -72,22 +74,18 @@ namespace testing { // GTEST_ASSERT_ is the basic statement to which all of the assertions // in this file reduce. Don't use this in your code. -#define GTEST_ASSERT_(expression, on_failure) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ +#define GTEST_ASSERT_(expression, on_failure) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ if (const ::testing::AssertionResult gtest_ar = (expression)) \ - ; \ - else \ + ; \ + else \ on_failure(gtest_ar.failure_message()) - // Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use // this in your code. 
-template -AssertionResult AssertPred1Helper(const char* pred_text, - const char* e1, - Pred pred, - const T1& v1) { +template +AssertionResult AssertPred1Helper(const char* pred_text, const char* e1, + Pred pred, const T1& v1) { if (pred(v1)) return AssertionSuccess(); return AssertionFailure() @@ -98,40 +96,27 @@ AssertionResult AssertPred1Helper(const char* pred_text, // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1. // Don't use this in your code. -#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\ - GTEST_ASSERT_(pred_format(#v1, v1), \ - on_failure) +#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure) \ + GTEST_ASSERT_(pred_format(#v1, v1), on_failure) // Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use // this in your code. -#define GTEST_PRED1_(pred, v1, on_failure)\ - GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \ - #v1, \ - pred, \ - v1), on_failure) +#define GTEST_PRED1_(pred, v1, on_failure) \ + GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, #v1, pred, v1), on_failure) // Unary predicate assertion macros. #define EXPECT_PRED_FORMAT1(pred_format, v1) \ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_) -#define EXPECT_PRED1(pred, v1) \ - GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_) #define ASSERT_PRED_FORMAT1(pred_format, v1) \ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_) -#define ASSERT_PRED1(pred, v1) \ - GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_) - - +#define ASSERT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_) // Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use // this in your code. 
-template -AssertionResult AssertPred2Helper(const char* pred_text, - const char* e1, - const char* e2, - Pred pred, - const T1& v1, +template +AssertionResult AssertPred2Helper(const char* pred_text, const char* e1, + const char* e2, Pred pred, const T1& v1, const T2& v2) { if (pred(v1, v2)) return AssertionSuccess(); @@ -145,19 +130,14 @@ AssertionResult AssertPred2Helper(const char* pred_text, // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2. // Don't use this in your code. -#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\ - GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \ - on_failure) +#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure) \ + GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), on_failure) // Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use // this in your code. -#define GTEST_PRED2_(pred, v1, v2, on_failure)\ - GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \ - #v1, \ - #v2, \ - pred, \ - v1, \ - v2), on_failure) +#define GTEST_PRED2_(pred, v1, v2, on_failure) \ + GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, #v1, #v2, pred, v1, v2), \ + on_failure) // Binary predicate assertion macros. #define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \ @@ -169,22 +149,12 @@ AssertionResult AssertPred2Helper(const char* pred_text, #define ASSERT_PRED2(pred, v1, v2) \ GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_) - - // Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use // this in your code. 
-template -AssertionResult AssertPred3Helper(const char* pred_text, - const char* e1, - const char* e2, - const char* e3, - Pred pred, - const T1& v1, - const T2& v2, - const T3& v3) { +template +AssertionResult AssertPred3Helper(const char* pred_text, const char* e1, + const char* e2, const char* e3, Pred pred, + const T1& v1, const T2& v2, const T3& v3) { if (pred(v1, v2, v3)) return AssertionSuccess(); return AssertionFailure() @@ -198,21 +168,15 @@ AssertionResult AssertPred3Helper(const char* pred_text, // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3. // Don't use this in your code. -#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\ - GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \ - on_failure) +#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure) \ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), on_failure) // Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use // this in your code. -#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\ - GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \ - #v1, \ - #v2, \ - #v3, \ - pred, \ - v1, \ - v2, \ - v3), on_failure) +#define GTEST_PRED3_(pred, v1, v2, v3, on_failure) \ + GTEST_ASSERT_( \ + ::testing::AssertPred3Helper(#pred, #v1, #v2, #v3, pred, v1, v2, v3), \ + on_failure) // Ternary predicate assertion macros. #define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \ @@ -224,25 +188,13 @@ AssertionResult AssertPred3Helper(const char* pred_text, #define ASSERT_PRED3(pred, v1, v2, v3) \ GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_) - - // Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use // this in your code. 
-template -AssertionResult AssertPred4Helper(const char* pred_text, - const char* e1, - const char* e2, - const char* e3, - const char* e4, - Pred pred, - const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4) { +template +AssertionResult AssertPred4Helper(const char* pred_text, const char* e1, + const char* e2, const char* e3, + const char* e4, Pred pred, const T1& v1, + const T2& v2, const T3& v3, const T4& v4) { if (pred(v1, v2, v3, v4)) return AssertionSuccess(); return AssertionFailure() @@ -257,23 +209,15 @@ AssertionResult AssertPred4Helper(const char* pred_text, // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4. // Don't use this in your code. -#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\ - GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \ - on_failure) +#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure) \ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), on_failure) // Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use // this in your code. -#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\ - GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \ - #v1, \ - #v2, \ - #v3, \ - #v4, \ - pred, \ - v1, \ - v2, \ - v3, \ - v4), on_failure) +#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure) \ + GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, #v1, #v2, #v3, #v4, pred, \ + v1, v2, v3, v4), \ + on_failure) // 4-ary predicate assertion macros. #define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ @@ -285,28 +229,15 @@ AssertionResult AssertPred4Helper(const char* pred_text, #define ASSERT_PRED4(pred, v1, v2, v3, v4) \ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_) - - // Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use // this in your code. 
-template -AssertionResult AssertPred5Helper(const char* pred_text, - const char* e1, - const char* e2, - const char* e3, - const char* e4, - const char* e5, - Pred pred, - const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4, - const T5& v5) { +AssertionResult AssertPred5Helper(const char* pred_text, const char* e1, + const char* e2, const char* e3, + const char* e4, const char* e5, Pred pred, + const T1& v1, const T2& v2, const T3& v3, + const T4& v4, const T5& v5) { if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); return AssertionFailure() @@ -322,25 +253,16 @@ AssertionResult AssertPred5Helper(const char* pred_text, // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5. // Don't use this in your code. -#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\ +#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \ on_failure) // Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use // this in your code. -#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ - GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ - #v1, \ - #v2, \ - #v3, \ - #v4, \ - #v5, \ - pred, \ - v1, \ - v2, \ - v3, \ - v4, \ - v5), on_failure) +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, \ + pred, v1, v2, v3, v4, v5), \ + on_failure) // 5-ary predicate assertion macros. 
#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ @@ -352,8 +274,6 @@ AssertionResult AssertPred5Helper(const char* pred_text, #define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) - - } // namespace testing #endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ diff --git a/ext/googletest/googletest/include/gtest/gtest_prod.h b/ext/googletest/googletest/include/gtest/gtest_prod.h index 38b9d85a51..1f37dc31c3 100644 --- a/ext/googletest/googletest/include/gtest/gtest_prod.h +++ b/ext/googletest/googletest/include/gtest/gtest_prod.h @@ -27,9 +27,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Google C++ Testing and Mocking Framework definitions useful in production code. -// GOOGLETEST_CM0003 DO NOT DELETE +// Google C++ Testing and Mocking Framework definitions useful in production +// code. #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ @@ -55,7 +54,7 @@ // Note: The test class must be in the same namespace as the class being tested. // For example, putting MyClassTest in an anonymous namespace will not work. -#define FRIEND_TEST(test_case_name, test_name)\ -friend class test_case_name##_##test_name##_Test +#define FRIEND_TEST(test_case_name, test_name) \ + friend class test_case_name##_##test_name##_Test #endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ diff --git a/ext/googletest/googletest/include/gtest/internal/custom/README.md b/ext/googletest/googletest/include/gtest/internal/custom/README.md index 0af3539abf..cb49e2c754 100644 --- a/ext/googletest/googletest/include/gtest/internal/custom/README.md +++ b/ext/googletest/googletest/include/gtest/internal/custom/README.md @@ -15,20 +15,6 @@ The custom directory is an injection point for custom user configurations. 
The following macros can be defined: -### Flag related macros: - -* `GTEST_FLAG(flag_name)` -* `GTEST_USE_OWN_FLAGFILE_FLAG_` - Define to 0 when the system provides its - own flagfile flag parsing. -* `GTEST_DECLARE_bool_(name)` -* `GTEST_DECLARE_int32_(name)` -* `GTEST_DECLARE_string_(name)` -* `GTEST_DEFINE_bool_(name, default_val, doc)` -* `GTEST_DEFINE_int32_(name, default_val, doc)` -* `GTEST_DEFINE_string_(name, default_val, doc)` -* `GTEST_FLAG_GET(flag_name)` -* `GTEST_FLAG_SET(flag_name, value)` - ### Logging: * `GTEST_LOG_(severity)` diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h b/ext/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h index 44277c3869..45580ae805 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h @@ -26,22 +26,26 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines internal utilities needed for implementing // death tests. They are subject to change without notice. 
-// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ +#include + +#include + #include "gtest/gtest-matchers.h" #include "gtest/internal/gtest-internal.h" -#include -#include - GTEST_DECLARE_string_(internal_run_death_test); namespace testing { @@ -83,16 +87,18 @@ class GTEST_API_ DeathTest { static bool Create(const char* statement, Matcher matcher, const char* file, int line, DeathTest** test); DeathTest(); - virtual ~DeathTest() { } + virtual ~DeathTest() {} // A helper class that aborts a death test when it's deleted. class ReturnSentinel { public: - explicit ReturnSentinel(DeathTest* test) : test_(test) { } + explicit ReturnSentinel(DeathTest* test) : test_(test) {} ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: DeathTest* const test_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + ReturnSentinel(const ReturnSentinel&) = delete; + ReturnSentinel& operator=(const ReturnSentinel&) = delete; } GTEST_ATTRIBUTE_UNUSED_; // An enumeration of possible roles that may be taken when a death @@ -137,7 +143,8 @@ class GTEST_API_ DeathTest { // A string containing a description of the outcome of the last death test. static std::string last_death_test_message_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); + DeathTest(const DeathTest&) = delete; + DeathTest& operator=(const DeathTest&) = delete; }; GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 @@ -145,7 +152,7 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 // Factory interface for death tests. May be mocked out for testing. 
class DeathTestFactory { public: - virtual ~DeathTestFactory() { } + virtual ~DeathTestFactory() {} virtual bool Create(const char* statement, Matcher matcher, const char* file, int line, DeathTest** test) = 0; @@ -186,28 +193,28 @@ inline Matcher MakeDeathTestMatcher( // Traps C++ exceptions escaping statement and reports them as test // failures. Note that trapping SEH exceptions is not implemented here. -# if GTEST_HAS_EXCEPTIONS -# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ - try { \ - GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ - } catch (const ::std::exception& gtest_exception) { \ - fprintf(\ - stderr, \ - "\n%s: Caught std::exception-derived exception escaping the " \ - "death test statement. Exception message: %s\n", \ +#if GTEST_HAS_EXCEPTIONS +#define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (const ::std::exception& gtest_exception) { \ + fprintf( \ + stderr, \ + "\n%s: Caught std::exception-derived exception escaping the " \ + "death test statement. Exception message: %s\n", \ ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \ - gtest_exception.what()); \ - fflush(stderr); \ + gtest_exception.what()); \ + fflush(stderr); \ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ - } catch (...) { \ + } catch (...) { \ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ } -# else -# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ +#else +#define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) -# endif +#endif // This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*, // ASSERT_EXIT*, and EXPECT_EXIT*. @@ -263,16 +270,12 @@ inline Matcher MakeDeathTestMatcher( // RUN_ALL_TESTS was called. 
class InternalRunDeathTestFlag { public: - InternalRunDeathTestFlag(const std::string& a_file, - int a_line, - int an_index, + InternalRunDeathTestFlag(const std::string& a_file, int a_line, int an_index, int a_write_fd) - : file_(a_file), line_(a_line), index_(an_index), - write_fd_(a_write_fd) {} + : file_(a_file), line_(a_line), index_(an_index), write_fd_(a_write_fd) {} ~InternalRunDeathTestFlag() { - if (write_fd_ >= 0) - posix::Close(write_fd_); + if (write_fd_ >= 0) posix::Close(write_fd_); } const std::string& file() const { return file_; } @@ -286,7 +289,8 @@ class InternalRunDeathTestFlag { int index_; int write_fd_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); + InternalRunDeathTestFlag(const InternalRunDeathTestFlag&) = delete; + InternalRunDeathTestFlag& operator=(const InternalRunDeathTestFlag&) = delete; }; // Returns a newly created InternalRunDeathTestFlag object with fields diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-filepath.h b/ext/googletest/googletest/include/gtest/internal/gtest-filepath.h index 0c033abc34..a2a60a962b 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-filepath.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-filepath.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Google Test filepath utilities // // This header file declares classes and functions used internally by @@ -35,7 +35,9 @@ // This file is #included in gtest/internal/gtest-internal.h. // Do not include this header file separately! 
-// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ @@ -61,8 +63,8 @@ namespace internal { class GTEST_API_ FilePath { public: - FilePath() : pathname_("") { } - FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + FilePath() : pathname_("") {} + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) {} explicit FilePath(const std::string& pathname) : pathname_(pathname) { Normalize(); @@ -73,9 +75,7 @@ class GTEST_API_ FilePath { return *this; } - void Set(const FilePath& rhs) { - pathname_ = rhs.pathname_; - } + void Set(const FilePath& rhs) { pathname_ = rhs.pathname_; } const std::string& string() const { return pathname_; } const char* c_str() const { return pathname_.c_str(); } @@ -88,8 +88,7 @@ class GTEST_API_ FilePath { // than zero (e.g., 12), returns "dir/test_12.xml". // On Windows platform, uses \ as the separator rather than /. static FilePath MakeFileName(const FilePath& directory, - const FilePath& base_name, - int number, + const FilePath& base_name, int number, const char* extension); // Given directory = "dir", relative_path = "test.xml", diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-internal.h b/ext/googletest/googletest/include/gtest/internal/gtest-internal.h index f8cbdbd81d..9b04e4c85f 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-internal.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-internal.h @@ -26,13 +26,15 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file declares functions and macros used internally by // Google Test. They are subject to change without notice. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ @@ -40,19 +42,20 @@ #include "gtest/internal/gtest-port.h" #if GTEST_OS_LINUX -# include -# include -# include -# include +#include +#include +#include +#include #endif // GTEST_OS_LINUX #if GTEST_HAS_EXCEPTIONS -# include +#include #endif #include #include #include + #include #include #include @@ -76,7 +79,7 @@ // the current line number. For more details, see // http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 #define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) -#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo##bar // Stringifies its argument. // Work around a bug in visual studio which doesn't accept code like this: @@ -98,21 +101,21 @@ namespace testing { // Forward declarations. -class AssertionResult; // Result of an assertion. -class Message; // Represents a failure message. -class Test; // Represents a test. -class TestInfo; // Information about a test. -class TestPartResult; // Result of a test part. -class UnitTest; // A collection of test suites. +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test suites. template ::std::string PrintToString(const T& value); namespace internal { -struct TraceInfo; // Information about a trace point. 
-class TestInfoImpl; // Opaque implementation of TestInfo -class UnitTestImpl; // Opaque implementation of UnitTest +struct TraceInfo; // Information about a trace point. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest // The text used in failure messages to indicate the start of the // stack trace. @@ -121,6 +124,7 @@ GTEST_API_ extern const char kStackTraceMarker[]; // An IgnoredValue object can be implicitly constructed from ANY value. class IgnoredValue { struct Sink {}; + public: // This constructor template allows any value to be implicitly // converted to IgnoredValue. The object has no data member and @@ -136,13 +140,13 @@ class IgnoredValue { }; // Appends the user-supplied message to the Google-Test-generated message. -GTEST_API_ std::string AppendUserMessage( - const std::string& gtest_msg, const Message& user_msg); +GTEST_API_ std::string AppendUserMessage(const std::string& gtest_msg, + const Message& user_msg); #if GTEST_HAS_EXCEPTIONS -GTEST_DISABLE_MSC_WARNINGS_PUSH_(4275 \ -/* an exported class was derived from a class that was not exported */) +GTEST_DISABLE_MSC_WARNINGS_PUSH_( + 4275 /* an exported class was derived from a class that was not exported */) // This exception is thrown by (and only by) a failed Google Test // assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions @@ -181,14 +185,6 @@ GTEST_API_ std::string CreateUnifiedDiff(const std::vector& left, } // namespace edit_distance -// Calculate the diff between 'left' and 'right' and return it in unified diff -// format. -// If not null, stores in 'total_line_count' the total number of lines found -// in left + right. -GTEST_API_ std::string DiffStrings(const std::string& left, - const std::string& right, - size_t* total_line_count); - // Constructs and returns the message for an equality assertion // (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. 
// @@ -212,10 +208,8 @@ GTEST_API_ AssertionResult EqFailure(const char* expected_expression, // Constructs a failure message for Boolean assertions such as EXPECT_TRUE. GTEST_API_ std::string GetBoolAssertionFailureMessage( - const AssertionResult& assertion_result, - const char* expression_text, - const char* actual_predicate_value, - const char* expected_predicate_value); + const AssertionResult& assertion_result, const char* expression_text, + const char* actual_predicate_value, const char* expected_predicate_value); // This template class represents an IEEE floating-point number // (either single-precision or double-precision, depending on the @@ -256,11 +250,11 @@ class FloatingPoint { // Constants. // # of bits in a number. - static const size_t kBitCount = 8*sizeof(RawType); + static const size_t kBitCount = 8 * sizeof(RawType); // # of fraction bits in a number. static const size_t kFractionBitCount = - std::numeric_limits::digits - 1; + std::numeric_limits::digits - 1; // # of exponent bits in a number. static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount; @@ -269,8 +263,8 @@ class FloatingPoint { static const Bits kSignBitMask = static_cast(1) << (kBitCount - 1); // The mask for the fraction bits. - static const Bits kFractionBitMask = - ~static_cast(0) >> (kExponentBitCount + 1); + static const Bits kFractionBitMask = ~static_cast(0) >> + (kExponentBitCount + 1); // The mask for the exponent bits. static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask); @@ -309,9 +303,7 @@ class FloatingPoint { } // Returns the floating-point number that represent positive infinity. - static RawType Infinity() { - return ReinterpretBits(kExponentBitMask); - } + static RawType Infinity() { return ReinterpretBits(kExponentBitMask); } // Returns the maximum representable finite floating-point number. static RawType Max(); @@ -319,7 +311,7 @@ class FloatingPoint { // Non-static methods // Returns the bits that represents this number. 
- const Bits &bits() const { return u_.bits_; } + const Bits& bits() const { return u_.bits_; } // Returns the exponent bits of this number. Bits exponent_bits() const { return kExponentBitMask & u_.bits_; } @@ -348,8 +340,8 @@ class FloatingPoint { // a NAN must return false. if (is_nan() || rhs.is_nan()) return false; - return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) - <= kMaxUlps; + return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) <= + kMaxUlps; } private: @@ -374,7 +366,7 @@ class FloatingPoint { // // Read http://en.wikipedia.org/wiki/Signed_number_representations // for more details on signed number representations. - static Bits SignAndMagnitudeToBiased(const Bits &sam) { + static Bits SignAndMagnitudeToBiased(const Bits& sam) { if (kSignBitMask & sam) { // sam represents a negative number. return ~sam + 1; @@ -386,8 +378,8 @@ class FloatingPoint { // Given two numbers in the sign-and-magnitude representation, // returns the distance between them as an unsigned number. - static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, - const Bits &sam2) { + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits& sam1, + const Bits& sam2) { const Bits biased1 = SignAndMagnitudeToBiased(sam1); const Bits biased2 = SignAndMagnitudeToBiased(sam2); return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); @@ -399,9 +391,13 @@ class FloatingPoint { // We cannot use std::numeric_limits::max() as it clashes with the max() // macro defined by . template <> -inline float FloatingPoint::Max() { return FLT_MAX; } +inline float FloatingPoint::Max() { + return FLT_MAX; +} template <> -inline double FloatingPoint::Max() { return DBL_MAX; } +inline double FloatingPoint::Max() { + return DBL_MAX; +} // Typedefs the instances of the FloatingPoint template class that we // care to use. 
@@ -461,7 +457,8 @@ class TestFactoryBase { TestFactoryBase() {} private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase); + TestFactoryBase(const TestFactoryBase&) = delete; + TestFactoryBase& operator=(const TestFactoryBase&) = delete; }; // This class provides implementation of TeastFactoryBase interface. @@ -510,11 +507,11 @@ inline SetUpTearDownSuiteFuncType GetNotDefaultOrNull( template // Note that SuiteApiResolver inherits from T because -// SetUpTestSuite()/TearDownTestSuite() could be protected. Ths way +// SetUpTestSuite()/TearDownTestSuite() could be protected. This way // SuiteApiResolver can access them. struct SuiteApiResolver : T { // testing::Test is only forward declared at this point. So we make it a - // dependend class for the compiler to be OK with it. + // dependent class for the compiler to be OK with it. using Test = typename std::conditional::type; @@ -654,7 +651,8 @@ inline const char* SkipComma(const char* str) { if (comma == nullptr) { return nullptr; } - while (IsSpace(*(++comma))) {} + while (IsSpace(*(++comma))) { + } return comma; } @@ -668,7 +666,7 @@ inline std::string GetPrefixUntilComma(const char* str) { // Splits a given string on a given delimiter, populating a given // vector with the fields. void SplitString(const ::std::string& str, char delimiter, - ::std::vector< ::std::string>* dest); + ::std::vector<::std::string>* dest); // The default argument to the template below for the case when the user does // not provide a name generator. 
@@ -781,13 +779,13 @@ class TypeParameterizedTestSuite { const std::vector& type_names = GenerateNames()) { RegisterTypeParameterizedTestSuiteInstantiation(case_name); - std::string test_name = StripTrailingSpaces( - GetPrefixUntilComma(test_names)); + std::string test_name = + StripTrailingSpaces(GetPrefixUntilComma(test_names)); if (!state->TestExists(test_name)) { fprintf(stderr, "Failed to get code location for test %s.%s at %s.", case_name, test_name.c_str(), - FormatFileLocation(code_location.file.c_str(), - code_location.line).c_str()); + FormatFileLocation(code_location.file.c_str(), code_location.line) + .c_str()); fflush(stderr); posix::Abort(); } @@ -831,8 +829,8 @@ class TypeParameterizedTestSuite { // For example, if Foo() calls Bar(), which in turn calls // GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in // the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. -GTEST_API_ std::string GetCurrentOsStackTraceExceptTop( - UnitTest* unit_test, int skip_count); +GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, + int skip_count); // Helpers for suppressing warnings on unreachable code or constant // condition. @@ -881,7 +879,8 @@ class GTEST_API_ Random { private: uint32_t state_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); + Random(const Random&) = delete; + Random& operator=(const Random&) = delete; }; // Turns const U&, U&, const U, and U all into U. @@ -954,7 +953,9 @@ IsContainer IsContainerTest(int /* dummy */) { typedef char IsNotContainer; template -IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } +IsNotContainer IsContainerTest(long /* dummy */) { + return '\0'; +} // Trait to detect whether a type T is a hash table. // The heuristic used is that the type contains an inner type `hasher` and does @@ -1017,11 +1018,13 @@ bool ArrayEq(const T* lhs, size_t size, const U* rhs); // This generic version is used when k is 0. 
template -inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } +inline bool ArrayEq(const T& lhs, const U& rhs) { + return lhs == rhs; +} // This overload is used when k >= 1. template -inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { +inline bool ArrayEq(const T (&lhs)[N], const U (&rhs)[N]) { return internal::ArrayEq(lhs, N, rhs); } @@ -1031,8 +1034,7 @@ inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { template bool ArrayEq(const T* lhs, size_t size, const U* rhs) { for (size_t i = 0; i != size; i++) { - if (!internal::ArrayEq(lhs[i], rhs[i])) - return false; + if (!internal::ArrayEq(lhs[i], rhs[i])) return false; } return true; } @@ -1042,8 +1044,7 @@ bool ArrayEq(const T* lhs, size_t size, const U* rhs) { template Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { for (Iter it = begin; it != end; ++it) { - if (internal::ArrayEq(*it, elem)) - return it; + if (internal::ArrayEq(*it, elem)) return it; } return end; } @@ -1057,11 +1058,13 @@ void CopyArray(const T* from, size_t size, U* to); // This generic version is used when k is 0. template -inline void CopyArray(const T& from, U* to) { *to = from; } +inline void CopyArray(const T& from, U* to) { + *to = from; +} // This overload is used when k >= 1. template -inline void CopyArray(const T(&from)[N], U(*to)[N]) { +inline void CopyArray(const T (&from)[N], U (*to)[N]) { internal::CopyArray(from, N, *to); } @@ -1114,8 +1117,7 @@ class NativeArray { } ~NativeArray() { - if (clone_ != &NativeArray::InitRef) - delete[] array_; + if (clone_ != &NativeArray::InitRef) delete[] array_; } // STL-style container methods. 
@@ -1123,8 +1125,7 @@ class NativeArray { const_iterator begin() const { return array_; } const_iterator end() const { return array_ + size_; } bool operator==(const NativeArray& rhs) const { - return size() == rhs.size() && - ArrayEq(begin(), size(), rhs.begin()); + return size() == rhs.size() && ArrayEq(begin(), size(), rhs.begin()); } private: @@ -1335,9 +1336,9 @@ struct tuple_size> #endif } // namespace std -#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ - ::testing::internal::AssertHelper(result_type, file, line, message) \ - = ::testing::Message() +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) = \ + ::testing::Message() #define GTEST_MESSAGE_(message, result_type) \ GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) @@ -1458,103 +1459,112 @@ class NeverThrown { #endif // GTEST_HAS_EXCEPTIONS -#define GTEST_TEST_NO_THROW_(statement, fail) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (::testing::internal::TrueWithString gtest_msg{}) { \ - try { \ - GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ - } \ - GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() \ - catch (...) { \ - gtest_msg.value = "it throws."; \ - goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ - } \ - } else \ - GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \ - fail(("Expected: " #statement " doesn't throw an exception.\n" \ - " Actual: " + gtest_msg.value).c_str()) +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::TrueWithString gtest_msg{}) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() \ + catch (...) 
{ \ + gtest_msg.value = "it throws."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__) \ + : fail(("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: " + \ + gtest_msg.value) \ + .c_str()) -#define GTEST_TEST_ANY_THROW_(statement, fail) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (::testing::internal::AlwaysTrue()) { \ - bool gtest_caught_any = false; \ - try { \ - GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ - } \ - catch (...) { \ - gtest_caught_any = true; \ - } \ - if (!gtest_caught_any) { \ +#define GTEST_TEST_ANY_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + bool gtest_caught_any = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (...) { \ + gtest_caught_any = true; \ + } \ + if (!gtest_caught_any) { \ goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \ - } \ - } else \ - GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \ - fail("Expected: " #statement " throws an exception.\n" \ - " Actual: it doesn't.") - + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__) \ + : fail("Expected: " #statement \ + " throws an exception.\n" \ + " Actual: it doesn't.") // Implements Boolean test assertions such as EXPECT_TRUE. expression can be // either a boolean expression or an AssertionResult. text is a textual // representation of expression as it was passed into the EXPECT_TRUE. 
#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (const ::testing::AssertionResult gtest_ar_ = \ - ::testing::AssertionResult(expression)) \ - ; \ - else \ - fail(::testing::internal::GetBoolAssertionFailureMessage(\ - gtest_ar_, text, #actual, #expected).c_str()) + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar_ = \ + ::testing::AssertionResult(expression)) \ + ; \ + else \ + fail(::testing::internal::GetBoolAssertionFailureMessage( \ + gtest_ar_, text, #actual, #expected) \ + .c_str()) -#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (::testing::internal::AlwaysTrue()) { \ +#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \ - GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ - if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ - goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ - } \ - } else \ - GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \ - fail("Expected: " #statement " doesn't generate new fatal " \ - "failures in the current thread.\n" \ - " Actual: it does.") + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__) \ + : fail("Expected: " #statement \ + " doesn't generate new fatal " \ + "failures in the current thread.\n" \ + " Actual: it does.") // Expands to the name of the class that implements the given test. #define GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ test_suite_name##_##test_name##_Test // Helper macro for defining tests. 
-#define GTEST_TEST_(test_suite_name, test_name, parent_class, parent_id) \ - static_assert(sizeof(GTEST_STRINGIFY_(test_suite_name)) > 1, \ - "test_suite_name must not be empty"); \ - static_assert(sizeof(GTEST_STRINGIFY_(test_name)) > 1, \ - "test_name must not be empty"); \ - class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ - : public parent_class { \ - public: \ - GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() = default; \ - ~GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() override = default; \ - GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ - test_name)); \ - GTEST_DISALLOW_MOVE_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ - test_name)); \ - \ - private: \ - void TestBody() override; \ - static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \ - }; \ - \ - ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_suite_name, \ - test_name)::test_info_ = \ - ::testing::internal::MakeAndRegisterTestInfo( \ - #test_suite_name, #test_name, nullptr, nullptr, \ - ::testing::internal::CodeLocation(__FILE__, __LINE__), (parent_id), \ - ::testing::internal::SuiteApiResolver< \ - parent_class>::GetSetUpCaseOrSuite(__FILE__, __LINE__), \ - ::testing::internal::SuiteApiResolver< \ - parent_class>::GetTearDownCaseOrSuite(__FILE__, __LINE__), \ - new ::testing::internal::TestFactoryImpl); \ +#define GTEST_TEST_(test_suite_name, test_name, parent_class, parent_id) \ + static_assert(sizeof(GTEST_STRINGIFY_(test_suite_name)) > 1, \ + "test_suite_name must not be empty"); \ + static_assert(sizeof(GTEST_STRINGIFY_(test_name)) > 1, \ + "test_name must not be empty"); \ + class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + : public parent_class { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() = default; \ + ~GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() override = default; \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + (const 
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) &) = delete; \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) & operator=( \ + const GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name) &) = delete; /* NOLINT */ \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ + (GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) &&) noexcept = delete; \ + GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) & operator=( \ + GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name) &&) noexcept = delete; /* NOLINT */ \ + \ + private: \ + void TestBody() override; \ + static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \ + }; \ + \ + ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_suite_name, \ + test_name)::test_info_ = \ + ::testing::internal::MakeAndRegisterTestInfo( \ + #test_suite_name, #test_name, nullptr, nullptr, \ + ::testing::internal::CodeLocation(__FILE__, __LINE__), (parent_id), \ + ::testing::internal::SuiteApiResolver< \ + parent_class>::GetSetUpCaseOrSuite(__FILE__, __LINE__), \ + ::testing::internal::SuiteApiResolver< \ + parent_class>::GetTearDownCaseOrSuite(__FILE__, __LINE__), \ + new ::testing::internal::TestFactoryImpl); \ void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody() #endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-param-util.h b/ext/googletest/googletest/include/gtest/internal/gtest-param-util.h index c2ef6e3124..e7af2f904a 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-param-util.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-param-util.h @@ -27,10 +27,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Type and function utilities for implementing parameterized tests. 
-// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ @@ -46,19 +47,18 @@ #include #include -#include "gtest/internal/gtest-internal.h" -#include "gtest/internal/gtest-port.h" #include "gtest/gtest-printers.h" #include "gtest/gtest-test-part.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" namespace testing { // Input to a parameterized test name generator, describing a test parameter. // Consists of the parameter value and the integer parameter index. template struct TestParamInfo { - TestParamInfo(const ParamType& a_param, size_t an_index) : - param(a_param), - index(an_index) {} + TestParamInfo(const ParamType& a_param, size_t an_index) + : param(a_param), index(an_index) {} ParamType param; size_t index; }; @@ -84,8 +84,10 @@ namespace internal { GTEST_API_ void ReportInvalidTestSuiteType(const char* test_suite_name, CodeLocation code_location); -template class ParamGeneratorInterface; -template class ParamGenerator; +template +class ParamGeneratorInterface; +template +class ParamGenerator; // Interface for iterating over elements provided by an implementation // of ParamGeneratorInterface. @@ -129,8 +131,7 @@ class ParamIterator { // ParamIterator assumes ownership of the impl_ pointer. 
ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {} ParamIterator& operator=(const ParamIterator& other) { - if (this != &other) - impl_.reset(other.impl_->Clone()); + if (this != &other) impl_.reset(other.impl_->Clone()); return *this; } @@ -157,7 +158,7 @@ class ParamIterator { private: friend class ParamGenerator; explicit ParamIterator(ParamIteratorInterface* impl) : impl_(impl) {} - std::unique_ptr > impl_; + std::unique_ptr> impl_; }; // ParamGeneratorInterface is the binary interface to access generators @@ -179,7 +180,7 @@ class ParamGeneratorInterface { // This class implements copy initialization semantics and the contained // ParamGeneratorInterface instance is shared among all copies // of the original object. This is possible because that instance is immutable. -template +template class ParamGenerator { public: typedef ParamIterator iterator; @@ -196,7 +197,7 @@ class ParamGenerator { iterator end() const { return iterator(impl_->End()); } private: - std::shared_ptr > impl_; + std::shared_ptr> impl_; }; // Generates values from a range of two comparable values. Can be used to @@ -207,8 +208,10 @@ template class RangeGenerator : public ParamGeneratorInterface { public: RangeGenerator(T begin, T end, IncrementT step) - : begin_(begin), end_(end), - step_(step), end_index_(CalculateEndIndex(begin, end, step)) {} + : begin_(begin), + end_(end), + step_(step), + end_index_(CalculateEndIndex(begin, end, step)) {} ~RangeGenerator() override {} ParamIteratorInterface* Begin() const override { @@ -251,7 +254,9 @@ class RangeGenerator : public ParamGeneratorInterface { private: Iterator(const Iterator& other) : ParamIteratorInterface(), - base_(other.base_), value_(other.value_), index_(other.index_), + base_(other.base_), + value_(other.value_), + index_(other.index_), step_(other.step_) {} // No implementation - assignment is unsupported. 
@@ -263,12 +268,10 @@ class RangeGenerator : public ParamGeneratorInterface { const IncrementT step_; }; // class RangeGenerator::Iterator - static int CalculateEndIndex(const T& begin, - const T& end, + static int CalculateEndIndex(const T& begin, const T& end, const IncrementT& step) { int end_index = 0; - for (T i = begin; i < end; i = static_cast(i + step)) - end_index++; + for (T i = begin; i < end; i = static_cast(i + step)) end_index++; return end_index; } @@ -283,7 +286,6 @@ class RangeGenerator : public ParamGeneratorInterface { const int end_index_; }; // class RangeGenerator - // Generates values from a pair of STL-style iterators. Used in the // ValuesIn() function. The elements are copied from the source range // since the source can be located on the stack, and the generator @@ -341,13 +343,13 @@ class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface { << "The program attempted to compare iterators " << "from different generators." << std::endl; return iterator_ == - CheckedDowncastToActualType(&other)->iterator_; + CheckedDowncastToActualType(&other)->iterator_; } private: Iterator(const Iterator& other) - // The explicit constructor call suppresses a false warning - // emitted by gcc when supplied with the -Wextra option. + // The explicit constructor call suppresses a false warning + // emitted by gcc when supplied with the -Wextra option. 
: ParamIteratorInterface(), base_(other.base_), iterator_(other.iterator_) {} @@ -394,8 +396,8 @@ template class ParameterizedTestFactory : public TestFactoryBase { public: typedef typename TestClass::ParamType ParamType; - explicit ParameterizedTestFactory(ParamType parameter) : - parameter_(parameter) {} + explicit ParameterizedTestFactory(ParamType parameter) + : parameter_(parameter) {} Test* CreateTest() override { TestClass::SetParam(¶meter_); return new TestClass(); @@ -404,7 +406,8 @@ class ParameterizedTestFactory : public TestFactoryBase { private: const ParamType parameter_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory); + ParameterizedTestFactory(const ParameterizedTestFactory&) = delete; + ParameterizedTestFactory& operator=(const ParameterizedTestFactory&) = delete; }; // INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. @@ -440,7 +443,8 @@ class TestMetaFactory } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory); + TestMetaFactory(const TestMetaFactory&) = delete; + TestMetaFactory& operator=(const TestMetaFactory&) = delete; }; // INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. @@ -471,7 +475,10 @@ class ParameterizedTestSuiteInfoBase { ParameterizedTestSuiteInfoBase() {} private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteInfoBase); + ParameterizedTestSuiteInfoBase(const ParameterizedTestSuiteInfoBase&) = + delete; + ParameterizedTestSuiteInfoBase& operator=( + const ParameterizedTestSuiteInfoBase&) = delete; }; // INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. 
@@ -547,8 +554,8 @@ class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { test_it != tests_.end(); ++test_it) { std::shared_ptr test_info = *test_it; for (typename InstantiationContainer::iterator gen_it = - instantiations_.begin(); gen_it != instantiations_.end(); - ++gen_it) { + instantiations_.begin(); + gen_it != instantiations_.end(); ++gen_it) { const std::string& instantiation_name = gen_it->name; ParamGenerator generator((*gen_it->generator)()); ParamNameGeneratorFunc* name_func = gen_it->name_func; @@ -556,7 +563,7 @@ class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { int line = gen_it->line; std::string test_suite_name; - if ( !instantiation_name.empty() ) + if (!instantiation_name.empty()) test_suite_name = instantiation_name + "/"; test_suite_name += test_info->test_suite_base_name; @@ -569,17 +576,16 @@ class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { Message test_name_stream; - std::string param_name = name_func( - TestParamInfo(*param_it, i)); + std::string param_name = + name_func(TestParamInfo(*param_it, i)); GTEST_CHECK_(IsValidParamName(param_name)) << "Parameterized test name '" << param_name - << "' is invalid, in " << file - << " line " << line << std::endl; + << "' is invalid, in " << file << " line " << line << std::endl; GTEST_CHECK_(test_param_names.count(param_name) == 0) - << "Duplicate parameterized test name '" << param_name - << "', in " << file << " line " << line << std::endl; + << "Duplicate parameterized test name '" << param_name << "', in " + << file << " line " << line << std::endl; test_param_names.insert(param_name); @@ -596,15 +602,15 @@ class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { SuiteApiResolver::GetTearDownCaseOrSuite(file, line), test_info->test_meta_factory->CreateTestFactory(*param_it)); } // for param_it - } // for gen_it - } // for test_it + } // for gen_it + } // for test_it if (!generated_instantiations) { // There 
are no generaotrs, or they all generate nothing ... InsertSyntheticTestCase(GetTestSuiteName(), code_location_, !tests_.empty()); } - } // RegisterTests + } // RegisterTests private: // LocalTestInfo structure keeps information about a single test registered @@ -620,42 +626,39 @@ class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { const std::string test_suite_base_name; const std::string test_base_name; - const std::unique_ptr > test_meta_factory; + const std::unique_ptr> test_meta_factory; const CodeLocation code_location; }; - using TestInfoContainer = ::std::vector >; + using TestInfoContainer = ::std::vector>; // Records data received from INSTANTIATE_TEST_SUITE_P macros: // struct InstantiationInfo { - InstantiationInfo(const std::string &name_in, - GeneratorCreationFunc* generator_in, - ParamNameGeneratorFunc* name_func_in, - const char* file_in, - int line_in) - : name(name_in), - generator(generator_in), - name_func(name_func_in), - file(file_in), - line(line_in) {} + InstantiationInfo(const std::string& name_in, + GeneratorCreationFunc* generator_in, + ParamNameGeneratorFunc* name_func_in, const char* file_in, + int line_in) + : name(name_in), + generator(generator_in), + name_func(name_func_in), + file(file_in), + line(line_in) {} - std::string name; - GeneratorCreationFunc* generator; - ParamNameGeneratorFunc* name_func; - const char* file; - int line; + std::string name; + GeneratorCreationFunc* generator; + ParamNameGeneratorFunc* name_func; + const char* file; + int line; }; typedef ::std::vector InstantiationContainer; static bool IsValidParamName(const std::string& name) { // Check for empty string - if (name.empty()) - return false; + if (name.empty()) return false; // Check for invalid characters for (std::string::size_type index = 0; index < name.size(); ++index) { - if (!IsAlNum(name[index]) && name[index] != '_') - return false; + if (!IsAlNum(name[index]) && name[index] != '_') return false; } return true; @@ -666,7 
+669,9 @@ class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase { TestInfoContainer tests_; InstantiationContainer instantiations_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteInfo); + ParameterizedTestSuiteInfo(const ParameterizedTestSuiteInfo&) = delete; + ParameterizedTestSuiteInfo& operator=(const ParameterizedTestSuiteInfo&) = + delete; }; // class ParameterizedTestSuiteInfo // Legacy API is deprecated but still available @@ -709,7 +714,7 @@ class ParameterizedTestSuiteRegistry { // type we are looking for, so we downcast it to that type // without further checks. typed_test_info = CheckedDowncastToActualType< - ParameterizedTestSuiteInfo >(test_suite_info); + ParameterizedTestSuiteInfo>(test_suite_info); } break; } @@ -741,7 +746,10 @@ class ParameterizedTestSuiteRegistry { TestSuiteInfoContainer test_suite_infos_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteRegistry); + ParameterizedTestSuiteRegistry(const ParameterizedTestSuiteRegistry&) = + delete; + ParameterizedTestSuiteRegistry& operator=( + const ParameterizedTestSuiteRegistry&) = delete; }; // Keep track of what type-parameterized test suite are defined and @@ -836,7 +844,8 @@ class CartesianProductGenerator : public ParamIteratorInterface { public: IteratorImpl(const ParamGeneratorInterface* base, - const std::tuple...>& generators, bool is_end) + const std::tuple...>& generators, + bool is_end) : base_(base), begin_(std::get(generators).begin()...), end_(std::get(generators).end()...), diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-port-arch.h b/ext/googletest/googletest/include/gtest/internal/gtest-port-arch.h index 4dcdc89c85..f025db76ad 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-port-arch.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-port-arch.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 
OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the GTEST_OS_* macro. @@ -37,72 +37,72 @@ // Determines the platform on which Google Test is compiled. #ifdef __CYGWIN__ -# define GTEST_OS_CYGWIN 1 -# elif defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__) -# define GTEST_OS_WINDOWS_MINGW 1 -# define GTEST_OS_WINDOWS 1 +#define GTEST_OS_CYGWIN 1 +#elif defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__) +#define GTEST_OS_WINDOWS_MINGW 1 +#define GTEST_OS_WINDOWS 1 #elif defined _WIN32 -# define GTEST_OS_WINDOWS 1 -# ifdef _WIN32_WCE -# define GTEST_OS_WINDOWS_MOBILE 1 -# elif defined(WINAPI_FAMILY) -# include -# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) -# define GTEST_OS_WINDOWS_DESKTOP 1 -# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP) -# define GTEST_OS_WINDOWS_PHONE 1 -# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) -# define GTEST_OS_WINDOWS_RT 1 -# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_TV_TITLE) -# define GTEST_OS_WINDOWS_PHONE 1 -# define GTEST_OS_WINDOWS_TV_TITLE 1 -# else - // WINAPI_FAMILY defined but no known partition matched. - // Default to desktop. -# define GTEST_OS_WINDOWS_DESKTOP 1 -# endif -# else -# define GTEST_OS_WINDOWS_DESKTOP 1 -# endif // _WIN32_WCE +#define GTEST_OS_WINDOWS 1 +#ifdef _WIN32_WCE +#define GTEST_OS_WINDOWS_MOBILE 1 +#elif defined(WINAPI_FAMILY) +#include +#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +#define GTEST_OS_WINDOWS_DESKTOP 1 +#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP) +#define GTEST_OS_WINDOWS_PHONE 1 +#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) +#define GTEST_OS_WINDOWS_RT 1 +#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_TV_TITLE) +#define GTEST_OS_WINDOWS_PHONE 1 +#define GTEST_OS_WINDOWS_TV_TITLE 1 +#else +// WINAPI_FAMILY defined but no known partition matched. 
+// Default to desktop. +#define GTEST_OS_WINDOWS_DESKTOP 1 +#endif +#else +#define GTEST_OS_WINDOWS_DESKTOP 1 +#endif // _WIN32_WCE #elif defined __OS2__ -# define GTEST_OS_OS2 1 +#define GTEST_OS_OS2 1 #elif defined __APPLE__ -# define GTEST_OS_MAC 1 -# include -# if TARGET_OS_IPHONE -# define GTEST_OS_IOS 1 -# endif +#define GTEST_OS_MAC 1 +#include +#if TARGET_OS_IPHONE +#define GTEST_OS_IOS 1 +#endif #elif defined __DragonFly__ -# define GTEST_OS_DRAGONFLY 1 +#define GTEST_OS_DRAGONFLY 1 #elif defined __FreeBSD__ -# define GTEST_OS_FREEBSD 1 +#define GTEST_OS_FREEBSD 1 #elif defined __Fuchsia__ -# define GTEST_OS_FUCHSIA 1 +#define GTEST_OS_FUCHSIA 1 #elif defined(__GNU__) -# define GTEST_OS_GNU_HURD 1 +#define GTEST_OS_GNU_HURD 1 #elif defined(__GLIBC__) && defined(__FreeBSD_kernel__) -# define GTEST_OS_GNU_KFREEBSD 1 +#define GTEST_OS_GNU_KFREEBSD 1 #elif defined __linux__ -# define GTEST_OS_LINUX 1 -# if defined __ANDROID__ -# define GTEST_OS_LINUX_ANDROID 1 -# endif +#define GTEST_OS_LINUX 1 +#if defined __ANDROID__ +#define GTEST_OS_LINUX_ANDROID 1 +#endif #elif defined __MVS__ -# define GTEST_OS_ZOS 1 +#define GTEST_OS_ZOS 1 #elif defined(__sun) && defined(__SVR4) -# define GTEST_OS_SOLARIS 1 +#define GTEST_OS_SOLARIS 1 #elif defined(_AIX) -# define GTEST_OS_AIX 1 +#define GTEST_OS_AIX 1 #elif defined(__hpux) -# define GTEST_OS_HPUX 1 +#define GTEST_OS_HPUX 1 #elif defined __native_client__ -# define GTEST_OS_NACL 1 +#define GTEST_OS_NACL 1 #elif defined __NetBSD__ -# define GTEST_OS_NETBSD 1 +#define GTEST_OS_NETBSD 1 #elif defined __OpenBSD__ -# define GTEST_OS_OPENBSD 1 +#define GTEST_OS_OPENBSD 1 #elif defined __QNX__ -# define GTEST_OS_QNX 1 +#define GTEST_OS_QNX 1 #elif defined(__HAIKU__) #define GTEST_OS_HAIKU 1 #elif defined ESP8266 diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-port.h b/ext/googletest/googletest/include/gtest/internal/gtest-port.h index 524bbeb011..0003d27658 100644 --- 
a/ext/googletest/googletest/include/gtest/internal/gtest-port.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-port.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Low-level types and utilities for porting Google Test to various // platforms. All macros ending with _ and symbols defined in an // internal namespace are subject to change without notice. Code @@ -38,7 +38,9 @@ // files are expected to #include this. Therefore, it cannot #include // any other Google Test header. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ @@ -168,7 +170,7 @@ // GTEST_HAS_TYPED_TEST - typed tests // GTEST_HAS_TYPED_TEST_P - type-parameterized tests // GTEST_IS_THREADSAFE - Google Test is thread-safe. -// GOOGLETEST_CM0007 DO NOT DELETE +// GTEST_USES_RE2 - the RE2 regular expression library is used // GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with // GTEST_HAS_POSIX_RE (see above) which users can // define themselves. @@ -191,10 +193,6 @@ // GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. // GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a // variable don't have to be used. -// GTEST_DISALLOW_ASSIGN_ - disables copy operator=. -// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. -// GTEST_DISALLOW_MOVE_ASSIGN_ - disables move operator=. -// GTEST_DISALLOW_MOVE_AND_ASSIGN_ - disables move ctor and operator=. // GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. 
// GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is // suppressed (constant conditional). @@ -218,11 +216,13 @@ // - synchronization primitives. // // Regular expressions: -// RE - a simple regular expression class using the POSIX -// Extended Regular Expression syntax on UNIX-like platforms -// GOOGLETEST_CM0008 DO NOT DELETE -// or a reduced regular exception syntax on other -// platforms, including Windows. +// RE - a simple regular expression class using +// 1) the RE2 syntax on all platforms when built with RE2 +// and Abseil as dependencies +// 2) the POSIX Extended Regular Expression syntax on +// UNIX-like platforms, +// 3) A reduced regular exception syntax on other platforms, +// including Windows. // Logging: // GTEST_LOG_() - logs messages at the specified severity level. // LogToStderr() - directs all log messages to stderr. @@ -242,8 +242,6 @@ // BiggestInt - the biggest signed integer type. // // Command-line utilities: -// GTEST_DECLARE_*() - declares a flag. -// GTEST_DEFINE_*() - defines a flag. // GetInjectableArgvs() - returns the command line as a vector of strings. 
// // Environment variable utilities: @@ -264,48 +262,55 @@ #include #include +// #include // Guarded by GTEST_IS_THREADSAFE below #include +#include #include +#include +#include +#include +// #include // Guarded by GTEST_IS_THREADSAFE below +#include #include +#include #ifndef _WIN32_WCE -# include -# include +#include +#include #endif // !_WIN32_WCE #if defined __APPLE__ -# include -# include +#include +#include #endif -#include // NOLINT -#include -#include -#include // NOLINT -#include -#include // NOLINT - #include "gtest/internal/custom/gtest-port.h" #include "gtest/internal/gtest-port-arch.h" +#if GTEST_HAS_ABSL +#include "absl/flags/declare.h" +#include "absl/flags/flag.h" +#include "absl/flags/reflection.h" +#endif + #if !defined(GTEST_DEV_EMAIL_) -# define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com" -# define GTEST_FLAG_PREFIX_ "gtest_" -# define GTEST_FLAG_PREFIX_DASH_ "gtest-" -# define GTEST_FLAG_PREFIX_UPPER_ "GTEST_" -# define GTEST_NAME_ "Google Test" -# define GTEST_PROJECT_URL_ "https://github.com/google/googletest/" +#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com" +#define GTEST_FLAG_PREFIX_ "gtest_" +#define GTEST_FLAG_PREFIX_DASH_ "gtest-" +#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_" +#define GTEST_NAME_ "Google Test" +#define GTEST_PROJECT_URL_ "https://github.com/google/googletest/" #endif // !defined(GTEST_DEV_EMAIL_) #if !defined(GTEST_INIT_GOOGLE_TEST_NAME_) -# define GTEST_INIT_GOOGLE_TEST_NAME_ "testing::InitGoogleTest" +#define GTEST_INIT_GOOGLE_TEST_NAME_ "testing::InitGoogleTest" #endif // !defined(GTEST_INIT_GOOGLE_TEST_NAME_) // Determines the version of gcc that is used to compile this. #ifdef __GNUC__ // 40302 means version 4.3.2. -# define GTEST_GCC_VER_ \ - (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__) +#define GTEST_GCC_VER_ \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) #endif // __GNUC__ // Macros for disabling Microsoft Visual C++ warnings. 
@@ -314,41 +319,37 @@ // /* code that triggers warnings C4800 and C4385 */ // GTEST_DISABLE_MSC_WARNINGS_POP_() #if defined(_MSC_VER) -# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \ - __pragma(warning(push)) \ - __pragma(warning(disable: warnings)) -# define GTEST_DISABLE_MSC_WARNINGS_POP_() \ - __pragma(warning(pop)) +#define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \ + __pragma(warning(push)) __pragma(warning(disable : warnings)) +#define GTEST_DISABLE_MSC_WARNINGS_POP_() __pragma(warning(pop)) #else // Not all compilers are MSVC -# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) -# define GTEST_DISABLE_MSC_WARNINGS_POP_() +#define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) +#define GTEST_DISABLE_MSC_WARNINGS_POP_() #endif // Clang on Windows does not understand MSVC's pragma warning. // We need clang-specific way to disable function deprecation warning. #ifdef __clang__ -# define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") \ - _Pragma("clang diagnostic ignored \"-Wdeprecated-implementations\"") -#define GTEST_DISABLE_MSC_DEPRECATED_POP_() \ - _Pragma("clang diagnostic pop") +#define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-implementations\"") +#define GTEST_DISABLE_MSC_DEPRECATED_POP_() _Pragma("clang diagnostic pop") #else -# define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \ - GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996) -# define GTEST_DISABLE_MSC_DEPRECATED_POP_() \ - GTEST_DISABLE_MSC_WARNINGS_POP_() +#define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \ + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996) +#define GTEST_DISABLE_MSC_DEPRECATED_POP_() GTEST_DISABLE_MSC_WARNINGS_POP_() #endif // Brings in definitions for functions used in the testing::internal::posix // namespace (read, write, close, chdir, isatty, stat). 
We do not currently // use them on Windows Mobile. #if GTEST_OS_WINDOWS -# if !GTEST_OS_WINDOWS_MOBILE -# include -# include -# endif +#if !GTEST_OS_WINDOWS_MOBILE +#include +#include +#endif // In order to avoid having to include , use forward declaration #if GTEST_OS_WINDOWS_MINGW && !defined(__MINGW64_VERSION_MAJOR) // MinGW defined _CRITICAL_SECTION and _RTL_CRITICAL_SECTION as two @@ -368,68 +369,55 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // This assumes that non-Windows OSes provide unistd.h. For OSes where this // is not the case, we need to include headers that provide the functions // mentioned above. -# include -# include +#include +#include #endif // GTEST_OS_WINDOWS #if GTEST_OS_LINUX_ANDROID // Used to define __ANDROID_API__ matching the target NDK API level. -# include // NOLINT +#include // NOLINT #endif // Defines this to true if and only if Google Test can use POSIX regular // expressions. #ifndef GTEST_HAS_POSIX_RE -# if GTEST_OS_LINUX_ANDROID +#if GTEST_OS_LINUX_ANDROID // On Android, is only available starting with Gingerbread. -# define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9) -# else +#define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9) +#else #define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS && !GTEST_OS_XTENSA) -# endif +#endif #endif -#if GTEST_USES_PCRE -// The appropriate headers have already been included. - +// Select the regular expression implementation. +#if GTEST_HAS_ABSL +// When using Abseil, RE2 is required. +#include "absl/strings/string_view.h" +#include "re2/re2.h" +#define GTEST_USES_RE2 1 #elif GTEST_HAS_POSIX_RE - -// On some platforms, needs someone to define size_t, and -// won't compile otherwise. We can #include it here as we already -// included , which is guaranteed to define size_t through -// . -# include // NOLINT - -# define GTEST_USES_POSIX_RE 1 - -#elif GTEST_OS_WINDOWS - -// is not available on Windows. Use our own simple regex -// implementation instead. 
-# define GTEST_USES_SIMPLE_RE 1 - +#include // NOLINT +#define GTEST_USES_POSIX_RE 1 #else - -// may not be available on this platform. Use our own -// simple regex implementation instead. -# define GTEST_USES_SIMPLE_RE 1 - -#endif // GTEST_USES_PCRE +// Use our own simple regex implementation. +#define GTEST_USES_SIMPLE_RE 1 +#endif #ifndef GTEST_HAS_EXCEPTIONS // The user didn't tell us whether exceptions are enabled, so we need // to figure it out. -# if defined(_MSC_VER) && defined(_CPPUNWIND) +#if defined(_MSC_VER) && defined(_CPPUNWIND) // MSVC defines _CPPUNWIND to 1 if and only if exceptions are enabled. -# define GTEST_HAS_EXCEPTIONS 1 -# elif defined(__BORLANDC__) +#define GTEST_HAS_EXCEPTIONS 1 +#elif defined(__BORLANDC__) // C++Builder's implementation of the STL uses the _HAS_EXCEPTIONS // macro to enable exceptions, so we'll do the same. // Assumes that exceptions are enabled by default. -# ifndef _HAS_EXCEPTIONS -# define _HAS_EXCEPTIONS 1 -# endif // _HAS_EXCEPTIONS -# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS -# elif defined(__clang__) +#ifndef _HAS_EXCEPTIONS +#define _HAS_EXCEPTIONS 1 +#endif // _HAS_EXCEPTIONS +#define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS +#elif defined(__clang__) // clang defines __EXCEPTIONS if and only if exceptions are enabled before clang // 220714, but if and only if cleanups are enabled after that. In Obj-C++ files, // there can be cleanups for ObjC exceptions which also need cleanups, even if @@ -438,27 +426,27 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // cleanups prior to that. To reliably check for C++ exception availability with // clang, check for // __EXCEPTIONS && __has_feature(cxx_exceptions). 
-# define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions)) -# elif defined(__GNUC__) && __EXCEPTIONS +#define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions)) +#elif defined(__GNUC__) && __EXCEPTIONS // gcc defines __EXCEPTIONS to 1 if and only if exceptions are enabled. -# define GTEST_HAS_EXCEPTIONS 1 -# elif defined(__SUNPRO_CC) +#define GTEST_HAS_EXCEPTIONS 1 +#elif defined(__SUNPRO_CC) // Sun Pro CC supports exceptions. However, there is no compile-time way of // detecting whether they are enabled or not. Therefore, we assume that // they are enabled unless the user tells us otherwise. -# define GTEST_HAS_EXCEPTIONS 1 -# elif defined(__IBMCPP__) && __EXCEPTIONS +#define GTEST_HAS_EXCEPTIONS 1 +#elif defined(__IBMCPP__) && __EXCEPTIONS // xlC defines __EXCEPTIONS to 1 if and only if exceptions are enabled. -# define GTEST_HAS_EXCEPTIONS 1 -# elif defined(__HP_aCC) +#define GTEST_HAS_EXCEPTIONS 1 +#elif defined(__HP_aCC) // Exception handling is in effect by default in HP aCC compiler. It has to // be turned of by +noeh compiler option if desired. -# define GTEST_HAS_EXCEPTIONS 1 -# else +#define GTEST_HAS_EXCEPTIONS 1 +#else // For other compilers, we assume exceptions are disabled to be // conservative. -# define GTEST_HAS_EXCEPTIONS 0 -# endif // defined(_MSC_VER) || defined(__BORLANDC__) +#define GTEST_HAS_EXCEPTIONS 0 +#endif // defined(_MSC_VER) || defined(__BORLANDC__) #endif // GTEST_HAS_EXCEPTIONS #ifndef GTEST_HAS_STD_WSTRING @@ -478,63 +466,62 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // The user didn't tell us whether RTTI is enabled, so we need to // figure it out. -# ifdef _MSC_VER +#ifdef _MSC_VER #ifdef _CPPRTTI // MSVC defines this macro if and only if RTTI is enabled. 
-# define GTEST_HAS_RTTI 1 -# else -# define GTEST_HAS_RTTI 0 -# endif +#define GTEST_HAS_RTTI 1 +#else +#define GTEST_HAS_RTTI 0 +#endif // Starting with version 4.3.2, gcc defines __GXX_RTTI if and only if RTTI is // enabled. -# elif defined(__GNUC__) +#elif defined(__GNUC__) -# ifdef __GXX_RTTI +#ifdef __GXX_RTTI // When building against STLport with the Android NDK and with // -frtti -fno-exceptions, the build fails at link time with undefined // references to __cxa_bad_typeid. Note sure if STL or toolchain bug, // so disable RTTI when detected. -# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \ - !defined(__EXCEPTIONS) -# define GTEST_HAS_RTTI 0 -# else -# define GTEST_HAS_RTTI 1 -# endif // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS -# else -# define GTEST_HAS_RTTI 0 -# endif // __GXX_RTTI +#if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && !defined(__EXCEPTIONS) +#define GTEST_HAS_RTTI 0 +#else +#define GTEST_HAS_RTTI 1 +#endif // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS +#else +#define GTEST_HAS_RTTI 0 +#endif // __GXX_RTTI // Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends // using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the // first version with C++ support. -# elif defined(__clang__) +#elif defined(__clang__) -# define GTEST_HAS_RTTI __has_feature(cxx_rtti) +#define GTEST_HAS_RTTI __has_feature(cxx_rtti) // Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if // both the typeid and dynamic_cast features are present. -# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) +#elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) -# ifdef __RTTI_ALL__ -# define GTEST_HAS_RTTI 1 -# else -# define GTEST_HAS_RTTI 0 -# endif +#ifdef __RTTI_ALL__ +#define GTEST_HAS_RTTI 1 +#else +#define GTEST_HAS_RTTI 0 +#endif -# else +#else // For all other compilers, we assume RTTI is enabled. 
-# define GTEST_HAS_RTTI 1 +#define GTEST_HAS_RTTI 1 -# endif // _MSC_VER +#endif // _MSC_VER #endif // GTEST_HAS_RTTI // It's this header's responsibility to #include when RTTI // is enabled. #if GTEST_HAS_RTTI -# include +#include #endif // Determines whether Google Test can use the pthreads library. @@ -554,10 +541,10 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; #if GTEST_HAS_PTHREAD // gtest-port.h guarantees to #include when GTEST_HAS_PTHREAD is // true. -# include // NOLINT +#include // NOLINT // For timespec and nanosleep, used below. -# include // NOLINT +#include // NOLINT #endif // Determines whether clone(2) is supported. @@ -567,24 +554,23 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; #ifndef GTEST_HAS_CLONE // The user didn't tell us, so we need to figure it out. -# if GTEST_OS_LINUX && !defined(__ia64__) -# if GTEST_OS_LINUX_ANDROID +#if GTEST_OS_LINUX && !defined(__ia64__) +#if GTEST_OS_LINUX_ANDROID // On Android, clone() became available at different API levels for each 32-bit // architecture. -# if defined(__LP64__) || \ - (defined(__arm__) && __ANDROID_API__ >= 9) || \ - (defined(__mips__) && __ANDROID_API__ >= 12) || \ - (defined(__i386__) && __ANDROID_API__ >= 17) -# define GTEST_HAS_CLONE 1 -# else -# define GTEST_HAS_CLONE 0 -# endif -# else -# define GTEST_HAS_CLONE 1 -# endif -# else -# define GTEST_HAS_CLONE 0 -# endif // GTEST_OS_LINUX && !defined(__ia64__) +#if defined(__LP64__) || (defined(__arm__) && __ANDROID_API__ >= 9) || \ + (defined(__mips__) && __ANDROID_API__ >= 12) || \ + (defined(__i386__) && __ANDROID_API__ >= 17) +#define GTEST_HAS_CLONE 1 +#else +#define GTEST_HAS_CLONE 0 +#endif +#else +#define GTEST_HAS_CLONE 1 +#endif +#else +#define GTEST_HAS_CLONE 0 +#endif // GTEST_OS_LINUX && !defined(__ia64__) #endif // GTEST_HAS_CLONE @@ -595,10 +581,10 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // platforms except known mobile ones. 
#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \ GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 || GTEST_OS_XTENSA -# define GTEST_HAS_STREAM_REDIRECTION 0 -# else -# define GTEST_HAS_STREAM_REDIRECTION 1 -# endif // !GTEST_OS_WINDOWS_MOBILE +#define GTEST_HAS_STREAM_REDIRECTION 0 +#else +#define GTEST_HAS_STREAM_REDIRECTION 1 +#endif // !GTEST_OS_WINDOWS_MOBILE #endif // GTEST_HAS_STREAM_REDIRECTION // Determines whether to support death tests. @@ -610,7 +596,7 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; GTEST_OS_FREEBSD || GTEST_OS_NETBSD || GTEST_OS_FUCHSIA || \ GTEST_OS_DRAGONFLY || GTEST_OS_GNU_KFREEBSD || GTEST_OS_HAIKU || \ GTEST_OS_GNU_HURD) -# define GTEST_HAS_DEATH_TEST 1 +#define GTEST_HAS_DEATH_TEST 1 #endif // Determines whether to support type-driven tests. @@ -619,8 +605,8 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // Sun Pro CC, IBM Visual Age, and HP aCC support. #if defined(__GNUC__) || defined(_MSC_VER) || defined(__SUNPRO_CC) || \ defined(__IBMCPP__) || defined(__HP_aCC) -# define GTEST_HAS_TYPED_TEST 1 -# define GTEST_HAS_TYPED_TEST_P 1 +#define GTEST_HAS_TYPED_TEST 1 +#define GTEST_HAS_TYPED_TEST_P 1 #endif // Determines whether the system compiler uses UTF-16 for encoding wide strings. @@ -631,7 +617,7 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; #if GTEST_OS_LINUX || GTEST_OS_GNU_KFREEBSD || GTEST_OS_DRAGONFLY || \ GTEST_OS_FREEBSD || GTEST_OS_NETBSD || GTEST_OS_OPENBSD || \ GTEST_OS_GNU_HURD -# define GTEST_CAN_STREAM_RESULTS_ 1 +#define GTEST_CAN_STREAM_RESULTS_ 1 #endif // Defines some utility macros. @@ -645,9 +631,12 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // // The "switch (0) case 0:" idiom is used to suppress this. 
#ifdef __INTEL_COMPILER -# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ +#define GTEST_AMBIGUOUS_ELSE_BLOCKER_ #else -# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT +#define GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + switch (0) \ + case 0: \ + default: // NOLINT #endif // Use this annotation at the end of a struct/class definition to @@ -662,55 +651,32 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // Also use it after a variable or parameter declaration to tell the // compiler the variable/parameter does not have to be used. #if defined(__GNUC__) && !defined(COMPILER_ICC) -# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +#define GTEST_ATTRIBUTE_UNUSED_ __attribute__((unused)) #elif defined(__clang__) -# if __has_attribute(unused) -# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) -# endif +#if __has_attribute(unused) +#define GTEST_ATTRIBUTE_UNUSED_ __attribute__((unused)) +#endif #endif #ifndef GTEST_ATTRIBUTE_UNUSED_ -# define GTEST_ATTRIBUTE_UNUSED_ +#define GTEST_ATTRIBUTE_UNUSED_ #endif // Use this annotation before a function that takes a printf format string. #if (defined(__GNUC__) || defined(__clang__)) && !defined(COMPILER_ICC) -# if defined(__MINGW_PRINTF_FORMAT) +#if defined(__MINGW_PRINTF_FORMAT) // MinGW has two different printf implementations. Ensure the format macro // matches the selected implementation. See // https://sourceforge.net/p/mingw-w64/wiki2/gnu%20printf/. 
-# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \ - __attribute__((__format__(__MINGW_PRINTF_FORMAT, string_index, \ - first_to_check))) -# else -# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \ - __attribute__((__format__(__printf__, string_index, first_to_check))) -# endif +#define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \ + __attribute__(( \ + __format__(__MINGW_PRINTF_FORMAT, string_index, first_to_check))) #else -# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) +#define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +#endif +#else +#define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) #endif - - -// A macro to disallow copy operator= -// This should be used in the private: declarations for a class. -#define GTEST_DISALLOW_ASSIGN_(type) \ - type& operator=(type const &) = delete - -// A macro to disallow copy constructor and operator= -// This should be used in the private: declarations for a class. -#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type) \ - type(type const&) = delete; \ - type& operator=(type const&) = delete - -// A macro to disallow move operator= -// This should be used in the private: declarations for a class. -#define GTEST_DISALLOW_MOVE_ASSIGN_(type) \ - type& operator=(type &&) noexcept = delete - -// A macro to disallow move constructor and operator= -// This should be used in the private: declarations for a class. -#define GTEST_DISALLOW_MOVE_AND_ASSIGN_(type) \ - type(type&&) noexcept = delete; \ - type& operator=(type&&) noexcept = delete // Tell the compiler to warn about unused return values for functions declared // with this macro. 
The macro should be used on function declarations @@ -718,9 +684,9 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // // Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_; #if defined(__GNUC__) && !defined(COMPILER_ICC) -# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result)) +#define GTEST_MUST_USE_RESULT_ __attribute__((warn_unused_result)) #else -# define GTEST_MUST_USE_RESULT_ +#define GTEST_MUST_USE_RESULT_ #endif // __GNUC__ && !COMPILER_ICC // MS C++ compiler emits warning when a conditional expression is compile time @@ -731,10 +697,9 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; // while (true) { // GTEST_INTENTIONAL_CONST_COND_POP_() // } -# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \ - GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127) -# define GTEST_INTENTIONAL_CONST_COND_POP_() \ - GTEST_DISABLE_MSC_WARNINGS_POP_() +#define GTEST_INTENTIONAL_CONST_COND_PUSH_() \ + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127) +#define GTEST_INTENTIONAL_CONST_COND_POP_() GTEST_DISABLE_MSC_WARNINGS_POP_() // Determine whether the compiler supports Microsoft's Structured Exception // Handling. This is supported by several Windows compilers but generally @@ -742,13 +707,13 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; #ifndef GTEST_HAS_SEH // The user didn't tell us, so we need to figure it out. -# if defined(_MSC_VER) || defined(__BORLANDC__) +#if defined(_MSC_VER) || defined(__BORLANDC__) // These two compilers are known to support SEH. -# define GTEST_HAS_SEH 1 -# else +#define GTEST_HAS_SEH 1 +#else // Assume no SEH. -# define GTEST_HAS_SEH 0 -# endif +#define GTEST_HAS_SEH 0 +#endif #endif // GTEST_HAS_SEH @@ -761,94 +726,112 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION; #endif // GTEST_IS_THREADSAFE +#if GTEST_IS_THREADSAFE +// Some platforms don't support including these threading related headers. 
+#include // NOLINT +#include // NOLINT +#endif // GTEST_IS_THREADSAFE + // GTEST_API_ qualifies all symbols that must be exported. The definitions below // are guarded by #ifndef to give embedders a chance to define GTEST_API_ in // gtest/internal/custom/gtest-port.h #ifndef GTEST_API_ #ifdef _MSC_VER -# if GTEST_LINKED_AS_SHARED_LIBRARY -# define GTEST_API_ __declspec(dllimport) -# elif GTEST_CREATE_SHARED_LIBRARY -# define GTEST_API_ __declspec(dllexport) -# endif +#if GTEST_LINKED_AS_SHARED_LIBRARY +#define GTEST_API_ __declspec(dllimport) +#elif GTEST_CREATE_SHARED_LIBRARY +#define GTEST_API_ __declspec(dllexport) +#endif #elif __GNUC__ >= 4 || defined(__clang__) -# define GTEST_API_ __attribute__((visibility ("default"))) +#define GTEST_API_ __attribute__((visibility("default"))) #endif // _MSC_VER #endif // GTEST_API_ #ifndef GTEST_API_ -# define GTEST_API_ +#define GTEST_API_ #endif // GTEST_API_ #ifndef GTEST_DEFAULT_DEATH_TEST_STYLE -# define GTEST_DEFAULT_DEATH_TEST_STYLE "fast" +#define GTEST_DEFAULT_DEATH_TEST_STYLE "fast" #endif // GTEST_DEFAULT_DEATH_TEST_STYLE #ifdef __GNUC__ // Ask the compiler to never inline a given function. -# define GTEST_NO_INLINE_ __attribute__((noinline)) +#define GTEST_NO_INLINE_ __attribute__((noinline)) #else -# define GTEST_NO_INLINE_ +#define GTEST_NO_INLINE_ +#endif + +#if defined(__clang__) +// Nested ifs to avoid triggering MSVC warning. +#if __has_attribute(disable_tail_calls) +// Ask the compiler not to perform tail call optimization inside +// the marked function. +#define GTEST_NO_TAIL_CALL_ __attribute__((disable_tail_calls)) +#endif +#elif __GNUC__ +#define GTEST_NO_TAIL_CALL_ \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define GTEST_NO_TAIL_CALL_ #endif // _LIBCPP_VERSION is defined by the libc++ library from the LLVM project. 
#if !defined(GTEST_HAS_CXXABI_H_) -# if defined(__GLIBCXX__) || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) -# define GTEST_HAS_CXXABI_H_ 1 -# else -# define GTEST_HAS_CXXABI_H_ 0 -# endif +#if defined(__GLIBCXX__) || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) +#define GTEST_HAS_CXXABI_H_ 1 +#else +#define GTEST_HAS_CXXABI_H_ 0 +#endif #endif // A function level attribute to disable checking for use of uninitialized // memory when built with MemorySanitizer. #if defined(__clang__) -# if __has_feature(memory_sanitizer) -# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \ - __attribute__((no_sanitize_memory)) -# else -# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ -# endif // __has_feature(memory_sanitizer) +#if __has_feature(memory_sanitizer) +#define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ __attribute__((no_sanitize_memory)) #else -# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +#define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +#endif // __has_feature(memory_sanitizer) +#else +#define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ #endif // __clang__ // A function level attribute to disable AddressSanitizer instrumentation. #if defined(__clang__) -# if __has_feature(address_sanitizer) -# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \ - __attribute__((no_sanitize_address)) -# else -# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ -# endif // __has_feature(address_sanitizer) +#if __has_feature(address_sanitizer) +#define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \ + __attribute__((no_sanitize_address)) #else -# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +#define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +#endif // __has_feature(address_sanitizer) +#else +#define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ #endif // __clang__ // A function level attribute to disable HWAddressSanitizer instrumentation. 
#if defined(__clang__) -# if __has_feature(hwaddress_sanitizer) -# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ \ - __attribute__((no_sanitize("hwaddress"))) -# else -# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ -# endif // __has_feature(hwaddress_sanitizer) +#if __has_feature(hwaddress_sanitizer) +#define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ \ + __attribute__((no_sanitize("hwaddress"))) #else -# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +#define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ +#endif // __has_feature(hwaddress_sanitizer) +#else +#define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ #endif // __clang__ // A function level attribute to disable ThreadSanitizer instrumentation. #if defined(__clang__) -# if __has_feature(thread_sanitizer) -# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \ - __attribute__((no_sanitize_thread)) -# else -# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ -# endif // __has_feature(thread_sanitizer) +#if __has_feature(thread_sanitizer) +#define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ __attribute__((no_sanitize_thread)) #else -# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +#define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +#endif // __has_feature(thread_sanitizer) +#else +#define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ #endif // __clang__ namespace testing { @@ -870,25 +853,37 @@ namespace internal { // Secret object, which is what we want. class Secret; -// The GTEST_COMPILE_ASSERT_ is a legacy macro used to verify that a compile -// time expression is true (in new code, use static_assert instead). For -// example, you could use it to verify the size of a static array: -// -// GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES, -// names_incorrect_size); -// -// The second argument to the macro must be a valid C++ identifier. If the -// expression is false, compiler will issue an error containing this identifier. -#define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg) - // A helper for suppressing warnings on constant condition. 
It just // returns 'condition'. GTEST_API_ bool IsTrue(bool condition); // Defines RE. -#if GTEST_USES_PCRE -// if used, PCRE is injected by custom/gtest-port.h +#if GTEST_USES_RE2 + +// This is almost `using RE = ::RE2`, except it is copy-constructible, and it +// needs to disambiguate the `std::string`, `absl::string_view`, and `const +// char*` constructors. +class GTEST_API_ RE { + public: + RE(absl::string_view regex) : regex_(regex) {} // NOLINT + RE(const char* regex) : RE(absl::string_view(regex)) {} // NOLINT + RE(const std::string& regex) : RE(absl::string_view(regex)) {} // NOLINT + RE(const RE& other) : RE(other.pattern()) {} + + const std::string& pattern() const { return regex_.pattern(); } + + static bool FullMatch(absl::string_view str, const RE& re) { + return RE2::FullMatch(str, re.regex_); + } + static bool PartialMatch(absl::string_view str, const RE& re) { + return RE2::PartialMatch(str, re.regex_); + } + + private: + RE2 regex_; +}; + #elif GTEST_USES_POSIX_RE || GTEST_USES_SIMPLE_RE // A simple C++ wrapper for . It uses the POSIX Extended @@ -927,19 +922,19 @@ class GTEST_API_ RE { const char* pattern_; bool is_valid_; -# if GTEST_USES_POSIX_RE +#if GTEST_USES_POSIX_RE regex_t full_regex_; // For FullMatch(). regex_t partial_regex_; // For PartialMatch(). -# else // GTEST_USES_SIMPLE_RE +#else // GTEST_USES_SIMPLE_RE const char* full_pattern_; // For FullMatch(); -# endif +#endif }; -#endif // GTEST_USES_PCRE +#endif // ::testing::internal::RE implementation // Formats a source file path and a line number as they would appear // in an error message from the compiler used to compile this code. @@ -957,12 +952,7 @@ GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, // LogToStderr() - directs all log messages to stderr. // FlushInfoLog() - flushes informational log messages. 
-enum GTestLogSeverity { - GTEST_INFO, - GTEST_WARNING, - GTEST_ERROR, - GTEST_FATAL -}; +enum GTestLogSeverity { GTEST_INFO, GTEST_WARNING, GTEST_ERROR, GTEST_FATAL }; // Formats log entry severity, provides a stream object for streaming the // log message, and terminates the message with a newline when going out of @@ -979,14 +969,16 @@ class GTEST_API_ GTestLog { private: const GTestLogSeverity severity_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); + GTestLog(const GTestLog&) = delete; + GTestLog& operator=(const GTestLog&) = delete; }; #if !defined(GTEST_LOG_) -# define GTEST_LOG_(severity) \ - ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ - __FILE__, __LINE__).GetStream() +#define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__) \ + .GetStream() inline void LogToStderr() {} inline void FlushInfoLog() { fflush(nullptr); } @@ -998,7 +990,7 @@ inline void FlushInfoLog() { fflush(nullptr); } // // GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition // is not satisfied. -// Synopsys: +// Synopsis: // GTEST_CHECK_(boolean_condition); // or // GTEST_CHECK_(boolean_condition) << "Additional message"; @@ -1008,12 +1000,12 @@ inline void FlushInfoLog() { fflush(nullptr); } // condition itself, plus additional message streamed into it, if any, // and then it aborts the program. It aborts the program irrespective of // whether it is built in the debug mode or not. -# define GTEST_CHECK_(condition) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (::testing::internal::IsTrue(condition)) \ - ; \ - else \ - GTEST_LOG_(FATAL) << "Condition " #condition " failed. " +#define GTEST_CHECK_(condition) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::IsTrue(condition)) \ + ; \ + else \ + GTEST_LOG_(FATAL) << "Condition " #condition " failed. 
" #endif // !defined(GTEST_CHECK_) // An all-mode assert to verify that the given POSIX-style function @@ -1022,9 +1014,8 @@ inline void FlushInfoLog() { fflush(nullptr); } // in {} if you need to use it as the only statement in an 'if' // branch. #define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \ - if (const int gtest_error = (posix_call)) \ - GTEST_LOG_(FATAL) << #posix_call << "failed with error " \ - << gtest_error + if (const int gtest_error = (posix_call)) \ + GTEST_LOG_(FATAL) << #posix_call << "failed with error " << gtest_error // Transforms "T" into "const T&" according to standard reference collapsing // rules (this is only needed as a backport for C++98 compilers that do not @@ -1038,9 +1029,13 @@ inline void FlushInfoLog() { fflush(nullptr); } // Note that the non-const reference will not have "const" added. This is // standard, and necessary so that "T" can always bind to "const T&". template -struct ConstRef { typedef const T& type; }; +struct ConstRef { + typedef const T& type; +}; template -struct ConstRef { typedef T& type; }; +struct ConstRef { + typedef T& type; +}; // The argument T must depend on some template parameters. #define GTEST_REFERENCE_TO_CONST_(T) \ @@ -1053,7 +1048,7 @@ struct ConstRef { typedef T& type; }; // const Foo*). When you use ImplicitCast_, the compiler checks that // the cast is safe. Such explicit ImplicitCast_s are necessary in // surprisingly many situations where C++ demands an exact type match -// instead of an argument type convertable to a target type. +// instead of an argument type convertible to a target type. // // The syntax for using ImplicitCast_ is the same as for static_cast: // @@ -1066,8 +1061,10 @@ struct ConstRef { typedef T& type; }; // This relatively ugly name is intentional. It prevents clashes with // similar functions users may have (e.g., implicit_cast). The internal // namespace alone is not enough because the function can be found by ADL. 
-template -inline To ImplicitCast_(To x) { return x; } +template +inline To ImplicitCast_(To x) { + return x; +} // When you upcast (that is, cast a pointer from type Foo to type // SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts @@ -1090,17 +1087,17 @@ inline To ImplicitCast_(To x) { return x; } // This relatively ugly name is intentional. It prevents clashes with // similar functions users may have (e.g., down_cast). The internal // namespace alone is not enough because the function can be found by ADL. -template // use like this: DownCast_(foo); -inline To DownCast_(From* f) { // so we only accept pointers +template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers // Ensures that To is a sub-type of From *. This test is here only // for compile-time type checking, and has no overhead in an // optimized build at run-time, as it will be optimized away // completely. GTEST_INTENTIONAL_CONST_COND_PUSH_() if (false) { - GTEST_INTENTIONAL_CONST_COND_POP_() - const To to = nullptr; - ::testing::internal::ImplicitCast_(to); + GTEST_INTENTIONAL_CONST_COND_POP_() + const To to = nullptr; + ::testing::internal::ImplicitCast_(to); } #if GTEST_HAS_RTTI @@ -1165,71 +1162,8 @@ void ClearInjectableArgvs(); // Defines synchronization primitives. #if GTEST_IS_THREADSAFE -# if GTEST_HAS_PTHREAD -// Sleeps for (roughly) n milliseconds. This function is only for testing -// Google Test's own constructs. Don't use it in user tests, either -// directly or indirectly. -inline void SleepMilliseconds(int n) { - const timespec time = { - 0, // 0 seconds. - n * 1000L * 1000L, // And n ms. - }; - nanosleep(&time, nullptr); -} -# endif // GTEST_HAS_PTHREAD - -# if GTEST_HAS_NOTIFICATION_ -// Notification has already been imported into the namespace. -// Nothing to do here. - -# elif GTEST_HAS_PTHREAD -// Allows a controller thread to pause execution of newly created -// threads until notified. 
Instances of this class must be created -// and destroyed in the controller thread. -// -// This class is only for testing Google Test's own constructs. Do not -// use it in user tests, either directly or indirectly. -class Notification { - public: - Notification() : notified_(false) { - GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr)); - } - ~Notification() { - pthread_mutex_destroy(&mutex_); - } - - // Notifies all threads created with this notification to start. Must - // be called from the controller thread. - void Notify() { - pthread_mutex_lock(&mutex_); - notified_ = true; - pthread_mutex_unlock(&mutex_); - } - - // Blocks until the controller thread notifies. Must be called from a test - // thread. - void WaitForNotification() { - for (;;) { - pthread_mutex_lock(&mutex_); - const bool notified = notified_; - pthread_mutex_unlock(&mutex_); - if (notified) - break; - SleepMilliseconds(10); - } - } - - private: - pthread_mutex_t mutex_; - bool notified_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); -}; - -# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT - -GTEST_API_ void SleepMilliseconds(int n); +#if GTEST_OS_WINDOWS // Provides leak-safe Windows kernel handle ownership. // Used in death tests and in threading support. class GTEST_API_ AutoHandle { @@ -1256,8 +1190,18 @@ class GTEST_API_ AutoHandle { Handle handle_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle); + AutoHandle(const AutoHandle&) = delete; + AutoHandle& operator=(const AutoHandle&) = delete; }; +#endif + +#if GTEST_HAS_NOTIFICATION_ +// Notification has already been imported into the namespace. +// Nothing to do here. + +#else +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) // Allows a controller thread to pause execution of newly created // threads until notified. 
Instances of this class must be created @@ -1265,23 +1209,40 @@ class GTEST_API_ AutoHandle { // // This class is only for testing Google Test's own constructs. Do not // use it in user tests, either directly or indirectly. +// TODO(b/203539622): Replace unconditionally with absl::Notification. class GTEST_API_ Notification { public: - Notification(); - void Notify(); - void WaitForNotification(); + Notification() : notified_(false) {} + Notification(const Notification&) = delete; + Notification& operator=(const Notification&) = delete; + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { + std::lock_guard lock(mu_); + notified_ = true; + cv_.notify_all(); + } + + // Blocks until the controller thread notifies. Must be called from a test + // thread. + void WaitForNotification() { + std::unique_lock lock(mu_); + cv_.wait(lock, [this]() { return notified_; }); + } private: - AutoHandle event_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); + std::mutex mu_; + std::condition_variable cv_; + bool notified_; }; -# endif // GTEST_HAS_NOTIFICATION_ +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 +#endif // GTEST_HAS_NOTIFICATION_ // On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD // defined, but we don't want to use MinGW's pthreads implementation, which // has conformance problems with some versions of the POSIX standard. -# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW +#if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW // As a C-function, ThreadFuncWithCLinkage cannot be templated itself. // Consequently, it cannot select a correct instantiation of ThreadWithParam @@ -1357,16 +1318,17 @@ class ThreadWithParam : public ThreadWithParamBase { // finished. pthread_t thread_; // The native thread object. 
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); + ThreadWithParam(const ThreadWithParam&) = delete; + ThreadWithParam& operator=(const ThreadWithParam&) = delete; }; -# endif // !GTEST_OS_WINDOWS && GTEST_HAS_PTHREAD || - // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ +#endif // !GTEST_OS_WINDOWS && GTEST_HAS_PTHREAD || + // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ -# if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ +#if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ // Mutex and ThreadLocal have already been imported into the namespace. // Nothing to do here. -# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT +#elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT // Mutex implements mutex on Windows platforms. It is used in conjunction // with class MutexLock: @@ -1420,14 +1382,15 @@ class GTEST_API_ Mutex { long critical_section_init_phase_; // NOLINT GTEST_CRITICAL_SECTION* critical_section_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; }; -# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ - extern ::testing::internal::Mutex mutex +#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex -# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ - ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex) +#define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex) // We cannot name this class MutexLock because the ctor declaration would // conflict with a macro named MutexLock, which is defined on some @@ -1436,15 +1399,15 @@ class GTEST_API_ Mutex { // "MutexLock l(&mu)". Hence the typedef trick below. 
class GTestMutexLock { public: - explicit GTestMutexLock(Mutex* mutex) - : mutex_(mutex) { mutex_->Lock(); } + explicit GTestMutexLock(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); } ~GTestMutexLock() { mutex_->Unlock(); } private: Mutex* const mutex_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); + GTestMutexLock(const GTestMutexLock&) = delete; + GTestMutexLock& operator=(const GTestMutexLock&) = delete; }; typedef GTestMutexLock MutexLock; @@ -1471,7 +1434,8 @@ class ThreadLocalBase { virtual ~ThreadLocalBase() {} private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase); + ThreadLocalBase(const ThreadLocalBase&) = delete; + ThreadLocalBase& operator=(const ThreadLocalBase&) = delete; }; // Maps a thread to a set of ThreadLocals that have values instantiated on that @@ -1500,7 +1464,7 @@ class GTEST_API_ ThreadWithParamBase { virtual void Run() = 0; }; - ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start); + ThreadWithParamBase(Runnable* runnable, Notification* thread_can_start); virtual ~ThreadWithParamBase(); private: @@ -1514,30 +1478,26 @@ class ThreadWithParam : public ThreadWithParamBase { typedef void UserThreadFunc(T); ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start) - : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) { - } + : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {} virtual ~ThreadWithParam() {} private: class RunnableImpl : public Runnable { public: - RunnableImpl(UserThreadFunc* func, T param) - : func_(func), - param_(param) { - } + RunnableImpl(UserThreadFunc* func, T param) : func_(func), param_(param) {} virtual ~RunnableImpl() {} - virtual void Run() { - func_(param_); - } + virtual void Run() { func_(param_); } private: UserThreadFunc* const func_; const T param_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl); + RunnableImpl(const RunnableImpl&) = delete; + RunnableImpl& operator=(const RunnableImpl&) = delete; }; - 
GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); + ThreadWithParam(const ThreadWithParam&) = delete; + ThreadWithParam& operator=(const ThreadWithParam&) = delete; }; // Implements thread-local storage on Windows systems. @@ -1574,7 +1534,7 @@ class ThreadLocal : public ThreadLocalBase { explicit ThreadLocal(const T& value) : default_factory_(new InstanceValueHolderFactory(value)) {} - ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); } + ~ThreadLocal() override { ThreadLocalRegistry::OnThreadLocalDestroyed(this); } T* pointer() { return GetOrCreateValue(); } const T* pointer() const { return GetOrCreateValue(); } @@ -1593,16 +1553,17 @@ class ThreadLocal : public ThreadLocalBase { private: T value_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + ValueHolder(const ValueHolder&) = delete; + ValueHolder& operator=(const ValueHolder&) = delete; }; - T* GetOrCreateValue() const { return static_cast( - ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer(); + ThreadLocalRegistry::GetValueOnCurrentThread(this)) + ->pointer(); } - virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const { + ThreadLocalValueHolderBase* NewValueForCurrentThread() const override { return default_factory_->MakeNewHolder(); } @@ -1613,7 +1574,8 @@ class ThreadLocal : public ThreadLocalBase { virtual ValueHolder* MakeNewHolder() const = 0; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory); + ValueHolderFactory(const ValueHolderFactory&) = delete; + ValueHolderFactory& operator=(const ValueHolderFactory&) = delete; }; class DefaultValueHolderFactory : public ValueHolderFactory { @@ -1622,7 +1584,9 @@ class ThreadLocal : public ThreadLocalBase { ValueHolder* MakeNewHolder() const override { return new ValueHolder(); } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory); + DefaultValueHolderFactory(const DefaultValueHolderFactory&) = delete; + DefaultValueHolderFactory& operator=(const DefaultValueHolderFactory&) = + delete; 
}; class InstanceValueHolderFactory : public ValueHolderFactory { @@ -1635,15 +1599,18 @@ class ThreadLocal : public ThreadLocalBase { private: const T value_; // The value for each thread. - GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory); + InstanceValueHolderFactory(const InstanceValueHolderFactory&) = delete; + InstanceValueHolderFactory& operator=(const InstanceValueHolderFactory&) = + delete; }; std::unique_ptr default_factory_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); + ThreadLocal(const ThreadLocal&) = delete; + ThreadLocal& operator=(const ThreadLocal&) = delete; }; -# elif GTEST_HAS_PTHREAD +#elif GTEST_HAS_PTHREAD // MutexBase and Mutex implement mutex on pthreads-based platforms. class MutexBase { @@ -1690,8 +1657,8 @@ class MutexBase { }; // Forward-declares a static mutex. -# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ - extern ::testing::internal::MutexBase mutex +#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::MutexBase mutex // Defines and statically (i.e. at link time) initializes a static mutex. // The initialization list here does not explicitly initialize each field, @@ -1710,12 +1677,11 @@ class Mutex : public MutexBase { GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr)); has_owner_ = false; } - ~Mutex() { - GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); - } + ~Mutex() { GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; }; // We cannot name this class MutexLock because the ctor declaration would @@ -1725,15 +1691,15 @@ class Mutex : public MutexBase { // "MutexLock l(&mu)". Hence the typedef trick below. 
class GTestMutexLock { public: - explicit GTestMutexLock(MutexBase* mutex) - : mutex_(mutex) { mutex_->Lock(); } + explicit GTestMutexLock(MutexBase* mutex) : mutex_(mutex) { mutex_->Lock(); } ~GTestMutexLock() { mutex_->Unlock(); } private: MutexBase* const mutex_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); + GTestMutexLock(const GTestMutexLock&) = delete; + GTestMutexLock& operator=(const GTestMutexLock&) = delete; }; typedef GTestMutexLock MutexLock; @@ -1790,7 +1756,8 @@ class GTEST_API_ ThreadLocal { private: T value_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + ValueHolder(const ValueHolder&) = delete; + ValueHolder& operator=(const ValueHolder&) = delete; }; static pthread_key_t CreateKey() { @@ -1822,7 +1789,8 @@ class GTEST_API_ ThreadLocal { virtual ValueHolder* MakeNewHolder() const = 0; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory); + ValueHolderFactory(const ValueHolderFactory&) = delete; + ValueHolderFactory& operator=(const ValueHolderFactory&) = delete; }; class DefaultValueHolderFactory : public ValueHolderFactory { @@ -1831,7 +1799,9 @@ class GTEST_API_ ThreadLocal { ValueHolder* MakeNewHolder() const override { return new ValueHolder(); } private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory); + DefaultValueHolderFactory(const DefaultValueHolderFactory&) = delete; + DefaultValueHolderFactory& operator=(const DefaultValueHolderFactory&) = + delete; }; class InstanceValueHolderFactory : public ValueHolderFactory { @@ -1844,17 +1814,20 @@ class GTEST_API_ ThreadLocal { private: const T value_; // The value for each thread. - GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory); + InstanceValueHolderFactory(const InstanceValueHolderFactory&) = delete; + InstanceValueHolderFactory& operator=(const InstanceValueHolderFactory&) = + delete; }; // A key pthreads uses for looking up per-thread values. 
const pthread_key_t key_; std::unique_ptr default_factory_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); + ThreadLocal(const ThreadLocal&) = delete; + ThreadLocal& operator=(const ThreadLocal&) = delete; }; -# endif // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ +#endif // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ #else // GTEST_IS_THREADSAFE @@ -1871,10 +1844,10 @@ class Mutex { void AssertHeld() const {} }; -# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ +#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ extern ::testing::internal::Mutex mutex -# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex +#define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex // We cannot name this class MutexLock because the ctor declaration would // conflict with a macro named MutexLock, which is defined on some @@ -1897,6 +1870,7 @@ class GTEST_API_ ThreadLocal { const T* pointer() const { return &value_; } const T& get() const { return value_; } void set(const T& value) { value_ = value; } + private: T value_; }; @@ -1908,11 +1882,11 @@ class GTEST_API_ ThreadLocal { GTEST_API_ size_t GetThreadCount(); #if GTEST_OS_WINDOWS -# define GTEST_PATH_SEP_ "\\" -# define GTEST_HAS_ALT_PATH_SEP_ 1 +#define GTEST_PATH_SEP_ "\\" +#define GTEST_HAS_ALT_PATH_SEP_ 1 #else -# define GTEST_PATH_SEP_ "/" -# define GTEST_HAS_ALT_PATH_SEP_ 0 +#define GTEST_PATH_SEP_ "/" +#define GTEST_HAS_ALT_PATH_SEP_ 0 #endif // GTEST_OS_WINDOWS // Utilities for char. 
@@ -1970,8 +1944,7 @@ inline char ToUpper(char ch) { inline std::string StripTrailingSpaces(std::string str) { std::string::iterator it = str.end(); - while (it != str.begin() && IsSpace(*--it)) - it = str.erase(it); + while (it != str.begin() && IsSpace(*--it)) it = str.erase(it); return str; } @@ -1989,36 +1962,35 @@ namespace posix { typedef struct _stat StatStruct; -# ifdef __BORLANDC__ +#ifdef __BORLANDC__ inline int DoIsATTY(int fd) { return isatty(fd); } inline int StrCaseCmp(const char* s1, const char* s2) { return stricmp(s1, s2); } inline char* StrDup(const char* src) { return strdup(src); } -# else // !__BORLANDC__ -# if GTEST_OS_WINDOWS_MOBILE +#else // !__BORLANDC__ +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS || GTEST_OS_IOS || \ + GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT || defined(ESP_PLATFORM) inline int DoIsATTY(int /* fd */) { return 0; } -# else +#else inline int DoIsATTY(int fd) { return _isatty(fd); } -# endif // GTEST_OS_WINDOWS_MOBILE +#endif // GTEST_OS_WINDOWS_MOBILE inline int StrCaseCmp(const char* s1, const char* s2) { return _stricmp(s1, s2); } inline char* StrDup(const char* src) { return _strdup(src); } -# endif // __BORLANDC__ +#endif // __BORLANDC__ -# if GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS_MOBILE inline int FileNo(FILE* file) { return reinterpret_cast(_fileno(file)); } // Stat(), RmDir(), and IsDir() are not needed on Windows CE at this // time and thus not defined there. 
-# else +#else inline int FileNo(FILE* file) { return _fileno(file); } inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); } inline int RmDir(const char* dir) { return _rmdir(dir); } -inline bool IsDir(const StatStruct& st) { - return (_S_IFDIR & st.st_mode) != 0; -} -# endif // GTEST_OS_WINDOWS_MOBILE +inline bool IsDir(const StatStruct& st) { return (_S_IFDIR & st.st_mode) != 0; } +#endif // GTEST_OS_WINDOWS_MOBILE #elif GTEST_OS_ESP8266 typedef struct stat StatStruct; @@ -2082,12 +2054,12 @@ inline FILE* FOpen(const char* path, const char* mode) { std::wstring wide_path = converter.from_bytes(path); std::wstring wide_mode = converter.from_bytes(mode); return _wfopen(wide_path.c_str(), wide_mode.c_str()); -#else // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW +#else // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW return fopen(path, mode); #endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW } #if !GTEST_OS_WINDOWS_MOBILE -inline FILE *FReopen(const char* path, const char* mode, FILE* stream) { +inline FILE* FReopen(const char* path, const char* mode, FILE* stream) { return freopen(path, mode, stream); } inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); } @@ -2139,13 +2111,13 @@ GTEST_DISABLE_MSC_DEPRECATED_POP_() // snprintf is a variadic function. #if _MSC_VER && !GTEST_OS_WINDOWS_MOBILE // MSVC 2005 and above support variadic macros. -# define GTEST_SNPRINTF_(buffer, size, format, ...) \ - _snprintf_s(buffer, size, size, format, __VA_ARGS__) +#define GTEST_SNPRINTF_(buffer, size, format, ...) \ + _snprintf_s(buffer, size, size, format, __VA_ARGS__) #elif defined(_MSC_VER) // Windows CE does not define _snprintf_s -# define GTEST_SNPRINTF_ _snprintf +#define GTEST_SNPRINTF_ _snprintf #else -# define GTEST_SNPRINTF_ snprintf +#define GTEST_SNPRINTF_ snprintf #endif // The biggest signed integer type the compiler supports. @@ -2205,55 +2177,84 @@ using TimeInMillis = int64_t; // Represents time in milliseconds. 
// Macro for referencing flags. #if !defined(GTEST_FLAG) -# define GTEST_FLAG(name) FLAGS_gtest_##name +#define GTEST_FLAG_NAME_(name) gtest_##name +#define GTEST_FLAG(name) FLAGS_gtest_##name #endif // !defined(GTEST_FLAG) -#if !defined(GTEST_USE_OWN_FLAGFILE_FLAG_) -# define GTEST_USE_OWN_FLAGFILE_FLAG_ 1 -#endif // !defined(GTEST_USE_OWN_FLAGFILE_FLAG_) +// Pick a command line flags implementation. +#if GTEST_HAS_ABSL -#if !defined(GTEST_DECLARE_bool_) -# define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver +// Macros for defining flags. +#define GTEST_DEFINE_bool_(name, default_val, doc) \ + ABSL_FLAG(bool, GTEST_FLAG_NAME_(name), default_val, doc) +#define GTEST_DEFINE_int32_(name, default_val, doc) \ + ABSL_FLAG(int32_t, GTEST_FLAG_NAME_(name), default_val, doc) +#define GTEST_DEFINE_string_(name, default_val, doc) \ + ABSL_FLAG(std::string, GTEST_FLAG_NAME_(name), default_val, doc) // Macros for declaring flags. -#define GTEST_DECLARE_bool_(name) \ - namespace testing { \ - GTEST_API_ extern bool GTEST_FLAG(name); \ - } -#define GTEST_DECLARE_int32_(name) \ - namespace testing { \ - GTEST_API_ extern std::int32_t GTEST_FLAG(name); \ - } -#define GTEST_DECLARE_string_(name) \ - namespace testing { \ - GTEST_API_ extern ::std::string GTEST_FLAG(name); \ - } +#define GTEST_DECLARE_bool_(name) \ + ABSL_DECLARE_FLAG(bool, GTEST_FLAG_NAME_(name)) +#define GTEST_DECLARE_int32_(name) \ + ABSL_DECLARE_FLAG(int32_t, GTEST_FLAG_NAME_(name)) +#define GTEST_DECLARE_string_(name) \ + ABSL_DECLARE_FLAG(std::string, GTEST_FLAG_NAME_(name)) + +#define GTEST_FLAG_SAVER_ ::absl::FlagSaver + +#define GTEST_FLAG_GET(name) ::absl::GetFlag(GTEST_FLAG(name)) +#define GTEST_FLAG_SET(name, value) \ + (void)(::absl::SetFlag(>EST_FLAG(name), value)) +#define GTEST_USE_OWN_FLAGFILE_FLAG_ 0 + +#else // GTEST_HAS_ABSL // Macros for defining flags. 
#define GTEST_DEFINE_bool_(name, default_val, doc) \ namespace testing { \ GTEST_API_ bool GTEST_FLAG(name) = (default_val); \ - } + } \ + static_assert(true, "no-op to require trailing semicolon") #define GTEST_DEFINE_int32_(name, default_val, doc) \ namespace testing { \ GTEST_API_ std::int32_t GTEST_FLAG(name) = (default_val); \ - } + } \ + static_assert(true, "no-op to require trailing semicolon") #define GTEST_DEFINE_string_(name, default_val, doc) \ namespace testing { \ GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val); \ - } + } \ + static_assert(true, "no-op to require trailing semicolon") -#endif // !defined(GTEST_DECLARE_bool_) +// Macros for declaring flags. +#define GTEST_DECLARE_bool_(name) \ + namespace testing { \ + GTEST_API_ extern bool GTEST_FLAG(name); \ + } \ + static_assert(true, "no-op to require trailing semicolon") +#define GTEST_DECLARE_int32_(name) \ + namespace testing { \ + GTEST_API_ extern std::int32_t GTEST_FLAG(name); \ + } \ + static_assert(true, "no-op to require trailing semicolon") +#define GTEST_DECLARE_string_(name) \ + namespace testing { \ + GTEST_API_ extern ::std::string GTEST_FLAG(name); \ + } \ + static_assert(true, "no-op to require trailing semicolon") + +#define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver -#if !defined(GTEST_FLAG_GET) #define GTEST_FLAG_GET(name) ::testing::GTEST_FLAG(name) #define GTEST_FLAG_SET(name, value) (void)(::testing::GTEST_FLAG(name) = value) -#endif // !defined(GTEST_FLAG_GET) +#define GTEST_USE_OWN_FLAGFILE_FLAG_ 1 + +#endif // GTEST_HAS_ABSL // Thread annotations #if !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_) -# define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks) -# define GTEST_LOCK_EXCLUDED_(locks) +#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks) +#define GTEST_LOCK_EXCLUDED_(locks) #endif // !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_) // Parses 'str' for a 32-bit signed integer. 
If successful, writes the result @@ -2329,6 +2330,7 @@ namespace testing { namespace internal { template using Optional = ::absl::optional; +inline ::absl::nullopt_t Nullopt() { return ::absl::nullopt; } } // namespace internal } // namespace testing #else @@ -2342,6 +2344,7 @@ namespace testing { namespace internal { template using Optional = ::std::optional; +inline ::std::nullopt_t Nullopt() { return ::std::nullopt; } } // namespace internal } // namespace testing // The case where absl is configured NOT to alias std::optional is not @@ -2353,7 +2356,7 @@ using Optional = ::std::optional; #if GTEST_HAS_ABSL // Always use absl::string_view for Matcher<> specializations if googletest // is built with absl support. -# define GTEST_INTERNAL_HAS_STRING_VIEW 1 +#define GTEST_INTERNAL_HAS_STRING_VIEW 1 #include "absl/strings/string_view.h" namespace testing { namespace internal { @@ -2361,11 +2364,11 @@ using StringView = ::absl::string_view; } // namespace internal } // namespace testing #else -# ifdef __has_include -# if __has_include() && __cplusplus >= 201703L +#ifdef __has_include +#if __has_include() && __cplusplus >= 201703L // Otherwise for C++17 and higher use std::string_view for Matcher<> // specializations. -# define GTEST_INTERNAL_HAS_STRING_VIEW 1 +#define GTEST_INTERNAL_HAS_STRING_VIEW 1 #include namespace testing { namespace internal { @@ -2374,8 +2377,8 @@ using StringView = ::std::string_view; } // namespace testing // The case where absl is configured NOT to alias std::string_view is not // supported. 
-# endif // __has_include() && __cplusplus >= 201703L -# endif // __has_include +#endif // __has_include() && __cplusplus >= 201703L +#endif // __has_include #endif // GTEST_HAS_ABSL #if GTEST_HAS_ABSL diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-string.h b/ext/googletest/googletest/include/gtest/internal/gtest-string.h index 10f774f966..cca2e1f2ad 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-string.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-string.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file declares the String class and functions used internally by @@ -36,17 +36,20 @@ // This header file is #included by gtest-internal.h. // It should not be #included by other files. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ #ifdef __BORLANDC__ // string.h is not guaranteed to provide strcpy on C++ Builder. -# include +#include #endif #include + #include #include @@ -123,8 +126,7 @@ class GTEST_API_ String { // Unlike strcasecmp(), this function can handle NULL argument(s). // A NULL C string is considered different to any non-NULL C string, // including the empty string. - static bool CaseInsensitiveCStringEquals(const char* lhs, - const char* rhs); + static bool CaseInsensitiveCStringEquals(const char* lhs, const char* rhs); // Compares two wide C strings, ignoring case. Returns true if and only if // they have the same content. 
@@ -143,8 +145,8 @@ class GTEST_API_ String { // Returns true if and only if the given string ends with the given suffix, // ignoring case. Any string is considered to end with an empty suffix. - static bool EndsWithCaseInsensitive( - const std::string& str, const std::string& suffix); + static bool EndsWithCaseInsensitive(const std::string& str, + const std::string& suffix); // Formats an int value as "%02d". static std::string FormatIntWidth2(int value); // "%02d" for width == 2 @@ -163,7 +165,7 @@ class GTEST_API_ String { private: String(); // Not meant to be instantiated. -}; // class String +}; // class String // Gets the content of the stringstream's buffer as an std::string. Each '\0' // character in the buffer is replaced with "\\0". diff --git a/ext/googletest/googletest/include/gtest/internal/gtest-type-util.h b/ext/googletest/googletest/include/gtest/internal/gtest-type-util.h index b87a2e2cac..6bc02a7de3 100644 --- a/ext/googletest/googletest/include/gtest/internal/gtest-type-util.h +++ b/ext/googletest/googletest/include/gtest/internal/gtest-type-util.h @@ -30,7 +30,9 @@ // Type utilities needed for implementing typed and type-parameterized // tests. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ @@ -39,11 +41,11 @@ // #ifdef __GNUC__ is too general here. It is possible to use gcc without using // libstdc++ (which is where cxxabi.h comes from). 
-# if GTEST_HAS_CXXABI_H_ -# include -# elif defined(__HP_aCC) -# include -# endif // GTEST_HASH_CXXABI_H_ +#if GTEST_HAS_CXXABI_H_ +#include +#elif defined(__HP_aCC) +#include +#endif // GTEST_HASH_CXXABI_H_ namespace testing { namespace internal { @@ -101,7 +103,9 @@ std::string GetTypeName() { // A unique type indicating an empty node struct None {}; -# define GTEST_TEMPLATE_ template class +#define GTEST_TEMPLATE_ \ + template \ + class // The template "selector" struct TemplateSel is used to // represent Tmpl, which must be a class template with one type @@ -119,8 +123,7 @@ struct TemplateSel { }; }; -# define GTEST_BIND_(TmplSel, T) \ - TmplSel::template Bind::type +#define GTEST_BIND_(TmplSel, T) TmplSel::template Bind::type template struct Templates { diff --git a/ext/googletest/googletest/samples/prime_tables.h b/ext/googletest/googletest/samples/prime_tables.h index 3a10352baa..7c0286e1ae 100644 --- a/ext/googletest/googletest/samples/prime_tables.h +++ b/ext/googletest/googletest/samples/prime_tables.h @@ -27,8 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - // This provides interface PrimeTable that determines whether a number is a // prime and determines a next prime number. This interface is used // in Google Test samples demonstrating use of parameterized tests. @@ -57,7 +55,7 @@ class OnTheFlyPrimeTable : public PrimeTable { bool IsPrime(int n) const override { if (n <= 1) return false; - for (int i = 2; i*i <= n; i++) { + for (int i = 2; i * i <= n; i++) { // n is divisible by an integer other than 1 and itself. if ((n % i) == 0) return false; } @@ -104,13 +102,13 @@ class PreCalculatedPrimeTable : public PrimeTable { // Checks every candidate for prime number (we know that 2 is the only even // prime). 
- for (int i = 2; i*i <= max; i += i%2+1) { + for (int i = 2; i * i <= max; i += i % 2 + 1) { if (!is_prime_[i]) continue; // Marks all multiples of i (except i itself) as non-prime. // We are starting here from i-th multiplier, because all smaller // complex numbers were already marked. - for (int j = i*i; j <= max; j += i) { + for (int j = i * i; j <= max; j += i) { is_prime_[j] = false; } } diff --git a/ext/googletest/googletest/samples/sample1.cc b/ext/googletest/googletest/samples/sample1.cc index 1d4275979f..80b69f415c 100644 --- a/ext/googletest/googletest/samples/sample1.cc +++ b/ext/googletest/googletest/samples/sample1.cc @@ -52,9 +52,9 @@ bool IsPrime(int n) { // Now, we have that n is odd and n >= 3. // Try to divide n by every odd number i, starting from 3 - for (int i = 3; ; i += 2) { + for (int i = 3;; i += 2) { // We only have to try i up to the square root of n - if (i > n/i) break; + if (i > n / i) break; // Now, we have i <= n/i < n. // If n is divisible by i, n is not prime. diff --git a/ext/googletest/googletest/samples/sample10_unittest.cc b/ext/googletest/googletest/samples/sample10_unittest.cc index 36cdac2279..95b4811b87 100644 --- a/ext/googletest/googletest/samples/sample10_unittest.cc +++ b/ext/googletest/googletest/samples/sample10_unittest.cc @@ -26,7 +26,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This sample shows how to use Google Test listener API to implement // a primitive leak checker. 
@@ -104,14 +103,15 @@ TEST(ListenersTest, LeaksWater) { } } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { InitGoogleTest(&argc, argv); bool check_for_leaks = false; - if (argc > 1 && strcmp(argv[1], "--check_for_leaks") == 0 ) + if (argc > 1 && strcmp(argv[1], "--check_for_leaks") == 0) check_for_leaks = true; else - printf("%s\n", "Run this program with --check_for_leaks to enable " + printf("%s\n", + "Run this program with --check_for_leaks to enable " "custom leak checking in the tests."); // If we are given the --check_for_leaks command line flag, installs the diff --git a/ext/googletest/googletest/samples/sample1_unittest.cc b/ext/googletest/googletest/samples/sample1_unittest.cc index cb08b61a59..60f2770ca0 100644 --- a/ext/googletest/googletest/samples/sample1_unittest.cc +++ b/ext/googletest/googletest/samples/sample1_unittest.cc @@ -34,14 +34,15 @@ // // Writing a unit test using Google C++ testing framework is easy as 1-2-3: - // Step 1. Include necessary header files such that the stuff your // test logic needs is declared. // // Don't forget gtest.h, which declares the testing framework. -#include #include "sample1.h" + +#include + #include "gtest/gtest.h" namespace { @@ -69,7 +70,6 @@ namespace { // // - // Tests Factorial(). // Tests factorial of negative numbers. @@ -97,9 +97,7 @@ TEST(FactorialTest, Negative) { } // Tests factorial of 0. -TEST(FactorialTest, Zero) { - EXPECT_EQ(1, Factorial(0)); -} +TEST(FactorialTest, Zero) { EXPECT_EQ(1, Factorial(0)); } // Tests factorial of positive numbers. TEST(FactorialTest, Positive) { @@ -109,7 +107,6 @@ TEST(FactorialTest, Positive) { EXPECT_EQ(40320, Factorial(8)); } - // Tests IsPrime() // Tests negative input. 
diff --git a/ext/googletest/googletest/samples/sample2.cc b/ext/googletest/googletest/samples/sample2.cc index d8e8723965..be7c4c9949 100644 --- a/ext/googletest/googletest/samples/sample2.cc +++ b/ext/googletest/googletest/samples/sample2.cc @@ -38,7 +38,7 @@ const char* MyString::CloneCString(const char* a_c_string) { if (a_c_string == nullptr) return nullptr; const size_t len = strlen(a_c_string); - char* const clone = new char[ len + 1 ]; + char* const clone = new char[len + 1]; memcpy(clone, a_c_string, len + 1); return clone; diff --git a/ext/googletest/googletest/samples/sample2.h b/ext/googletest/googletest/samples/sample2.h index 0f9868959d..15a1ce7781 100644 --- a/ext/googletest/googletest/samples/sample2.h +++ b/ext/googletest/googletest/samples/sample2.h @@ -34,7 +34,6 @@ #include - // A simple string class. class MyString { private: diff --git a/ext/googletest/googletest/samples/sample2_unittest.cc b/ext/googletest/googletest/samples/sample2_unittest.cc index 41e31c1767..cd734f943a 100644 --- a/ext/googletest/googletest/samples/sample2_unittest.cc +++ b/ext/googletest/googletest/samples/sample2_unittest.cc @@ -38,6 +38,7 @@ // needed. #include "sample2.h" + #include "gtest/gtest.h" namespace { // In this example, we test the MyString class (a simple string). @@ -77,8 +78,7 @@ const char kHelloString[] = "Hello, world!"; TEST(MyString, ConstructorFromCString) { const MyString s(kHelloString); EXPECT_EQ(0, strcmp(s.c_string(), kHelloString)); - EXPECT_EQ(sizeof(kHelloString)/sizeof(kHelloString[0]) - 1, - s.Length()); + EXPECT_EQ(sizeof(kHelloString) / sizeof(kHelloString[0]) - 1, s.Length()); } // Tests the copy c'tor. 
diff --git a/ext/googletest/googletest/samples/sample3-inl.h b/ext/googletest/googletest/samples/sample3-inl.h index 659e0f0bb5..bc3ffb9c40 100644 --- a/ext/googletest/googletest/samples/sample3-inl.h +++ b/ext/googletest/googletest/samples/sample3-inl.h @@ -34,7 +34,6 @@ #include - // Queue is a simple queue implemented as a singled-linked list. // // The element type must support copy constructor. @@ -62,7 +61,7 @@ class QueueNode { : element_(an_element), next_(nullptr) {} // We disable the default assignment operator and copy c'tor. - const QueueNode& operator = (const QueueNode&); + const QueueNode& operator=(const QueueNode&); QueueNode(const QueueNode&); E element_; @@ -84,7 +83,7 @@ class Queue { // 1. Deletes every node. QueueNode* node = head_; QueueNode* next = node->next(); - for (; ;) { + for (;;) { delete node; node = next; if (node == nullptr) break; @@ -162,11 +161,11 @@ class Queue { private: QueueNode* head_; // The first node of the queue. QueueNode* last_; // The last node of the queue. - size_t size_; // The number of elements in the queue. + size_t size_; // The number of elements in the queue. // We disallow copying a queue. Queue(const Queue&); - const Queue& operator = (const Queue&); + const Queue& operator=(const Queue&); }; #endif // GOOGLETEST_SAMPLES_SAMPLE3_INL_H_ diff --git a/ext/googletest/googletest/samples/sample3_unittest.cc b/ext/googletest/googletest/samples/sample3_unittest.cc index b19416d53c..71609c6a09 100644 --- a/ext/googletest/googletest/samples/sample3_unittest.cc +++ b/ext/googletest/googletest/samples/sample3_unittest.cc @@ -67,7 +67,6 @@ namespace { class QueueTestSmpl3 : public testing::Test { protected: // You should make the members protected s.t. they can be // accessed from sub-classes. - // virtual void SetUp() will be called before each test is run. You // should define it if you need to initialize the variables. // Otherwise, this can be skipped. 
@@ -85,15 +84,13 @@ class QueueTestSmpl3 : public testing::Test { // } // A helper function that some test uses. - static int Double(int n) { - return 2*n; - } + static int Double(int n) { return 2 * n; } // A helper function for testing Queue::Map(). - void MapTester(const Queue * q) { + void MapTester(const Queue* q) { // Creates a new queue, where each element is twice as big as the // corresponding one in q. - const Queue * const new_q = q->Map(Double); + const Queue* const new_q = q->Map(Double); // Verifies that the new queue has the same size as q. ASSERT_EQ(q->Size(), new_q->Size()); @@ -124,7 +121,7 @@ TEST_F(QueueTestSmpl3, DefaultConstructor) { // Tests Dequeue(). TEST_F(QueueTestSmpl3, Dequeue) { - int * n = q0_.Dequeue(); + int* n = q0_.Dequeue(); EXPECT_TRUE(n == nullptr); n = q1_.Dequeue(); diff --git a/ext/googletest/googletest/samples/sample4.cc b/ext/googletest/googletest/samples/sample4.cc index b0ee6093b4..489c89b0d3 100644 --- a/ext/googletest/googletest/samples/sample4.cc +++ b/ext/googletest/googletest/samples/sample4.cc @@ -29,26 +29,22 @@ // A sample program demonstrating using Google C++ testing framework. -#include - #include "sample4.h" +#include + // Returns the current counter value, and increments it. -int Counter::Increment() { - return counter_++; -} +int Counter::Increment() { return counter_++; } // Returns the current counter value, and decrements it. // counter can not be less than 0, return 0 in this case int Counter::Decrement() { if (counter_ == 0) { return counter_; - } else { + } else { return counter_--; } } // Prints the current counter value to STDOUT. 
-void Counter::Print() const { - printf("%d", counter_); -} +void Counter::Print() const { printf("%d", counter_); } diff --git a/ext/googletest/googletest/samples/sample4_unittest.cc b/ext/googletest/googletest/samples/sample4_unittest.cc index d5144c0d00..fb9973fe66 100644 --- a/ext/googletest/googletest/samples/sample4_unittest.cc +++ b/ext/googletest/googletest/samples/sample4_unittest.cc @@ -27,8 +27,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "sample4.h" + #include "gtest/gtest.h" namespace { diff --git a/ext/googletest/googletest/samples/sample5_unittest.cc b/ext/googletest/googletest/samples/sample5_unittest.cc index 0a21dd2157..cc8c0f012e 100644 --- a/ext/googletest/googletest/samples/sample5_unittest.cc +++ b/ext/googletest/googletest/samples/sample5_unittest.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This sample teaches how to reuse a test fixture in multiple test // cases by deriving sub-fixtures from it. // @@ -45,9 +44,10 @@ #include #include -#include "gtest/gtest.h" + #include "sample1.h" #include "sample3-inl.h" +#include "gtest/gtest.h" namespace { // In this sample, we want to ensure that every test finishes within // ~5 seconds. If a test takes longer to run, we consider it a @@ -81,7 +81,6 @@ class QuickTest : public testing::Test { time_t start_time_; }; - // We derive a fixture named IntegerFunctionTest from the QuickTest // fixture. All tests using this fixture will be automatically // required to be quick. @@ -90,7 +89,6 @@ class IntegerFunctionTest : public QuickTest { // Therefore the body is empty. }; - // Now we can write tests in the IntegerFunctionTest test case. 
// Tests Factorial() @@ -110,7 +108,6 @@ TEST_F(IntegerFunctionTest, Factorial) { EXPECT_EQ(40320, Factorial(8)); } - // Tests IsPrime() TEST_F(IntegerFunctionTest, IsPrime) { // Tests negative input. @@ -131,7 +128,6 @@ TEST_F(IntegerFunctionTest, IsPrime) { EXPECT_TRUE(IsPrime(23)); } - // The next test case (named "QueueTest") also needs to be quick, so // we derive another fixture from QuickTest. // @@ -163,13 +159,10 @@ class QueueTest : public QuickTest { Queue q2_; }; - // Now, let's write tests using the QueueTest fixture. // Tests the default constructor. -TEST_F(QueueTest, DefaultConstructor) { - EXPECT_EQ(0u, q0_.Size()); -} +TEST_F(QueueTest, DefaultConstructor) { EXPECT_EQ(0u, q0_.Size()); } // Tests Dequeue(). TEST_F(QueueTest, Dequeue) { diff --git a/ext/googletest/googletest/samples/sample6_unittest.cc b/ext/googletest/googletest/samples/sample6_unittest.cc index da317eed5a..cf576f0a53 100644 --- a/ext/googletest/googletest/samples/sample6_unittest.cc +++ b/ext/googletest/googletest/samples/sample6_unittest.cc @@ -27,13 +27,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This sample shows how to test common properties of multiple // implementations of the same interface (aka interface tests). // The interface and its implementations are in this header. #include "prime_tables.h" - #include "gtest/gtest.h" namespace { // First, we define some factory functions for creating instances of @@ -151,8 +149,7 @@ using testing::Types; // the PrimeTableTest fixture defined earlier: template -class PrimeTableTest2 : public PrimeTableTest { -}; +class PrimeTableTest2 : public PrimeTableTest {}; // Then, declare the test case. The argument is the name of the test // fixture, and also the name of the test case (as usual). 
The _P diff --git a/ext/googletest/googletest/samples/sample7_unittest.cc b/ext/googletest/googletest/samples/sample7_unittest.cc index e0efc29e4a..3ad22cab8d 100644 --- a/ext/googletest/googletest/samples/sample7_unittest.cc +++ b/ext/googletest/googletest/samples/sample7_unittest.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This sample shows how to test common properties of multiple // implementations of an interface (aka interface tests) using // value-parameterized tests. Each test in the test case has @@ -36,7 +35,6 @@ // The interface and its implementations are in this header. #include "prime_tables.h" - #include "gtest/gtest.h" namespace { @@ -50,9 +48,7 @@ using ::testing::Values; // SetUp() method and delete them in TearDown() method. typedef PrimeTable* CreatePrimeTableFunc(); -PrimeTable* CreateOnTheFlyPrimeTable() { - return new OnTheFlyPrimeTable(); -} +PrimeTable* CreateOnTheFlyPrimeTable() { return new OnTheFlyPrimeTable(); } template PrimeTable* CreatePreCalculatedPrimeTable() { diff --git a/ext/googletest/googletest/samples/sample8_unittest.cc b/ext/googletest/googletest/samples/sample8_unittest.cc index 10488b0ea4..9717e28608 100644 --- a/ext/googletest/googletest/samples/sample8_unittest.cc +++ b/ext/googletest/googletest/samples/sample8_unittest.cc @@ -27,14 +27,12 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This sample shows how to test code relying on some global flag variables. // Combine() helps with generating all possible combinations of such flags, // and each test is given one combination as a parameter. // Use class definitions to test from this header. 
#include "prime_tables.h" - #include "gtest/gtest.h" namespace { @@ -79,10 +77,10 @@ class HybridPrimeTable : public PrimeTable { int max_precalculated_; }; -using ::testing::TestWithParam; using ::testing::Bool; -using ::testing::Values; using ::testing::Combine; +using ::testing::TestWithParam; +using ::testing::Values; // To test all code paths for HybridPrimeTable we must test it with numbers // both within and outside PreCalculatedPrimeTable's capacity and also with diff --git a/ext/googletest/googletest/samples/sample9_unittest.cc b/ext/googletest/googletest/samples/sample9_unittest.cc index e502d08d73..d627ea7d57 100644 --- a/ext/googletest/googletest/samples/sample9_unittest.cc +++ b/ext/googletest/googletest/samples/sample9_unittest.cc @@ -26,10 +26,9 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This sample shows how to use Google Test listener API to implement // an alternative console output and how to use the UnitTest reflection API -// to enumerate test cases and tests and to inspect their results. +// to enumerate test suites and tests and to inspect their results. #include @@ -38,10 +37,10 @@ using ::testing::EmptyTestEventListener; using ::testing::InitGoogleTest; using ::testing::Test; -using ::testing::TestCase; using ::testing::TestEventListeners; using ::testing::TestInfo; using ::testing::TestPartResult; +using ::testing::TestSuite; using ::testing::UnitTest; namespace { // Provides alternative output mode which produces minimal amount of @@ -59,29 +58,23 @@ class TersePrinter : public EmptyTestEventListener { // Called before a test starts. 
void OnTestStart(const TestInfo& test_info) override { - fprintf(stdout, - "*** Test %s.%s starting.\n", - test_info.test_case_name(), + fprintf(stdout, "*** Test %s.%s starting.\n", test_info.test_suite_name(), test_info.name()); fflush(stdout); } // Called after a failed assertion or a SUCCEED() invocation. void OnTestPartResult(const TestPartResult& test_part_result) override { - fprintf(stdout, - "%s in %s:%d\n%s\n", + fprintf(stdout, "%s in %s:%d\n%s\n", test_part_result.failed() ? "*** Failure" : "Success", - test_part_result.file_name(), - test_part_result.line_number(), + test_part_result.file_name(), test_part_result.line_number(), test_part_result.summary()); fflush(stdout); } // Called after a test ends. void OnTestEnd(const TestInfo& test_info) override { - fprintf(stdout, - "*** Test %s.%s ending.\n", - test_info.test_case_name(), + fprintf(stdout, "*** Test %s.%s ending.\n", test_info.test_suite_name(), test_info.name()); fflush(stdout); } @@ -101,14 +94,15 @@ TEST(CustomOutputTest, Fails) { } } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { InitGoogleTest(&argc, argv); bool terse_output = false; - if (argc > 1 && strcmp(argv[1], "--terse_output") == 0 ) + if (argc > 1 && strcmp(argv[1], "--terse_output") == 0) terse_output = true; else - printf("%s\n", "Run this program with --terse_output to change the way " + printf("%s\n", + "Run this program with --terse_output to change the way " "it prints its output."); UnitTest& unit_test = *UnitTest::GetInstance(); @@ -149,8 +143,7 @@ int main(int argc, char **argv) { } // Test that were meant to fail should not affect the test program outcome. 
- if (unexpectedly_failed_tests == 0) - ret_val = 0; + if (unexpectedly_failed_tests == 0) ret_val = 0; return ret_val; } diff --git a/ext/googletest/googletest/scripts/README.md b/ext/googletest/googletest/scripts/README.md deleted file mode 100644 index fa359fed92..0000000000 --- a/ext/googletest/googletest/scripts/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Please Note: - -Files in this directory are no longer supported by the maintainers. They -represent mosty historical artifacts and supported by the community only. There -is no guarantee whatsoever that these scripts still work. diff --git a/ext/googletest/googletest/scripts/common.py b/ext/googletest/googletest/scripts/common.py deleted file mode 100644 index 3c0347a75b..0000000000 --- a/ext/googletest/googletest/scripts/common.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2013 Google Inc. All Rights Reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Shared utilities for writing scripts for Google Test/Mock.""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - - -import os -import re - - -# Matches the line from 'svn info .' output that describes what SVN -# path the current local directory corresponds to. For example, in -# a googletest SVN workspace's trunk/test directory, the output will be: -# -# URL: https://googletest.googlecode.com/svn/trunk/test -_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)') - - -def GetCommandOutput(command): - """Runs the shell command and returns its stdout as a list of lines.""" - - f = os.popen(command, 'r') - lines = [line.strip() for line in f.readlines()] - f.close() - return lines - - -def GetSvnInfo(): - """Returns the project name and the current SVN workspace's root path.""" - - for line in GetCommandOutput('svn info .'): - m = _SVN_INFO_URL_RE.match(line) - if m: - project = m.group(1) # googletest or googlemock - rel_path = m.group(2) - root = os.path.realpath(rel_path.count('/') * '../') - return project, root - - return None, None - - -def GetSvnTrunk(): - """Returns the current SVN workspace's trunk root path.""" - - _, root = GetSvnInfo() - return root + '/trunk' if root else None - - -def IsInGTestSvn(): - project, _ = GetSvnInfo() - return project == 'googletest' - - -def IsInGMockSvn(): - project, _ = GetSvnInfo() - return project == 'googlemock' diff --git 
a/ext/googletest/googletest/scripts/fuse_gtest_files.py b/ext/googletest/googletest/scripts/fuse_gtest_files.py deleted file mode 100755 index d0dd464fe8..0000000000 --- a/ext/googletest/googletest/scripts/fuse_gtest_files.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""fuse_gtest_files.py v0.2.0 -Fuses Google Test source code into a .h file and a .cc file. 
- -SYNOPSIS - fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR - - Scans GTEST_ROOT_DIR for Google Test source code, and generates - two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc. - Then you can build your tests by adding OUTPUT_DIR to the include - search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These - two files contain everything you need to use Google Test. Hence - you can "install" Google Test by copying them to wherever you want. - - GTEST_ROOT_DIR can be omitted and defaults to the parent - directory of the directory holding this script. - -EXAMPLES - ./fuse_gtest_files.py fused_gtest - ./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest - -This tool is experimental. In particular, it assumes that there is no -conditional inclusion of Google Test headers. Please report any -problems to googletestframework@googlegroups.com. You can read -https://github.com/google/googletest/blob/master/googletest/docs/advanced.md for -more information. -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import re -try: - from sets import Set as set # For Python 2.3 compatibility -except ImportError: - pass -import sys - -# We assume that this file is in the scripts/ directory in the Google -# Test root directory. -DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') - -# Regex for matching '#include "gtest/..."'. -INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"') - -# Regex for matching '#include "src/..."'. -INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"') - -# Where to find the source seed files. -GTEST_H_SEED = 'include/gtest/gtest.h' -GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h' -GTEST_ALL_CC_SEED = 'src/gtest-all.cc' - -# Where to put the generated files. -GTEST_H_OUTPUT = 'gtest/gtest.h' -GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc' - - -def VerifyFileExists(directory, relative_path): - """Verifies that the given file exists; aborts on failure. 
- - relative_path is the file path relative to the given directory. - """ - - if not os.path.isfile(os.path.join(directory, relative_path)): - print('ERROR: Cannot find %s in directory %s.' % (relative_path, - directory)) - print('Please either specify a valid project root directory ' - 'or omit it on the command line.') - sys.exit(1) - - -def ValidateGTestRootDir(gtest_root): - """Makes sure gtest_root points to a valid gtest root directory. - - The function aborts the program on failure. - """ - - VerifyFileExists(gtest_root, GTEST_H_SEED) - VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED) - - -def VerifyOutputFile(output_dir, relative_path): - """Verifies that the given output file path is valid. - - relative_path is relative to the output_dir directory. - """ - - # Makes sure the output file either doesn't exist or can be overwritten. - output_file = os.path.join(output_dir, relative_path) - if os.path.exists(output_file): - # TODO(wan@google.com): The following user-interaction doesn't - # work with automated processes. We should provide a way for the - # Makefile to force overwriting the files. - print('%s already exists in directory %s - overwrite it? (y/N) ' % - (relative_path, output_dir)) - answer = sys.stdin.readline().strip() - if answer not in ['y', 'Y']: - print('ABORTED.') - sys.exit(1) - - # Makes sure the directory holding the output file exists; creates - # it and all its ancestors if necessary. - parent_directory = os.path.dirname(output_file) - if not os.path.isdir(parent_directory): - os.makedirs(parent_directory) - - -def ValidateOutputDir(output_dir): - """Makes sure output_dir points to a valid output directory. - - The function aborts the program on failure. 
- """ - - VerifyOutputFile(output_dir, GTEST_H_OUTPUT) - VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT) - - -def FuseGTestH(gtest_root, output_dir): - """Scans folder gtest_root to generate gtest/gtest.h in output_dir.""" - - output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') - processed_files = set() # Holds all gtest headers we've processed. - - def ProcessFile(gtest_header_path): - """Processes the given gtest header file.""" - - # We don't process the same header twice. - if gtest_header_path in processed_files: - return - - processed_files.add(gtest_header_path) - - # Reads each line in the given gtest header. - for line in open(os.path.join(gtest_root, gtest_header_path), 'r'): - m = INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - # It's '#include "gtest/..."' - let's process it recursively. - ProcessFile('include/' + m.group(1)) - else: - # Otherwise we copy the line unchanged to the output file. - output_file.write(line) - - ProcessFile(GTEST_H_SEED) - output_file.close() - - -def FuseGTestAllCcToFile(gtest_root, output_file): - """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.""" - - processed_files = set() - - def ProcessFile(gtest_source_file): - """Processes the given gtest source file.""" - - # We don't process the same #included file twice. - if gtest_source_file in processed_files: - return - - processed_files.add(gtest_source_file) - - # Reads each line in the given gtest source file. - for line in open(os.path.join(gtest_root, gtest_source_file), 'r'): - m = INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - if 'include/' + m.group(1) == GTEST_SPI_H_SEED: - # It's '#include "gtest/gtest-spi.h"'. This file is not - # #included by "gtest/gtest.h", so we need to process it. - ProcessFile(GTEST_SPI_H_SEED) - else: - # It's '#include "gtest/foo.h"' where foo is not gtest-spi. 
- # We treat it as '#include "gtest/gtest.h"', as all other - # gtest headers are being fused into gtest.h and cannot be - # #included directly. - - # There is no need to #include "gtest/gtest.h" more than once. - if not GTEST_H_SEED in processed_files: - processed_files.add(GTEST_H_SEED) - output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,)) - else: - m = INCLUDE_SRC_FILE_REGEX.match(line) - if m: - # It's '#include "src/foo"' - let's process it recursively. - ProcessFile(m.group(1)) - else: - output_file.write(line) - - ProcessFile(GTEST_ALL_CC_SEED) - - -def FuseGTestAllCc(gtest_root, output_dir): - """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.""" - - output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') - FuseGTestAllCcToFile(gtest_root, output_file) - output_file.close() - - -def FuseGTest(gtest_root, output_dir): - """Fuses gtest.h and gtest-all.cc.""" - - ValidateGTestRootDir(gtest_root) - ValidateOutputDir(output_dir) - - FuseGTestH(gtest_root, output_dir) - FuseGTestAllCc(gtest_root, output_dir) - - -def main(): - argc = len(sys.argv) - if argc == 2: - # fuse_gtest_files.py OUTPUT_DIR - FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1]) - elif argc == 3: - # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR - FuseGTest(sys.argv[1], sys.argv[2]) - else: - print(__doc__) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/ext/googletest/googletest/scripts/gen_gtest_pred_impl.py b/ext/googletest/googletest/scripts/gen_gtest_pred_impl.py deleted file mode 100755 index e09a6e0177..0000000000 --- a/ext/googletest/googletest/scripts/gen_gtest_pred_impl.py +++ /dev/null @@ -1,733 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2006, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""gen_gtest_pred_impl.py v0.1 - -Generates the implementation of Google Test predicate assertions and -accompanying tests. - -Usage: - - gen_gtest_pred_impl.py MAX_ARITY - -where MAX_ARITY is a positive integer. - -The command generates the implementation of up-to MAX_ARITY-ary -predicate assertions, and writes it to file gtest_pred_impl.h in the -directory where the script is. It also generates the accompanying -unit test in file gtest_pred_impl_unittest.cc. 
-""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import sys -import time - -# Where this script is. -SCRIPT_DIR = os.path.dirname(sys.argv[0]) - -# Where to store the generated header. -HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h') - -# Where to store the generated unit test. -UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc') - - -def HeaderPreamble(n): - """Returns the preamble for the header file. - - Args: - n: the maximum arity of the predicate macros to be generated. - """ - - # A map that defines the values used in the preamble template. - DEFS = { - 'today' : time.strftime('%m/%d/%Y'), - 'year' : time.strftime('%Y'), - 'command' : '%s %s' % (os.path.basename(sys.argv[0]), n), - 'n' : n - } - - return ( - """// Copyright 2006, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is AUTOMATICALLY GENERATED on %(today)s by command -// '%(command)s'. DO NOT EDIT BY HAND! -// -// Implements a family of generic predicate assertion macros. -// GOOGLETEST_CM0001 DO NOT DELETE - - -#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ -#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ - -#include "gtest/gtest.h" - -namespace testing { - -// This header implements a family of generic predicate assertion -// macros: -// -// ASSERT_PRED_FORMAT1(pred_format, v1) -// ASSERT_PRED_FORMAT2(pred_format, v1, v2) -// ... -// -// where pred_format is a function or functor that takes n (in the -// case of ASSERT_PRED_FORMATn) values and their source expression -// text, and returns a testing::AssertionResult. See the definition -// of ASSERT_EQ in gtest.h for an example. -// -// If you don't care about formatting, you can use the more -// restrictive version: -// -// ASSERT_PRED1(pred, v1) -// ASSERT_PRED2(pred, v1, v2) -// ... -// -// where pred is an n-ary function or functor that returns bool, -// and the values v1, v2, ..., must support the << operator for -// streaming to std::ostream. -// -// We also define the EXPECT_* variations. -// -// For now we only support predicates whose arity is at most %(n)s. -// Please email googletestframework@googlegroups.com if you need -// support for higher arities. - -// GTEST_ASSERT_ is the basic statement to which all of the assertions -// in this file reduce. 
Don't use this in your code. - -#define GTEST_ASSERT_(expression, on_failure) \\ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\ - if (const ::testing::AssertionResult gtest_ar = (expression)) \\ - ; \\ - else \\ - on_failure(gtest_ar.failure_message()) -""" % DEFS) - - -def Arity(n): - """Returns the English name of the given arity.""" - - if n < 0: - return None - elif n <= 3: - return ['nullary', 'unary', 'binary', 'ternary'][n] - else: - return '%s-ary' % n - - -def Title(word): - """Returns the given word in title case. The difference between - this and string's title() method is that Title('4-ary') is '4-ary' - while '4-ary'.title() is '4-Ary'.""" - - return word[0].upper() + word[1:] - - -def OneTo(n): - """Returns the list [1, 2, 3, ..., n].""" - - return range(1, n + 1) - - -def Iter(n, format, sep=''): - """Given a positive integer n, a format string that contains 0 or - more '%s' format specs, and optionally a separator string, returns - the join of n strings, each formatted with the format string on an - iterator ranged from 1 to n. - - Example: - - Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'. - """ - - # How many '%s' specs are in format? - spec_count = len(format.split('%s')) - 1 - return sep.join([format % (spec_count * (i,)) for i in OneTo(n)]) - - -def ImplementationForArity(n): - """Returns the implementation of n-ary predicate assertions.""" - - # A map the defines the values used in the implementation template. - DEFS = { - 'n' : str(n), - 'vs' : Iter(n, 'v%s', sep=', '), - 'vts' : Iter(n, '#v%s', sep=', '), - 'arity' : Arity(n), - 'Arity' : Title(Arity(n)) - } - - impl = """ - -// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use -// this in your code. 
-template -AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS - - impl += Iter(n, """, - const char* e%s""") - - impl += """, - Pred pred""" - - impl += Iter(n, """, - const T%s& v%s""") - - impl += """) { - if (pred(%(vs)s)) return AssertionSuccess(); - -""" % DEFS - - impl += ' return AssertionFailure() << pred_text << "("' - - impl += Iter(n, """ - << e%s""", sep=' << ", "') - - impl += ' << ") evaluates to false, where"' - - impl += Iter( - n, """ - << "\\n" << e%s << " evaluates to " << ::testing::PrintToString(v%s)""" - ) - - impl += """; -} - -// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s. -// Don't use this in your code. -#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\ - GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\ - on_failure) - -// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use -// this in your code. -#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\ - GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS - - impl += Iter(n, """, \\ - #v%s""") - - impl += """, \\ - pred""" - - impl += Iter(n, """, \\ - v%s""") - - impl += """), on_failure) - -// %(Arity)s predicate assertion macros. -#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ - GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_) -#define EXPECT_PRED%(n)s(pred, %(vs)s) \\ - GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_) -#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ - GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_) -#define ASSERT_PRED%(n)s(pred, %(vs)s) \\ - GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_) - -""" % DEFS - - return impl - - -def HeaderPostamble(): - """Returns the postamble for the header file.""" - - return """ - -} // namespace testing - -#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ -""" - - -def GenerateFile(path, content): - """Given a file path and a content string - overwrites it with the given content. 
- """ - print 'Updating file %s . . .' % path - f = file(path, 'w+') - print >>f, content, - f.close() - - print 'File %s has been updated.' % path - - -def GenerateHeader(n): - """Given the maximum arity n, updates the header file that implements - the predicate assertions. - """ - GenerateFile(HEADER, - HeaderPreamble(n) - + ''.join([ImplementationForArity(i) for i in OneTo(n)]) - + HeaderPostamble()) - - -def UnitTestPreamble(): - """Returns the preamble for the unit test file.""" - - # A map that defines the values used in the preamble template. - DEFS = { - 'today' : time.strftime('%m/%d/%Y'), - 'year' : time.strftime('%Y'), - 'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]), - } - - return ( - """// Copyright 2006, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is AUTOMATICALLY GENERATED on %(today)s by command -// '%(command)s'. DO NOT EDIT BY HAND! - -// Regression test for gtest_pred_impl.h -// -// This file is generated by a script and quite long. If you intend to -// learn how Google Test works by reading its unit tests, read -// gtest_unittest.cc instead. -// -// This is intended as a regression test for the Google Test predicate -// assertions. We compile it as part of the gtest_unittest target -// only to keep the implementation tidy and compact, as it is quite -// involved to set up the stage for testing Google Test using Google -// Test itself. -// -// Currently, gtest_unittest takes ~11 seconds to run in the testing -// daemon. In the future, if it grows too large and needs much more -// time to finish, we should consider separating this file into a -// stand-alone regression test. - -#include - -#include "gtest/gtest.h" -#include "gtest/gtest-spi.h" - -// A user-defined data type. -struct Bool { - explicit Bool(int val) : value(val != 0) {} - - bool operator>(int n) const { return value > Bool(n).value; } - - Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); } - - bool operator==(const Bool& rhs) const { return value == rhs.value; } - - bool value; -}; - -// Enables Bool to be used in assertions. -std::ostream& operator<<(std::ostream& os, const Bool& x) { - return os << (x.value ? 
"true" : "false"); -} - -""" % DEFS) - - -def TestsForArity(n): - """Returns the tests for n-ary predicate assertions.""" - - # A map that defines the values used in the template for the tests. - DEFS = { - 'n' : n, - 'es' : Iter(n, 'e%s', sep=', '), - 'vs' : Iter(n, 'v%s', sep=', '), - 'vts' : Iter(n, '#v%s', sep=', '), - 'tvs' : Iter(n, 'T%s v%s', sep=', '), - 'int_vs' : Iter(n, 'int v%s', sep=', '), - 'Bool_vs' : Iter(n, 'Bool v%s', sep=', '), - 'types' : Iter(n, 'typename T%s', sep=', '), - 'v_sum' : Iter(n, 'v%s', sep=' + '), - 'arity' : Arity(n), - 'Arity' : Title(Arity(n)), - } - - tests = ( - """// Sample functions/functors for testing %(arity)s predicate assertions. - -// A %(arity)s predicate function. -template <%(types)s> -bool PredFunction%(n)s(%(tvs)s) { - return %(v_sum)s > 0; -} - -// The following two functions are needed because a compiler doesn't have -// a context yet to know which template function must be instantiated. -bool PredFunction%(n)sInt(%(int_vs)s) { - return %(v_sum)s > 0; -} -bool PredFunction%(n)sBool(%(Bool_vs)s) { - return %(v_sum)s > 0; -} -""" % DEFS) - - tests += """ -// A %(arity)s predicate functor. -struct PredFunctor%(n)s { - template <%(types)s> - bool operator()(""" % DEFS - - tests += Iter(n, 'const T%s& v%s', sep=""", - """) - - tests += """) { - return %(v_sum)s > 0; - } -}; -""" % DEFS - - tests += """ -// A %(arity)s predicate-formatter function. -template <%(types)s> -testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS - - tests += Iter(n, 'const char* e%s', sep=""", - """) - - tests += Iter(n, """, - const T%s& v%s""") - - tests += """) { - if (PredFunction%(n)s(%(vs)s)) - return testing::AssertionSuccess(); - - return testing::AssertionFailure() - << """ % DEFS - - tests += Iter(n, 'e%s', sep=' << " + " << ') - - tests += """ - << " is expected to be positive, but evaluates to " - << %(v_sum)s << "."; -} -""" % DEFS - - tests += """ -// A %(arity)s predicate-formatter functor. 
-struct PredFormatFunctor%(n)s { - template <%(types)s> - testing::AssertionResult operator()(""" % DEFS - - tests += Iter(n, 'const char* e%s', sep=""", - """) - - tests += Iter(n, """, - const T%s& v%s""") - - tests += """) const { - return PredFormatFunction%(n)s(%(es)s, %(vs)s); - } -}; -""" % DEFS - - tests += """ -// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s. - -class Predicate%(n)sTest : public testing::Test { - protected: - void SetUp() override { - expected_to_finish_ = true; - finished_ = false;""" % DEFS - - tests += """ - """ + Iter(n, 'n%s_ = ') + """0; - } -""" - - tests += """ - void TearDown() override { - // Verifies that each of the predicate's arguments was evaluated - // exactly once.""" - - tests += ''.join([""" - EXPECT_EQ(1, n%s_) << - "The predicate assertion didn't evaluate argument %s " - "exactly once.";""" % (i, i + 1) for i in OneTo(n)]) - - tests += """ - - // Verifies that the control flow in the test function is expected. - if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; - } else if (!expected_to_finish_ && finished_) { - FAIL() << "The failed predicate assertion didn't abort the test " - "as expected."; - } - } - - // true if and only if the test function is expected to run to finish. - static bool expected_to_finish_; - - // true if and only if the test function did run to finish. 
- static bool finished_; -""" % DEFS - - tests += Iter(n, """ - static int n%s_;""") - - tests += """ -}; - -bool Predicate%(n)sTest::expected_to_finish_; -bool Predicate%(n)sTest::finished_; -""" % DEFS - - tests += Iter(n, """int Predicate%%(n)sTest::n%s_; -""") % DEFS - - tests += """ -typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest; -typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest; -typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest; -typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest; -""" % DEFS - - def GenTest(use_format, use_assert, expect_failure, - use_functor, use_user_type): - """Returns the test for a predicate assertion macro. - - Args: - use_format: true if and only if the assertion is a *_PRED_FORMAT*. - use_assert: true if and only if the assertion is a ASSERT_*. - expect_failure: true if and only if the assertion is expected to fail. - use_functor: true if and only if the first argument of the assertion is - a functor (as opposed to a function) - use_user_type: true if and only if the predicate functor/function takes - argument(s) of a user-defined type. - - Example: - - GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior - of a successful EXPECT_PRED_FORMATn() that takes a functor - whose arguments have built-in types.""" - - if use_assert: - assrt = 'ASSERT' # 'assert' is reserved, so we cannot use - # that identifier here. 
- else: - assrt = 'EXPECT' - - assertion = assrt + '_PRED' - - if use_format: - pred_format = 'PredFormat' - assertion += '_FORMAT' - else: - pred_format = 'Pred' - - assertion += '%(n)s' % DEFS - - if use_functor: - pred_format_type = 'functor' - pred_format += 'Functor%(n)s()' - else: - pred_format_type = 'function' - pred_format += 'Function%(n)s' - if not use_format: - if use_user_type: - pred_format += 'Bool' - else: - pred_format += 'Int' - - test_name = pred_format_type.title() - - if use_user_type: - arg_type = 'user-defined type (Bool)' - test_name += 'OnUserType' - if expect_failure: - arg = 'Bool(n%s_++)' - else: - arg = 'Bool(++n%s_)' - else: - arg_type = 'built-in type (int)' - test_name += 'OnBuiltInType' - if expect_failure: - arg = 'n%s_++' - else: - arg = '++n%s_' - - if expect_failure: - successful_or_failed = 'failed' - expected_or_not = 'expected.' - test_name += 'Failure' - else: - successful_or_failed = 'successful' - expected_or_not = 'UNEXPECTED!' - test_name += 'Success' - - # A map that defines the values used in the test template. - defs = DEFS.copy() - defs.update({ - 'assert' : assrt, - 'assertion' : assertion, - 'test_name' : test_name, - 'pf_type' : pred_format_type, - 'pf' : pred_format, - 'arg_type' : arg_type, - 'arg' : arg, - 'successful' : successful_or_failed, - 'expected' : expected_or_not, - }) - - test = """ -// Tests a %(successful)s %(assertion)s where the -// predicate-formatter is a %(pf_type)s on a %(arg_type)s. 
-TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs - - indent = (len(assertion) + 3)*' ' - extra_indent = '' - - if expect_failure: - extra_indent = ' ' - if use_assert: - test += """ - expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT""" - else: - test += """ - EXPECT_NONFATAL_FAILURE({ // NOLINT""" - - test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs - - test = test % defs - test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs) - test += ');\n' + extra_indent + ' finished_ = true;\n' - - if expect_failure: - test += ' }, "");\n' - - test += '}\n' - return test - - # Generates tests for all 2**6 = 64 combinations. - tests += ''.join([GenTest(use_format, use_assert, expect_failure, - use_functor, use_user_type) - for use_format in [0, 1] - for use_assert in [0, 1] - for expect_failure in [0, 1] - for use_functor in [0, 1] - for use_user_type in [0, 1] - ]) - - return tests - - -def UnitTestPostamble(): - """Returns the postamble for the tests.""" - - return '' - - -def GenerateUnitTest(n): - """Returns the tests for up-to n-ary predicate assertions.""" - - GenerateFile(UNIT_TEST, - UnitTestPreamble() - + ''.join([TestsForArity(i) for i in OneTo(n)]) - + UnitTestPostamble()) - - -def _Main(): - """The entry point of the script. Generates the header file and its - unit test.""" - - if len(sys.argv) != 2: - print __doc__ - print 'Author: ' + __author__ - sys.exit(1) - - n = int(sys.argv[1]) - GenerateHeader(n) - GenerateUnitTest(n) - - -if __name__ == '__main__': - _Main() diff --git a/ext/googletest/googletest/scripts/gtest-config.in b/ext/googletest/googletest/scripts/gtest-config.in deleted file mode 100755 index 780f8432ef..0000000000 --- a/ext/googletest/googletest/scripts/gtest-config.in +++ /dev/null @@ -1,274 +0,0 @@ -#!/bin/sh - -# These variables are automatically filled in by the configure script. 
-name="@PACKAGE_TARNAME@" -version="@PACKAGE_VERSION@" - -show_usage() -{ - echo "Usage: gtest-config [OPTIONS...]" -} - -show_help() -{ - show_usage - cat <<\EOF - -The `gtest-config' script provides access to the necessary compile and linking -flags to connect with Google C++ Testing Framework, both in a build prior to -installation, and on the system proper after installation. The installation -overrides may be issued in combination with any other queries, but will only -affect installation queries if called on a built but not installed gtest. The -installation queries may not be issued with any other types of queries, and -only one installation query may be made at a time. The version queries and -compiler flag queries may be combined as desired but not mixed. Different -version queries are always combined with logical "and" semantics, and only the -last of any particular query is used while all previous ones ignored. All -versions must be specified as a sequence of numbers separated by periods. -Compiler flag queries output the union of the sets of flags when combined. - - Examples: - gtest-config --min-version=1.0 || echo "Insufficient Google Test version." - - g++ $(gtest-config --cppflags --cxxflags) -o foo.o -c foo.cpp - g++ $(gtest-config --ldflags --libs) -o foo foo.o - - # When using a built but not installed Google Test: - g++ $(../../my_gtest_build/scripts/gtest-config ...) ... - - # When using an installed Google Test, but with installation overrides: - export GTEST_PREFIX="/opt" - g++ $(gtest-config --libdir="/opt/lib64" ...) ... 
- - Help: - --usage brief usage information - --help display this help message - - Installation Overrides: - --prefix= overrides the installation prefix - --exec-prefix= overrides the executable installation prefix - --libdir= overrides the library installation prefix - --includedir= overrides the header file installation prefix - - Installation Queries: - --prefix installation prefix - --exec-prefix executable installation prefix - --libdir library installation directory - --includedir header file installation directory - --version the version of the Google Test installation - - Version Queries: - --min-version=VERSION return 0 if the version is at least VERSION - --exact-version=VERSION return 0 if the version is exactly VERSION - --max-version=VERSION return 0 if the version is at most VERSION - - Compilation Flag Queries: - --cppflags compile flags specific to the C-like preprocessors - --cxxflags compile flags appropriate for C++ programs - --ldflags linker flags - --libs libraries for linking - -EOF -} - -# This function bounds our version with a min and a max. It uses some clever -# POSIX-compliant variable expansion to portably do all the work in the shell -# and avoid any dependency on a particular "sed" or "awk" implementation. -# Notable is that it will only ever compare the first 3 components of versions. -# Further components will be cleanly stripped off. All versions must be -# unadorned, so "v1.0" will *not* work. The minimum version must be in $1, and -# the max in $2. TODO(chandlerc@google.com): If this ever breaks, we should -# investigate expanding this via autom4te from AS_VERSION_COMPARE rather than -# continuing to maintain our own shell version. 
-check_versions() -{ - major_version=${version%%.*} - minor_version="0" - point_version="0" - if test "${version#*.}" != "${version}"; then - minor_version=${version#*.} - minor_version=${minor_version%%.*} - fi - if test "${version#*.*.}" != "${version}"; then - point_version=${version#*.*.} - point_version=${point_version%%.*} - fi - - min_version="$1" - min_major_version=${min_version%%.*} - min_minor_version="0" - min_point_version="0" - if test "${min_version#*.}" != "${min_version}"; then - min_minor_version=${min_version#*.} - min_minor_version=${min_minor_version%%.*} - fi - if test "${min_version#*.*.}" != "${min_version}"; then - min_point_version=${min_version#*.*.} - min_point_version=${min_point_version%%.*} - fi - - max_version="$2" - max_major_version=${max_version%%.*} - max_minor_version="0" - max_point_version="0" - if test "${max_version#*.}" != "${max_version}"; then - max_minor_version=${max_version#*.} - max_minor_version=${max_minor_version%%.*} - fi - if test "${max_version#*.*.}" != "${max_version}"; then - max_point_version=${max_version#*.*.} - max_point_version=${max_point_version%%.*} - fi - - test $(($major_version)) -lt $(($min_major_version)) && exit 1 - if test $(($major_version)) -eq $(($min_major_version)); then - test $(($minor_version)) -lt $(($min_minor_version)) && exit 1 - if test $(($minor_version)) -eq $(($min_minor_version)); then - test $(($point_version)) -lt $(($min_point_version)) && exit 1 - fi - fi - - test $(($major_version)) -gt $(($max_major_version)) && exit 1 - if test $(($major_version)) -eq $(($max_major_version)); then - test $(($minor_version)) -gt $(($max_minor_version)) && exit 1 - if test $(($minor_version)) -eq $(($max_minor_version)); then - test $(($point_version)) -gt $(($max_point_version)) && exit 1 - fi - fi - - exit 0 -} - -# Show the usage line when no arguments are specified. 
-if test $# -eq 0; then - show_usage - exit 1 -fi - -while test $# -gt 0; do - case $1 in - --usage) show_usage; exit 0;; - --help) show_help; exit 0;; - - # Installation overrides - --prefix=*) GTEST_PREFIX=${1#--prefix=};; - --exec-prefix=*) GTEST_EXEC_PREFIX=${1#--exec-prefix=};; - --libdir=*) GTEST_LIBDIR=${1#--libdir=};; - --includedir=*) GTEST_INCLUDEDIR=${1#--includedir=};; - - # Installation queries - --prefix|--exec-prefix|--libdir|--includedir|--version) - if test -n "${do_query}"; then - show_usage - exit 1 - fi - do_query=${1#--} - ;; - - # Version checking - --min-version=*) - do_check_versions=yes - min_version=${1#--min-version=} - ;; - --max-version=*) - do_check_versions=yes - max_version=${1#--max-version=} - ;; - --exact-version=*) - do_check_versions=yes - exact_version=${1#--exact-version=} - ;; - - # Compiler flag output - --cppflags) echo_cppflags=yes;; - --cxxflags) echo_cxxflags=yes;; - --ldflags) echo_ldflags=yes;; - --libs) echo_libs=yes;; - - # Everything else is an error - *) show_usage; exit 1;; - esac - shift -done - -# These have defaults filled in by the configure script but can also be -# overridden by environment variables or command line parameters. -prefix="${GTEST_PREFIX:-@prefix@}" -exec_prefix="${GTEST_EXEC_PREFIX:-@exec_prefix@}" -libdir="${GTEST_LIBDIR:-@libdir@}" -includedir="${GTEST_INCLUDEDIR:-@includedir@}" - -# We try and detect if our binary is not located at its installed location. If -# it's not, we provide variables pointing to the source and build tree rather -# than to the install tree. This allows building against a just-built gtest -# rather than an installed gtest. -bindir="@bindir@" -this_relative_bindir=`dirname $0` -this_bindir=`cd ${this_relative_bindir}; pwd -P` -if test "${this_bindir}" = "${this_bindir%${bindir}}"; then - # The path to the script doesn't end in the bindir sequence from Autoconf, - # assume that we are in a build tree. 
- build_dir=`dirname ${this_bindir}` - src_dir=`cd ${this_bindir}; cd @top_srcdir@; pwd -P` - - # TODO(chandlerc@google.com): This is a dangerous dependency on libtool, we - # should work to remove it, and/or remove libtool altogether, replacing it - # with direct references to the library and a link path. - gtest_libs="${build_dir}/lib/libgtest.la @PTHREAD_CFLAGS@ @PTHREAD_LIBS@" - gtest_ldflags="" - - # We provide hooks to include from either the source or build dir, where the - # build dir is always preferred. This will potentially allow us to write - # build rules for generated headers and have them automatically be preferred - # over provided versions. - gtest_cppflags="-I${build_dir}/include -I${src_dir}/include" - gtest_cxxflags="@PTHREAD_CFLAGS@" -else - # We're using an installed gtest, although it may be staged under some - # prefix. Assume (as our own libraries do) that we can resolve the prefix, - # and are present in the dynamic link paths. - gtest_ldflags="-L${libdir}" - gtest_libs="-l${name} @PTHREAD_CFLAGS@ @PTHREAD_LIBS@" - gtest_cppflags="-I${includedir}" - gtest_cxxflags="@PTHREAD_CFLAGS@" -fi - -# Do an installation query if requested. -if test -n "$do_query"; then - case $do_query in - prefix) echo $prefix; exit 0;; - exec-prefix) echo $exec_prefix; exit 0;; - libdir) echo $libdir; exit 0;; - includedir) echo $includedir; exit 0;; - version) echo $version; exit 0;; - *) show_usage; exit 1;; - esac -fi - -# Do a version check if requested. -if test "$do_check_versions" = "yes"; then - # Make sure we didn't receive a bad combination of parameters. 
- test "$echo_cppflags" = "yes" && show_usage && exit 1 - test "$echo_cxxflags" = "yes" && show_usage && exit 1 - test "$echo_ldflags" = "yes" && show_usage && exit 1 - test "$echo_libs" = "yes" && show_usage && exit 1 - - if test "$exact_version" != ""; then - check_versions $exact_version $exact_version - # unreachable - else - check_versions ${min_version:-0.0.0} ${max_version:-9999.9999.9999} - # unreachable - fi -fi - -# Do the output in the correct order so that these can be used in-line of -# a compiler invocation. -output="" -test "$echo_cppflags" = "yes" && output="$output $gtest_cppflags" -test "$echo_cxxflags" = "yes" && output="$output $gtest_cxxflags" -test "$echo_ldflags" = "yes" && output="$output $gtest_ldflags" -test "$echo_libs" = "yes" && output="$output $gtest_libs" -echo $output - -exit 0 diff --git a/ext/googletest/googletest/scripts/release_docs.py b/ext/googletest/googletest/scripts/release_docs.py deleted file mode 100755 index 8d24f28fdf..0000000000 --- a/ext/googletest/googletest/scripts/release_docs.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013 Google Inc. All Rights Reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Script for branching Google Test/Mock wiki pages for a new version. - -SYNOPSIS - release_docs.py NEW_RELEASE_VERSION - - Google Test and Google Mock's external user documentation is in - interlinked wiki files. When we release a new version of - Google Test or Google Mock, we need to branch the wiki files - such that users of a specific version of Google Test/Mock can - look up documentation relevant for that version. This script - automates that process by: - - - branching the current wiki pages (which document the - behavior of the SVN trunk head) to pages for the specified - version (e.g. branching FAQ.wiki to V2_6_FAQ.wiki when - NEW_RELEASE_VERSION is 2.6); - - updating the links in the branched files to point to the branched - version (e.g. a link in V2_6_FAQ.wiki that pointed to - Primer.wiki#Anchor will now point to V2_6_Primer.wiki#Anchor). - - NOTE: NEW_RELEASE_VERSION must be a NEW version number for - which the wiki pages don't yet exist; otherwise you'll get SVN - errors like "svn: Path 'V1_7_PumpManual.wiki' is not a - directory" when running the script. 
- -EXAMPLE - $ cd PATH/TO/GTEST_SVN_WORKSPACE/trunk - $ scripts/release_docs.py 2.6 # create wiki pages for v2.6 - $ svn status # verify the file list - $ svn diff # verify the file contents - $ svn commit -m "release wiki pages for v2.6" -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import re -import sys - -import common - - -# Wiki pages that shouldn't be branched for every gtest/gmock release. -GTEST_UNVERSIONED_WIKIS = ['DevGuide.wiki'] -GMOCK_UNVERSIONED_WIKIS = [ - 'DesignDoc.wiki', - 'DevGuide.wiki', - 'KnownIssues.wiki' - ] - - -def DropWikiSuffix(wiki_filename): - """Removes the .wiki suffix (if any) from the given filename.""" - - return (wiki_filename[:-len('.wiki')] if wiki_filename.endswith('.wiki') - else wiki_filename) - - -class WikiBrancher(object): - """Branches ...""" - - def __init__(self, dot_version): - self.project, svn_root_path = common.GetSvnInfo() - if self.project not in ('googletest', 'googlemock'): - sys.exit('This script must be run in a gtest or gmock SVN workspace.') - self.wiki_dir = svn_root_path + '/wiki' - # Turn '2.6' to 'V2_6_'. - self.version_prefix = 'V' + dot_version.replace('.', '_') + '_' - self.files_to_branch = self.GetFilesToBranch() - page_names = [DropWikiSuffix(f) for f in self.files_to_branch] - # A link to Foo.wiki is in one of the following forms: - # [Foo words] - # [Foo#Anchor words] - # [http://code.google.com/.../wiki/Foo words] - # [http://code.google.com/.../wiki/Foo#Anchor words] - # We want to replace 'Foo' with 'V2_6_Foo' in the above cases. - self.search_for_re = re.compile( - # This regex matches either - # [Foo - # or - # /wiki/Foo - # followed by a space or a #, where Foo is the name of an - # unversioned wiki page. 
- r'(\[|/wiki/)(%s)([ #])' % '|'.join(page_names)) - self.replace_with = r'\1%s\2\3' % (self.version_prefix,) - - def GetFilesToBranch(self): - """Returns a list of .wiki file names that need to be branched.""" - - unversioned_wikis = (GTEST_UNVERSIONED_WIKIS if self.project == 'googletest' - else GMOCK_UNVERSIONED_WIKIS) - return [f for f in os.listdir(self.wiki_dir) - if (f.endswith('.wiki') and - not re.match(r'^V\d', f) and # Excluded versioned .wiki files. - f not in unversioned_wikis)] - - def BranchFiles(self): - """Branches the .wiki files needed to be branched.""" - - print 'Branching %d .wiki files:' % (len(self.files_to_branch),) - os.chdir(self.wiki_dir) - for f in self.files_to_branch: - command = 'svn cp %s %s%s' % (f, self.version_prefix, f) - print command - os.system(command) - - def UpdateLinksInBranchedFiles(self): - - for f in self.files_to_branch: - source_file = os.path.join(self.wiki_dir, f) - versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f) - print 'Updating links in %s.' % (versioned_file,) - text = file(source_file, 'r').read() - new_text = self.search_for_re.sub(self.replace_with, text) - file(versioned_file, 'w').write(new_text) - - -def main(): - if len(sys.argv) != 2: - sys.exit(__doc__) - - brancher = WikiBrancher(sys.argv[1]) - brancher.BranchFiles() - brancher.UpdateLinksInBranchedFiles() - - -if __name__ == '__main__': - main() diff --git a/ext/googletest/googletest/scripts/run_with_path.py b/ext/googletest/googletest/scripts/run_with_path.py deleted file mode 100755 index d46ab4d34a..0000000000 --- a/ext/googletest/googletest/scripts/run_with_path.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2010 Google Inc. All Rights Reserved. - -"""Runs program specified in the command line with the substituted PATH. - - This script is needed for to support building under Pulse which is unable - to override the existing PATH variable. 
-""" - -import os -import subprocess -import sys - -SUBST_PATH_ENV_VAR_NAME = "SUBST_PATH" - -def main(): - if SUBST_PATH_ENV_VAR_NAME in os.environ: - os.environ["PATH"] = os.environ[SUBST_PATH_ENV_VAR_NAME] - - exit_code = subprocess.Popen(sys.argv[1:]).wait() - - # exit_code is negative (-signal) if the process has been terminated by - # a signal. Returning negative exit code is not portable and so we return - # 100 instead. - if exit_code < 0: - exit_code = 100 - - sys.exit(exit_code) - -if __name__ == "__main__": - main() diff --git a/ext/googletest/googletest/scripts/test/Makefile b/ext/googletest/googletest/scripts/test/Makefile deleted file mode 100644 index cdff584637..0000000000 --- a/ext/googletest/googletest/scripts/test/Makefile +++ /dev/null @@ -1,59 +0,0 @@ -# A Makefile for fusing Google Test and building a sample test against it. -# -# SYNOPSIS: -# -# make [all] - makes everything. -# make TARGET - makes the given target. -# make check - makes everything and runs the built sample test. -# make clean - removes all files generated by make. - -# Points to the root of fused Google Test, relative to where this file is. -FUSED_GTEST_DIR = output - -# Paths to the fused gtest files. -FUSED_GTEST_H = $(FUSED_GTEST_DIR)/gtest/gtest.h -FUSED_GTEST_ALL_CC = $(FUSED_GTEST_DIR)/gtest/gtest-all.cc - -# Where to find the sample test. -SAMPLE_DIR = ../../samples - -# Where to find gtest_main.cc. -GTEST_MAIN_CC = ../../src/gtest_main.cc - -# Flags passed to the preprocessor. -# We have no idea here whether pthreads is available in the system, so -# disable its use. -CPPFLAGS += -I$(FUSED_GTEST_DIR) -DGTEST_HAS_PTHREAD=0 - -# Flags passed to the C++ compiler. 
-CXXFLAGS += -g - -all : sample1_unittest - -check : all - ./sample1_unittest - -clean : - rm -rf $(FUSED_GTEST_DIR) sample1_unittest *.o - -$(FUSED_GTEST_H) : - ../fuse_gtest_files.py $(FUSED_GTEST_DIR) - -$(FUSED_GTEST_ALL_CC) : - ../fuse_gtest_files.py $(FUSED_GTEST_DIR) - -gtest-all.o : $(FUSED_GTEST_H) $(FUSED_GTEST_ALL_CC) - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(FUSED_GTEST_DIR)/gtest/gtest-all.cc - -gtest_main.o : $(FUSED_GTEST_H) $(GTEST_MAIN_CC) - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(GTEST_MAIN_CC) - -sample1.o : $(SAMPLE_DIR)/sample1.cc $(SAMPLE_DIR)/sample1.h - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1.cc - -sample1_unittest.o : $(SAMPLE_DIR)/sample1_unittest.cc \ - $(SAMPLE_DIR)/sample1.h $(FUSED_GTEST_H) - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1_unittest.cc - -sample1_unittest : sample1.o sample1_unittest.o gtest-all.o gtest_main.o - $(CXX) $(CPPFLAGS) $(CXXFLAGS) $^ -o $@ diff --git a/ext/googletest/googletest/scripts/upload.py b/ext/googletest/googletest/scripts/upload.py deleted file mode 100755 index eba571142f..0000000000 --- a/ext/googletest/googletest/scripts/upload.py +++ /dev/null @@ -1,1402 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tool for uploading diffs from a version control system to the codereview app. - -Usage summary: upload.py [options] [-- diff_options] - -Diff options are passed to the diff command of the underlying system. - -Supported version control systems: - Git - Mercurial - Subversion - -It is important for Git/Mercurial users to specify a tree/node/branch to diff -against by using the '--rev' option. -""" -# This code is derived from appcfg.py in the App Engine SDK (open source), -# and from ASPN recipe #146306. - -import cookielib -import getpass -import logging -import md5 -import mimetypes -import optparse -import os -import re -import socket -import subprocess -import sys -import urllib -import urllib2 -import urlparse - -try: - import readline -except ImportError: - pass - -# The logging verbosity: -# 0: Errors only. -# 1: Status messages. -# 2: Info logs. -# 3: Debug logs. -verbosity = 1 - -# Max size of patch or base file. -MAX_UPLOAD_SIZE = 900 * 1024 - - -def GetEmail(prompt): - """Prompts the user for their email address and returns it. - - The last used email address is saved to a file and offered up as a suggestion - to the user. 
If the user presses enter without typing in anything the last - used email address is used. If the user enters a new address, it is saved - for next time we prompt. - - """ - last_email_file_name = os.path.expanduser("~/.last_codereview_email_address") - last_email = "" - if os.path.exists(last_email_file_name): - try: - last_email_file = open(last_email_file_name, "r") - last_email = last_email_file.readline().strip("\n") - last_email_file.close() - prompt += " [%s]" % last_email - except IOError, e: - pass - email = raw_input(prompt + ": ").strip() - if email: - try: - last_email_file = open(last_email_file_name, "w") - last_email_file.write(email) - last_email_file.close() - except IOError, e: - pass - else: - email = last_email - return email - - -def StatusUpdate(msg): - """Print a status message to stdout. - - If 'verbosity' is greater than 0, print the message. - - Args: - msg: The string to print. - """ - if verbosity > 0: - print msg - - -def ErrorExit(msg): - """Print an error message to stderr and exit.""" - print >>sys.stderr, msg - sys.exit(1) - - -class ClientLoginError(urllib2.HTTPError): - """Raised to indicate there was an error authenticating with ClientLogin.""" - - def __init__(self, url, code, msg, headers, args): - urllib2.HTTPError.__init__(self, url, code, msg, headers, None) - self.args = args - self.reason = args["Error"] - - -class AbstractRpcServer(object): - """Provides a common interface for a simple RPC server.""" - - def __init__(self, host, auth_function, host_override=None, extra_headers={}, - save_cookies=False): - """Creates a new HttpRpcServer. - - Args: - host: The host to send requests to. - auth_function: A function that takes no arguments and returns an - (email, password) tuple when called. Will be called if authentication - is required. - host_override: The host header to send to the server (defaults to host). - extra_headers: A dict of extra headers to append to every request. 
- save_cookies: If True, save the authentication cookies to local disk. - If False, use an in-memory cookiejar instead. Subclasses must - implement this functionality. Defaults to False. - """ - self.host = host - self.host_override = host_override - self.auth_function = auth_function - self.authenticated = False - self.extra_headers = extra_headers - self.save_cookies = save_cookies - self.opener = self._GetOpener() - if self.host_override: - logging.info("Server: %s; Host: %s", self.host, self.host_override) - else: - logging.info("Server: %s", self.host) - - def _GetOpener(self): - """Returns an OpenerDirector for making HTTP requests. - - Returns: - A urllib2.OpenerDirector object. - """ - raise NotImplementedError() - - def _CreateRequest(self, url, data=None): - """Creates a new urllib request.""" - logging.debug("Creating request for: '%s' with payload:\n%s", url, data) - req = urllib2.Request(url, data=data) - if self.host_override: - req.add_header("Host", self.host_override) - for key, value in self.extra_headers.iteritems(): - req.add_header(key, value) - return req - - def _GetAuthToken(self, email, password): - """Uses ClientLogin to authenticate the user, returning an auth token. - - Args: - email: The user's email address - password: The user's password - - Raises: - ClientLoginError: If there was an error authenticating with ClientLogin. - HTTPError: If there was some other form of HTTP error. - - Returns: - The authentication token returned by ClientLogin. - """ - account_type = "GOOGLE" - if self.host.endswith(".google.com"): - # Needed for use inside Google. 
- account_type = "HOSTED" - req = self._CreateRequest( - url="https://www.google.com/accounts/ClientLogin", - data=urllib.urlencode({ - "Email": email, - "Passwd": password, - "service": "ah", - "source": "rietveld-codereview-upload", - "accountType": account_type, - }), - ) - try: - response = self.opener.open(req) - response_body = response.read() - response_dict = dict(x.split("=") - for x in response_body.split("\n") if x) - return response_dict["Auth"] - except urllib2.HTTPError, e: - if e.code == 403: - body = e.read() - response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) - raise ClientLoginError(req.get_full_url(), e.code, e.msg, - e.headers, response_dict) - else: - raise - - def _GetAuthCookie(self, auth_token): - """Fetches authentication cookies for an authentication token. - - Args: - auth_token: The authentication token returned by ClientLogin. - - Raises: - HTTPError: If there was an error fetching the authentication cookies. - """ - # This is a dummy value to allow us to identify when we're successful. - continue_location = "http://localhost/" - args = {"continue": continue_location, "auth": auth_token} - req = self._CreateRequest("http://%s/_ah/login?%s" % - (self.host, urllib.urlencode(args))) - try: - response = self.opener.open(req) - except urllib2.HTTPError, e: - response = e - if (response.code != 302 or - response.info()["location"] != continue_location): - raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, - response.headers, response.fp) - self.authenticated = True - - def _Authenticate(self): - """Authenticates the user. - - The authentication process works as follows: - 1) We get a username and password from the user - 2) We use ClientLogin to obtain an AUTH token for the user - (see https://developers.google.com/identity/protocols/AuthForInstalledApps). - 3) We pass the auth token to /_ah/login on the server to obtain an - authentication cookie. 
If login was successful, it tries to redirect - us to the URL we provided. - - If we attempt to access the upload API without first obtaining an - authentication cookie, it returns a 401 response and directs us to - authenticate ourselves with ClientLogin. - """ - for i in range(3): - credentials = self.auth_function() - try: - auth_token = self._GetAuthToken(credentials[0], credentials[1]) - except ClientLoginError, e: - if e.reason == "BadAuthentication": - print >>sys.stderr, "Invalid username or password." - continue - if e.reason == "CaptchaRequired": - print >>sys.stderr, ( - "Please go to\n" - "https://www.google.com/accounts/DisplayUnlockCaptcha\n" - "and verify you are a human. Then try again.") - break - if e.reason == "NotVerified": - print >>sys.stderr, "Account not verified." - break - if e.reason == "TermsNotAgreed": - print >>sys.stderr, "User has not agreed to TOS." - break - if e.reason == "AccountDeleted": - print >>sys.stderr, "The user account has been deleted." - break - if e.reason == "AccountDisabled": - print >>sys.stderr, "The user account has been disabled." - break - if e.reason == "ServiceDisabled": - print >>sys.stderr, ("The user's access to the service has been " - "disabled.") - break - if e.reason == "ServiceUnavailable": - print >>sys.stderr, "The service is not available; try again later." - break - raise - self._GetAuthCookie(auth_token) - return - - def Send(self, request_path, payload=None, - content_type="application/octet-stream", - timeout=None, - **kwargs): - """Sends an RPC and returns the response. - - Args: - request_path: The path to send the request to, eg /api/appversion/create. - payload: The body of the request, or None to send an empty request. - content_type: The Content-Type header to use. - timeout: timeout in seconds; default None i.e. no timeout. - (Note: for large requests on OS X, the timeout doesn't work right.) - kwargs: Any keyword arguments are converted into query string parameters. 
- - Returns: - The response body, as a string. - """ - # TODO: Don't require authentication. Let the server say - # whether it is necessary. - if not self.authenticated: - self._Authenticate() - - old_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - try: - tries = 0 - while True: - tries += 1 - args = dict(kwargs) - url = "http://%s%s" % (self.host, request_path) - if args: - url += "?" + urllib.urlencode(args) - req = self._CreateRequest(url=url, data=payload) - req.add_header("Content-Type", content_type) - try: - f = self.opener.open(req) - response = f.read() - f.close() - return response - except urllib2.HTTPError, e: - if tries > 3: - raise - elif e.code == 401: - self._Authenticate() -## elif e.code >= 500 and e.code < 600: -## # Server Error - try again. -## continue - else: - raise - finally: - socket.setdefaulttimeout(old_timeout) - - -class HttpRpcServer(AbstractRpcServer): - """Provides a simplified RPC-style interface for HTTP requests.""" - - def _Authenticate(self): - """Save the cookie jar after authentication.""" - super(HttpRpcServer, self)._Authenticate() - if self.save_cookies: - StatusUpdate("Saving authentication cookies to %s" % self.cookie_file) - self.cookie_jar.save() - - def _GetOpener(self): - """Returns an OpenerDirector that supports cookies and ignores redirects. - - Returns: - A urllib2.OpenerDirector object. 
- """ - opener = urllib2.OpenerDirector() - opener.add_handler(urllib2.ProxyHandler()) - opener.add_handler(urllib2.UnknownHandler()) - opener.add_handler(urllib2.HTTPHandler()) - opener.add_handler(urllib2.HTTPDefaultErrorHandler()) - opener.add_handler(urllib2.HTTPSHandler()) - opener.add_handler(urllib2.HTTPErrorProcessor()) - if self.save_cookies: - self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies") - self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file) - if os.path.exists(self.cookie_file): - try: - self.cookie_jar.load() - self.authenticated = True - StatusUpdate("Loaded authentication cookies from %s" % - self.cookie_file) - except (cookielib.LoadError, IOError): - # Failed to load cookies - just ignore them. - pass - else: - # Create an empty cookie file with mode 600 - fd = os.open(self.cookie_file, os.O_CREAT, 0600) - os.close(fd) - # Always chmod the cookie file - os.chmod(self.cookie_file, 0600) - else: - # Don't save cookies across runs of update.py. 
- self.cookie_jar = cookielib.CookieJar() - opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar)) - return opener - - -parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]") -parser.add_option("-y", "--assume_yes", action="store_true", - dest="assume_yes", default=False, - help="Assume that the answer to yes/no questions is 'yes'.") -# Logging -group = parser.add_option_group("Logging options") -group.add_option("-q", "--quiet", action="store_const", const=0, - dest="verbose", help="Print errors only.") -group.add_option("-v", "--verbose", action="store_const", const=2, - dest="verbose", default=1, - help="Print info level logs (default).") -group.add_option("--noisy", action="store_const", const=3, - dest="verbose", help="Print all logs.") -# Review server -group = parser.add_option_group("Review server options") -group.add_option("-s", "--server", action="store", dest="server", - default="codereview.appspot.com", - metavar="SERVER", - help=("The server to upload to. The format is host[:port]. " - "Defaults to 'codereview.appspot.com'.")) -group.add_option("-e", "--email", action="store", dest="email", - metavar="EMAIL", default=None, - help="The username to use. 
Will prompt if omitted.") -group.add_option("-H", "--host", action="store", dest="host", - metavar="HOST", default=None, - help="Overrides the Host header sent with all RPCs.") -group.add_option("--no_cookies", action="store_false", - dest="save_cookies", default=True, - help="Do not save authentication cookies to local disk.") -# Issue -group = parser.add_option_group("Issue options") -group.add_option("-d", "--description", action="store", dest="description", - metavar="DESCRIPTION", default=None, - help="Optional description when creating an issue.") -group.add_option("-f", "--description_file", action="store", - dest="description_file", metavar="DESCRIPTION_FILE", - default=None, - help="Optional path of a file that contains " - "the description when creating an issue.") -group.add_option("-r", "--reviewers", action="store", dest="reviewers", - metavar="REVIEWERS", default=None, - help="Add reviewers (comma separated email addresses).") -group.add_option("--cc", action="store", dest="cc", - metavar="CC", default=None, - help="Add CC (comma separated email addresses).") -# Upload options -group = parser.add_option_group("Patch options") -group.add_option("-m", "--message", action="store", dest="message", - metavar="MESSAGE", default=None, - help="A message to identify the patch. " - "Will prompt if omitted.") -group.add_option("-i", "--issue", type="int", action="store", - metavar="ISSUE", default=None, - help="Issue number to which to add. 
Defaults to new issue.") -group.add_option("--download_base", action="store_true", - dest="download_base", default=False, - help="Base files will be downloaded by the server " - "(side-by-side diffs may not work on files with CRs).") -group.add_option("--rev", action="store", dest="revision", - metavar="REV", default=None, - help="Branch/tree/revision to diff against (used by DVCS).") -group.add_option("--send_mail", action="store_true", - dest="send_mail", default=False, - help="Send notification email to reviewers.") - - -def GetRpcServer(options): - """Returns an instance of an AbstractRpcServer. - - Returns: - A new AbstractRpcServer, on which RPC calls can be made. - """ - - rpc_server_class = HttpRpcServer - - def GetUserCredentials(): - """Prompts the user for a username and password.""" - email = options.email - if email is None: - email = GetEmail("Email (login for uploading to %s)" % options.server) - password = getpass.getpass("Password for %s: " % email) - return (email, password) - - # If this is the dev_appserver, use fake authentication. - host = (options.host or options.server).lower() - if host == "localhost" or host.startswith("localhost:"): - email = options.email - if email is None: - email = "test@example.com" - logging.info("Using debug user %s. Override with --email" % email) - server = rpc_server_class( - options.server, - lambda: (email, "password"), - host_override=options.host, - extra_headers={"Cookie": - 'dev_appserver_login="%s:False"' % email}, - save_cookies=options.save_cookies) - # Don't try to talk to ClientLogin. - server.authenticated = True - return server - - return rpc_server_class(options.server, GetUserCredentials, - host_override=options.host, - save_cookies=options.save_cookies) - - -def EncodeMultipartFormData(fields, files): - """Encode form fields for multipart/form-data. - - Args: - fields: A sequence of (name, value) elements for regular form fields. 
- files: A sequence of (name, filename, value) elements for data to be - uploaded as files. - Returns: - (content_type, body) ready for httplib.HTTP instance. - - Source: - https://web.archive.org/web/20160116052001/code.activestate.com/recipes/146306 - """ - BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' - CRLF = '\r\n' - lines = [] - for (key, value) in fields: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"' % key) - lines.append('') - lines.append(value) - for (key, filename, value) in files: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % - (key, filename)) - lines.append('Content-Type: %s' % GetContentType(filename)) - lines.append('') - lines.append(value) - lines.append('--' + BOUNDARY + '--') - lines.append('') - body = CRLF.join(lines) - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, body - - -def GetContentType(filename): - """Helper to guess the content-type from the filename.""" - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' - - -# Use a shell for subcommands on Windows to get a PATH search. -use_shell = sys.platform.startswith("win") - -def RunShellWithReturnCode(command, print_output=False, - universal_newlines=True): - """Executes a command and returns the output from stdout and the return code. - - Args: - command: Command to execute. - print_output: If True, the output is printed to stdout. - If False, both stdout and stderr are ignored. - universal_newlines: Use universal_newlines flag (default: True). 
- - Returns: - Tuple (output, return code) - """ - logging.info("Running %s", command) - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=use_shell, universal_newlines=universal_newlines) - if print_output: - output_array = [] - while True: - line = p.stdout.readline() - if not line: - break - print line.strip("\n") - output_array.append(line) - output = "".join(output_array) - else: - output = p.stdout.read() - p.wait() - errout = p.stderr.read() - if print_output and errout: - print >>sys.stderr, errout - p.stdout.close() - p.stderr.close() - return output, p.returncode - - -def RunShell(command, silent_ok=False, universal_newlines=True, - print_output=False): - data, retcode = RunShellWithReturnCode(command, print_output, - universal_newlines) - if retcode: - ErrorExit("Got error status from %s:\n%s" % (command, data)) - if not silent_ok and not data: - ErrorExit("No output from %s" % command) - return data - - -class VersionControlSystem(object): - """Abstract base class providing an interface to the VCS.""" - - def __init__(self, options): - """Constructor. - - Args: - options: Command line options. - """ - self.options = options - - def GenerateDiff(self, args): - """Return the current diff as a string. - - Args: - args: Extra arguments to pass to the diff command. - """ - raise NotImplementedError( - "abstract method -- subclass %s must override" % self.__class__) - - def GetUnknownFiles(self): - """Return a list of files unknown to the VCS.""" - raise NotImplementedError( - "abstract method -- subclass %s must override" % self.__class__) - - def CheckForUnknownFiles(self): - """Show an "are you sure?" 
prompt if there are unknown files.""" - unknown_files = self.GetUnknownFiles() - if unknown_files: - print "The following files are not added to version control:" - for line in unknown_files: - print line - prompt = "Are you sure to continue?(y/N) " - answer = raw_input(prompt).strip() - if answer != "y": - ErrorExit("User aborted") - - def GetBaseFile(self, filename): - """Get the content of the upstream version of a file. - - Returns: - A tuple (base_content, new_content, is_binary, status) - base_content: The contents of the base file. - new_content: For text files, this is empty. For binary files, this is - the contents of the new file, since the diff output won't contain - information to reconstruct the current file. - is_binary: True iff the file is binary. - status: The status of the file. - """ - - raise NotImplementedError( - "abstract method -- subclass %s must override" % self.__class__) - - - def GetBaseFiles(self, diff): - """Helper that calls GetBase file for each file in the patch. - - Returns: - A dictionary that maps from filename to GetBaseFile's tuple. Filenames - are retrieved based on lines that start with "Index:" or - "Property changes on:". - """ - files = {} - for line in diff.splitlines(True): - if line.startswith('Index:') or line.startswith('Property changes on:'): - unused, filename = line.split(':', 1) - # On Windows if a file has property changes its filename uses '\' - # instead of '/'. 
- filename = filename.strip().replace('\\', '/') - files[filename] = self.GetBaseFile(filename) - return files - - - def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options, - files): - """Uploads the base files (and if necessary, the current ones as well).""" - - def UploadFile(filename, file_id, content, is_binary, status, is_base): - """Uploads a file to the server.""" - file_too_large = False - if is_base: - type = "base" - else: - type = "current" - if len(content) > MAX_UPLOAD_SIZE: - print ("Not uploading the %s file for %s because it's too large." % - (type, filename)) - file_too_large = True - content = "" - checksum = md5.new(content).hexdigest() - if options.verbose > 0 and not file_too_large: - print "Uploading %s file for %s" % (type, filename) - url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id) - form_fields = [("filename", filename), - ("status", status), - ("checksum", checksum), - ("is_binary", str(is_binary)), - ("is_current", str(not is_base)), - ] - if file_too_large: - form_fields.append(("file_too_large", "1")) - if options.email: - form_fields.append(("user", options.email)) - ctype, body = EncodeMultipartFormData(form_fields, - [("data", filename, content)]) - response_body = rpc_server.Send(url, body, - content_type=ctype) - if not response_body.startswith("OK"): - StatusUpdate(" --> %s" % response_body) - sys.exit(1) - - patches = dict() - [patches.setdefault(v, k) for k, v in patch_list] - for filename in patches.keys(): - base_content, new_content, is_binary, status = files[filename] - file_id_str = patches.get(filename) - if file_id_str.find("nobase") != -1: - base_content = None - file_id_str = file_id_str[file_id_str.rfind("_") + 1:] - file_id = int(file_id_str) - if base_content != None: - UploadFile(filename, file_id, base_content, is_binary, status, True) - if new_content != None: - UploadFile(filename, file_id, new_content, is_binary, status, False) - - def IsImage(self, filename): - 
"""Returns true if the filename has an image extension.""" - mimetype = mimetypes.guess_type(filename)[0] - if not mimetype: - return False - return mimetype.startswith("image/") - - -class SubversionVCS(VersionControlSystem): - """Implementation of the VersionControlSystem interface for Subversion.""" - - def __init__(self, options): - super(SubversionVCS, self).__init__(options) - if self.options.revision: - match = re.match(r"(\d+)(:(\d+))?", self.options.revision) - if not match: - ErrorExit("Invalid Subversion revision %s." % self.options.revision) - self.rev_start = match.group(1) - self.rev_end = match.group(3) - else: - self.rev_start = self.rev_end = None - # Cache output from "svn list -r REVNO dirname". - # Keys: dirname, Values: 2-tuple (output for start rev and end rev). - self.svnls_cache = {} - # SVN base URL is required to fetch files deleted in an older revision. - # Result is cached to not guess it over and over again in GetBaseFile(). - required = self.options.download_base or self.options.revision is not None - self.svn_base = self._GuessBase(required) - - def GuessBase(self, required): - """Wrapper for _GuessBase.""" - return self.svn_base - - def _GuessBase(self, required): - """Returns the SVN base URL. - - Args: - required: If true, exits if the url can't be guessed, otherwise None is - returned. 
- """ - info = RunShell(["svn", "info"]) - for line in info.splitlines(): - words = line.split() - if len(words) == 2 and words[0] == "URL:": - url = words[1] - scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) - username, netloc = urllib.splituser(netloc) - if username: - logging.info("Removed username from base URL") - if netloc.endswith("svn.python.org"): - if netloc == "svn.python.org": - if path.startswith("/projects/"): - path = path[9:] - elif netloc != "pythondev@svn.python.org": - ErrorExit("Unrecognized Python URL: %s" % url) - base = "http://svn.python.org/view/*checkout*%s/" % path - logging.info("Guessed Python base = %s", base) - elif netloc.endswith("svn.collab.net"): - if path.startswith("/repos/"): - path = path[6:] - base = "http://svn.collab.net/viewvc/*checkout*%s/" % path - logging.info("Guessed CollabNet base = %s", base) - elif netloc.endswith(".googlecode.com"): - path = path + "/" - base = urlparse.urlunparse(("http", netloc, path, params, - query, fragment)) - logging.info("Guessed Google Code base = %s", base) - else: - path = path + "/" - base = urlparse.urlunparse((scheme, netloc, path, params, - query, fragment)) - logging.info("Guessed base = %s", base) - return base - if required: - ErrorExit("Can't find URL in output from svn info") - return None - - def GenerateDiff(self, args): - cmd = ["svn", "diff"] - if self.options.revision: - cmd += ["-r", self.options.revision] - cmd.extend(args) - data = RunShell(cmd) - count = 0 - for line in data.splitlines(): - if line.startswith("Index:") or line.startswith("Property changes on:"): - count += 1 - logging.info(line) - if not count: - ErrorExit("No valid patches found in output from svn diff") - return data - - def _CollapseKeywords(self, content, keyword_str): - """Collapses SVN keywords.""" - # svn cat translates keywords but svn diff doesn't. As a result of this - # behavior patching.PatchChunks() fails with a chunk mismatch error. 
- # This part was originally written by the Review Board development team - # who had the same problem (https://reviews.reviewboard.org/r/276/). - # Mapping of keywords to known aliases - svn_keywords = { - # Standard keywords - 'Date': ['Date', 'LastChangedDate'], - 'Revision': ['Revision', 'LastChangedRevision', 'Rev'], - 'Author': ['Author', 'LastChangedBy'], - 'HeadURL': ['HeadURL', 'URL'], - 'Id': ['Id'], - - # Aliases - 'LastChangedDate': ['LastChangedDate', 'Date'], - 'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'], - 'LastChangedBy': ['LastChangedBy', 'Author'], - 'URL': ['URL', 'HeadURL'], - } - - def repl(m): - if m.group(2): - return "$%s::%s$" % (m.group(1), " " * len(m.group(3))) - return "$%s$" % m.group(1) - keywords = [keyword - for name in keyword_str.split(" ") - for keyword in svn_keywords.get(name, [])] - return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content) - - def GetUnknownFiles(self): - status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True) - unknown_files = [] - for line in status.split("\n"): - if line and line[0] == "?": - unknown_files.append(line) - return unknown_files - - def ReadFile(self, filename): - """Returns the contents of a file.""" - file = open(filename, 'rb') - result = "" - try: - result = file.read() - finally: - file.close() - return result - - def GetStatus(self, filename): - """Returns the status of a file.""" - if not self.options.revision: - status = RunShell(["svn", "status", "--ignore-externals", filename]) - if not status: - ErrorExit("svn status returned no output for %s" % filename) - status_lines = status.splitlines() - # If file is in a cl, the output will begin with - # "\n--- Changelist 'cl_name':\n". 
See - # https://web.archive.org/web/20090918234815/svn.collab.net/repos/svn/trunk/notes/changelist-design.txt - if (len(status_lines) == 3 and - not status_lines[0] and - status_lines[1].startswith("--- Changelist")): - status = status_lines[2] - else: - status = status_lines[0] - # If we have a revision to diff against we need to run "svn list" - # for the old and the new revision and compare the results to get - # the correct status for a file. - else: - dirname, relfilename = os.path.split(filename) - if dirname not in self.svnls_cache: - cmd = ["svn", "list", "-r", self.rev_start, dirname or "."] - out, returncode = RunShellWithReturnCode(cmd) - if returncode: - ErrorExit("Failed to get status for %s." % filename) - old_files = out.splitlines() - args = ["svn", "list"] - if self.rev_end: - args += ["-r", self.rev_end] - cmd = args + [dirname or "."] - out, returncode = RunShellWithReturnCode(cmd) - if returncode: - ErrorExit("Failed to run command %s" % cmd) - self.svnls_cache[dirname] = (old_files, out.splitlines()) - old_files, new_files = self.svnls_cache[dirname] - if relfilename in old_files and relfilename not in new_files: - status = "D " - elif relfilename in old_files and relfilename in new_files: - status = "M " - else: - status = "A " - return status - - def GetBaseFile(self, filename): - status = self.GetStatus(filename) - base_content = None - new_content = None - - # If a file is copied its status will be "A +", which signifies - # "addition-with-history". See "svn st" for more information. We need to - # upload the original file or else diff parsing will fail if the file was - # edited. - if status[0] == "A" and status[3] != "+": - # We'll need to upload the new content if we're adding a binary file - # since diff's output won't contain it. 
- mimetype = RunShell(["svn", "propget", "svn:mime-type", filename], - silent_ok=True) - base_content = "" - is_binary = mimetype and not mimetype.startswith("text/") - if is_binary and self.IsImage(filename): - new_content = self.ReadFile(filename) - elif (status[0] in ("M", "D", "R") or - (status[0] == "A" and status[3] == "+") or # Copied file. - (status[0] == " " and status[1] == "M")): # Property change. - args = [] - if self.options.revision: - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) - else: - # Don't change filename, it's needed later. - url = filename - args += ["-r", "BASE"] - cmd = ["svn"] + args + ["propget", "svn:mime-type", url] - mimetype, returncode = RunShellWithReturnCode(cmd) - if returncode: - # File does not exist in the requested revision. - # Reset mimetype, it contains an error message. - mimetype = "" - get_base = False - is_binary = mimetype and not mimetype.startswith("text/") - if status[0] == " ": - # Empty base content just to force an upload. - base_content = "" - elif is_binary: - if self.IsImage(filename): - get_base = True - if status[0] == "M": - if not self.rev_end: - new_content = self.ReadFile(filename) - else: - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end) - new_content = RunShell(["svn", "cat", url], - universal_newlines=True, silent_ok=True) - else: - base_content = "" - else: - get_base = True - - if get_base: - if is_binary: - universal_newlines = False - else: - universal_newlines = True - if self.rev_start: - # "svn cat -r REV delete_file.txt" doesn't work. cat requires - # the full URL with "@REV" appended instead of using "-r" option. 
- url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) - base_content = RunShell(["svn", "cat", url], - universal_newlines=universal_newlines, - silent_ok=True) - else: - base_content = RunShell(["svn", "cat", filename], - universal_newlines=universal_newlines, - silent_ok=True) - if not is_binary: - args = [] - if self.rev_start: - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) - else: - url = filename - args += ["-r", "BASE"] - cmd = ["svn"] + args + ["propget", "svn:keywords", url] - keywords, returncode = RunShellWithReturnCode(cmd) - if keywords and not returncode: - base_content = self._CollapseKeywords(base_content, keywords) - else: - StatusUpdate("svn status returned unexpected output: %s" % status) - sys.exit(1) - return base_content, new_content, is_binary, status[0:5] - - -class GitVCS(VersionControlSystem): - """Implementation of the VersionControlSystem interface for Git.""" - - def __init__(self, options): - super(GitVCS, self).__init__(options) - # Map of filename -> hash of base file. - self.base_hashes = {} - - def GenerateDiff(self, extra_args): - # This is more complicated than svn's GenerateDiff because we must convert - # the diff output to include an svn-style "Index:" line as well as record - # the hashes of the base files, so we can upload them along with our diff. - if self.options.revision: - extra_args = [self.options.revision] + extra_args - gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args) - svndiff = [] - filecount = 0 - filename = None - for line in gitdiff.splitlines(): - match = re.match(r"diff --git a/(.*) b/.*$", line) - if match: - filecount += 1 - filename = match.group(1) - svndiff.append("Index: %s\n" % filename) - else: - # The "index" line in a git diff looks like this (long hashes elided): - # index 82c0d44..b2cee3f 100755 - # We want to save the left hash, as that identifies the base file. 
- match = re.match(r"index (\w+)\.\.", line) - if match: - self.base_hashes[filename] = match.group(1) - svndiff.append(line + "\n") - if not filecount: - ErrorExit("No valid patches found in output from git diff") - return "".join(svndiff) - - def GetUnknownFiles(self): - status = RunShell(["git", "ls-files", "--exclude-standard", "--others"], - silent_ok=True) - return status.splitlines() - - def GetBaseFile(self, filename): - hash = self.base_hashes[filename] - base_content = None - new_content = None - is_binary = False - if hash == "0" * 40: # All-zero hash indicates no base file. - status = "A" - base_content = "" - else: - status = "M" - base_content, returncode = RunShellWithReturnCode(["git", "show", hash]) - if returncode: - ErrorExit("Got error status from 'git show %s'" % hash) - return (base_content, new_content, is_binary, status) - - -class MercurialVCS(VersionControlSystem): - """Implementation of the VersionControlSystem interface for Mercurial.""" - - def __init__(self, options, repo_dir): - super(MercurialVCS, self).__init__(options) - # Absolute path to repository (we can be in a subdir) - self.repo_dir = os.path.normpath(repo_dir) - # Compute the subdir - cwd = os.path.normpath(os.getcwd()) - assert cwd.startswith(self.repo_dir) - self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/") - if self.options.revision: - self.base_rev = self.options.revision - else: - self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip() - - def _GetRelPath(self, filename): - """Get relative path of a file according to the current directory, - given its logical path in the repo.""" - assert filename.startswith(self.subdir), filename - return filename[len(self.subdir):].lstrip(r"\/") - - def GenerateDiff(self, extra_args): - # If no file specified, restrict to the current subdir - extra_args = extra_args or ["."] - cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args - data = RunShell(cmd, silent_ok=True) - svndiff = [] - filecount = 0 - for 
line in data.splitlines(): - m = re.match("diff --git a/(\S+) b/(\S+)", line) - if m: - # Modify line to make it look like as it comes from svn diff. - # With this modification no changes on the server side are required - # to make upload.py work with Mercurial repos. - # NOTE: for proper handling of moved/copied files, we have to use - # the second filename. - filename = m.group(2) - svndiff.append("Index: %s" % filename) - svndiff.append("=" * 67) - filecount += 1 - logging.info(line) - else: - svndiff.append(line) - if not filecount: - ErrorExit("No valid patches found in output from hg diff") - return "\n".join(svndiff) + "\n" - - def GetUnknownFiles(self): - """Return a list of files unknown to the VCS.""" - args = [] - status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."], - silent_ok=True) - unknown_files = [] - for line in status.splitlines(): - st, fn = line.split(" ", 1) - if st == "?": - unknown_files.append(fn) - return unknown_files - - def GetBaseFile(self, filename): - # "hg status" and "hg cat" both take a path relative to the current subdir - # rather than to the repo root, but "hg diff" has given us the full path - # to the repo root. 
- base_content = "" - new_content = None - is_binary = False - oldrelpath = relpath = self._GetRelPath(filename) - # "hg status -C" returns two lines for moved/copied files, one otherwise - out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath]) - out = out.splitlines() - # HACK: strip error message about missing file/directory if it isn't in - # the working copy - if out[0].startswith('%s: ' % relpath): - out = out[1:] - if len(out) > 1: - # Moved/copied => considered as modified, use old filename to - # retrieve base contents - oldrelpath = out[1].strip() - status = "M" - else: - status, _ = out[0].split(' ', 1) - if status != "A": - base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], - silent_ok=True) - is_binary = "\0" in base_content # Mercurial's heuristic - if status != "R": - new_content = open(relpath, "rb").read() - is_binary = is_binary or "\0" in new_content - if is_binary and base_content: - # Fetch again without converting newlines - base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], - silent_ok=True, universal_newlines=False) - if not is_binary or not self.IsImage(relpath): - new_content = None - return base_content, new_content, is_binary, status - - -# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync. -def SplitPatch(data): - """Splits a patch into separate pieces for each file. - - Args: - data: A string containing the output of svn diff. - - Returns: - A list of 2-tuple (filename, text) where text is the svn diff output - pertaining to filename. 
- """ - patches = [] - filename = None - diff = [] - for line in data.splitlines(True): - new_filename = None - if line.startswith('Index:'): - unused, new_filename = line.split(':', 1) - new_filename = new_filename.strip() - elif line.startswith('Property changes on:'): - unused, temp_filename = line.split(':', 1) - # When a file is modified, paths use '/' between directories, however - # when a property is modified '\' is used on Windows. Make them the same - # otherwise the file shows up twice. - temp_filename = temp_filename.strip().replace('\\', '/') - if temp_filename != filename: - # File has property changes but no modifications, create a new diff. - new_filename = temp_filename - if new_filename: - if filename and diff: - patches.append((filename, ''.join(diff))) - filename = new_filename - diff = [line] - continue - if diff is not None: - diff.append(line) - if filename and diff: - patches.append((filename, ''.join(diff))) - return patches - - -def UploadSeparatePatches(issue, rpc_server, patchset, data, options): - """Uploads a separate patch for each file in the diff output. - - Returns a list of [patch_key, filename] for each file. 
- """ - patches = SplitPatch(data) - rv = [] - for patch in patches: - if len(patch[1]) > MAX_UPLOAD_SIZE: - print ("Not uploading the patch for " + patch[0] + - " because the file is too large.") - continue - form_fields = [("filename", patch[0])] - if not options.download_base: - form_fields.append(("content_upload", "1")) - files = [("data", "data.diff", patch[1])] - ctype, body = EncodeMultipartFormData(form_fields, files) - url = "/%d/upload_patch/%d" % (int(issue), int(patchset)) - print "Uploading patch for " + patch[0] - response_body = rpc_server.Send(url, body, content_type=ctype) - lines = response_body.splitlines() - if not lines or lines[0] != "OK": - StatusUpdate(" --> %s" % response_body) - sys.exit(1) - rv.append([lines[1], patch[0]]) - return rv - - -def GuessVCS(options): - """Helper to guess the version control system. - - This examines the current directory, guesses which VersionControlSystem - we're using, and returns an instance of the appropriate class. Exit with an - error if we can't figure it out. - - Returns: - A VersionControlSystem instance. Exits if the VCS can't be guessed. - """ - # Mercurial has a command to get the base directory of a repository - # Try running it, but don't die if we don't have hg installed. - # NOTE: we try Mercurial first as it can sit on top of an SVN working copy. - try: - out, returncode = RunShellWithReturnCode(["hg", "root"]) - if returncode == 0: - return MercurialVCS(options, out.strip()) - except OSError, (errno, message): - if errno != 2: # ENOENT -- they don't have hg installed. - raise - - # Subversion has a .svn in all working directories. - if os.path.isdir('.svn'): - logging.info("Guessed VCS = Subversion") - return SubversionVCS(options) - - # Git has a command to test if you're in a git tree. - # Try running it, but don't die if we don't have git installed. 
- try: - out, returncode = RunShellWithReturnCode(["git", "rev-parse", - "--is-inside-work-tree"]) - if returncode == 0: - return GitVCS(options) - except OSError, (errno, message): - if errno != 2: # ENOENT -- they don't have git installed. - raise - - ErrorExit(("Could not guess version control system. " - "Are you in a working copy directory?")) - - -def RealMain(argv, data=None): - """The real main function. - - Args: - argv: Command line arguments. - data: Diff contents. If None (default) the diff is generated by - the VersionControlSystem implementation returned by GuessVCS(). - - Returns: - A 2-tuple (issue id, patchset id). - The patchset id is None if the base files are not uploaded by this - script (applies only to SVN checkouts). - """ - logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:" - "%(lineno)s %(message)s ")) - os.environ['LC_ALL'] = 'C' - options, args = parser.parse_args(argv[1:]) - global verbosity - verbosity = options.verbose - if verbosity >= 3: - logging.getLogger().setLevel(logging.DEBUG) - elif verbosity >= 2: - logging.getLogger().setLevel(logging.INFO) - vcs = GuessVCS(options) - if isinstance(vcs, SubversionVCS): - # base field is only allowed for Subversion. - # Note: Fetching base files may become deprecated in future releases. 
- base = vcs.GuessBase(options.download_base) - else: - base = None - if not base and options.download_base: - options.download_base = True - logging.info("Enabled upload of base file") - if not options.assume_yes: - vcs.CheckForUnknownFiles() - if data is None: - data = vcs.GenerateDiff(args) - files = vcs.GetBaseFiles(data) - if verbosity >= 1: - print "Upload server:", options.server, "(change with -s/--server)" - if options.issue: - prompt = "Message describing this patch set: " - else: - prompt = "New issue subject: " - message = options.message or raw_input(prompt).strip() - if not message: - ErrorExit("A non-empty message is required") - rpc_server = GetRpcServer(options) - form_fields = [("subject", message)] - if base: - form_fields.append(("base", base)) - if options.issue: - form_fields.append(("issue", str(options.issue))) - if options.email: - form_fields.append(("user", options.email)) - if options.reviewers: - for reviewer in options.reviewers.split(','): - if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1: - ErrorExit("Invalid email address: %s" % reviewer) - form_fields.append(("reviewers", options.reviewers)) - if options.cc: - for cc in options.cc.split(','): - if "@" in cc and not cc.split("@")[1].count(".") == 1: - ErrorExit("Invalid email address: %s" % cc) - form_fields.append(("cc", options.cc)) - description = options.description - if options.description_file: - if options.description: - ErrorExit("Can't specify description and description_file") - file = open(options.description_file, 'r') - description = file.read() - file.close() - if description: - form_fields.append(("description", description)) - # Send a hash of all the base file so the server can determine if a copy - # already exists in an earlier patchset. 
- base_hashes = "" - for file, info in files.iteritems(): - if not info[0] is None: - checksum = md5.new(info[0]).hexdigest() - if base_hashes: - base_hashes += "|" - base_hashes += checksum + ":" + file - form_fields.append(("base_hashes", base_hashes)) - # If we're uploading base files, don't send the email before the uploads, so - # that it contains the file status. - if options.send_mail and options.download_base: - form_fields.append(("send_mail", "1")) - if not options.download_base: - form_fields.append(("content_upload", "1")) - if len(data) > MAX_UPLOAD_SIZE: - print "Patch is large, so uploading file patches separately." - uploaded_diff_file = [] - form_fields.append(("separate_patches", "1")) - else: - uploaded_diff_file = [("data", "data.diff", data)] - ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file) - response_body = rpc_server.Send("/upload", body, content_type=ctype) - patchset = None - if not options.download_base or not uploaded_diff_file: - lines = response_body.splitlines() - if len(lines) >= 2: - msg = lines[0] - patchset = lines[1].strip() - patches = [x.split(" ", 1) for x in lines[2:]] - else: - msg = response_body - else: - msg = response_body - StatusUpdate(msg) - if not response_body.startswith("Issue created.") and \ - not response_body.startswith("Issue updated."): - sys.exit(0) - issue = msg[msg.rfind("/")+1:] - - if not uploaded_diff_file: - result = UploadSeparatePatches(issue, rpc_server, patchset, data, options) - if not options.download_base: - patches = result - - if not options.download_base: - vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files) - if options.send_mail: - rpc_server.Send("/" + issue + "/mail", payload="") - return issue, patchset - - -def main(): - try: - RealMain(sys.argv) - except KeyboardInterrupt: - print - StatusUpdate("Interrupted.") - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/ext/googletest/googletest/scripts/upload_gtest.py 
b/ext/googletest/googletest/scripts/upload_gtest.py deleted file mode 100755 index be19ae8091..0000000000 --- a/ext/googletest/googletest/scripts/upload_gtest.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review. - -This simple wrapper passes all command line flags and ---cc=googletestframework@googlegroups.com to upload.py. 
- -USAGE: upload_gtest.py [options for upload.py] -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import sys - -CC_FLAG = '--cc=' -GTEST_GROUP = 'googletestframework@googlegroups.com' - - -def main(): - # Finds the path to upload.py, assuming it is in the same directory - # as this file. - my_dir = os.path.dirname(os.path.abspath(__file__)) - upload_py_path = os.path.join(my_dir, 'upload.py') - - # Adds Google Test discussion group to the cc line if it's not there - # already. - upload_py_argv = [upload_py_path] - found_cc_flag = False - for arg in sys.argv[1:]: - if arg.startswith(CC_FLAG): - found_cc_flag = True - cc_line = arg[len(CC_FLAG):] - cc_list = [addr for addr in cc_line.split(',') if addr] - if GTEST_GROUP not in cc_list: - cc_list.append(GTEST_GROUP) - upload_py_argv.append(CC_FLAG + ','.join(cc_list)) - else: - upload_py_argv.append(arg) - - if not found_cc_flag: - upload_py_argv.append(CC_FLAG + GTEST_GROUP) - - # Invokes upload.py with the modified command line flags. - os.execv(upload_py_path, upload_py_argv) - - -if __name__ == '__main__': - main() diff --git a/ext/googletest/googletest/src/gtest-all.cc b/ext/googletest/googletest/src/gtest-all.cc index ad292905cf..2a70ed88c7 100644 --- a/ext/googletest/googletest/src/gtest-all.cc +++ b/ext/googletest/googletest/src/gtest-all.cc @@ -38,7 +38,7 @@ #include "gtest/gtest.h" // The following lines pull in the real gtest *.cc files. 
-#include "src/gtest.cc" +#include "src/gtest-assertion-result.cc" #include "src/gtest-death-test.cc" #include "src/gtest-filepath.cc" #include "src/gtest-matchers.cc" @@ -46,3 +46,4 @@ #include "src/gtest-printers.cc" #include "src/gtest-test-part.cc" #include "src/gtest-typed-test.cc" +#include "src/gtest.cc" diff --git a/ext/googletest/googletest/src/gtest-assertion-result.cc b/ext/googletest/googletest/src/gtest-assertion-result.cc new file mode 100644 index 0000000000..f1c0b10dc9 --- /dev/null +++ b/ext/googletest/googletest/src/gtest-assertion-result.cc @@ -0,0 +1,77 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The Google C++ Testing and Mocking Framework (Google Test) +// +// This file defines the AssertionResult type. + +#include "gtest/gtest-assertion-result.h" + +#include +#include + +#include "gtest/gtest-message.h" + +namespace testing { + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != nullptr + ? new ::std::string(*other.message_) + : static_cast< ::std::string*>(nullptr)) {} + +// Swaps two AssertionResults. +void AssertionResult::swap(AssertionResult& other) { + using std::swap; + swap(success_, other.success_); + swap(message_, other.message_); +} + +// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != nullptr) negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { return AssertionResult(true); } + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { return AssertionResult(false); } + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message. 
+AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +} // namespace testing diff --git a/ext/googletest/googletest/src/gtest-death-test.cc b/ext/googletest/googletest/src/gtest-death-test.cc index 52af2c795e..e6abc6278a 100644 --- a/ext/googletest/googletest/src/gtest-death-test.cc +++ b/ext/googletest/googletest/src/gtest-death-test.cc @@ -35,49 +35,49 @@ #include #include -#include "gtest/internal/gtest-port.h" #include "gtest/internal/custom/gtest.h" +#include "gtest/internal/gtest-port.h" #if GTEST_HAS_DEATH_TEST -# if GTEST_OS_MAC -# include -# endif // GTEST_OS_MAC +#if GTEST_OS_MAC +#include +#endif // GTEST_OS_MAC -# include -# include -# include +#include +#include +#include -# if GTEST_OS_LINUX -# include -# endif // GTEST_OS_LINUX +#if GTEST_OS_LINUX +#include +#endif // GTEST_OS_LINUX -# include +#include -# if GTEST_OS_WINDOWS -# include -# else -# include -# include -# endif // GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS +#include +#else +#include +#include +#endif // GTEST_OS_WINDOWS -# if GTEST_OS_QNX -# include -# endif // GTEST_OS_QNX +#if GTEST_OS_QNX +#include +#endif // GTEST_OS_QNX -# if GTEST_OS_FUCHSIA -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# endif // GTEST_OS_FUCHSIA +#if GTEST_OS_FUCHSIA +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // GTEST_OS_FUCHSIA #endif // GTEST_HAS_DEATH_TEST @@ -137,9 +137,9 @@ namespace internal { // Valid only for fast death tests. Indicates the code is running in the // child process of a fast style death test. -# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA static bool g_in_fast_death_test_child = false; -# endif +#endif // Returns a Boolean value indicating whether the caller is currently // executing in the context of the death test child process. 
Tools such as @@ -147,13 +147,13 @@ static bool g_in_fast_death_test_child = false; // tests. IMPORTANT: This is an internal utility. Using it may break the // implementation of death tests. User code MUST NOT use it. bool InDeathTestChild() { -# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA // On Windows and Fuchsia, death tests are thread-safe regardless of the value // of the death_test_style flag. return !GTEST_FLAG_GET(internal_run_death_test).empty(); -# else +#else if (GTEST_FLAG_GET(death_test_style) == "threadsafe") return !GTEST_FLAG_GET(internal_run_death_test).empty(); @@ -165,40 +165,38 @@ bool InDeathTestChild() { } // namespace internal // ExitedWithCode constructor. -ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { -} +ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {} // ExitedWithCode function-call operator. bool ExitedWithCode::operator()(int exit_status) const { -# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA return exit_status == exit_code_; -# else +#else return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_; -# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA } -# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA // KilledBySignal constructor. -KilledBySignal::KilledBySignal(int signum) : signum_(signum) { -} +KilledBySignal::KilledBySignal(int signum) : signum_(signum) {} // KilledBySignal function-call operator. 
bool KilledBySignal::operator()(int exit_status) const { -# if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_) +#if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_) { bool result; if (GTEST_KILLED_BY_SIGNAL_OVERRIDE_(signum_, exit_status, &result)) { return result; } } -# endif // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_) +#endif // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_) return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_; } -# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +#endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA namespace internal { @@ -209,23 +207,23 @@ namespace internal { static std::string ExitSummary(int exit_code) { Message m; -# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA m << "Exited with exit status " << exit_code; -# else +#else if (WIFEXITED(exit_code)) { m << "Exited with exit status " << WEXITSTATUS(exit_code); } else if (WIFSIGNALED(exit_code)) { m << "Terminated by signal " << WTERMSIG(exit_code); } -# ifdef WCOREDUMP +#ifdef WCOREDUMP if (WCOREDUMP(exit_code)) { m << " (core dumped)"; } -# endif -# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#endif +#endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA return m.GetString(); } @@ -236,7 +234,7 @@ bool ExitedUnsuccessfully(int exit_status) { return !ExitedWithCode(0)(exit_status); } -# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA // Generates a textual failure message when a death test finds more than // one thread running, or cannot determine the number of threads, prior // to executing the given statement. It is the responsibility of the @@ -257,7 +255,7 @@ static std::string DeathTestThreadWarning(size_t thread_count) { << " this is the last message you see before your test times out."; return msg.GetString(); } -# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA +#endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA // Flag characters for reporting a death test that did not die. 
static const char kDeathTestLived = 'L'; @@ -307,14 +305,14 @@ static void DeathTestAbort(const std::string& message) { // A replacement for CHECK that calls DeathTestAbort if the assertion // fails. -# define GTEST_DEATH_TEST_CHECK_(expression) \ - do { \ - if (!::testing::internal::IsTrue(expression)) { \ - DeathTestAbort( \ - ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ - + ::testing::internal::StreamableToString(__LINE__) + ": " \ - + #expression); \ - } \ +#define GTEST_DEATH_TEST_CHECK_(expression) \ + do { \ + if (!::testing::internal::IsTrue(expression)) { \ + DeathTestAbort(::std::string("CHECK failed: File ") + __FILE__ + \ + ", line " + \ + ::testing::internal::StreamableToString(__LINE__) + \ + ": " + #expression); \ + } \ } while (::testing::internal::AlwaysFalse()) // This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for @@ -324,23 +322,23 @@ static void DeathTestAbort(const std::string& message) { // evaluates the expression as long as it evaluates to -1 and sets // errno to EINTR. If the expression evaluates to -1 but errno is // something other than EINTR, DeathTestAbort is called. 
-# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ - do { \ - int gtest_retval; \ - do { \ - gtest_retval = (expression); \ - } while (gtest_retval == -1 && errno == EINTR); \ - if (gtest_retval == -1) { \ - DeathTestAbort( \ - ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ - + ::testing::internal::StreamableToString(__LINE__) + ": " \ - + #expression + " != -1"); \ - } \ +#define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ + do { \ + int gtest_retval; \ + do { \ + gtest_retval = (expression); \ + } while (gtest_retval == -1 && errno == EINTR); \ + if (gtest_retval == -1) { \ + DeathTestAbort(::std::string("CHECK failed: File ") + __FILE__ + \ + ", line " + \ + ::testing::internal::StreamableToString(__LINE__) + \ + ": " + #expression + " != -1"); \ + } \ } while (::testing::internal::AlwaysFalse()) // Returns the message describing the last system error in errno. std::string GetLastErrnoDescription() { - return errno == 0 ? "" : posix::StrError(errno); + return errno == 0 ? "" : posix::StrError(errno); } // This is called from a death test parent process to read a failure @@ -373,8 +371,9 @@ static void FailFromInternalError(int fd) { DeathTest::DeathTest() { TestInfo* const info = GetUnitTestImpl()->current_test_info(); if (info == nullptr) { - DeathTestAbort("Cannot run a death test outside of a TEST or " - "TEST_F construct"); + DeathTestAbort( + "Cannot run a death test outside of a TEST or " + "TEST_F construct"); } } @@ -503,9 +502,7 @@ void DeathTestImpl::ReadAndInterpretStatusByte() { set_read_fd(-1); } -std::string DeathTestImpl::GetErrorLogs() { - return GetCapturedStderr(); -} +std::string DeathTestImpl::GetErrorLogs() { return GetCapturedStderr(); } // Signals that the death test code which should have exited, didn't. // Should be called only in a death test child process. 
@@ -515,9 +512,9 @@ void DeathTestImpl::Abort(AbortReason reason) { // The parent process considers the death test to be a failure if // it finds any data in our pipe. So, here we write a single flag byte // to the pipe, then exit. - const char status_ch = - reason == TEST_DID_NOT_DIE ? kDeathTestLived : - reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned; + const char status_ch = reason == TEST_DID_NOT_DIE ? kDeathTestLived + : reason == TEST_THREW_EXCEPTION ? kDeathTestThrew + : kDeathTestReturned; GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1)); // We are leaking the descriptor here because on some platforms (i.e., @@ -536,7 +533,7 @@ void DeathTestImpl::Abort(AbortReason reason) { // much easier. static ::std::string FormatDeathTestOutput(const ::std::string& output) { ::std::string ret; - for (size_t at = 0; ; ) { + for (size_t at = 0;;) { const size_t line_end = output.find('\n', at); ret += "[ DEATH ] "; if (line_end == ::std::string::npos) { @@ -571,8 +568,7 @@ static ::std::string FormatDeathTestOutput(const ::std::string& output) { // the first failing condition, in the order given above, is the one that is // reported. Also sets the last death test message string. 
bool DeathTestImpl::Passed(bool status_ok) { - if (!spawned()) - return false; + if (!spawned()) return false; const std::string error_message = GetErrorLogs(); @@ -583,15 +579,18 @@ bool DeathTestImpl::Passed(bool status_ok) { switch (outcome()) { case LIVED: buffer << " Result: failed to die.\n" - << " Error msg:\n" << FormatDeathTestOutput(error_message); + << " Error msg:\n" + << FormatDeathTestOutput(error_message); break; case THREW: buffer << " Result: threw an exception.\n" - << " Error msg:\n" << FormatDeathTestOutput(error_message); + << " Error msg:\n" + << FormatDeathTestOutput(error_message); break; case RETURNED: buffer << " Result: illegal return in test statement.\n" - << " Error msg:\n" << FormatDeathTestOutput(error_message); + << " Error msg:\n" + << FormatDeathTestOutput(error_message); break; case DIED: if (status_ok) { @@ -608,7 +607,8 @@ bool DeathTestImpl::Passed(bool status_ok) { } else { buffer << " Result: died but not with expected exit code:\n" << " " << ExitSummary(status()) << "\n" - << "Actual msg:\n" << FormatDeathTestOutput(error_message); + << "Actual msg:\n" + << FormatDeathTestOutput(error_message); } break; case IN_PROGRESS: @@ -621,7 +621,7 @@ bool DeathTestImpl::Passed(bool status_ok) { return success; } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS // WindowsDeathTest implements death tests on Windows. Due to the // specifics of starting new processes on Windows, death tests there are // always threadsafe, and Google Test considers the @@ -682,14 +682,12 @@ class WindowsDeathTest : public DeathTestImpl { // status, or 0 if no child process exists. As a side effect, sets the // outcome data member. int WindowsDeathTest::Wait() { - if (!spawned()) - return 0; + if (!spawned()) return 0; // Wait until the child either signals that it has acquired the write end // of the pipe or it dies. 
- const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() }; - switch (::WaitForMultipleObjects(2, - wait_handles, + const HANDLE wait_handles[2] = {child_handle_.Get(), event_handle_.Get()}; + switch (::WaitForMultipleObjects(2, wait_handles, FALSE, // Waits for any of the handles. INFINITE)) { case WAIT_OBJECT_0: @@ -710,9 +708,8 @@ int WindowsDeathTest::Wait() { // returns immediately if the child has already exited, regardless of // whether previous calls to WaitForMultipleObjects synchronized on this // handle or not. - GTEST_DEATH_TEST_CHECK_( - WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(), - INFINITE)); + GTEST_DEATH_TEST_CHECK_(WAIT_OBJECT_0 == + ::WaitForSingleObject(child_handle_.Get(), INFINITE)); DWORD status_code; GTEST_DEATH_TEST_CHECK_( ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE); @@ -745,12 +742,12 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { SECURITY_ATTRIBUTES handles_are_inheritable = {sizeof(SECURITY_ATTRIBUTES), nullptr, TRUE}; HANDLE read_handle, write_handle; - GTEST_DEATH_TEST_CHECK_( - ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable, - 0) // Default buffer size. - != FALSE); - set_read_fd(::_open_osfhandle(reinterpret_cast(read_handle), - O_RDONLY)); + GTEST_DEATH_TEST_CHECK_(::CreatePipe(&read_handle, &write_handle, + &handles_are_inheritable, + 0) // Default buffer size. 
+ != FALSE); + set_read_fd( + ::_open_osfhandle(reinterpret_cast(read_handle), O_RDONLY)); write_handle_.Reset(write_handle); event_handle_.Reset(::CreateEvent( &handles_are_inheritable, @@ -777,9 +774,8 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { executable_path, _MAX_PATH)); - std::string command_line = - std::string(::GetCommandLineA()) + " " + filter_flag + " \"" + - internal_flag + "\""; + std::string command_line = std::string(::GetCommandLineA()) + " " + + filter_flag + " \"" + internal_flag + "\""; DeathTest::set_last_death_test_message(""); @@ -799,8 +795,8 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { GTEST_DEATH_TEST_CHECK_( ::CreateProcessA( executable_path, const_cast(command_line.c_str()), - nullptr, // Retuned process handle is not inheritable. - nullptr, // Retuned thread handle is not inheritable. + nullptr, // Returned process handle is not inheritable. + nullptr, // Returned thread handle is not inheritable. TRUE, // Child inherits all inheritable handles (for write_handle_). 0x0, // Default creation flags. nullptr, // Inherit the parent's environment. 
@@ -812,7 +808,7 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { return OVERSEE_TEST; } -# elif GTEST_OS_FUCHSIA +#elif GTEST_OS_FUCHSIA class FuchsiaDeathTest : public DeathTestImpl { public: @@ -858,18 +854,13 @@ class Arguments { template void AddArguments(const ::std::vector& arguments) { for (typename ::std::vector::const_iterator i = arguments.begin(); - i != arguments.end(); - ++i) { + i != arguments.end(); ++i) { args_.insert(args_.end() - 1, posix::StrDup(i->c_str())); } } - char* const* Argv() { - return &args_[0]; - } + char* const* Argv() { return &args_[0]; } - int size() { - return static_cast(args_.size()) - 1; - } + int size() { return static_cast(args_.size()) - 1; } private: std::vector args_; @@ -883,8 +874,7 @@ int FuchsiaDeathTest::Wait() { const int kSocketKey = 1; const int kExceptionKey = 2; - if (!spawned()) - return 0; + if (!spawned()) return 0; // Create a port to wait for socket/task/exception events. zx_status_t status_zx; @@ -893,8 +883,8 @@ int FuchsiaDeathTest::Wait() { GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); // Register to wait for the child process to terminate. - status_zx = child_process_.wait_async( - port, kProcessKey, ZX_PROCESS_TERMINATED, 0); + status_zx = + child_process_.wait_async(port, kProcessKey, ZX_PROCESS_TERMINATED, 0); GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); // Register to wait for the socket to be readable or closed. @@ -903,8 +893,8 @@ int FuchsiaDeathTest::Wait() { GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); // Register to wait for an exception. 
- status_zx = exception_channel_.wait_async( - port, kExceptionKey, ZX_CHANNEL_READABLE, 0); + status_zx = exception_channel_.wait_async(port, kExceptionKey, + ZX_CHANNEL_READABLE, 0); GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK); bool process_terminated = false; @@ -934,9 +924,9 @@ int FuchsiaDeathTest::Wait() { size_t old_length = captured_stderr_.length(); size_t bytes_read = 0; captured_stderr_.resize(old_length + kBufferSize); - status_zx = stderr_socket_.read( - 0, &captured_stderr_.front() + old_length, kBufferSize, - &bytes_read); + status_zx = + stderr_socket_.read(0, &captured_stderr_.front() + old_length, + kBufferSize, &bytes_read); captured_stderr_.resize(old_length + bytes_read); } while (status_zx == ZX_OK); if (status_zx == ZX_ERR_PEER_CLOSED) { @@ -992,11 +982,10 @@ DeathTest::TestRole FuchsiaDeathTest::AssumeRole() { const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ + "filter=" + info->test_suite_name() + "." + info->name(); - const std::string internal_flag = - std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "=" - + file_ + "|" - + StreamableToString(line_) + "|" - + StreamableToString(death_test_index); + const std::string internal_flag = std::string("--") + GTEST_FLAG_PREFIX_ + + kInternalRunDeathTestFlag + "=" + file_ + + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index); Arguments args; args.AddArguments(GetInjectableArgvs()); args.AddArgument(filter_flag.c_str()); @@ -1019,8 +1008,7 @@ DeathTest::TestRole FuchsiaDeathTest::AssumeRole() { // Create a socket pair will be used to receive the child process' stderr. zx::socket stderr_producer_socket; - status = - zx::socket::create(0, &stderr_producer_socket, &stderr_socket_); + status = zx::socket::create(0, &stderr_producer_socket, &stderr_socket_); GTEST_DEATH_TEST_CHECK_(status >= 0); int stderr_producer_fd = -1; status = @@ -1037,35 +1025,32 @@ DeathTest::TestRole FuchsiaDeathTest::AssumeRole() { // Create a child job. 
zx_handle_t child_job = ZX_HANDLE_INVALID; - status = zx_job_create(zx_job_default(), 0, & child_job); + status = zx_job_create(zx_job_default(), 0, &child_job); GTEST_DEATH_TEST_CHECK_(status == ZX_OK); zx_policy_basic_t policy; policy.condition = ZX_POL_NEW_ANY; policy.policy = ZX_POL_ACTION_ALLOW; - status = zx_job_set_policy( - child_job, ZX_JOB_POL_RELATIVE, ZX_JOB_POL_BASIC, &policy, 1); + status = zx_job_set_policy(child_job, ZX_JOB_POL_RELATIVE, ZX_JOB_POL_BASIC, + &policy, 1); GTEST_DEATH_TEST_CHECK_(status == ZX_OK); // Create an exception channel attached to the |child_job|, to allow // us to suppress the system default exception handler from firing. - status = - zx_task_create_exception_channel( - child_job, 0, exception_channel_.reset_and_get_address()); + status = zx_task_create_exception_channel( + child_job, 0, exception_channel_.reset_and_get_address()); GTEST_DEATH_TEST_CHECK_(status == ZX_OK); // Spawn the child process. - status = fdio_spawn_etc( - child_job, FDIO_SPAWN_CLONE_ALL, args.Argv()[0], args.Argv(), nullptr, - 2, spawn_actions, child_process_.reset_and_get_address(), nullptr); + status = fdio_spawn_etc(child_job, FDIO_SPAWN_CLONE_ALL, args.Argv()[0], + args.Argv(), nullptr, 2, spawn_actions, + child_process_.reset_and_get_address(), nullptr); GTEST_DEATH_TEST_CHECK_(status == ZX_OK); set_spawned(true); return OVERSEE_TEST; } -std::string FuchsiaDeathTest::GetErrorLogs() { - return captured_stderr_; -} +std::string FuchsiaDeathTest::GetErrorLogs() { return captured_stderr_; } #else // We are neither on Windows, nor on Fuchsia. @@ -1096,8 +1081,7 @@ ForkingDeathTest::ForkingDeathTest(const char* a_statement, // status, or 0 if no child process exists. As a side effect, sets the // outcome data member. 
int ForkingDeathTest::Wait() { - if (!spawned()) - return 0; + if (!spawned()) return 0; ReadAndInterpretStatusByte(); @@ -1176,11 +1160,11 @@ class ExecDeathTest : public ForkingDeathTest { private: static ::std::vector GetArgvsForDeathTestChildProcess() { ::std::vector args = GetInjectableArgvs(); -# if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_) +#if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_) ::std::vector extra_args = GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_(); args.insert(args.end(), extra_args.begin(), extra_args.end()); -# endif // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_) +#endif // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_) return args; } // The name of the file in which the death test is located. @@ -1207,14 +1191,11 @@ class Arguments { template void AddArguments(const ::std::vector& arguments) { for (typename ::std::vector::const_iterator i = arguments.begin(); - i != arguments.end(); - ++i) { + i != arguments.end(); ++i) { args_.insert(args_.end() - 1, posix::StrDup(i->c_str())); } } - char* const* Argv() { - return &args_[0]; - } + char* const* Argv() { return &args_[0]; } private: std::vector args_; @@ -1227,9 +1208,9 @@ struct ExecDeathTestArgs { int close_fd; // File descriptor to close; the read end of a pipe }; -# if GTEST_OS_QNX +#if GTEST_OS_QNX extern "C" char** environ; -# else // GTEST_OS_QNX +#else // GTEST_OS_QNX // The main function for a threadsafe-style death test child process. // This function is called in a clone()-ed process and thus must avoid // any potentially unsafe operations like malloc or libc functions. @@ -1244,8 +1225,8 @@ static int ExecDeathTestChildMain(void* child_arg) { UnitTest::GetInstance()->original_working_dir(); // We can safely call chdir() as it's a direct system call. 
if (chdir(original_dir) != 0) { - DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + - GetLastErrnoDescription()); + DeathTestAbort(std::string("chdir(\"") + original_dir + + "\") failed: " + GetLastErrnoDescription()); return EXIT_FAILURE; } @@ -1256,13 +1237,12 @@ static int ExecDeathTestChildMain(void* child_arg) { // one path separator. execv(args->argv[0], args->argv); DeathTestAbort(std::string("execv(") + args->argv[0] + ", ...) in " + - original_dir + " failed: " + - GetLastErrnoDescription()); + original_dir + " failed: " + GetLastErrnoDescription()); return EXIT_FAILURE; } -# endif // GTEST_OS_QNX +#endif // GTEST_OS_QNX -# if GTEST_HAS_CLONE +#if GTEST_HAS_CLONE // Two utility routines that together determine the direction the stack // grows. // This could be accomplished more elegantly by a single recursive @@ -1296,7 +1276,7 @@ static bool StackGrowsDown() { StackLowerThanAddress(&dummy, &result); return result; } -# endif // GTEST_HAS_CLONE +#endif // GTEST_HAS_CLONE // Spawns a child process with the same executable as the current process in // a thread-safe manner and instructs it to run the death test. The @@ -1306,10 +1286,10 @@ static bool StackGrowsDown() { // spawn(2) there instead. The function dies with an error message if // anything goes wrong. static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { - ExecDeathTestArgs args = { argv, close_fd }; + ExecDeathTestArgs args = {argv, close_fd}; pid_t child_pid = -1; -# if GTEST_OS_QNX +#if GTEST_OS_QNX // Obtains the current directory and sets it to be closed in the child // process. const int cwd_fd = open(".", O_RDONLY); @@ -1322,16 +1302,16 @@ static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { UnitTest::GetInstance()->original_working_dir(); // We can safely call chdir() as it's a direct system call. 
if (chdir(original_dir) != 0) { - DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + - GetLastErrnoDescription()); + DeathTestAbort(std::string("chdir(\"") + original_dir + + "\") failed: " + GetLastErrnoDescription()); return EXIT_FAILURE; } int fd_flags; // Set close_fd to be closed after spawn. GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD)); - GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD, - fd_flags | FD_CLOEXEC)); + GTEST_DEATH_TEST_CHECK_SYSCALL_( + fcntl(close_fd, F_SETFD, fd_flags | FD_CLOEXEC)); struct inheritance inherit = {0}; // spawn is a system call. child_pid = spawn(args.argv[0], 0, nullptr, &inherit, args.argv, environ); @@ -1339,8 +1319,8 @@ static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1); GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd)); -# else // GTEST_OS_QNX -# if GTEST_OS_LINUX +#else // GTEST_OS_QNX +#if GTEST_OS_LINUX // When a SIGPROF signal is received while fork() or clone() are executing, // the process may hang. To avoid this, we ignore SIGPROF here and re-enable // it after the call to fork()/clone() is complete. 
@@ -1349,11 +1329,11 @@ static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action)); sigemptyset(&ignore_sigprof_action.sa_mask); ignore_sigprof_action.sa_handler = SIG_IGN; - GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction( - SIGPROF, &ignore_sigprof_action, &saved_sigprof_action)); -# endif // GTEST_OS_LINUX + GTEST_DEATH_TEST_CHECK_SYSCALL_( + sigaction(SIGPROF, &ignore_sigprof_action, &saved_sigprof_action)); +#endif // GTEST_OS_LINUX -# if GTEST_HAS_CLONE +#if GTEST_HAS_CLONE const bool use_fork = GTEST_FLAG_GET(death_test_use_fork); if (!use_fork) { @@ -1373,7 +1353,7 @@ static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { const size_t kMaxStackAlignment = 64; void* const stack_top = static_cast(stack) + - (stack_grows_down ? stack_size - kMaxStackAlignment : 0); + (stack_grows_down ? stack_size - kMaxStackAlignment : 0); GTEST_DEATH_TEST_CHECK_( static_cast(stack_size) > kMaxStackAlignment && reinterpret_cast(stack_top) % kMaxStackAlignment == 0); @@ -1382,19 +1362,19 @@ static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1); } -# else +#else const bool use_fork = true; -# endif // GTEST_HAS_CLONE +#endif // GTEST_HAS_CLONE if (use_fork && (child_pid = fork()) == 0) { - ExecDeathTestChildMain(&args); - _exit(0); + ExecDeathTestChildMain(&args); + _exit(0); } -# endif // GTEST_OS_QNX -# if GTEST_OS_LINUX +#endif // GTEST_OS_QNX +#if GTEST_OS_LINUX GTEST_DEATH_TEST_CHECK_SYSCALL_( sigaction(SIGPROF, &saved_sigprof_action, nullptr)); -# endif // GTEST_OS_LINUX +#endif // GTEST_OS_LINUX GTEST_DEATH_TEST_CHECK_(child_pid != -1); return child_pid; @@ -1450,7 +1430,7 @@ DeathTest::TestRole ExecDeathTest::AssumeRole() { return OVERSEE_TEST; } -# endif // !GTEST_OS_WINDOWS +#endif // !GTEST_OS_WINDOWS // Creates a concrete DeathTest-derived class that depends on the // --gtest_death_test_style flag, and 
sets the pointer pointed to @@ -1464,15 +1444,15 @@ bool DefaultDeathTestFactory::Create(const char* statement, UnitTestImpl* const impl = GetUnitTestImpl(); const InternalRunDeathTestFlag* const flag = impl->internal_run_death_test_flag(); - const int death_test_index = impl->current_test_info() - ->increment_death_test_count(); + const int death_test_index = + impl->current_test_info()->increment_death_test_count(); if (flag != nullptr) { if (death_test_index > flag->index()) { DeathTest::set_last_death_test_message( - "Death test count (" + StreamableToString(death_test_index) - + ") somehow exceeded expected maximum (" - + StreamableToString(flag->index()) + ")"); + "Death test count (" + StreamableToString(death_test_index) + + ") somehow exceeded expected maximum (" + + StreamableToString(flag->index()) + ")"); return false; } @@ -1483,21 +1463,21 @@ bool DefaultDeathTestFactory::Create(const char* statement, } } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS if (GTEST_FLAG_GET(death_test_style) == "threadsafe" || GTEST_FLAG_GET(death_test_style) == "fast") { *test = new WindowsDeathTest(statement, std::move(matcher), file, line); } -# elif GTEST_OS_FUCHSIA +#elif GTEST_OS_FUCHSIA if (GTEST_FLAG_GET(death_test_style) == "threadsafe" || GTEST_FLAG_GET(death_test_style) == "fast") { *test = new FuchsiaDeathTest(statement, std::move(matcher), file, line); } -# else +#else if (GTEST_FLAG_GET(death_test_style) == "threadsafe") { *test = new ExecDeathTest(statement, std::move(matcher), file, line); @@ -1505,7 +1485,7 @@ bool DefaultDeathTestFactory::Create(const char* statement, *test = new NoExecDeathTest(statement, std::move(matcher)); } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS else { // NOLINT - this is more readable than unbalanced brackets inside #if. 
DeathTest::set_last_death_test_message("Unknown death test style \"" + @@ -1517,16 +1497,16 @@ bool DefaultDeathTestFactory::Create(const char* statement, return true; } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS // Recreates the pipe and event handles from the provided parameters, // signals the event, and returns a file descriptor wrapped around the pipe // handle. This function is called in the child process only. static int GetStatusFileDescriptor(unsigned int parent_process_id, - size_t write_handle_as_size_t, - size_t event_handle_as_size_t) { + size_t write_handle_as_size_t, + size_t event_handle_as_size_t) { AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE, - FALSE, // Non-inheritable. - parent_process_id)); + FALSE, // Non-inheritable. + parent_process_id)); if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) { DeathTestAbort("Unable to open parent process " + StreamableToString(parent_process_id)); @@ -1534,8 +1514,7 @@ static int GetStatusFileDescriptor(unsigned int parent_process_id, GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t)); - const HANDLE write_handle = - reinterpret_cast(write_handle_as_size_t); + const HANDLE write_handle = reinterpret_cast(write_handle_as_size_t); HANDLE dup_write_handle; // The newly initialized handle is accessible only in the parent @@ -1557,9 +1536,7 @@ static int GetStatusFileDescriptor(unsigned int parent_process_id, HANDLE dup_event_handle; if (!::DuplicateHandle(parent_process_handle.Get(), event_handle, - ::GetCurrentProcess(), &dup_event_handle, - 0x0, - FALSE, + ::GetCurrentProcess(), &dup_event_handle, 0x0, FALSE, DUPLICATE_SAME_ACCESS)) { DeathTestAbort("Unable to duplicate the event handle " + StreamableToString(event_handle_as_size_t) + @@ -1581,7 +1558,7 @@ static int GetStatusFileDescriptor(unsigned int parent_process_id, return write_fd; } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS // Returns a newly created InternalRunDeathTestFlag object with fields // initialized from 
the GTEST_FLAG(internal_run_death_test) flag if @@ -1597,45 +1574,41 @@ InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { SplitString(GTEST_FLAG_GET(internal_run_death_test), '|', &fields); int write_fd = -1; -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS unsigned int parent_process_id = 0; size_t write_handle_as_size_t = 0; size_t event_handle_as_size_t = 0; - if (fields.size() != 6 - || !ParseNaturalNumber(fields[1], &line) - || !ParseNaturalNumber(fields[2], &index) - || !ParseNaturalNumber(fields[3], &parent_process_id) - || !ParseNaturalNumber(fields[4], &write_handle_as_size_t) - || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { + if (fields.size() != 6 || !ParseNaturalNumber(fields[1], &line) || + !ParseNaturalNumber(fields[2], &index) || + !ParseNaturalNumber(fields[3], &parent_process_id) || + !ParseNaturalNumber(fields[4], &write_handle_as_size_t) || + !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + GTEST_FLAG_GET(internal_run_death_test)); } - write_fd = GetStatusFileDescriptor(parent_process_id, - write_handle_as_size_t, + write_fd = GetStatusFileDescriptor(parent_process_id, write_handle_as_size_t, event_handle_as_size_t); -# elif GTEST_OS_FUCHSIA +#elif GTEST_OS_FUCHSIA - if (fields.size() != 3 - || !ParseNaturalNumber(fields[1], &line) - || !ParseNaturalNumber(fields[2], &index)) { + if (fields.size() != 3 || !ParseNaturalNumber(fields[1], &line) || + !ParseNaturalNumber(fields[2], &index)) { DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + GTEST_FLAG_GET(internal_run_death_test)); } -# else +#else - if (fields.size() != 4 - || !ParseNaturalNumber(fields[1], &line) - || !ParseNaturalNumber(fields[2], &index) - || !ParseNaturalNumber(fields[3], &write_fd)) { + if (fields.size() != 4 || !ParseNaturalNumber(fields[1], &line) || + !ParseNaturalNumber(fields[2], &index) || + !ParseNaturalNumber(fields[3], &write_fd)) { DeathTestAbort("Bad 
--gtest_internal_run_death_test flag: " + GTEST_FLAG_GET(internal_run_death_test)); } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS return new InternalRunDeathTestFlag(fields[0], line, index, write_fd); } diff --git a/ext/googletest/googletest/src/gtest-filepath.cc b/ext/googletest/googletest/src/gtest-filepath.cc index 0b5629401b..f6ee90cdb7 100644 --- a/ext/googletest/googletest/src/gtest-filepath.cc +++ b/ext/googletest/googletest/src/gtest-filepath.cc @@ -30,29 +30,31 @@ #include "gtest/internal/gtest-filepath.h" #include -#include "gtest/internal/gtest-port.h" + #include "gtest/gtest-message.h" +#include "gtest/internal/gtest-port.h" #if GTEST_OS_WINDOWS_MOBILE -# include +#include #elif GTEST_OS_WINDOWS -# include -# include +#include +#include #else -# include -# include // Some Linux distributions define PATH_MAX here. -#endif // GTEST_OS_WINDOWS_MOBILE +#include + +#include // Some Linux distributions define PATH_MAX here. +#endif // GTEST_OS_WINDOWS_MOBILE #include "gtest/internal/gtest-string.h" #if GTEST_OS_WINDOWS -# define GTEST_PATH_MAX_ _MAX_PATH +#define GTEST_PATH_MAX_ _MAX_PATH #elif defined(PATH_MAX) -# define GTEST_PATH_MAX_ PATH_MAX +#define GTEST_PATH_MAX_ PATH_MAX #elif defined(_XOPEN_PATH_MAX) -# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX +#define GTEST_PATH_MAX_ _XOPEN_PATH_MAX #else -# define GTEST_PATH_MAX_ _POSIX_PATH_MAX +#define GTEST_PATH_MAX_ _POSIX_PATH_MAX #endif // GTEST_OS_WINDOWS namespace testing { @@ -66,16 +68,16 @@ namespace internal { const char kPathSeparator = '\\'; const char kAlternatePathSeparator = '/'; const char kAlternatePathSeparatorString[] = "/"; -# if GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS_MOBILE // Windows CE doesn't have a current directory. You should not use // the current directory in tests on Windows CE, but this at least // provides a reasonable fallback. 
const char kCurrentDirectoryString[] = "\\"; // Windows CE doesn't define INVALID_FILE_ATTRIBUTES const DWORD kInvalidFileAttributes = 0xffffffff; -# else +#else const char kCurrentDirectoryString[] = ".\\"; -# endif // GTEST_OS_WINDOWS_MOBILE +#endif // GTEST_OS_WINDOWS_MOBILE #else const char kPathSeparator = '/'; const char kCurrentDirectoryString[] = "./"; @@ -99,17 +101,17 @@ FilePath FilePath::GetCurrentDir() { // something reasonable. return FilePath(kCurrentDirectoryString); #elif GTEST_OS_WINDOWS - char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + char cwd[GTEST_PATH_MAX_ + 1] = {'\0'}; return FilePath(_getcwd(cwd, sizeof(cwd)) == nullptr ? "" : cwd); #else - char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + char cwd[GTEST_PATH_MAX_ + 1] = {'\0'}; char* result = getcwd(cwd, sizeof(cwd)); -# if GTEST_OS_NACL +#if GTEST_OS_NACL // getcwd will likely fail in NaCl due to the sandbox, so return something // reasonable. The user may have provided a shim implementation for getcwd, // however, so fallback only when failure is detected. return FilePath(result == nullptr ? kCurrentDirectoryString : cwd); -# endif // GTEST_OS_NACL +#endif // GTEST_OS_NACL return FilePath(result == nullptr ? "" : cwd); #endif // GTEST_OS_WINDOWS_MOBILE } @@ -121,8 +123,8 @@ FilePath FilePath::GetCurrentDir() { FilePath FilePath::RemoveExtension(const char* extension) const { const std::string dot_extension = std::string(".") + extension; if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) { - return FilePath(pathname_.substr( - 0, pathname_.length() - dot_extension.length())); + return FilePath( + pathname_.substr(0, pathname_.length() - dot_extension.length())); } return *this; } @@ -178,15 +180,14 @@ FilePath FilePath::RemoveFileName() const { // than zero (e.g., 12), returns "dir/test_12.xml". // On Windows platform, uses \ as the separator rather than /. 
FilePath FilePath::MakeFileName(const FilePath& directory, - const FilePath& base_name, - int number, + const FilePath& base_name, int number, const char* extension) { std::string file; if (number == 0) { file = base_name.string() + "." + extension; } else { - file = base_name.string() + "_" + StreamableToString(number) - + "." + extension; + file = + base_name.string() + "_" + StreamableToString(number) + "." + extension; } return ConcatPaths(directory, FilePath(file)); } @@ -195,8 +196,7 @@ FilePath FilePath::MakeFileName(const FilePath& directory, // On Windows, uses \ as the separator rather than /. FilePath FilePath::ConcatPaths(const FilePath& directory, const FilePath& relative_path) { - if (directory.IsEmpty()) - return relative_path; + if (directory.IsEmpty()) return relative_path; const FilePath dir(directory.RemoveTrailingPathSeparator()); return FilePath(dir.string() + kPathSeparator + relative_path.string()); } @@ -207,7 +207,7 @@ bool FilePath::FileOrDirectoryExists() const { #if GTEST_OS_WINDOWS_MOBILE LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); const DWORD attributes = GetFileAttributes(unicode); - delete [] unicode; + delete[] unicode; return attributes != kInvalidFileAttributes; #else posix::StatStruct file_stat{}; @@ -222,8 +222,8 @@ bool FilePath::DirectoryExists() const { #if GTEST_OS_WINDOWS // Don't strip off trailing separator if path is a root directory on // Windows (like "C:\\"). - const FilePath& path(IsRootDirectory() ? *this : - RemoveTrailingPathSeparator()); + const FilePath& path(IsRootDirectory() ? 
*this + : RemoveTrailingPathSeparator()); #else const FilePath& path(*this); #endif @@ -231,15 +231,15 @@ bool FilePath::DirectoryExists() const { #if GTEST_OS_WINDOWS_MOBILE LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); const DWORD attributes = GetFileAttributes(unicode); - delete [] unicode; + delete[] unicode; if ((attributes != kInvalidFileAttributes) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) { result = true; } #else posix::StatStruct file_stat{}; - result = posix::Stat(path.c_str(), &file_stat) == 0 && - posix::IsDir(file_stat); + result = + posix::Stat(path.c_str(), &file_stat) == 0 && posix::IsDir(file_stat); #endif // GTEST_OS_WINDOWS_MOBILE return result; @@ -260,10 +260,9 @@ bool FilePath::IsAbsolutePath() const { const char* const name = pathname_.c_str(); #if GTEST_OS_WINDOWS return pathname_.length() >= 3 && - ((name[0] >= 'a' && name[0] <= 'z') || - (name[0] >= 'A' && name[0] <= 'Z')) && - name[1] == ':' && - IsPathSeparator(name[2]); + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && IsPathSeparator(name[2]); #else return IsPathSeparator(name[0]); #endif @@ -321,7 +320,7 @@ bool FilePath::CreateFolder() const { FilePath removed_sep(this->RemoveTrailingPathSeparator()); LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); int result = CreateDirectory(unicode, nullptr) ? 0 : -1; - delete [] unicode; + delete[] unicode; #elif GTEST_OS_WINDOWS int result = _mkdir(pathname_.c_str()); #elif GTEST_OS_ESP8266 || GTEST_OS_XTENSA @@ -341,9 +340,8 @@ bool FilePath::CreateFolder() const { // name, otherwise return the name string unmodified. // On Windows platform, uses \ as the separator, other platforms use /. FilePath FilePath::RemoveTrailingPathSeparator() const { - return IsDirectory() - ? FilePath(pathname_.substr(0, pathname_.length() - 1)) - : *this; + return IsDirectory() ? 
FilePath(pathname_.substr(0, pathname_.length() - 1)) + : *this; } // Removes any redundant separators that might be in the pathname. diff --git a/ext/googletest/googletest/src/gtest-internal-inl.h b/ext/googletest/googletest/src/gtest-internal-inl.h index 075b84c258..0b9e929c68 100644 --- a/ext/googletest/googletest/src/gtest-internal-inl.h +++ b/ext/googletest/googletest/src/gtest-internal-inl.h @@ -35,7 +35,7 @@ #define GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_ #ifndef _WIN32_WCE -# include +#include #endif // !_WIN32_WCE #include #include // For strtoll/_strtoul64/malloc/free. @@ -50,16 +50,16 @@ #include "gtest/internal/gtest-port.h" #if GTEST_CAN_STREAM_RESULTS_ -# include // NOLINT -# include // NOLINT +#include // NOLINT +#include // NOLINT #endif #if GTEST_OS_WINDOWS -# include // NOLINT -#endif // GTEST_OS_WINDOWS +#include // NOLINT +#endif // GTEST_OS_WINDOWS -#include "gtest/gtest.h" #include "gtest/gtest-spi.h" +#include "gtest/gtest.h" GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ /* class A needs to have dll-interface to be used by clients of class B */) @@ -109,15 +109,16 @@ GTEST_API_ bool ParseFlag(const char* str, const char* flag, int32_t* value); // Returns a random seed in range [1, kMaxRandomSeed] based on the // given --gtest_random_seed flag value. inline int GetRandomSeedFromFlag(int32_t random_seed_flag) { - const unsigned int raw_seed = (random_seed_flag == 0) ? - static_cast(GetTimeInMillis()) : - static_cast(random_seed_flag); + const unsigned int raw_seed = + (random_seed_flag == 0) ? static_cast(GetTimeInMillis()) + : static_cast(random_seed_flag); // Normalizes the actual seed to range [1, kMaxRandomSeed] such that // it's easy to type. 
const int normalized_seed = static_cast((raw_seed - 1U) % - static_cast(kMaxRandomSeed)) + 1; + static_cast(kMaxRandomSeed)) + + 1; return normalized_seed; } @@ -261,8 +262,8 @@ GTEST_API_ int32_t Int32FromEnvOrDie(const char* env_var, int32_t default_val); // returns true if and only if the test should be run on this shard. The test id // is some arbitrary but unique non-negative integer assigned to each test // method. Assumes that 0 <= shard_index < total_shards. -GTEST_API_ bool ShouldRunTestOnShard( - int total_shards, int shard_index, int test_id); +GTEST_API_ bool ShouldRunTestOnShard(int total_shards, int shard_index, + int test_id); // STL container utilities. @@ -273,9 +274,8 @@ inline int CountIf(const Container& c, Predicate predicate) { // Implemented as an explicit loop since std::count_if() in libCstd on // Solaris has a non-standard signature. int count = 0; - for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { - if (predicate(*it)) - ++count; + for (auto it = c.begin(); it != c.end(); ++it) { + if (predicate(*it)) ++count; } return count; } @@ -424,7 +424,9 @@ class OsStackTraceGetterInterface { static const char* const kElidedFramesMarker; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); + OsStackTraceGetterInterface(const OsStackTraceGetterInterface&) = delete; + OsStackTraceGetterInterface& operator=(const OsStackTraceGetterInterface&) = + delete; }; // A working implementation of the OsStackTraceGetterInterface interface. @@ -446,7 +448,8 @@ class OsStackTraceGetter : public OsStackTraceGetterInterface { void* caller_frame_ = nullptr; #endif // GTEST_HAS_ABSL - GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); + OsStackTraceGetter(const OsStackTraceGetter&) = delete; + OsStackTraceGetter& operator=(const OsStackTraceGetter&) = delete; }; // Information about a Google Test trace point. 
@@ -459,7 +462,7 @@ struct TraceInfo { // This is the default global test part result reporter used in UnitTestImpl. // This class should only be used by UnitTestImpl. class DefaultGlobalTestPartResultReporter - : public TestPartResultReporterInterface { + : public TestPartResultReporterInterface { public: explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); // Implements the TestPartResultReporterInterface. Reports the test part @@ -469,7 +472,10 @@ class DefaultGlobalTestPartResultReporter private: UnitTestImpl* const unit_test_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); + DefaultGlobalTestPartResultReporter( + const DefaultGlobalTestPartResultReporter&) = delete; + DefaultGlobalTestPartResultReporter& operator=( + const DefaultGlobalTestPartResultReporter&) = delete; }; // This is the default per thread test part result reporter used in @@ -485,7 +491,10 @@ class DefaultPerThreadTestPartResultReporter private: UnitTestImpl* const unit_test_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); + DefaultPerThreadTestPartResultReporter( + const DefaultPerThreadTestPartResultReporter&) = delete; + DefaultPerThreadTestPartResultReporter& operator=( + const DefaultPerThreadTestPartResultReporter&) = delete; }; // The private implementation of the UnitTest class. We don't protect @@ -623,7 +632,8 @@ class GTEST_API_ UnitTestImpl { // For example, if Foo() calls Bar(), which in turn calls // CurrentOsStackTraceExceptTop(1), Foo() will be included in the // trace but Bar() and CurrentOsStackTraceExceptTop() won't. - std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_; + std::string CurrentOsStackTraceExceptTop(int skip_count) + GTEST_NO_INLINE_ GTEST_NO_TAIL_CALL_; // Finds and returns a TestSuite with the given name. If one doesn't // exist, creates one and returns it. @@ -727,9 +737,7 @@ class GTEST_API_ UnitTestImpl { } // Clears the results of ad-hoc test assertions. 
- void ClearAdHocTestResult() { - ad_hoc_test_result_.Clear(); - } + void ClearAdHocTestResult() { ad_hoc_test_result_.Clear(); } // Adds a TestProperty to the current TestResult object when invoked in a // context of a test or a test suite, or to the global property set. If the @@ -737,10 +745,7 @@ class GTEST_API_ UnitTestImpl { // updated. void RecordProperty(const TestProperty& test_property); - enum ReactionToSharding { - HONOR_SHARDING_PROTOCOL, - IGNORE_SHARDING_PROTOCOL - }; + enum ReactionToSharding { HONOR_SHARDING_PROTOCOL, IGNORE_SHARDING_PROTOCOL }; // Matches the full name of each test against the user-specified // filter to decide whether the test should run, then records the @@ -946,7 +951,8 @@ class GTEST_API_ UnitTestImpl { // starts. bool catch_exceptions_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); + UnitTestImpl(const UnitTestImpl&) = delete; + UnitTestImpl& operator=(const UnitTestImpl&) = delete; }; // class UnitTestImpl // Convenience function for accessing the global UnitTest @@ -969,8 +975,9 @@ GTEST_API_ bool IsValidEscape(char ch); GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); GTEST_API_ bool ValidateRegex(const char* regex); GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); -GTEST_API_ bool MatchRepetitionAndRegexAtHead( - bool escaped, char ch, char repeat, const char* regex, const char* str); +GTEST_API_ bool MatchRepetitionAndRegexAtHead(bool escaped, char ch, + char repeat, const char* regex, + const char* str); GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); #endif // GTEST_USES_SIMPLE_RE @@ -1072,8 +1079,7 @@ class StreamingListener : public EmptyTestEventListener { } ~SocketWriter() override { - if (sockfd_ != -1) - CloseConnection(); + if (sockfd_ != -1) CloseConnection(); } // Sends a string to the socket. 
@@ -1083,9 +1089,8 @@ class StreamingListener : public EmptyTestEventListener { const auto len = static_cast(message.length()); if (write(sockfd_, message.c_str(), len) != static_cast(len)) { - GTEST_LOG_(WARNING) - << "stream_result_to: failed to stream to " - << host_name_ << ":" << port_num_; + GTEST_LOG_(WARNING) << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; } } @@ -1106,7 +1111,8 @@ class StreamingListener : public EmptyTestEventListener { const std::string host_name_; const std::string port_num_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); + SocketWriter(const SocketWriter&) = delete; + SocketWriter& operator=(const SocketWriter&) = delete; }; // class SocketWriter // Escapes '=', '&', '%', and '\n' characters in str as "%xx". @@ -1118,7 +1124,9 @@ class StreamingListener : public EmptyTestEventListener { } explicit StreamingListener(AbstractSocketWriter* socket_writer) - : socket_writer_(socket_writer) { Start(); } + : socket_writer_(socket_writer) { + Start(); + } void OnTestProgramStart(const UnitTest& /* unit_test */) override { SendLn("event=TestProgramStart"); @@ -1141,22 +1149,22 @@ class StreamingListener : public EmptyTestEventListener { void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) override { - SendLn("event=TestIterationEnd&passed=" + - FormatBool(unit_test.Passed()) + "&elapsed_time=" + - StreamableToString(unit_test.elapsed_time()) + "ms"); + SendLn("event=TestIterationEnd&passed=" + FormatBool(unit_test.Passed()) + + "&elapsed_time=" + StreamableToString(unit_test.elapsed_time()) + + "ms"); } // Note that "event=TestCaseStart" is a wire format and has to remain // "case" for compatibility - void OnTestCaseStart(const TestCase& test_case) override { - SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); + void OnTestSuiteStart(const TestSuite& test_suite) override { + SendLn(std::string("event=TestCaseStart&name=") + test_suite.name()); } // Note that 
"event=TestCaseEnd" is a wire format and has to remain // "case" for compatibility - void OnTestCaseEnd(const TestCase& test_case) override { - SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + - "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + + void OnTestSuiteEnd(const TestSuite& test_suite) override { + SendLn("event=TestCaseEnd&passed=" + FormatBool(test_suite.Passed()) + + "&elapsed_time=" + StreamableToString(test_suite.elapsed_time()) + "ms"); } @@ -1166,8 +1174,7 @@ class StreamingListener : public EmptyTestEventListener { void OnTestEnd(const TestInfo& test_info) override { SendLn("event=TestEnd&passed=" + - FormatBool((test_info.result())->Passed()) + - "&elapsed_time=" + + FormatBool((test_info.result())->Passed()) + "&elapsed_time=" + StreamableToString((test_info.result())->elapsed_time()) + "ms"); } @@ -1191,7 +1198,8 @@ class StreamingListener : public EmptyTestEventListener { const std::unique_ptr socket_writer_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); + StreamingListener(const StreamingListener&) = delete; + StreamingListener& operator=(const StreamingListener&) = delete; }; // class StreamingListener #endif // GTEST_CAN_STREAM_RESULTS_ diff --git a/ext/googletest/googletest/src/gtest-matchers.cc b/ext/googletest/googletest/src/gtest-matchers.cc index 65104ebab1..7e3bcc0cff 100644 --- a/ext/googletest/googletest/src/gtest-matchers.cc +++ b/ext/googletest/googletest/src/gtest-matchers.cc @@ -32,12 +32,13 @@ // This file implements just enough of the matcher interface to allow // EXPECT_DEATH and friends to accept a matcher argument. 
-#include "gtest/internal/gtest-internal.h" -#include "gtest/internal/gtest-port.h" #include "gtest/gtest-matchers.h" #include +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" + namespace testing { // Constructs a matcher that matches a const std::string& whose value is diff --git a/ext/googletest/googletest/src/gtest-port.cc b/ext/googletest/googletest/src/gtest-port.cc index c3c93e6185..d797fe4d58 100644 --- a/ext/googletest/googletest/src/gtest-port.cc +++ b/ext/googletest/googletest/src/gtest-port.cc @@ -27,61 +27,62 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gtest/internal/gtest-port.h" #include #include #include #include + #include #include #include #if GTEST_OS_WINDOWS -# include -# include -# include -# include // Used in ThreadLocal. -# ifdef _MSC_VER -# include -# endif // _MSC_VER +#include +#include +#include + +#include // Used in ThreadLocal. 
+#ifdef _MSC_VER +#include +#endif // _MSC_VER #else -# include +#include #endif // GTEST_OS_WINDOWS #if GTEST_OS_MAC -# include -# include -# include +#include +#include +#include #endif // GTEST_OS_MAC #if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \ GTEST_OS_NETBSD || GTEST_OS_OPENBSD -# include -# if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD -# include -# endif +#include +#if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD +#include +#endif #endif #if GTEST_OS_QNX -# include -# include -# include +#include +#include +#include #endif // GTEST_OS_QNX #if GTEST_OS_AIX -# include -# include +#include +#include #endif // GTEST_OS_AIX #if GTEST_OS_FUCHSIA -# include -# include +#include +#include #endif // GTEST_OS_FUCHSIA -#include "gtest/gtest-spi.h" #include "gtest/gtest-message.h" +#include "gtest/gtest-spi.h" #include "gtest/internal/gtest-internal.h" #include "gtest/internal/gtest-string.h" #include "src/gtest-internal-inl.h" @@ -89,15 +90,6 @@ namespace testing { namespace internal { -#if defined(_MSC_VER) || defined(__BORLANDC__) -// MSVC and C++Builder do not provide a definition of STDERR_FILENO. -const int kStdOutFileno = 1; -const int kStdErrFileno = 2; -#else -const int kStdOutFileno = STDOUT_FILENO; -const int kStdErrFileno = STDERR_FILENO; -#endif // _MSC_VER - #if GTEST_OS_LINUX || GTEST_OS_GNU_HURD namespace { @@ -131,8 +123,7 @@ size_t GetThreadCount() { if (status == KERN_SUCCESS) { // task_threads allocates resources in thread_list and we need to free them // to avoid leaks. 
- vm_deallocate(task, - reinterpret_cast(thread_list), + vm_deallocate(task, reinterpret_cast(thread_list), sizeof(thread_t) * thread_count); return static_cast(thread_count); } else { @@ -141,7 +132,7 @@ size_t GetThreadCount() { } #elif GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \ - GTEST_OS_NETBSD + GTEST_OS_NETBSD #if GTEST_OS_NETBSD #undef KERN_PROC @@ -184,12 +175,12 @@ size_t GetThreadCount() { // we cannot detect it. size_t GetThreadCount() { int mib[] = { - CTL_KERN, - KERN_PROC, - KERN_PROC_PID | KERN_PROC_SHOW_THREADS, - getpid(), - sizeof(struct kinfo_proc), - 0, + CTL_KERN, + KERN_PROC, + KERN_PROC_PID | KERN_PROC_SHOW_THREADS, + getpid(), + sizeof(struct kinfo_proc), + 0, }; u_int miblen = sizeof(mib) / sizeof(mib[0]); @@ -210,8 +201,7 @@ size_t GetThreadCount() { // exclude empty members size_t nthreads = 0; for (size_t i = 0; i < size / static_cast(mib[4]); i++) { - if (info[i].p_tid != -1) - nthreads++; + if (info[i].p_tid != -1) nthreads++; } return nthreads; } @@ -254,13 +244,9 @@ size_t GetThreadCount() { size_t GetThreadCount() { int dummy_buffer; size_t avail; - zx_status_t status = zx_object_get_info( - zx_process_self(), - ZX_INFO_PROCESS_THREADS, - &dummy_buffer, - 0, - nullptr, - &avail); + zx_status_t status = + zx_object_get_info(zx_process_self(), ZX_INFO_PROCESS_THREADS, + &dummy_buffer, 0, nullptr, &avail); if (status == ZX_OK) { return avail; } else { @@ -280,27 +266,15 @@ size_t GetThreadCount() { #if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS -void SleepMilliseconds(int n) { - ::Sleep(static_cast(n)); -} +AutoHandle::AutoHandle() : handle_(INVALID_HANDLE_VALUE) {} -AutoHandle::AutoHandle() - : handle_(INVALID_HANDLE_VALUE) {} +AutoHandle::AutoHandle(Handle handle) : handle_(handle) {} -AutoHandle::AutoHandle(Handle handle) - : handle_(handle) {} +AutoHandle::~AutoHandle() { Reset(); } -AutoHandle::~AutoHandle() { - Reset(); -} +AutoHandle::Handle AutoHandle::Get() const { return handle_; } -AutoHandle::Handle 
AutoHandle::Get() const { - return handle_; -} - -void AutoHandle::Reset() { - Reset(INVALID_HANDLE_VALUE); -} +void AutoHandle::Reset() { Reset(INVALID_HANDLE_VALUE); } void AutoHandle::Reset(HANDLE handle) { // Resetting with the same handle we already own is invalid. @@ -312,7 +286,7 @@ void AutoHandle::Reset(HANDLE handle) { } else { GTEST_CHECK_(!IsCloseable()) << "Resetting a valid handle to itself is likely a programmer error " - "and thus not allowed."; + "and thus not allowed."; } } @@ -322,23 +296,6 @@ bool AutoHandle::IsCloseable() const { return handle_ != nullptr && handle_ != INVALID_HANDLE_VALUE; } -Notification::Notification() - : event_(::CreateEvent(nullptr, // Default security attributes. - TRUE, // Do not reset automatically. - FALSE, // Initially unset. - nullptr)) { // Anonymous event. - GTEST_CHECK_(event_.Get() != nullptr); -} - -void Notification::Notify() { - GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE); -} - -void Notification::WaitForNotification() { - GTEST_CHECK_( - ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0); -} - Mutex::Mutex() : owner_thread_id_(0), type_(kDynamic), @@ -391,25 +348,25 @@ namespace { // MemoryIsNotDeallocated memory_is_not_deallocated; // critical_section_ = new CRITICAL_SECTION; // -class MemoryIsNotDeallocated -{ +class MemoryIsNotDeallocated { public: MemoryIsNotDeallocated() : old_crtdbg_flag_(0) { old_crtdbg_flag_ = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG); // Set heap allocation block type to _IGNORE_BLOCK so that MS debug CRT // doesn't report mem leak if there's no matching deallocation. 
- _CrtSetDbgFlag(old_crtdbg_flag_ & ~_CRTDBG_ALLOC_MEM_DF); + (void)_CrtSetDbgFlag(old_crtdbg_flag_ & ~_CRTDBG_ALLOC_MEM_DF); } ~MemoryIsNotDeallocated() { // Restore the original _CRTDBG_ALLOC_MEM_DF flag - _CrtSetDbgFlag(old_crtdbg_flag_); + (void)_CrtSetDbgFlag(old_crtdbg_flag_); } private: int old_crtdbg_flag_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(MemoryIsNotDeallocated); + MemoryIsNotDeallocated(const MemoryIsNotDeallocated&) = delete; + MemoryIsNotDeallocated& operator=(const MemoryIsNotDeallocated&) = delete; }; #endif // _MSC_VER @@ -435,15 +392,13 @@ void Mutex::ThreadSafeLazyInit() { ::InitializeCriticalSection(critical_section_); // Updates the critical_section_init_phase_ to 2 to signal // initialization complete. - GTEST_CHECK_(::InterlockedCompareExchange( - &critical_section_init_phase_, 2L, 1L) == - 1L); + GTEST_CHECK_(::InterlockedCompareExchange(&critical_section_init_phase_, + 2L, 1L) == 1L); break; case 1: // Somebody else is already initializing the mutex; spin until they // are done. - while (::InterlockedCompareExchange(&critical_section_init_phase_, - 2L, + while (::InterlockedCompareExchange(&critical_section_init_phase_, 2L, 2L) != 2L) { // Possibly yields the rest of the thread's time slice to other // threads. @@ -488,9 +443,7 @@ class ThreadWithParamSupport : public ThreadWithParamBase { private: struct ThreadMainParam { ThreadMainParam(Runnable* runnable, Notification* thread_can_start) - : runnable_(runnable), - thread_can_start_(thread_can_start) { - } + : runnable_(runnable), thread_can_start_(thread_can_start) {} std::unique_ptr runnable_; // Does not own. Notification* thread_can_start_; @@ -508,20 +461,18 @@ class ThreadWithParamSupport : public ThreadWithParamBase { // Prohibit instantiation. 
ThreadWithParamSupport(); - GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport); + ThreadWithParamSupport(const ThreadWithParamSupport&) = delete; + ThreadWithParamSupport& operator=(const ThreadWithParamSupport&) = delete; }; } // namespace -ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable, +ThreadWithParamBase::ThreadWithParamBase(Runnable* runnable, Notification* thread_can_start) - : thread_(ThreadWithParamSupport::CreateThread(runnable, - thread_can_start)) { -} + : thread_( + ThreadWithParamSupport::CreateThread(runnable, thread_can_start)) {} -ThreadWithParamBase::~ThreadWithParamBase() { - Join(); -} +ThreadWithParamBase::~ThreadWithParamBase() { Join(); } void ThreadWithParamBase::Join() { GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0) @@ -548,8 +499,10 @@ class ThreadLocalRegistryImpl { ThreadIdToThreadLocals::iterator thread_local_pos = thread_to_thread_locals->find(current_thread); if (thread_local_pos == thread_to_thread_locals->end()) { - thread_local_pos = thread_to_thread_locals->insert( - std::make_pair(current_thread, ThreadLocalValues())).first; + thread_local_pos = + thread_to_thread_locals + ->insert(std::make_pair(current_thread, ThreadLocalValues())) + .first; StartWatcherThreadFor(current_thread); } ThreadLocalValues& thread_local_values = thread_local_pos->second; @@ -577,9 +530,8 @@ class ThreadLocalRegistryImpl { ThreadIdToThreadLocals* const thread_to_thread_locals = GetThreadLocalsMapLocked(); for (ThreadIdToThreadLocals::iterator it = - thread_to_thread_locals->begin(); - it != thread_to_thread_locals->end(); - ++it) { + thread_to_thread_locals->begin(); + it != thread_to_thread_locals->end(); ++it) { ThreadLocalValues& thread_local_values = it->second; ThreadLocalValues::iterator value_pos = thread_local_values.find(thread_local_instance); @@ -609,9 +561,8 @@ class ThreadLocalRegistryImpl { if (thread_local_pos != thread_to_thread_locals->end()) { ThreadLocalValues& thread_local_values = 
thread_local_pos->second; for (ThreadLocalValues::iterator value_pos = - thread_local_values.begin(); - value_pos != thread_local_values.end(); - ++value_pos) { + thread_local_values.begin(); + value_pos != thread_local_values.end(); ++value_pos) { value_holders.push_back(value_pos->second); } thread_to_thread_locals->erase(thread_local_pos); @@ -637,9 +588,8 @@ class ThreadLocalRegistryImpl { static void StartWatcherThreadFor(DWORD thread_id) { // The returned handle will be kept in thread_map and closed by // watcher_thread in WatcherThreadFunc. - HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION, - FALSE, - thread_id); + HANDLE thread = + ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION, FALSE, thread_id); GTEST_CHECK_(thread != nullptr); // We need to pass a valid thread ID pointer into CreateThread for it // to work correctly under Win98. @@ -650,7 +600,8 @@ class ThreadLocalRegistryImpl { &ThreadLocalRegistryImpl::WatcherThreadFunc, reinterpret_cast(new ThreadIdAndHandle(thread_id, thread)), CREATE_SUSPENDED, &watcher_thread_id); - GTEST_CHECK_(watcher_thread != nullptr); + GTEST_CHECK_(watcher_thread != nullptr) + << "CreateThread failed with error " << ::GetLastError() << "."; // Give the watcher thread the same priority as ours to avoid being // blocked by it. 
::SetThreadPriority(watcher_thread, @@ -664,8 +615,7 @@ class ThreadLocalRegistryImpl { static DWORD WINAPI WatcherThreadFunc(LPVOID param) { const ThreadIdAndHandle* tah = reinterpret_cast(param); - GTEST_CHECK_( - ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0); + GTEST_CHECK_(::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0); OnThreadExit(tah->first); ::CloseHandle(tah->second); delete tah; @@ -689,16 +639,17 @@ class ThreadLocalRegistryImpl { }; Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex); // NOLINT -Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex); // NOLINT +Mutex ThreadLocalRegistryImpl::thread_map_mutex_( + Mutex::kStaticMutex); // NOLINT ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread( - const ThreadLocalBase* thread_local_instance) { + const ThreadLocalBase* thread_local_instance) { return ThreadLocalRegistryImpl::GetValueOnCurrentThread( thread_local_instance); } void ThreadLocalRegistry::OnThreadLocalDestroyed( - const ThreadLocalBase* thread_local_instance) { + const ThreadLocalBase* thread_local_instance) { ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance); } @@ -786,7 +737,7 @@ bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); } bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); } bool IsAsciiWordChar(char ch) { return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || - ('0' <= ch && ch <= '9') || ch == '_'; + ('0' <= ch && ch <= '9') || ch == '_'; } // Returns true if and only if "\\c" is a supported escape sequence. @@ -799,17 +750,28 @@ bool IsValidEscape(char c) { bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { if (escaped) { // "\\p" where p is pattern_char. 
switch (pattern_char) { - case 'd': return IsAsciiDigit(ch); - case 'D': return !IsAsciiDigit(ch); - case 'f': return ch == '\f'; - case 'n': return ch == '\n'; - case 'r': return ch == '\r'; - case 's': return IsAsciiWhiteSpace(ch); - case 'S': return !IsAsciiWhiteSpace(ch); - case 't': return ch == '\t'; - case 'v': return ch == '\v'; - case 'w': return IsAsciiWordChar(ch); - case 'W': return !IsAsciiWordChar(ch); + case 'd': + return IsAsciiDigit(ch); + case 'D': + return !IsAsciiDigit(ch); + case 'f': + return ch == '\f'; + case 'n': + return ch == '\n'; + case 'r': + return ch == '\r'; + case 's': + return IsAsciiWhiteSpace(ch); + case 'S': + return !IsAsciiWhiteSpace(ch); + case 't': + return ch == '\t'; + case 'v': + return ch == '\v'; + case 'w': + return IsAsciiWordChar(ch); + case 'W': + return !IsAsciiWordChar(ch); } return IsAsciiPunct(pattern_char) && pattern_char == ch; } @@ -820,7 +782,8 @@ bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { // Helper function used by ValidateRegex() to format error messages. 
static std::string FormatRegexSyntaxError(const char* regex, int index) { return (Message() << "Syntax error at index " << index - << " in simple regular expression \"" << regex << "\": ").GetString(); + << " in simple regular expression \"" << regex << "\": ") + .GetString(); } // Generates non-fatal failures and returns false if regex is invalid; @@ -862,12 +825,12 @@ bool ValidateRegex(const char* regex) { << "'$' can only appear at the end."; is_valid = false; } else if (IsInSet(ch, "()[]{}|")) { - ADD_FAILURE() << FormatRegexSyntaxError(regex, i) - << "'" << ch << "' is unsupported."; + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) << "'" << ch + << "' is unsupported."; is_valid = false; } else if (IsRepeat(ch) && !prev_repeatable) { - ADD_FAILURE() << FormatRegexSyntaxError(regex, i) - << "'" << ch << "' can only follow a repeatable token."; + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) << "'" << ch + << "' can only follow a repeatable token."; is_valid = false; } @@ -885,12 +848,10 @@ bool ValidateRegex(const char* regex) { // characters to be indexable by size_t, in which case the test will // probably time out anyway. We are fine with this limitation as // std::string has it too. -bool MatchRepetitionAndRegexAtHead( - bool escaped, char c, char repeat, const char* regex, - const char* str) { +bool MatchRepetitionAndRegexAtHead(bool escaped, char c, char repeat, + const char* regex, const char* str) { const size_t min_count = (repeat == '+') ? 1 : 0; - const size_t max_count = (repeat == '?') ? 1 : - static_cast(-1) - 1; + const size_t max_count = (repeat == '?') ? 1 : static_cast(-1) - 1; // We cannot call numeric_limits::max() as it conflicts with the // max() macro on Windows. @@ -903,8 +864,7 @@ bool MatchRepetitionAndRegexAtHead( // greedy match. 
return true; } - if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i])) - return false; + if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i])) return false; } return false; } @@ -918,25 +878,23 @@ bool MatchRegexAtHead(const char* regex, const char* str) { // "$" only matches the end of a string. Note that regex being // valid guarantees that there's nothing after "$" in it. - if (*regex == '$') - return *str == '\0'; + if (*regex == '$') return *str == '\0'; // Is the first thing in regex an escape sequence? const bool escaped = *regex == '\\'; - if (escaped) - ++regex; + if (escaped) ++regex; if (IsRepeat(regex[1])) { // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so // here's an indirect recursion. It terminates as the regex gets // shorter in each recursion. - return MatchRepetitionAndRegexAtHead( - escaped, regex[0], regex[1], regex + 2, str); + return MatchRepetitionAndRegexAtHead(escaped, regex[0], regex[1], regex + 2, + str); } else { // regex isn't empty, isn't "$", and doesn't start with a // repetition. We match the first atom of regex with the first // character of str and recurse. return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) && - MatchRegexAtHead(regex + 1, str + 1); + MatchRegexAtHead(regex + 1, str + 1); } } @@ -951,13 +909,11 @@ bool MatchRegexAtHead(const char* regex, const char* str) { bool MatchRegexAnywhere(const char* regex, const char* str) { if (regex == nullptr || str == nullptr) return false; - if (*regex == '^') - return MatchRegexAtHead(regex + 1, str); + if (*regex == '^') return MatchRegexAtHead(regex + 1, str); // A successful match can be anywhere in str. do { - if (MatchRegexAtHead(regex, str)) - return true; + if (MatchRegexAtHead(regex, str)) return true; } while (*str++ != '\0'); return false; } @@ -1038,8 +994,8 @@ GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { // FormatFileLocation in order to contrast the two functions. 
// Note that FormatCompilerIndependentFileLocation() does NOT append colon // to the file location it produces, unlike FormatFileLocation(). -GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( - const char* file, int line) { +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, + int line) { const std::string file_name(file == nullptr ? kUnknownFile : file); if (line < 0) @@ -1050,12 +1006,13 @@ GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line) : severity_(severity) { - const char* const marker = - severity == GTEST_INFO ? "[ INFO ]" : - severity == GTEST_WARNING ? "[WARNING]" : - severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]"; - GetStream() << ::std::endl << marker << " " - << FormatFileLocation(file, line).c_str() << ": "; + const char* const marker = severity == GTEST_INFO ? "[ INFO ]" + : severity == GTEST_WARNING ? "[WARNING]" + : severity == GTEST_ERROR ? "[ ERROR ]" + : "[ FATAL ]"; + GetStream() << ::std::endl + << marker << " " << FormatFileLocation(file, line).c_str() + << ": "; } // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. @@ -1078,27 +1035,26 @@ class CapturedStream { public: // The ctor redirects the stream to a temporary file. explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { -# if GTEST_OS_WINDOWS - char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT - char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT +#if GTEST_OS_WINDOWS + char temp_dir_path[MAX_PATH + 1] = {'\0'}; // NOLINT + char temp_file_path[MAX_PATH + 1] = {'\0'}; // NOLINT ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path); - const UINT success = ::GetTempFileNameA(temp_dir_path, - "gtest_redir", + const UINT success = ::GetTempFileNameA(temp_dir_path, "gtest_redir", 0, // Generate unique file name. 
temp_file_path); GTEST_CHECK_(success != 0) << "Unable to create a temporary file in " << temp_dir_path; const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE); - GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file " - << temp_file_path; + GTEST_CHECK_(captured_fd != -1) + << "Unable to open temporary file " << temp_file_path; filename_ = temp_file_path; -# else +#else // There's no guarantee that a test has write access to the current // directory, so we create the temporary file in a temporary directory. std::string name_template; -# if GTEST_OS_LINUX_ANDROID +#if GTEST_OS_LINUX_ANDROID // Note: Android applications are expected to call the framework's // Context.getExternalStorageDirectory() method through JNI to get // the location of the world-writable SD Card directory. However, @@ -1111,7 +1067,7 @@ class CapturedStream { // '/sdcard' and other variants cannot be relied on, as they are not // guaranteed to be mounted, or may have a delay in mounting. name_template = "/data/local/tmp/"; -# elif GTEST_OS_IOS +#elif GTEST_OS_IOS char user_temp_dir[PATH_MAX + 1]; // Documented alternative to NSTemporaryDirectory() (for obtaining creating @@ -1132,9 +1088,9 @@ class CapturedStream { name_template = user_temp_dir; if (name_template.back() != GTEST_PATH_SEP_[0]) name_template.push_back(GTEST_PATH_SEP_[0]); -# else +#else name_template = "/tmp/"; -# endif +#endif name_template.append("gtest_captured_stream.XXXXXX"); // mkstemp() modifies the string bytes in place, and does not go beyond the @@ -1150,15 +1106,13 @@ class CapturedStream { << " for test; does the test have access to the /tmp directory?"; } filename_ = std::move(name_template); -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS fflush(nullptr); dup2(captured_fd, fd_); close(captured_fd); } - ~CapturedStream() { - remove(filename_.c_str()); - } + ~CapturedStream() { remove(filename_.c_str()); } std::string GetCapturedString() { if (uncaptured_fd_ != -1) { @@ -1185,7 
+1139,8 @@ class CapturedStream { // Name of the temporary file holding the stderr output. ::std::string filename_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); + CapturedStream(const CapturedStream&) = delete; + CapturedStream& operator=(const CapturedStream&) = delete; }; GTEST_DISABLE_MSC_DEPRECATED_POP_() @@ -1213,6 +1168,15 @@ static std::string GetCapturedStream(CapturedStream** captured_stream) { return content; } +#if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC and C++Builder do not provide a definition of STDERR_FILENO. +const int kStdOutFileno = 1; +const int kStdErrFileno = 2; +#else +const int kStdOutFileno = STDOUT_FILENO; +const int kStdErrFileno = STDERR_FILENO; +#endif // defined(_MSC_VER) || defined(__BORLANDC__) + // Starts capturing stdout. void CaptureStdout() { CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout); @@ -1235,10 +1199,6 @@ std::string GetCapturedStderr() { #endif // GTEST_HAS_STREAM_REDIRECTION - - - - size_t GetFileSize(FILE* file) { fseek(file, 0, SEEK_END); return static_cast(ftell(file)); @@ -1256,7 +1216,8 @@ std::string ReadEntireFile(FILE* file) { // Keeps reading the file until we cannot read further or the // pre-determined file size is reached. do { - bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_last_read = + fread(buffer + bytes_read, 1, file_size - bytes_read, file); bytes_read += bytes_last_read; } while (bytes_last_read > 0 && bytes_read < file_size); @@ -1344,7 +1305,7 @@ bool ParseInt32(const Message& src_text, const char* str, int32_t* value) { // LONG_MAX or LONG_MIN when the input overflows.) result != long_value // The parsed value overflows as an int32_t. 
- ) { + ) { Message msg; msg << "WARNING: " << src_text << " is expected to be a 32-bit integer, but actually" @@ -1388,8 +1349,8 @@ int32_t Int32FromGTestEnv(const char* flag, int32_t default_value) { } int32_t result = default_value; - if (!ParseInt32(Message() << "Environment variable " << env_var, - string_value, &result)) { + if (!ParseInt32(Message() << "Environment variable " << env_var, string_value, + &result)) { printf("The default value %s is used.\n", (Message() << default_value).GetString().c_str()); fflush(stdout); @@ -1408,7 +1369,7 @@ int32_t Int32FromGTestEnv(const char* flag, int32_t default_value) { // not check that the flag is 'output' // In essence this checks an env variable called XML_OUTPUT_FILE // and if it is set we prepend "xml:" to its value, if it not set we return "" -std::string OutputFlagAlsoCheckEnvVar(){ +std::string OutputFlagAlsoCheckEnvVar() { std::string default_value_for_output_flag = ""; const char* xml_output_file_env = posix::GetEnv("XML_OUTPUT_FILE"); if (nullptr != xml_output_file_env) { diff --git a/ext/googletest/googletest/src/gtest-printers.cc b/ext/googletest/googletest/src/gtest-printers.cc index 41e29ccd60..f3976d230d 100644 --- a/ext/googletest/googletest/src/gtest-printers.cc +++ b/ext/googletest/googletest/src/gtest-printers.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Test - The Google C++ Testing and Mocking Framework // // This file implements a universal value printer that can print a @@ -101,7 +100,7 @@ void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count, PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os); *os << " ... "; // Rounds up to 2-byte boundary. 
- const size_t resume_pos = (count - kChunkSize + 1)/2*2; + const size_t resume_pos = (count - kChunkSize + 1) / 2 * 2; PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os); } *os << ">"; @@ -136,11 +135,7 @@ void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count, // - as is if it's a printable ASCII (e.g. 'a', '2', ' '), // - as a hexadecimal escape sequence (e.g. '\x7F'), or // - as a special escape sequence (e.g. '\r', '\n'). -enum CharFormat { - kAsIs, - kHexEscape, - kSpecialEscape -}; +enum CharFormat { kAsIs, kHexEscape, kSpecialEscape }; // Returns true if c is a printable ASCII character. We test the // value of c directly instead of calling isprint(), which is buggy on @@ -213,35 +208,21 @@ static CharFormat PrintAsStringLiteralTo(char32_t c, ostream* os) { } } -static const char* GetCharWidthPrefix(char) { - return ""; -} +static const char* GetCharWidthPrefix(char) { return ""; } -static const char* GetCharWidthPrefix(signed char) { - return ""; -} +static const char* GetCharWidthPrefix(signed char) { return ""; } -static const char* GetCharWidthPrefix(unsigned char) { - return ""; -} +static const char* GetCharWidthPrefix(unsigned char) { return ""; } #ifdef __cpp_char8_t -static const char* GetCharWidthPrefix(char8_t) { - return "u8"; -} +static const char* GetCharWidthPrefix(char8_t) { return "u8"; } #endif -static const char* GetCharWidthPrefix(char16_t) { - return "u"; -} +static const char* GetCharWidthPrefix(char16_t) { return "u"; } -static const char* GetCharWidthPrefix(char32_t) { - return "U"; -} +static const char* GetCharWidthPrefix(char32_t) { return "U"; } -static const char* GetCharWidthPrefix(wchar_t) { - return "L"; -} +static const char* GetCharWidthPrefix(wchar_t) { return "L"; } // Prints a char c as if it's part of a string literal, escaping it when // necessary; returns how c was formatted. 
@@ -276,8 +257,7 @@ void PrintCharAndCodeTo(Char c, ostream* os) { // To aid user debugging, we also print c's code in decimal, unless // it's 0 (in which case c was printed as '\\0', making the code // obvious). - if (c == 0) - return; + if (c == 0) return; *os << " (" << static_cast(c); // For more convenience, we print c's code again in hexadecimal, @@ -304,17 +284,60 @@ void PrintTo(char32_t c, ::std::ostream* os) { << static_cast(c); } +// gcc/clang __{u,}int128_t +#if defined(__SIZEOF_INT128__) +void PrintTo(__uint128_t v, ::std::ostream* os) { + if (v == 0) { + *os << "0"; + return; + } + + // Buffer large enough for ceil(log10(2^128))==39 and the null terminator + char buf[40]; + char* p = buf + sizeof(buf); + + // Some configurations have a __uint128_t, but no support for built in + // division. Do manual long division instead. + + uint64_t high = static_cast(v >> 64); + uint64_t low = static_cast(v); + + *--p = 0; + while (high != 0 || low != 0) { + uint64_t high_mod = high % 10; + high = high / 10; + // This is the long division algorithm specialized for a divisor of 10 and + // only two elements. + // Notable values: + // 2^64 / 10 == 1844674407370955161 + // 2^64 % 10 == 6 + const uint64_t carry = 6 * high_mod + low % 10; + low = low / 10 + high_mod * 1844674407370955161 + carry / 10; + + char digit = static_cast(carry % 10); + *--p = '0' + digit; + } + *os << p; +} +void PrintTo(__int128_t v, ::std::ostream* os) { + __uint128_t uv = static_cast<__uint128_t>(v); + if (v < 0) { + *os << "-"; + uv = -uv; + } + PrintTo(uv, os); +} +#endif // __SIZEOF_INT128__ + // Prints the given array of characters to the ostream. CharType must be either // char, char8_t, char16_t, char32_t, or wchar_t. // The array starts at begin, the length is len, it may include '\0' characters // and may not be NUL-terminated. 
template -GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ -GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ -GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ -GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ -static CharFormat PrintCharsAsStringTo( - const CharType* begin, size_t len, ostream* os) { +GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ + GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ + GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ static CharFormat + PrintCharsAsStringTo(const CharType* begin, size_t len, ostream* os) { const char* const quote_prefix = GetCharWidthPrefix(*begin); *os << quote_prefix << "\""; bool is_previous_hex = false; @@ -340,12 +363,11 @@ static CharFormat PrintCharsAsStringTo( // Prints a (const) char/wchar_t array of 'len' elements, starting at address // 'begin'. CharType must be either char or wchar_t. template -GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ -GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ -GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ -GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ -static void UniversalPrintCharArray( - const CharType* begin, size_t len, ostream* os) { +GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ + GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ + GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ static void + UniversalPrintCharArray(const CharType* begin, size_t len, + ostream* os) { // The code // const char kFoo[] = "foo"; // generates an array of 4, not 3, elements, with the last one being '\0'. 
@@ -436,28 +458,28 @@ void PrintTo(const wchar_t* s, ostream* os) { PrintCStringTo(s, os); } namespace { bool ContainsUnprintableControlCodes(const char* str, size_t length) { - const unsigned char *s = reinterpret_cast(str); + const unsigned char* s = reinterpret_cast(str); for (size_t i = 0; i < length; i++) { unsigned char ch = *s++; if (std::iscntrl(ch)) { - switch (ch) { + switch (ch) { case '\t': case '\n': case '\r': break; default: return true; - } } + } } return false; } -bool IsUTF8TrailByte(unsigned char t) { return 0x80 <= t && t<= 0xbf; } +bool IsUTF8TrailByte(unsigned char t) { return 0x80 <= t && t <= 0xbf; } bool IsValidUTF8(const char* str, size_t length) { - const unsigned char *s = reinterpret_cast(str); + const unsigned char* s = reinterpret_cast(str); for (size_t i = 0; i < length;) { unsigned char lead = s[i++]; @@ -470,15 +492,13 @@ bool IsValidUTF8(const char* str, size_t length) { } else if (lead <= 0xdf && (i + 1) <= length && IsUTF8TrailByte(s[i])) { ++i; // 2-byte character } else if (0xe0 <= lead && lead <= 0xef && (i + 2) <= length && - IsUTF8TrailByte(s[i]) && - IsUTF8TrailByte(s[i + 1]) && + IsUTF8TrailByte(s[i]) && IsUTF8TrailByte(s[i + 1]) && // check for non-shortest form and surrogate (lead != 0xe0 || s[i] >= 0xa0) && (lead != 0xed || s[i] < 0xa0)) { i += 2; // 3-byte character } else if (0xf0 <= lead && lead <= 0xf4 && (i + 3) <= length && - IsUTF8TrailByte(s[i]) && - IsUTF8TrailByte(s[i + 1]) && + IsUTF8TrailByte(s[i]) && IsUTF8TrailByte(s[i + 1]) && IsUTF8TrailByte(s[i + 2]) && // check for non-shortest form (lead != 0xf0 || s[i] >= 0x90) && diff --git a/ext/googletest/googletest/src/gtest-test-part.cc b/ext/googletest/googletest/src/gtest-test-part.cc index a938683ced..eb7c8d1cf9 100644 --- a/ext/googletest/googletest/src/gtest-test-part.cc +++ b/ext/googletest/googletest/src/gtest-test-part.cc @@ -51,13 +51,11 @@ std::ostream& operator<<(std::ostream& os, const TestPartResult& result) { return os << 
internal::FormatFileLocation(result.file_name(), result.line_number()) << " " - << (result.type() == TestPartResult::kSuccess - ? "Success" - : result.type() == TestPartResult::kSkip - ? "Skipped" - : result.type() == TestPartResult::kFatalFailure - ? "Fatal failure" - : "Non-fatal failure") + << (result.type() == TestPartResult::kSuccess ? "Success" + : result.type() == TestPartResult::kSkip ? "Skipped" + : result.type() == TestPartResult::kFatalFailure + ? "Fatal failure" + : "Non-fatal failure") << ":\n" << result.message() << std::endl; } @@ -86,8 +84,8 @@ namespace internal { HasNewFatalFailureHelper::HasNewFatalFailureHelper() : has_new_fatal_failure_(false), - original_reporter_(GetUnitTestImpl()-> - GetTestPartResultReporterForCurrentThread()) { + original_reporter_( + GetUnitTestImpl()->GetTestPartResultReporterForCurrentThread()) { GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this); } @@ -98,8 +96,7 @@ HasNewFatalFailureHelper::~HasNewFatalFailureHelper() { void HasNewFatalFailureHelper::ReportTestPartResult( const TestPartResult& result) { - if (result.fatally_failed()) - has_new_fatal_failure_ = true; + if (result.fatally_failed()) has_new_fatal_failure_ = true; original_reporter_->ReportTestPartResult(result); } diff --git a/ext/googletest/googletest/src/gtest-typed-test.cc b/ext/googletest/googletest/src/gtest-typed-test.cc index c02c3df659..a2828b83c6 100644 --- a/ext/googletest/googletest/src/gtest-typed-test.cc +++ b/ext/googletest/googletest/src/gtest-typed-test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gtest/gtest-typed-test.h" #include "gtest/gtest.h" @@ -38,8 +37,7 @@ namespace internal { // Skips to the first non-space char in str. Returns an empty string if str // contains only whitespace characters. 
static const char* SkipSpaces(const char* str) { - while (IsSpace(*str)) - str++; + while (IsSpace(*str)) str++; return str; } @@ -85,8 +83,7 @@ const char* TypedTestSuitePState::VerifyRegisteredTestNames( } for (RegisteredTestIter it = registered_tests_.begin(); - it != registered_tests_.end(); - ++it) { + it != registered_tests_.end(); ++it) { if (tests.count(it->first) == 0) { errors << "You forgot to list test " << it->first << ".\n"; } diff --git a/ext/googletest/googletest/src/gtest.cc b/ext/googletest/googletest/src/gtest.cc index 5a38768e4c..6f31dd2260 100644 --- a/ext/googletest/googletest/src/gtest.cc +++ b/ext/googletest/googletest/src/gtest.cc @@ -31,8 +31,6 @@ // The Google C++ Testing and Mocking Framework (Google Test) #include "gtest/gtest.h" -#include "gtest/internal/custom/gtest.h" -#include "gtest/gtest-spi.h" #include #include @@ -46,79 +44,87 @@ #include // NOLINT #include #include +#include #include +#include #include #include #include #include // NOLINT #include +#include #include +#include "gtest/gtest-assertion-result.h" +#include "gtest/gtest-spi.h" +#include "gtest/internal/custom/gtest.h" + #if GTEST_OS_LINUX -# include // NOLINT -# include // NOLINT -# include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT // Declares vsnprintf(). This header is not available on Windows. -# include // NOLINT -# include // NOLINT -# include // NOLINT -# include // NOLINT -# include +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT + +#include #elif GTEST_OS_ZOS -# include // NOLINT +#include // NOLINT // On z/OS we additionally need strings.h for strcasecmp. -# include // NOLINT +#include // NOLINT #elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE. -# include // NOLINT -# undef min +#include // NOLINT +#undef min #elif GTEST_OS_WINDOWS // We are on Windows proper. 
-# include // NOLINT -# undef min +#include // NOLINT +#undef min #ifdef _MSC_VER -# include // NOLINT +#include // NOLINT #endif -# include // NOLINT -# include // NOLINT -# include // NOLINT -# include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT -# if GTEST_OS_WINDOWS_MINGW -# include // NOLINT -# endif // GTEST_OS_WINDOWS_MINGW +#if GTEST_OS_WINDOWS_MINGW +#include // NOLINT +#endif // GTEST_OS_WINDOWS_MINGW #else // cpplint thinks that the header is already included, so we want to // silence it. -# include // NOLINT -# include // NOLINT +#include // NOLINT +#include // NOLINT #endif // GTEST_OS_LINUX #if GTEST_HAS_EXCEPTIONS -# include +#include #endif #if GTEST_CAN_STREAM_RESULTS_ -# include // NOLINT -# include // NOLINT -# include // NOLINT -# include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT #endif #include "src/gtest-internal-inl.h" #if GTEST_OS_WINDOWS -# define vsnprintf _vsnprintf +#define vsnprintf _vsnprintf #endif // GTEST_OS_WINDOWS #if GTEST_OS_MAC @@ -131,7 +137,10 @@ #include "absl/debugging/failure_signal_handler.h" #include "absl/debugging/stacktrace.h" #include "absl/debugging/symbolize.h" +#include "absl/flags/parse.h" +#include "absl/flags/usage.h" #include "absl/strings/str_cat.h" +#include "absl/strings/str_replace.h" #endif // GTEST_HAS_ABSL namespace testing { @@ -177,7 +186,7 @@ const char kStackTraceMarker[] = "\nStack trace:\n"; // is specified on the command line. 
bool g_help_flag = false; -// Utilty function to Open File for Writing +// Utility function to Open File for Writing static FILE* OpenFileForWriting(const std::string& output_file) { FILE* fileout = nullptr; FilePath output_file_path(output_file); @@ -267,8 +276,7 @@ GTEST_DEFINE_bool_( "install a signal handler that dumps debugging information when fatal " "signals are raised."); -GTEST_DEFINE_bool_(list_tests, false, - "List all tests without running them."); +GTEST_DEFINE_bool_(list_tests, false, "List all tests without running them."); // The net priority order after flag processing is thus: // --gtest_output command line flag @@ -315,7 +323,7 @@ GTEST_DEFINE_int32_( GTEST_DEFINE_bool_( recreate_environments_when_repeating, testing::internal::BoolFromGTestEnv("recreate_environments_when_repeating", - true), + false), "Controls whether global test environments are recreated for each repeat " "of the tests. If set to false the global test environments are only set " "up once, for the first iteration, and only torn down once, for the last. " @@ -370,10 +378,9 @@ namespace internal { uint32_t Random::Generate(uint32_t range) { // These constants are the same as are used in glibc's rand(3). // Use wider types than necessary to prevent unsigned overflow diagnostics. - state_ = static_cast(1103515245ULL*state_ + 12345U) % kMaxRange; + state_ = static_cast(1103515245ULL * state_ + 12345U) % kMaxRange; - GTEST_CHECK_(range > 0) - << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range > 0) << "Cannot generate a number in the range [0, 0)."; GTEST_CHECK_(range <= kMaxRange) << "Generation of a number in [0, " << range << ") was requested, " << "but this can only generate numbers in [0, " << kMaxRange << ")."; @@ -418,32 +425,26 @@ static bool ShouldRunTestSuite(const TestSuite* test_suite) { } // AssertHelper constructor. 
-AssertHelper::AssertHelper(TestPartResult::Type type, - const char* file, - int line, - const char* message) - : data_(new AssertHelperData(type, file, line, message)) { -} +AssertHelper::AssertHelper(TestPartResult::Type type, const char* file, + int line, const char* message) + : data_(new AssertHelperData(type, file, line, message)) {} -AssertHelper::~AssertHelper() { - delete data_; -} +AssertHelper::~AssertHelper() { delete data_; } // Message assignment, for assertion streaming support. void AssertHelper::operator=(const Message& message) const { - UnitTest::GetInstance()-> - AddTestPartResult(data_->type, data_->file, data_->line, - AppendUserMessage(data_->message, message), - UnitTest::GetInstance()->impl() - ->CurrentOsStackTraceExceptTop(1) - // Skips the stack frame for this function itself. - ); // NOLINT + UnitTest::GetInstance()->AddTestPartResult( + data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl()->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT } namespace { // When TEST_P is found without a matching INSTANTIATE_TEST_SUITE_P -// to creates test cases for it, a syntetic test case is +// to creates test cases for it, a synthetic test case is // inserted to report ether an error or a log message. // // This configuration bit will likely be removed at some point. 
@@ -474,7 +475,6 @@ class FailureTest : public Test { const bool as_error_; }; - } // namespace std::set* GetIgnoredParameterizedTestSuites() { @@ -518,7 +518,8 @@ void InsertSyntheticTestCase(const std::string& name, CodeLocation location, "To suppress this error for this test suite, insert the following line " "(in a non-header) in the namespace it is defined in:" "\n\n" - "GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(" + name + ");"; + "GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(" + + name + ");"; std::string full_name = "UninstantiatedParameterizedTestSuite<" + name + ">"; RegisterTest( // @@ -538,19 +539,18 @@ void RegisterTypeParameterizedTestSuite(const char* test_suite_name, } void RegisterTypeParameterizedTestSuiteInstantiation(const char* case_name) { - GetUnitTestImpl() - ->type_parameterized_test_registry() - .RegisterInstantiation(case_name); + GetUnitTestImpl()->type_parameterized_test_registry().RegisterInstantiation( + case_name); } void TypeParameterizedTestSuiteRegistry::RegisterTestSuite( const char* test_suite_name, CodeLocation code_location) { suites_.emplace(std::string(test_suite_name), - TypeParameterizedTestSuiteInfo(code_location)); + TypeParameterizedTestSuiteInfo(code_location)); } void TypeParameterizedTestSuiteRegistry::RegisterInstantiation( - const char* test_suite_name) { + const char* test_suite_name) { auto it = suites_.find(std::string(test_suite_name)); if (it != suites_.end()) { it->second.instantiated = true; @@ -644,16 +644,15 @@ std::string UnitTestOptions::GetAbsolutePathToOutputFile() { const char* const gtest_output_flag = s.c_str(); std::string format = GetOutputFormat(); - if (format.empty()) - format = std::string(kDefaultOutputFormat); + if (format.empty()) format = std::string(kDefaultOutputFormat); const char* const colon = strchr(gtest_output_flag, ':'); if (colon == nullptr) return internal::FilePath::MakeFileName( - internal::FilePath( - UnitTest::GetInstance()->original_working_dir()), - 
internal::FilePath(kDefaultOutputFile), 0, - format.c_str()).string(); + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile), 0, format.c_str()) + .string(); internal::FilePath output_name(colon + 1); if (!output_name.IsAbsolutePath()) @@ -661,8 +660,7 @@ std::string UnitTestOptions::GetAbsolutePathToOutputFile() { internal::FilePath(UnitTest::GetInstance()->original_working_dir()), internal::FilePath(colon + 1)); - if (!output_name.IsDirectory()) - return output_name.string(); + if (!output_name.IsDirectory()) return output_name.string(); internal::FilePath result(internal::FilePath::GenerateUniqueFileName( output_name, internal::GetCurrentExecutableName(), @@ -723,60 +721,119 @@ static bool PatternMatchesString(const std::string& name_str, return true; } +namespace { + +bool IsGlobPattern(const std::string& pattern) { + return std::any_of(pattern.begin(), pattern.end(), + [](const char c) { return c == '?' || c == '*'; }); +} + +class UnitTestFilter { + public: + UnitTestFilter() = default; + + // Constructs a filter from a string of patterns separated by `:`. + explicit UnitTestFilter(const std::string& filter) { + // By design "" filter matches "" string. + std::vector all_patterns; + SplitString(filter, ':', &all_patterns); + const auto exact_match_patterns_begin = std::partition( + all_patterns.begin(), all_patterns.end(), &IsGlobPattern); + + glob_patterns_.reserve(static_cast( + std::distance(all_patterns.begin(), exact_match_patterns_begin))); + std::move(all_patterns.begin(), exact_match_patterns_begin, + std::inserter(glob_patterns_, glob_patterns_.begin())); + std::move( + exact_match_patterns_begin, all_patterns.end(), + std::inserter(exact_match_patterns_, exact_match_patterns_.begin())); + } + + // Returns true if and only if name matches at least one of the patterns in + // the filter. 
+ bool MatchesName(const std::string& name) const { + return exact_match_patterns_.count(name) > 0 || + std::any_of(glob_patterns_.begin(), glob_patterns_.end(), + [&name](const std::string& pattern) { + return PatternMatchesString( + name, pattern.c_str(), + pattern.c_str() + pattern.size()); + }); + } + + private: + std::vector glob_patterns_; + std::unordered_set exact_match_patterns_; +}; + +class PositiveAndNegativeUnitTestFilter { + public: + // Constructs a positive and a negative filter from a string. The string + // contains a positive filter optionally followed by a '-' character and a + // negative filter. In case only a negative filter is provided the positive + // filter will be assumed "*". + // A filter is a list of patterns separated by ':'. + explicit PositiveAndNegativeUnitTestFilter(const std::string& filter) { + std::vector positive_and_negative_filters; + + // NOTE: `SplitString` always returns a non-empty container. + SplitString(filter, '-', &positive_and_negative_filters); + const auto& positive_filter = positive_and_negative_filters.front(); + + if (positive_and_negative_filters.size() > 1) { + positive_filter_ = UnitTestFilter( + positive_filter.empty() ? kUniversalFilter : positive_filter); + + // TODO(b/214626361): Fail on multiple '-' characters + // For the moment to preserve old behavior we concatenate the rest of the + // string parts with `-` as separator to generate the negative filter. + auto negative_filter_string = positive_and_negative_filters[1]; + for (std::size_t i = 2; i < positive_and_negative_filters.size(); i++) + negative_filter_string = + negative_filter_string + '-' + positive_and_negative_filters[i]; + negative_filter_ = UnitTestFilter(negative_filter_string); + } else { + // In case we don't have a negative filter and positive filter is "" + // we do not use kUniversalFilter by design as opposed to when we have a + // negative filter. 
+ positive_filter_ = UnitTestFilter(positive_filter); + } + } + + // Returns true if and only if test name (this is generated by appending test + // suit name and test name via a '.' character) matches the positive filter + // and does not match the negative filter. + bool MatchesTest(const std::string& test_suite_name, + const std::string& test_name) const { + return MatchesName(test_suite_name + "." + test_name); + } + + // Returns true if and only if name matches the positive filter and does not + // match the negative filter. + bool MatchesName(const std::string& name) const { + return positive_filter_.MatchesName(name) && + !negative_filter_.MatchesName(name); + } + + private: + UnitTestFilter positive_filter_; + UnitTestFilter negative_filter_; +}; +} // namespace + bool UnitTestOptions::MatchesFilter(const std::string& name_str, const char* filter) { - // The filter is a list of patterns separated by colons (:). - const char* pattern = filter; - while (true) { - // Find the bounds of this pattern. - const char* const next_sep = strchr(pattern, ':'); - const char* const pattern_end = - next_sep != nullptr ? next_sep : pattern + strlen(pattern); - - // Check if this pattern matches name_str. - if (PatternMatchesString(name_str, pattern, pattern_end)) { - return true; - } - - // Give up on this pattern. However, if we found a pattern separator (:), - // advance to the next pattern (skipping over the separator) and restart. - if (next_sep == nullptr) { - return false; - } - pattern = next_sep + 1; - } - return true; + return UnitTestFilter(filter).MatchesName(name_str); } // Returns true if and only if the user-specified filter matches the test // suite name and the test name. bool UnitTestOptions::FilterMatchesTest(const std::string& test_suite_name, const std::string& test_name) { - const std::string& full_name = test_suite_name + "." 
+ test_name.c_str(); - // Split --gtest_filter at '-', if there is one, to separate into // positive filter and negative filter portions - std::string str = GTEST_FLAG_GET(filter); - const char* const p = str.c_str(); - const char* const dash = strchr(p, '-'); - std::string positive; - std::string negative; - if (dash == nullptr) { - positive = str.c_str(); // Whole string is a positive filter - negative = ""; - } else { - positive = std::string(p, dash); // Everything up to the dash - negative = std::string(dash + 1); // Everything after the dash - if (positive.empty()) { - // Treat '-test1' as the same as '*-test1' - positive = kUniversalFilter; - } - } - - // A filter is a colon-separated list of patterns. It matches a - // test if any pattern in it matches the test. - return (MatchesFilter(full_name, positive.c_str()) && - !MatchesFilter(full_name, negative.c_str())); + return PositiveAndNegativeUnitTestFilter(GTEST_FLAG_GET(filter)) + .MatchesTest(test_suite_name, test_name); } #if GTEST_HAS_SEH @@ -814,8 +871,7 @@ int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { // results. Intercepts only failures from the current thread. ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( TestPartResultArray* result) - : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), - result_(result) { + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), result_(result) { Init(); } @@ -824,8 +880,7 @@ ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( // results. ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( InterceptMode intercept_mode, TestPartResultArray* result) - : intercept_mode_(intercept_mode), - result_(result) { + : intercept_mode_(intercept_mode), result_(result) { Init(); } @@ -869,9 +924,7 @@ namespace internal { // from user test code. GetTestTypeId() is guaranteed to always // return the same value, as it always calls GetTypeId<>() from the // gtest.cc, which is within the Google Test framework. 
-TypeId GetTestTypeId() { - return GetTypeId(); -} +TypeId GetTestTypeId() { return GetTypeId(); } // The value of GetTestTypeId() as seen from within the Google Test // library. This is solely for testing GetTestTypeId(). @@ -886,9 +939,9 @@ static AssertionResult HasOneFailure(const char* /* results_expr */, const TestPartResultArray& results, TestPartResult::Type type, const std::string& substr) { - const std::string expected(type == TestPartResult::kFatalFailure ? - "1 fatal failure" : - "1 non-fatal failure"); + const std::string expected(type == TestPartResult::kFatalFailure + ? "1 fatal failure" + : "1 non-fatal failure"); Message msg; if (results.size() != 1) { msg << "Expected: " << expected << "\n" @@ -907,10 +960,10 @@ static AssertionResult HasOneFailure(const char* /* results_expr */, } if (strstr(r.message(), substr.c_str()) == nullptr) { - return AssertionFailure() << "Expected: " << expected << " containing \"" - << substr << "\"\n" - << " Actual:\n" - << r; + return AssertionFailure() + << "Expected: " << expected << " containing \"" << substr << "\"\n" + << " Actual:\n" + << r; } return AssertionSuccess(); @@ -933,7 +986,8 @@ SingleFailureChecker::~SingleFailureChecker() { } DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( - UnitTestImpl* unit_test) : unit_test_(unit_test) {} + UnitTestImpl* unit_test) + : unit_test_(unit_test) {} void DefaultGlobalTestPartResultReporter::ReportTestPartResult( const TestPartResult& result) { @@ -942,7 +996,8 @@ void DefaultGlobalTestPartResultReporter::ReportTestPartResult( } DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( - UnitTestImpl* unit_test) : unit_test_(unit_test) {} + UnitTestImpl* unit_test) + : unit_test_(unit_test) {} void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( const TestPartResult& result) { @@ -1096,8 +1151,7 @@ LPCWSTR String::AnsiToUtf16(const char* ansi) { const int unicode_length = MultiByteToWideChar(CP_ACP, 0, 
ansi, length, nullptr, 0); WCHAR* unicode = new WCHAR[unicode_length + 1]; - MultiByteToWideChar(CP_ACP, 0, ansi, length, - unicode, unicode_length); + MultiByteToWideChar(CP_ACP, 0, ansi, length, unicode, unicode_length); unicode[unicode_length] = 0; return unicode; } @@ -1106,7 +1160,7 @@ LPCWSTR String::AnsiToUtf16(const char* ansi) { // memory using new. The caller is responsible for deleting the return // value using delete[]. Returns the ANSI string, or NULL if the // input is NULL. -const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { +const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { if (!utf16_str) return nullptr; const int ansi_length = WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, nullptr, 0, nullptr, nullptr); @@ -1125,7 +1179,7 @@ const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { // Unlike strcmp(), this function can handle NULL argument(s). A NULL // C string is considered different to any non-NULL C string, // including the empty string. -bool String::CStringEquals(const char * lhs, const char * rhs) { +bool String::CStringEquals(const char* lhs, const char* rhs) { if (lhs == nullptr) return rhs == nullptr; if (rhs == nullptr) return false; @@ -1139,11 +1193,10 @@ bool String::CStringEquals(const char * lhs, const char * rhs) { // encoding, and streams the result to the given Message object. static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, Message* msg) { - for (size_t i = 0; i != length; ) { // NOLINT + for (size_t i = 0; i != length;) { // NOLINT if (wstr[i] != L'\0') { *msg << WideStringToUtf8(wstr + i, static_cast(length - i)); - while (i != length && wstr[i] != L'\0') - i++; + while (i != length && wstr[i] != L'\0') i++; } else { *msg << '\0'; i++; @@ -1185,17 +1238,17 @@ Message::Message() : ss_(new ::std::stringstream) { // These two overloads allow streaming a wide C string to a Message // using the UTF-8 encoding. 
-Message& Message::operator <<(const wchar_t* wide_c_str) { +Message& Message::operator<<(const wchar_t* wide_c_str) { return *this << internal::String::ShowWideCString(wide_c_str); } -Message& Message::operator <<(wchar_t* wide_c_str) { +Message& Message::operator<<(wchar_t* wide_c_str) { return *this << internal::String::ShowWideCString(wide_c_str); } #if GTEST_HAS_STD_WSTRING // Converts the given wide string to a narrow string using the UTF-8 // encoding, and streams the result to this Message object. -Message& Message::operator <<(const ::std::wstring& wstr) { +Message& Message::operator<<(const ::std::wstring& wstr) { internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); return *this; } @@ -1207,44 +1260,6 @@ std::string Message::GetString() const { return internal::StringStreamToString(ss_.get()); } -// AssertionResult constructors. -// Used in EXPECT_TRUE/FALSE(assertion_result). -AssertionResult::AssertionResult(const AssertionResult& other) - : success_(other.success_), - message_(other.message_.get() != nullptr - ? new ::std::string(*other.message_) - : static_cast< ::std::string*>(nullptr)) {} - -// Swaps two AssertionResults. -void AssertionResult::swap(AssertionResult& other) { - using std::swap; - swap(success_, other.success_); - swap(message_, other.message_); -} - -// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. -AssertionResult AssertionResult::operator!() const { - AssertionResult negation(!success_); - if (message_.get() != nullptr) negation << *message_; - return negation; -} - -// Makes a successful assertion result. -AssertionResult AssertionSuccess() { - return AssertionResult(true); -} - -// Makes a failed assertion result. -AssertionResult AssertionFailure() { - return AssertionResult(false); -} - -// Makes a failed assertion result with the given failure message. -// Deprecated; use AssertionFailure() << message. 
-AssertionResult AssertionFailure(const Message& message) { - return AssertionFailure() << message; -} - namespace internal { namespace edit_distance { @@ -1536,8 +1551,7 @@ std::vector SplitEscapedString(const std::string& str) { AssertionResult EqFailure(const char* lhs_expression, const char* rhs_expression, const std::string& lhs_value, - const std::string& rhs_value, - bool ignoring_case) { + const std::string& rhs_value, bool ignoring_case) { Message msg; msg << "Expected equality of these values:"; msg << "\n " << lhs_expression; @@ -1554,10 +1568,8 @@ AssertionResult EqFailure(const char* lhs_expression, } if (!lhs_value.empty() && !rhs_value.empty()) { - const std::vector lhs_lines = - SplitEscapedString(lhs_value); - const std::vector rhs_lines = - SplitEscapedString(rhs_value); + const std::vector lhs_lines = SplitEscapedString(lhs_value); + const std::vector rhs_lines = SplitEscapedString(rhs_value); if (lhs_lines.size() > 1 || rhs_lines.size() > 1) { msg << "\nWith diff:\n" << edit_distance::CreateUnifiedDiff(lhs_lines, rhs_lines); @@ -1569,27 +1581,21 @@ AssertionResult EqFailure(const char* lhs_expression, // Constructs a failure message for Boolean assertions such as EXPECT_TRUE. std::string GetBoolAssertionFailureMessage( - const AssertionResult& assertion_result, - const char* expression_text, - const char* actual_predicate_value, - const char* expected_predicate_value) { + const AssertionResult& assertion_result, const char* expression_text, + const char* actual_predicate_value, const char* expected_predicate_value) { const char* actual_message = assertion_result.message(); Message msg; msg << "Value of: " << expression_text << "\n Actual: " << actual_predicate_value; - if (actual_message[0] != '\0') - msg << " (" << actual_message << ")"; + if (actual_message[0] != '\0') msg << " (" << actual_message << ")"; msg << "\nExpected: " << expected_predicate_value; return msg.GetString(); } // Helper function for implementing ASSERT_NEAR. 
-AssertionResult DoubleNearPredFormat(const char* expr1, - const char* expr2, - const char* abs_error_expr, - double val1, - double val2, - double abs_error) { +AssertionResult DoubleNearPredFormat(const char* expr1, const char* expr2, + const char* abs_error_expr, double val1, + double val2, double abs_error) { const double diff = fabs(val1 - val2); if (diff <= abs_error) return AssertionSuccess(); @@ -1619,20 +1625,17 @@ AssertionResult DoubleNearPredFormat(const char* expr1, "EXPECT_EQUAL. Consider using EXPECT_DOUBLE_EQ instead."; } return AssertionFailure() - << "The difference between " << expr1 << " and " << expr2 - << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n" - << expr1 << " evaluates to " << val1 << ",\n" - << expr2 << " evaluates to " << val2 << ", and\n" - << abs_error_expr << " evaluates to " << abs_error << "."; + << "The difference between " << expr1 << " and " << expr2 << " is " + << diff << ", which exceeds " << abs_error_expr << ", where\n" + << expr1 << " evaluates to " << val1 << ",\n" + << expr2 << " evaluates to " << val2 << ", and\n" + << abs_error_expr << " evaluates to " << abs_error << "."; } - // Helper template for implementing FloatLE() and DoubleLE(). 
template -AssertionResult FloatingPointLE(const char* expr1, - const char* expr2, - RawType val1, - RawType val2) { +AssertionResult FloatingPointLE(const char* expr1, const char* expr2, + RawType val1, RawType val2) { // Returns success if val1 is less than val2, if (val1 < val2) { return AssertionSuccess(); @@ -1657,24 +1660,24 @@ AssertionResult FloatingPointLE(const char* expr1, << val2; return AssertionFailure() - << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n" - << " Actual: " << StringStreamToString(&val1_ss) << " vs " - << StringStreamToString(&val2_ss); + << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n" + << " Actual: " << StringStreamToString(&val1_ss) << " vs " + << StringStreamToString(&val2_ss); } } // namespace internal // Asserts that val1 is less than, or almost equal to, val2. Fails // otherwise. In particular, it fails if either val1 or val2 is NaN. -AssertionResult FloatLE(const char* expr1, const char* expr2, - float val1, float val2) { +AssertionResult FloatLE(const char* expr1, const char* expr2, float val1, + float val2) { return internal::FloatingPointLE(expr1, expr2, val1, val2); } // Asserts that val1 is less than, or almost equal to, val2. Fails // otherwise. In particular, it fails if either val1 or val2 is NaN. -AssertionResult DoubleLE(const char* expr1, const char* expr2, - double val1, double val2) { +AssertionResult DoubleLE(const char* expr1, const char* expr2, double val1, + double val2) { return internal::FloatingPointLE(expr1, expr2, val1, val2); } @@ -1682,62 +1685,51 @@ namespace internal { // The helper function for {ASSERT|EXPECT}_STREQ. 
AssertionResult CmpHelperSTREQ(const char* lhs_expression, - const char* rhs_expression, - const char* lhs, + const char* rhs_expression, const char* lhs, const char* rhs) { if (String::CStringEquals(lhs, rhs)) { return AssertionSuccess(); } - return EqFailure(lhs_expression, - rhs_expression, - PrintToString(lhs), - PrintToString(rhs), - false); + return EqFailure(lhs_expression, rhs_expression, PrintToString(lhs), + PrintToString(rhs), false); } // The helper function for {ASSERT|EXPECT}_STRCASEEQ. AssertionResult CmpHelperSTRCASEEQ(const char* lhs_expression, - const char* rhs_expression, - const char* lhs, + const char* rhs_expression, const char* lhs, const char* rhs) { if (String::CaseInsensitiveCStringEquals(lhs, rhs)) { return AssertionSuccess(); } - return EqFailure(lhs_expression, - rhs_expression, - PrintToString(lhs), - PrintToString(rhs), - true); + return EqFailure(lhs_expression, rhs_expression, PrintToString(lhs), + PrintToString(rhs), true); } // The helper function for {ASSERT|EXPECT}_STRNE. AssertionResult CmpHelperSTRNE(const char* s1_expression, - const char* s2_expression, - const char* s1, + const char* s2_expression, const char* s1, const char* s2) { if (!String::CStringEquals(s1, s2)) { return AssertionSuccess(); } else { - return AssertionFailure() << "Expected: (" << s1_expression << ") != (" - << s2_expression << "), actual: \"" - << s1 << "\" vs \"" << s2 << "\""; + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" << s2_expression + << "), actual: \"" << s1 << "\" vs \"" << s2 << "\""; } } // The helper function for {ASSERT|EXPECT}_STRCASENE. 
AssertionResult CmpHelperSTRCASENE(const char* s1_expression, - const char* s2_expression, - const char* s1, + const char* s2_expression, const char* s1, const char* s2) { if (!String::CaseInsensitiveCStringEquals(s1, s2)) { return AssertionSuccess(); } else { return AssertionFailure() - << "Expected: (" << s1_expression << ") != (" - << s2_expression << ") (ignoring case), actual: \"" - << s1 << "\" vs \"" << s2 << "\""; + << "Expected: (" << s1_expression << ") != (" << s2_expression + << ") (ignoring case), actual: \"" << s1 << "\" vs \"" << s2 << "\""; } } @@ -1765,8 +1757,7 @@ bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) { // StringType here can be either ::std::string or ::std::wstring. template -bool IsSubstringPred(const StringType& needle, - const StringType& haystack) { +bool IsSubstringPred(const StringType& needle, const StringType& haystack) { return haystack.find(needle) != StringType::npos; } @@ -1775,21 +1766,22 @@ bool IsSubstringPred(const StringType& needle, // StringType here can be const char*, const wchar_t*, ::std::string, // or ::std::wstring. template -AssertionResult IsSubstringImpl( - bool expected_to_be_substring, - const char* needle_expr, const char* haystack_expr, - const StringType& needle, const StringType& haystack) { +AssertionResult IsSubstringImpl(bool expected_to_be_substring, + const char* needle_expr, + const char* haystack_expr, + const StringType& needle, + const StringType& haystack) { if (IsSubstringPred(needle, haystack) == expected_to_be_substring) return AssertionSuccess(); const bool is_wide_string = sizeof(needle[0]) > 1; const char* const begin_string_quote = is_wide_string ? "L\"" : "\""; return AssertionFailure() - << "Value of: " << needle_expr << "\n" - << " Actual: " << begin_string_quote << needle << "\"\n" - << "Expected: " << (expected_to_be_substring ? 
"" : "not ") - << "a substring of " << haystack_expr << "\n" - << "Which is: " << begin_string_quote << haystack << "\""; + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? "" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; } } // namespace @@ -1798,52 +1790,52 @@ AssertionResult IsSubstringImpl( // substring of haystack (NULL is considered a substring of itself // only), and return an appropriate error message when they fail. -AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const char* needle, const char* haystack) { +AssertionResult IsSubstring(const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); } -AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const wchar_t* needle, const wchar_t* haystack) { +AssertionResult IsSubstring(const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); } -AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const char* needle, const char* haystack) { +AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, const char* needle, + const char* haystack) { return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); } -AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const wchar_t* needle, const wchar_t* haystack) { +AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, const wchar_t* needle, + const wchar_t* haystack) { return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); } -AssertionResult 
IsSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::string& needle, const ::std::string& haystack) { +AssertionResult IsSubstring(const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, + const ::std::string& haystack) { return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); } -AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::string& needle, const ::std::string& haystack) { +AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, + const ::std::string& needle, + const ::std::string& haystack) { return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); } #if GTEST_HAS_STD_WSTRING -AssertionResult IsSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::wstring& needle, const ::std::wstring& haystack) { +AssertionResult IsSubstring(const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, + const ::std::wstring& haystack) { return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); } -AssertionResult IsNotSubstring( - const char* needle_expr, const char* haystack_expr, - const ::std::wstring& needle, const ::std::wstring& haystack) { +AssertionResult IsNotSubstring(const char* needle_expr, + const char* haystack_expr, + const ::std::wstring& needle, + const ::std::wstring& haystack) { return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); } #endif // GTEST_HAS_STD_WSTRING @@ -1855,43 +1847,42 @@ namespace internal { namespace { // Helper function for IsHRESULT{SuccessFailure} predicates -AssertionResult HRESULTFailureHelper(const char* expr, - const char* expected, +AssertionResult HRESULTFailureHelper(const char* expr, const char* expected, long hr) { // NOLINT -# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_TV_TITLE +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_TV_TITLE // Windows CE doesn't 
support FormatMessage. const char error_text[] = ""; -# else +#else // Looks up the human-readable system message for the HRESULT code // and since we're not passing any params to FormatMessage, we don't // want inserts expanded. - const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS; + const DWORD kFlags = + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS; const DWORD kBufSize = 4096; // Gets the system's human readable message string for this HRESULT. - char error_text[kBufSize] = { '\0' }; + char error_text[kBufSize] = {'\0'}; DWORD message_length = ::FormatMessageA(kFlags, - 0, // no source, we're asking system + 0, // no source, we're asking system static_cast(hr), // the error - 0, // no line width restrictions + 0, // no line width restrictions error_text, // output buffer kBufSize, // buf size nullptr); // no arguments for inserts // Trims tailing white space (FormatMessage leaves a trailing CR-LF) for (; message_length && IsSpace(error_text[message_length - 1]); - --message_length) { + --message_length) { error_text[message_length - 1] = '\0'; } -# endif // GTEST_OS_WINDOWS_MOBILE +#endif // GTEST_OS_WINDOWS_MOBILE const std::string error_hex("0x" + String::FormatHexInt(hr)); return ::testing::AssertionFailure() - << "Expected: " << expr << " " << expected << ".\n" - << " Actual: " << error_hex << " " << error_text << "\n"; + << "Expected: " << expr << " " << expected << ".\n" + << " Actual: " << error_hex << " " << error_text << "\n"; } } // namespace @@ -1925,16 +1916,18 @@ AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT // 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx // The maximum code-point a one-byte UTF-8 sequence can represent. -constexpr uint32_t kMaxCodePoint1 = (static_cast(1) << 7) - 1; +constexpr uint32_t kMaxCodePoint1 = (static_cast(1) << 7) - 1; // The maximum code-point a two-byte UTF-8 sequence can represent. 
constexpr uint32_t kMaxCodePoint2 = (static_cast(1) << (5 + 6)) - 1; // The maximum code-point a three-byte UTF-8 sequence can represent. -constexpr uint32_t kMaxCodePoint3 = (static_cast(1) << (4 + 2*6)) - 1; +constexpr uint32_t kMaxCodePoint3 = + (static_cast(1) << (4 + 2 * 6)) - 1; // The maximum code-point a four-byte UTF-8 sequence can represent. -constexpr uint32_t kMaxCodePoint4 = (static_cast(1) << (3 + 3*6)) - 1; +constexpr uint32_t kMaxCodePoint4 = + (static_cast(1) << (3 + 3 * 6)) - 1; // Chops off the n lowest bits from a bit pattern. Returns the n // lowest bits. As a side effect, the original bit pattern will be @@ -1959,7 +1952,7 @@ std::string CodePointToUtf8(uint32_t code_point) { char str[5]; // Big enough for the largest valid code point. if (code_point <= kMaxCodePoint1) { str[1] = '\0'; - str[0] = static_cast(code_point); // 0xxxxxxx + str[0] = static_cast(code_point); // 0xxxxxxx } else if (code_point <= kMaxCodePoint2) { str[2] = '\0'; str[1] = static_cast(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx @@ -1987,8 +1980,8 @@ std::string CodePointToUtf8(uint32_t code_point) { // and thus should be combined into a single Unicode code point // using CreateCodePointFromUtf16SurrogatePair. inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) { - return sizeof(wchar_t) == 2 && - (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00; + return sizeof(wchar_t) == 2 && (first & 0xFC00) == 0xD800 && + (second & 0xFC00) == 0xDC00; } // Creates a Unicode code point from UTF16 surrogate pair. @@ -2019,8 +2012,7 @@ inline uint32_t CreateCodePointFromUtf16SurrogatePair(wchar_t first, // and contains invalid UTF-16 surrogate pairs, values in those pairs // will be encoded as individual Unicode characters from Basic Normal Plane. 
std::string WideStringToUtf8(const wchar_t* str, int num_chars) { - if (num_chars == -1) - num_chars = static_cast(wcslen(str)); + if (num_chars == -1) num_chars = static_cast(wcslen(str)); ::std::stringstream stream; for (int i = 0; i < num_chars; ++i) { @@ -2029,8 +2021,8 @@ std::string WideStringToUtf8(const wchar_t* str, int num_chars) { if (str[i] == L'\0') { break; } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) { - unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i], - str[i + 1]); + unicode_code_point = + CreateCodePointFromUtf16SurrogatePair(str[i], str[i + 1]); i++; } else { unicode_code_point = static_cast(str[i]); @@ -2043,7 +2035,7 @@ std::string WideStringToUtf8(const wchar_t* str, int num_chars) { // Converts a wide C string to an std::string using the UTF-8 encoding. // NULL will be converted to "(null)". -std::string String::ShowWideCString(const wchar_t * wide_c_str) { +std::string String::ShowWideCString(const wchar_t* wide_c_str) { if (wide_c_str == nullptr) return "(null)"; return internal::WideStringToUtf8(wide_c_str, -1); @@ -2055,7 +2047,7 @@ std::string String::ShowWideCString(const wchar_t * wide_c_str) { // Unlike wcscmp(), this function can handle NULL argument(s). A NULL // C string is considered different to any non-NULL C string, // including the empty string. -bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { +bool String::WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs) { if (lhs == nullptr) return rhs == nullptr; if (rhs == nullptr) return false; @@ -2065,33 +2057,27 @@ bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { // Helper function for *_STREQ on wide strings. 
AssertionResult CmpHelperSTREQ(const char* lhs_expression, - const char* rhs_expression, - const wchar_t* lhs, + const char* rhs_expression, const wchar_t* lhs, const wchar_t* rhs) { if (String::WideCStringEquals(lhs, rhs)) { return AssertionSuccess(); } - return EqFailure(lhs_expression, - rhs_expression, - PrintToString(lhs), - PrintToString(rhs), - false); + return EqFailure(lhs_expression, rhs_expression, PrintToString(lhs), + PrintToString(rhs), false); } // Helper function for *_STRNE on wide strings. AssertionResult CmpHelperSTRNE(const char* s1_expression, - const char* s2_expression, - const wchar_t* s1, + const char* s2_expression, const wchar_t* s1, const wchar_t* s2) { if (!String::WideCStringEquals(s1, s2)) { return AssertionSuccess(); } - return AssertionFailure() << "Expected: (" << s1_expression << ") != (" - << s2_expression << "), actual: " - << PrintToString(s1) - << " vs " << PrintToString(s2); + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" << s2_expression + << "), actual: " << PrintToString(s1) << " vs " << PrintToString(s2); } // Compares two C strings, ignoring case. Returns true if and only if they have @@ -2100,7 +2086,7 @@ AssertionResult CmpHelperSTRNE(const char* s1_expression, // Unlike strcasecmp(), this function can handle NULL argument(s). A // NULL C string is considered different to any non-NULL C string, // including the empty string. -bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { +bool String::CaseInsensitiveCStringEquals(const char* lhs, const char* rhs) { if (lhs == nullptr) return rhs == nullptr; if (rhs == nullptr) return false; return posix::StrCaseCmp(lhs, rhs) == 0; @@ -2142,8 +2128,8 @@ bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, // Returns true if and only if str ends with the given suffix, ignoring case. // Any string is considered to end with an empty suffix. 
-bool String::EndsWithCaseInsensitive( - const std::string& str, const std::string& suffix) { +bool String::EndsWithCaseInsensitive(const std::string& str, + const std::string& suffix) { const size_t str_len = str.length(); const size_t suffix_len = suffix.length(); return (str_len >= suffix_len) && @@ -2226,15 +2212,13 @@ TestResult::TestResult() : death_test_count_(0), start_timestamp_(0), elapsed_time_(0) {} // D'tor. -TestResult::~TestResult() { -} +TestResult::~TestResult() {} // Returns the i-th test part result among all the results. i can // range from 0 to total_part_count() - 1. If i is not in that range, // aborts the program. const TestPartResult& TestResult::GetTestPartResult(int i) const { - if (i < 0 || i >= total_part_count()) - internal::posix::Abort(); + if (i < 0 || i >= total_part_count()) internal::posix::Abort(); return test_part_results_.at(static_cast(i)); } @@ -2242,15 +2226,12 @@ const TestPartResult& TestResult::GetTestPartResult(int i) const { // test_property_count() - 1. If i is not in that range, aborts the // program. const TestProperty& TestResult::GetTestProperty(int i) const { - if (i < 0 || i >= test_property_count()) - internal::posix::Abort(); + if (i < 0 || i >= test_property_count()) internal::posix::Abort(); return test_properties_.at(static_cast(i)); } // Clears the test part results. -void TestResult::ClearTestPartResults() { - test_part_results_.clear(); -} +void TestResult::ClearTestPartResults() { test_part_results_.clear(); } // Adds a test part result to the list. void TestResult::AddTestPartResult(const TestPartResult& test_part_result) { @@ -2279,15 +2260,8 @@ void TestResult::RecordProperty(const std::string& xml_element, // The list of reserved attributes used in the element of XML // output. 
static const char* const kReservedTestSuitesAttributes[] = { - "disabled", - "errors", - "failures", - "name", - "random_seed", - "tests", - "time", - "timestamp" -}; + "disabled", "errors", "failures", "name", + "random_seed", "tests", "time", "timestamp"}; // The list of reserved attributes used in the element of XML // output. @@ -2297,8 +2271,8 @@ static const char* const kReservedTestSuiteAttributes[] = { // The list of reserved attributes used in the element of XML output. static const char* const kReservedTestCaseAttributes[] = { - "classname", "name", "status", "time", "type_param", - "value_param", "file", "line"}; + "classname", "name", "status", "time", + "type_param", "value_param", "file", "line"}; // Use a slightly different set for allowed output to ensure existing tests can // still RecordProperty("result") or "RecordProperty(timestamp") @@ -2360,7 +2334,7 @@ static bool ValidateTestPropertyName( const std::string& property_name, const std::vector& reserved_names) { if (std::find(reserved_names.begin(), reserved_names.end(), property_name) != - reserved_names.end()) { + reserved_names.end()) { ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name << " (" << FormatWordList(reserved_names) << " are reserved by " << GTEST_NAME_ << ")"; @@ -2398,8 +2372,7 @@ bool TestResult::Skipped() const { // Returns true if and only if the test failed. bool TestResult::Failed() const { for (int i = 0; i < total_part_count(); ++i) { - if (GetTestPartResult(i).failed()) - return true; + if (GetTestPartResult(i).failed()) return true; } return false; } @@ -2440,27 +2413,22 @@ int TestResult::test_property_count() const { // Creates a Test object. // The c'tor saves the states of all flags. -Test::Test() - : gtest_flag_saver_(new GTEST_FLAG_SAVER_) { -} +Test::Test() : gtest_flag_saver_(new GTEST_FLAG_SAVER_) {} // The d'tor restores the states of all flags. 
The actual work is // done by the d'tor of the gtest_flag_saver_ field, and thus not // visible here. -Test::~Test() { -} +Test::~Test() {} // Sets up the test fixture. // // A sub-class may override this. -void Test::SetUp() { -} +void Test::SetUp() {} // Tears down the test fixture. // // A sub-class may override this. -void Test::TearDown() { -} +void Test::TearDown() {} // Allows user supplied key value pairs to be recorded for later output. void Test::RecordProperty(const std::string& key, const std::string& value) { @@ -2565,8 +2533,8 @@ bool Test::HasSameFixtureClass() { static std::string* FormatSehExceptionMessage(DWORD exception_code, const char* location) { Message message; - message << "SEH exception with code 0x" << std::setbase(16) << - exception_code << std::setbase(10) << " thrown in " << location << "."; + message << "SEH exception with code 0x" << std::setbase(16) << exception_code + << std::setbase(10) << " thrown in " << location << "."; return new std::string(message.GetString()); } @@ -2609,8 +2577,8 @@ GoogleTestFailureException::GoogleTestFailureException( // exceptions in the same function. Therefore, we provide a separate // wrapper function for handling SEH exceptions.) template -Result HandleSehExceptionsInMethodIfSupported( - T* object, Result (T::*method)(), const char* location) { +Result HandleSehExceptionsInMethodIfSupported(T* object, Result (T::*method)(), + const char* location) { #if GTEST_HAS_SEH __try { return (object->*method)(); @@ -2619,8 +2587,8 @@ Result HandleSehExceptionsInMethodIfSupported( // We create the exception message on the heap because VC++ prohibits // creation of objects with destructors on stack in functions using __try // (see error C2712). 
- std::string* exception_message = FormatSehExceptionMessage( - GetExceptionCode(), location); + std::string* exception_message = + FormatSehExceptionMessage(GetExceptionCode(), location); internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure, *exception_message); delete exception_message; @@ -2636,8 +2604,8 @@ Result HandleSehExceptionsInMethodIfSupported( // exceptions, if they are supported; returns the 0-value for type // Result in case of an SEH exception. template -Result HandleExceptionsInMethodIfSupported( - T* object, Result (T::*method)(), const char* location) { +Result HandleExceptionsInMethodIfSupported(T* object, Result (T::*method)(), + const char* location) { // NOTE: The user code can affect the way in which Google Test handles // exceptions by setting GTEST_FLAG(catch_exceptions), but only before // RUN_ALL_TESTS() starts. It is technically possible to check the flag @@ -2703,16 +2671,16 @@ void Test::Run() { // GTEST_SKIP(). if (!HasFatalFailure() && !IsSkipped()) { impl->os_stack_trace_getter()->UponLeavingGTest(); - internal::HandleExceptionsInMethodIfSupported( - this, &Test::TestBody, "the test body"); + internal::HandleExceptionsInMethodIfSupported(this, &Test::TestBody, + "the test body"); } // However, we want to clean up as much as possible. Hence we will // always call TearDown(), even if SetUp() or the test body has // failed. impl->os_stack_trace_getter()->UponLeavingGTest(); - internal::HandleExceptionsInMethodIfSupported( - this, &Test::TearDown, "TearDown()"); + internal::HandleExceptionsInMethodIfSupported(this, &Test::TearDown, + "TearDown()"); } // Returns true if and only if the current test has a fatal failure. @@ -2722,8 +2690,9 @@ bool Test::HasFatalFailure() { // Returns true if and only if the current test has a non-fatal failure. 
bool Test::HasNonfatalFailure() { - return internal::GetUnitTestImpl()->current_test_result()-> - HasNonfatalFailure(); + return internal::GetUnitTestImpl() + ->current_test_result() + ->HasNonfatalFailure(); } // Returns true if and only if the current test was skipped. @@ -2823,11 +2792,10 @@ class TestNameIs { // Constructor. // // TestNameIs has NO default constructor. - explicit TestNameIs(const char* name) - : name_(name) {} + explicit TestNameIs(const char* name) : name_(name) {} // Returns true if and only if the test name of test_info matches name_. - bool operator()(const TestInfo * test_info) const { + bool operator()(const TestInfo* test_info) const { return test_info && test_info->name() == name_; } @@ -2855,20 +2823,20 @@ void UnitTestImpl::RegisterParameterizedTests() { // Creates the test object, runs it, records its result, and then // deletes it. void TestInfo::Run() { - if (!should_run_) return; + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + if (!should_run_) { + if (is_disabled_ && matches_filter_) repeater->OnTestDisabled(*this); + return; + } // Tells UnitTest where to store test result. internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); impl->set_current_test_info(this); - TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); - // Notifies the unit test event listeners that a test is about to start. repeater->OnTestStart(*this); - result_.set_start_timestamp(internal::GetTimeInMillis()); internal::Timer timer; - impl->os_stack_trace_getter()->UponLeavingGTest(); // Creates the test object. 
@@ -3033,10 +3001,16 @@ void TestSuite::Run() { internal::HandleExceptionsInMethodIfSupported( this, &TestSuite::RunSetUpTestSuite, "SetUpTestSuite()"); + const bool skip_all = ad_hoc_test_result().Failed(); + start_timestamp_ = internal::GetTimeInMillis(); internal::Timer timer; for (int i = 0; i < total_test_count(); i++) { - GetMutableTestInfo(i)->Run(); + if (skip_all) { + GetMutableTestInfo(i)->Skip(); + } else { + GetMutableTestInfo(i)->Run(); + } if (GTEST_FLAG_GET(fail_fast) && GetMutableTestInfo(i)->result()->Failed()) { for (int j = i + 1; j < total_test_count(); j++) { @@ -3114,11 +3088,10 @@ void TestSuite::UnshuffleTests() { // // FormatCountableNoun(1, "formula", "formuli") returns "1 formula". // FormatCountableNoun(5, "book", "books") returns "5 books". -static std::string FormatCountableNoun(int count, - const char * singular_form, - const char * plural_form) { +static std::string FormatCountableNoun(int count, const char* singular_form, + const char* plural_form) { return internal::StreamableToString(count) + " " + - (count == 1 ? singular_form : plural_form); + (count == 1 ? singular_form : plural_form); } // Formats the count of tests. @@ -3135,7 +3108,7 @@ static std::string FormatTestSuiteCount(int test_suite_count) { // representation. Both kNonFatalFailure and kFatalFailure are translated // to "Failure", as the user usually doesn't care about the difference // between the two when viewing the test result. -static const char * TestPartResultTypeToString(TestPartResult::Type type) { +static const char* TestPartResultTypeToString(TestPartResult::Type type) { switch (type) { case TestPartResult::kSkip: return "Skipped\n"; @@ -3162,17 +3135,18 @@ enum class GTestColor { kDefault, kRed, kGreen, kYellow }; // Prints a TestPartResult to an std::string. 
static std::string PrintTestPartResultToString( const TestPartResult& test_part_result) { - return (Message() - << internal::FormatFileLocation(test_part_result.file_name(), - test_part_result.line_number()) - << " " << TestPartResultTypeToString(test_part_result.type()) - << test_part_result.message()).GetString(); + return (Message() << internal::FormatFileLocation( + test_part_result.file_name(), + test_part_result.line_number()) + << " " + << TestPartResultTypeToString(test_part_result.type()) + << test_part_result.message()) + .GetString(); } // Prints a TestPartResult. static void PrintTestPartResult(const TestPartResult& test_part_result) { - const std::string& result = - PrintTestPartResultToString(test_part_result); + const std::string& result = PrintTestPartResultToString(test_part_result); printf("%s\n", result.c_str()); fflush(stdout); // If the test program runs in Visual Studio or a debugger, the @@ -3189,8 +3163,8 @@ static void PrintTestPartResult(const TestPartResult& test_part_result) { } // class PrettyUnitTestResultPrinter -#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \ - !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && \ + !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW // Returns the character attribute for the given color. 
static WORD GetColorAttribute(GTestColor color) { @@ -3201,7 +3175,8 @@ static WORD GetColorAttribute(GTestColor color) { return FOREGROUND_GREEN; case GTestColor::kYellow: return FOREGROUND_RED | FOREGROUND_GREEN; - default: return 0; + default: + return 0; } } @@ -3285,9 +3260,9 @@ bool ShouldUseColor(bool stdout_is_tty) { } return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || - String::CaseInsensitiveCStringEquals(gtest_color, "true") || - String::CaseInsensitiveCStringEquals(gtest_color, "t") || - String::CStringEquals(gtest_color, "1"); + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); // We take "yes", "true", "t", and "1" as meaning "yes". If the // value is neither one of these nor "auto", we treat it as "no" to // be conservative. @@ -3299,18 +3274,13 @@ bool ShouldUseColor(bool stdout_is_tty) { // that would be colored when printed, as can be done on Linux. GTEST_ATTRIBUTE_PRINTF_(2, 3) -static void ColoredPrintf(GTestColor color, const char *fmt, ...) { +static void ColoredPrintf(GTestColor color, const char* fmt, ...) { va_list args; va_start(args, fmt); -#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS || GTEST_OS_IOS || \ - GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT || defined(ESP_PLATFORM) - const bool use_color = AlwaysFalse(); -#else static const bool in_color_mode = ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); const bool use_color = in_color_mode && (color != GTestColor::kDefault); -#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS if (!use_color) { vprintf(fmt, args); @@ -3318,8 +3288,8 @@ static void ColoredPrintf(GTestColor color, const char *fmt, ...) 
{ return; } -#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \ - !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && \ + !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); // Gets the current text color. @@ -3390,6 +3360,7 @@ class PrettyUnitTestResultPrinter : public TestEventListener { #endif // OnTestCaseStart void OnTestStart(const TestInfo& test_info) override; + void OnTestDisabled(const TestInfo& test_info) override; void OnTestPartResult(const TestPartResult& result) override; void OnTestEnd(const TestInfo& test_info) override; @@ -3410,7 +3381,7 @@ class PrettyUnitTestResultPrinter : public TestEventListener { static void PrintSkippedTests(const UnitTest& unit_test); }; - // Fired before each iteration of tests starts. +// Fired before each iteration of tests starts. void PrettyUnitTestResultPrinter::OnTestIterationStart( const UnitTest& unit_test, int iteration) { if (GTEST_FLAG_GET(repeat) != 1) @@ -3489,6 +3460,13 @@ void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { fflush(stdout); } +void PrettyUnitTestResultPrinter::OnTestDisabled(const TestInfo& test_info) { + ColoredPrintf(GTestColor::kYellow, "[ DISABLED ] "); + PrintTestName(test_info.test_suite_name(), test_info.name()); + printf("\n"); + fflush(stdout); +} + // Called after an assertion failure. 
void PrettyUnitTestResultPrinter::OnTestPartResult( const TestPartResult& result) { @@ -3513,12 +3491,12 @@ void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { ColoredPrintf(GTestColor::kRed, "[ FAILED ] "); } PrintTestName(test_info.test_suite_name(), test_info.name()); - if (test_info.result()->Failed()) - PrintFullTestCommentIfPresent(test_info); + if (test_info.result()->Failed()) PrintFullTestCommentIfPresent(test_info); if (GTEST_FLAG_GET(print_time)) { - printf(" (%s ms)\n", internal::StreamableToString( - test_info.result()->elapsed_time()).c_str()); + printf(" (%s ms)\n", + internal::StreamableToString(test_info.result()->elapsed_time()) + .c_str()); } else { printf("\n"); } @@ -3691,6 +3669,7 @@ class BriefUnitTestResultPrinter : public TestEventListener { #endif // OnTestCaseStart void OnTestStart(const TestInfo& /*test_info*/) override {} + void OnTestDisabled(const TestInfo& /*test_info*/) override {} void OnTestPartResult(const TestPartResult& result) override; void OnTestEnd(const TestInfo& test_info) override; @@ -3779,7 +3758,7 @@ class TestEventRepeater : public TestEventListener { public: TestEventRepeater() : forwarding_enabled_(true) {} ~TestEventRepeater() override; - void Append(TestEventListener *listener); + void Append(TestEventListener* listener); TestEventListener* Release(TestEventListener* listener); // Controls whether events will be forwarded to listeners_. 
Set to false @@ -3797,6 +3776,7 @@ class TestEventRepeater : public TestEventListener { #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ void OnTestSuiteStart(const TestSuite& parameter) override; void OnTestStart(const TestInfo& test_info) override; + void OnTestDisabled(const TestInfo& test_info) override; void OnTestPartResult(const TestPartResult& result) override; void OnTestEnd(const TestInfo& test_info) override; // Legacy API is deprecated but still available @@ -3816,18 +3796,19 @@ class TestEventRepeater : public TestEventListener { // The list of listeners that receive events. std::vector listeners_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater); + TestEventRepeater(const TestEventRepeater&) = delete; + TestEventRepeater& operator=(const TestEventRepeater&) = delete; }; TestEventRepeater::~TestEventRepeater() { ForEach(listeners_, Delete); } -void TestEventRepeater::Append(TestEventListener *listener) { +void TestEventRepeater::Append(TestEventListener* listener) { listeners_.push_back(listener); } -TestEventListener* TestEventRepeater::Release(TestEventListener *listener) { +TestEventListener* TestEventRepeater::Release(TestEventListener* listener) { for (size_t i = 0; i < listeners_.size(); ++i) { if (listeners_[i] == listener) { listeners_.erase(listeners_.begin() + static_cast(i)); @@ -3840,14 +3821,14 @@ TestEventListener* TestEventRepeater::Release(TestEventListener *listener) { // Since most methods are very similar, use macros to reduce boilerplate. // This defines a member that forwards the call to all listeners. 
-#define GTEST_REPEATER_METHOD_(Name, Type) \ -void TestEventRepeater::Name(const Type& parameter) { \ - if (forwarding_enabled_) { \ - for (size_t i = 0; i < listeners_.size(); i++) { \ - listeners_[i]->Name(parameter); \ - } \ - } \ -} +#define GTEST_REPEATER_METHOD_(Name, Type) \ + void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (size_t i = 0; i < listeners_.size(); i++) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ + } // This defines a member that forwards the call to all listeners in reverse // order. #define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \ @@ -3867,6 +3848,7 @@ GTEST_REPEATER_METHOD_(OnTestCaseStart, TestSuite) #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ GTEST_REPEATER_METHOD_(OnTestSuiteStart, TestSuite) GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) +GTEST_REPEATER_METHOD_(OnTestDisabled, TestInfo) GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult) GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest) GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest) @@ -3917,12 +3899,13 @@ class XmlUnitTestResultPrinter : public EmptyTestEventListener { private: // Is c a whitespace character that is normalized to a space character // when it appears in an XML attribute value? - static bool IsNormalizableWhitespace(char c) { - return c == 0x9 || c == 0xA || c == 0xD; + static bool IsNormalizableWhitespace(unsigned char c) { + return c == '\t' || c == '\n' || c == '\r'; } // May c appear in a well-formed XML document? - static bool IsValidXmlCharacter(char c) { + // https://www.w3.org/TR/REC-xml/#charsets + static bool IsValidXmlCharacter(unsigned char c) { return IsNormalizableWhitespace(c) || c >= 0x20; } @@ -3992,7 +3975,8 @@ class XmlUnitTestResultPrinter : public EmptyTestEventListener { // The output file. 
const std::string output_file_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); + XmlUnitTestResultPrinter(const XmlUnitTestResultPrinter&) = delete; + XmlUnitTestResultPrinter& operator=(const XmlUnitTestResultPrinter&) = delete; }; // Creates a new XmlUnitTestResultPrinter. @@ -4032,8 +4016,8 @@ void XmlUnitTestResultPrinter::ListTestsMatchingFilter( // module will consist of ordinary English text. // If this module is ever modified to produce version 1.1 XML output, // most invalid characters can be retained using character references. -std::string XmlUnitTestResultPrinter::EscapeXml( - const std::string& str, bool is_attribute) { +std::string XmlUnitTestResultPrinter::EscapeXml(const std::string& str, + bool is_attribute) { Message m; for (size_t i = 0; i < str.size(); ++i) { @@ -4061,8 +4045,9 @@ std::string XmlUnitTestResultPrinter::EscapeXml( m << '"'; break; default: - if (IsValidXmlCharacter(ch)) { - if (is_attribute && IsNormalizableWhitespace(ch)) + if (IsValidXmlCharacter(static_cast(ch))) { + if (is_attribute && + IsNormalizableWhitespace(static_cast(ch))) m << "&#x" << String::FormatByte(static_cast(ch)) << ";"; else @@ -4083,7 +4068,7 @@ std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters( std::string output; output.reserve(str.size()); for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) - if (IsValidXmlCharacter(*it)) + if (IsValidXmlCharacter(static_cast(*it))) output.push_back(*it); return output; @@ -4091,7 +4076,6 @@ std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters( // The following routines generate an XML representation of a UnitTest // object. 
-// GOOGLETEST_CM0009 DO NOT DELETE // // This is how Google Test concepts map to the DTD: // @@ -4140,12 +4124,12 @@ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) { return ""; // YYYY-MM-DDThh:mm:ss.sss return StreamableToString(time_struct.tm_year + 1900) + "-" + - String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" + - String::FormatIntWidth2(time_struct.tm_mday) + "T" + - String::FormatIntWidth2(time_struct.tm_hour) + ":" + - String::FormatIntWidth2(time_struct.tm_min) + ":" + - String::FormatIntWidth2(time_struct.tm_sec) + "." + - String::FormatIntWidthN(static_cast(ms % 1000), 3); + String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" + + String::FormatIntWidth2(time_struct.tm_mday) + "T" + + String::FormatIntWidth2(time_struct.tm_hour) + ":" + + String::FormatIntWidth2(time_struct.tm_min) + ":" + + String::FormatIntWidth2(time_struct.tm_sec) + "." + + String::FormatIntWidthN(static_cast(ms % 1000), 3); } // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. 
@@ -4156,8 +4140,8 @@ void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream, for (;;) { const char* const next_segment = strstr(segment, "]]>"); if (next_segment != nullptr) { - stream->write( - segment, static_cast(next_segment - segment)); + stream->write(segment, + static_cast(next_segment - segment)); *stream << "]]>]]>"); } else { @@ -4169,15 +4153,13 @@ void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream, } void XmlUnitTestResultPrinter::OutputXmlAttribute( - std::ostream* stream, - const std::string& element_name, - const std::string& name, - const std::string& value) { + std::ostream* stream, const std::string& element_name, + const std::string& name, const std::string& value) { const std::vector& allowed_names = GetReservedOutputAttributesForElement(element_name); GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != - allowed_names.end()) + allowed_names.end()) << "Attribute " << name << " is not allowed for element <" << element_name << ">."; @@ -4243,10 +4225,11 @@ void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream, OutputXmlAttribute(stream, kTestsuite, "type_param", test_info.type_param()); } + + OutputXmlAttribute(stream, kTestsuite, "file", test_info.file()); + OutputXmlAttribute(stream, kTestsuite, "line", + StreamableToString(test_info.line())); if (GTEST_FLAG_GET(list_tests)) { - OutputXmlAttribute(stream, kTestsuite, "file", test_info.file()); - OutputXmlAttribute(stream, kTestsuite, "line", - StreamableToString(test_info.line())); *stream << " />\n"; return; } @@ -4281,8 +4264,7 @@ void XmlUnitTestResultPrinter::OutputXmlTestResult(::std::ostream* stream, internal::FormatCompilerIndependentFileLocation(part.file_name(), part.line_number()); const std::string summary = location + "\n" + part.summary(); - *stream << " "; const std::string detail = location + "\n" + part.message(); OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str()); @@ 
-4423,7 +4405,7 @@ std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes( for (int i = 0; i < result.test_property_count(); ++i) { const TestProperty& property = result.GetTestProperty(i); attributes << " " << property.key() << "=" - << "\"" << EscapeXmlAttribute(property.value()) << "\""; + << "\"" << EscapeXmlAttribute(property.value()) << "\""; } return attributes.GetString(); } @@ -4437,15 +4419,15 @@ void XmlUnitTestResultPrinter::OutputXmlTestProperties( return; } - *stream << "<" << kProperties << ">\n"; + *stream << " <" << kProperties << ">\n"; for (int i = 0; i < result.test_property_count(); ++i) { const TestProperty& property = result.GetTestProperty(i); - *stream << "<" << kProperty; + *stream << " <" << kProperty; *stream << " name=\"" << EscapeXmlAttribute(property.key()) << "\""; *stream << " value=\"" << EscapeXmlAttribute(property.value()) << "\""; *stream << "/>\n"; } - *stream << "\n"; + *stream << " \n"; } // End XmlUnitTestResultPrinter @@ -4469,16 +4451,12 @@ class JsonUnitTestResultPrinter : public EmptyTestEventListener { //// streams the attribute as JSON. static void OutputJsonKey(std::ostream* stream, const std::string& element_name, - const std::string& name, - const std::string& value, - const std::string& indent, - bool comma = true); + const std::string& name, const std::string& value, + const std::string& indent, bool comma = true); static void OutputJsonKey(std::ostream* stream, const std::string& element_name, - const std::string& name, - int value, - const std::string& indent, - bool comma = true); + const std::string& name, int value, + const std::string& indent, bool comma = true); // Streams a test suite JSON stanza containing the given test result. // @@ -4511,7 +4489,9 @@ class JsonUnitTestResultPrinter : public EmptyTestEventListener { // The output file. 
const std::string output_file_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(JsonUnitTestResultPrinter); + JsonUnitTestResultPrinter(const JsonUnitTestResultPrinter&) = delete; + JsonUnitTestResultPrinter& operator=(const JsonUnitTestResultPrinter&) = + delete; }; // Creates a new JsonUnitTestResultPrinter. @@ -4523,7 +4503,7 @@ JsonUnitTestResultPrinter::JsonUnitTestResultPrinter(const char* output_file) } void JsonUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, - int /*iteration*/) { + int /*iteration*/) { FILE* jsonout = OpenFileForWriting(output_file_); std::stringstream stream; PrintJsonUnitTest(&stream, unit_test); @@ -4589,55 +4569,48 @@ static std::string FormatEpochTimeInMillisAsRFC3339(TimeInMillis ms) { return ""; // YYYY-MM-DDThh:mm:ss return StreamableToString(time_struct.tm_year + 1900) + "-" + - String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" + - String::FormatIntWidth2(time_struct.tm_mday) + "T" + - String::FormatIntWidth2(time_struct.tm_hour) + ":" + - String::FormatIntWidth2(time_struct.tm_min) + ":" + - String::FormatIntWidth2(time_struct.tm_sec) + "Z"; + String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" + + String::FormatIntWidth2(time_struct.tm_mday) + "T" + + String::FormatIntWidth2(time_struct.tm_hour) + ":" + + String::FormatIntWidth2(time_struct.tm_min) + ":" + + String::FormatIntWidth2(time_struct.tm_sec) + "Z"; } static inline std::string Indent(size_t width) { return std::string(width, ' '); } -void JsonUnitTestResultPrinter::OutputJsonKey( - std::ostream* stream, - const std::string& element_name, - const std::string& name, - const std::string& value, - const std::string& indent, - bool comma) { +void JsonUnitTestResultPrinter::OutputJsonKey(std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value, + const std::string& indent, + bool comma) { const std::vector& allowed_names = GetReservedOutputAttributesForElement(element_name); 
GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != - allowed_names.end()) + allowed_names.end()) << "Key \"" << name << "\" is not allowed for value \"" << element_name << "\"."; *stream << indent << "\"" << name << "\": \"" << EscapeJson(value) << "\""; - if (comma) - *stream << ",\n"; + if (comma) *stream << ",\n"; } void JsonUnitTestResultPrinter::OutputJsonKey( - std::ostream* stream, - const std::string& element_name, - const std::string& name, - int value, - const std::string& indent, - bool comma) { + std::ostream* stream, const std::string& element_name, + const std::string& name, int value, const std::string& indent, bool comma) { const std::vector& allowed_names = GetReservedOutputAttributesForElement(element_name); GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != - allowed_names.end()) + allowed_names.end()) << "Key \"" << name << "\" is not allowed for value \"" << element_name << "\"."; *stream << indent << "\"" << name << "\": " << StreamableToString(value); - if (comma) - *stream << ",\n"; + if (comma) *stream << ",\n"; } // Streams a test suite JSON stanza containing the given test result. 
@@ -4701,11 +4674,14 @@ void JsonUnitTestResultPrinter::OutputJsonTestInfo(::std::ostream* stream, OutputJsonKey(stream, kTestsuite, "type_param", test_info.type_param(), kIndent); } + + OutputJsonKey(stream, kTestsuite, "file", test_info.file(), kIndent); + OutputJsonKey(stream, kTestsuite, "line", test_info.line(), kIndent, false); if (GTEST_FLAG_GET(list_tests)) { - OutputJsonKey(stream, kTestsuite, "file", test_info.file(), kIndent); - OutputJsonKey(stream, kTestsuite, "line", test_info.line(), kIndent, false); *stream << "\n" << Indent(8) << "}"; return; + } else { + *stream << ",\n"; } OutputJsonKey(stream, kTestsuite, "status", @@ -4737,7 +4713,9 @@ void JsonUnitTestResultPrinter::OutputJsonTestResult(::std::ostream* stream, if (part.failed()) { *stream << ",\n"; if (++failures == 1) { - *stream << kIndent << "\"" << "failures" << "\": [\n"; + *stream << kIndent << "\"" + << "failures" + << "\": [\n"; } const std::string location = internal::FormatCompilerIndependentFileLocation(part.file_name(), @@ -4750,8 +4728,7 @@ void JsonUnitTestResultPrinter::OutputJsonTestResult(::std::ostream* stream, } } - if (failures > 0) - *stream << "\n" << kIndent << "]"; + if (failures > 0) *stream << "\n" << kIndent << "]"; *stream << "\n" << Indent(8) << "}"; } @@ -4847,7 +4824,9 @@ void JsonUnitTestResultPrinter::PrintJsonUnitTest(std::ostream* stream, OutputJsonTestSuiteForTestResult(stream, unit_test.ad_hoc_test_result()); } - *stream << "\n" << kIndent << "]\n" << "}\n"; + *stream << "\n" + << kIndent << "]\n" + << "}\n"; } void JsonUnitTestResultPrinter::PrintJsonTestList( @@ -4882,7 +4861,8 @@ std::string JsonUnitTestResultPrinter::TestPropertiesAsJson( Message attributes; for (int i = 0; i < result.test_property_count(); ++i) { const TestProperty& property = result.GetTestProperty(i); - attributes << ",\n" << indent << "\"" << property.key() << "\": " + attributes << ",\n" + << indent << "\"" << property.key() << "\": " << "\"" << EscapeJson(property.value()) << 
"\""; } return attributes.GetString(); @@ -4922,14 +4902,14 @@ void StreamingListener::SocketWriter::MakeConnection() { addrinfo hints; memset(&hints, 0, sizeof(hints)); - hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses. + hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses. hints.ai_socktype = SOCK_STREAM; addrinfo* servinfo = nullptr; // Use the getaddrinfo() to get a linked list of IP addresses for // the given host name. - const int error_num = getaddrinfo( - host_name_.c_str(), port_num_.c_str(), &hints, &servinfo); + const int error_num = + getaddrinfo(host_name_.c_str(), port_num_.c_str(), &hints, &servinfo); if (error_num != 0) { GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: " << gai_strerror(error_num); @@ -4938,8 +4918,8 @@ void StreamingListener::SocketWriter::MakeConnection() { // Loop through all the results and connect to the first we can. for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != nullptr; cur_addr = cur_addr->ai_next) { - sockfd_ = socket( - cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol); + sockfd_ = socket(cur_addr->ai_family, cur_addr->ai_socktype, + cur_addr->ai_protocol); if (sockfd_ != -1) { // Connect the client socket to the server socket. if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) { @@ -5008,7 +4988,7 @@ std::string OsStackTraceGetter::CurrentStackTrace(int max_depth, int skip_count) return result; -#else // !GTEST_HAS_ABSL +#else // !GTEST_HAS_ABSL static_cast(max_depth); static_cast(skip_count); return ""; @@ -5032,14 +5012,14 @@ void OsStackTraceGetter::UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_) { class ScopedPrematureExitFile { public: explicit ScopedPrematureExitFile(const char* premature_exit_filepath) - : premature_exit_filepath_(premature_exit_filepath ? - premature_exit_filepath : "") { + : premature_exit_filepath_( + premature_exit_filepath ? 
premature_exit_filepath : "") { // If a path to the premature-exit file is specified... if (!premature_exit_filepath_.empty()) { // create the file with a single "0" character in it. I/O // errors are ignored as there's nothing better we can do and we // don't want to fail the test because of this. - FILE* pfile = posix::FOpen(premature_exit_filepath, "w"); + FILE* pfile = posix::FOpen(premature_exit_filepath_.c_str(), "w"); fwrite("0", 1, 1, pfile); fclose(pfile); } @@ -5061,7 +5041,8 @@ class ScopedPrematureExitFile { private: const std::string premature_exit_filepath_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile); + ScopedPrematureExitFile(const ScopedPrematureExitFile&) = delete; + ScopedPrematureExitFile& operator=(const ScopedPrematureExitFile&) = delete; }; } // namespace internal @@ -5235,7 +5216,7 @@ int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } // Gets the time of the test program start, in ms from the start of the // UNIX epoch. internal::TimeInMillis UnitTest::start_timestamp() const { - return impl()->start_timestamp(); + return impl()->start_timestamp(); } // Gets the elapsed time, in milliseconds. @@ -5278,9 +5259,7 @@ TestSuite* UnitTest::GetMutableTestSuite(int i) { // Returns the list of event listeners that can be used to track events // inside Google Test. -TestEventListeners& UnitTest::listeners() { - return *impl()->listeners(); -} +TestEventListeners& UnitTest::listeners() { return *impl()->listeners(); } // Registers and returns a global test environment. When a test // program is run, all global test environments will be set-up in the @@ -5305,12 +5284,11 @@ Environment* UnitTest::AddEnvironment(Environment* env) { // assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call // this to report their results. The user code should use the // assertion macros instead of calling this directly. 
-void UnitTest::AddTestPartResult( - TestPartResult::Type result_type, - const char* file_name, - int line_number, - const std::string& message, - const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) { +void UnitTest::AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, int line_number, + const std::string& message, + const std::string& os_stack_trace) + GTEST_LOCK_EXCLUDED_(mutex_) { Message msg; msg << message; @@ -5320,8 +5298,9 @@ void UnitTest::AddTestPartResult( for (size_t i = impl_->gtest_trace_stack().size(); i > 0; --i) { const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1]; - msg << "\n" << internal::FormatFileLocation(trace.file, trace.line) - << " " << trace.message; + msg << "\n" + << internal::FormatFileLocation(trace.file, trace.line) << " " + << trace.message; } } @@ -5331,8 +5310,8 @@ void UnitTest::AddTestPartResult( const TestPartResult result = TestPartResult( result_type, file_name, line_number, msg.GetString().c_str()); - impl_->GetTestPartResultReporterForCurrentThread()-> - ReportTestPartResult(result); + impl_->GetTestPartResultReporterForCurrentThread()->ReportTestPartResult( + result); if (result_type != TestPartResult::kSuccess && result_type != TestPartResult::kSkip) { @@ -5425,20 +5404,20 @@ int UnitTest::Run() { // process. In either case the user does not want to see pop-up dialogs // about crashes - they are expected. if (impl()->catch_exceptions() || in_death_test_child_process) { -# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT +#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT // SetErrorMode doesn't exist on CE. 
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); -# endif // !GTEST_OS_WINDOWS_MOBILE +#endif // !GTEST_OS_WINDOWS_MOBILE -# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE +#if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE // Death test children can be terminated with _abort(). On Windows, // _abort() can show a dialog with a warning message. This forces the // abort message to go to stderr instead. _set_error_mode(_OUT_TO_STDERR); -# endif +#endif -# if defined(_MSC_VER) && !GTEST_OS_WINDOWS_MOBILE +#if defined(_MSC_VER) && !GTEST_OS_WINDOWS_MOBILE // In the debug version, Visual Studio pops up a separate dialog // offering a choice to debug the aborted program. We need to suppress // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement @@ -5458,14 +5437,15 @@ int UnitTest::Run() { _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); (void)_CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); } -# endif +#endif } #endif // GTEST_OS_WINDOWS return internal::HandleExceptionsInMethodIfSupported( - impl(), - &internal::UnitTestImpl::RunAllTests, - "auxiliary test code (environments or event listeners)") ? 0 : 1; + impl(), &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") + ? 0 + : 1; } // Returns the working directory when the first TEST() or TEST_F() was @@ -5510,14 +5490,10 @@ UnitTest::parameterized_test_registry() GTEST_LOCK_EXCLUDED_(mutex_) { } // Creates an empty UnitTest. -UnitTest::UnitTest() { - impl_ = new internal::UnitTestImpl(this); -} +UnitTest::UnitTest() { impl_ = new internal::UnitTestImpl(this); } // Destructor of UnitTest. -UnitTest::~UnitTest() { - delete impl_; -} +UnitTest::~UnitTest() { delete impl_; } // Pushes a trace defined by SCOPED_TRACE() on to the per-thread // Google Test trace stack. 
@@ -5528,8 +5504,7 @@ void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) } // Pops a trace from the per-thread Google Test trace stack. -void UnitTest::PopGTestTrace() - GTEST_LOCK_EXCLUDED_(mutex_) { +void UnitTest::PopGTestTrace() GTEST_LOCK_EXCLUDED_(mutex_) { internal::MutexLock lock(&mutex_); impl_->gtest_trace_stack().pop_back(); } @@ -5630,8 +5605,8 @@ void UnitTestImpl::ConfigureStreamingOutput() { if (!target.empty()) { const size_t pos = target.find(':'); if (pos != std::string::npos) { - listeners()->Append(new StreamingListener(target.substr(0, pos), - target.substr(pos+1))); + listeners()->Append( + new StreamingListener(target.substr(0, pos), target.substr(pos + 1))); } else { GTEST_LOG_(WARNING) << "unrecognized streaming target \"" << target << "\" ignored."; @@ -5737,9 +5712,9 @@ TestSuite* UnitTestImpl::GetTestSuite( auto* const new_test_suite = new TestSuite(test_suite_name, type_param, set_up_tc, tear_down_tc); + const UnitTestFilter death_test_suite_filter(kDeathTestSuiteFilter); // Is this a death test suite? - if (internal::UnitTestOptions::MatchesFilter(test_suite_name, - kDeathTestSuiteFilter)) { + if (death_test_suite_filter.MatchesName(test_suite_name)) { // Yes. Inserts the test suite after the last death test suite // defined so far. This only works when the test suites haven't // been shuffled. Otherwise we may end up running a death test @@ -5776,8 +5751,7 @@ bool UnitTestImpl::RunAllTests() { const bool gtest_is_initialized_before_run_all_tests = GTestIsInitialized(); // Do not run any test if the --help flag was specified. - if (g_help_flag) - return true; + if (g_help_flag) return true; // Repeats the call to the post-flag parsing initialization in case the // user didn't call InitGoogleTest. 
@@ -5795,11 +5769,11 @@ bool UnitTestImpl::RunAllTests() { #if GTEST_HAS_DEATH_TEST in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != nullptr); -# if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_) +#if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_) if (in_subprocess_for_death_test) { GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_(); } -# endif // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_) +#endif // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_) #endif // GTEST_HAS_DEATH_TEST const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex, @@ -5807,9 +5781,9 @@ bool UnitTestImpl::RunAllTests() { // Compares the full test names with the filter to decide which // tests to run. - const bool has_tests_to_run = FilterTests(should_shard - ? HONOR_SHARDING_PROTOCOL - : IGNORE_SHARDING_PROTOCOL) > 0; + const bool has_tests_to_run = + FilterTests(should_shard ? HONOR_SHARDING_PROTOCOL + : IGNORE_SHARDING_PROTOCOL) > 0; // Lists the tests and exits if the --gtest_list_tests flag was specified. if (GTEST_FLAG_GET(list_tests)) { @@ -5818,9 +5792,7 @@ bool UnitTestImpl::RunAllTests() { return true; } - random_seed_ = GTEST_FLAG_GET(shuffle) - ? GetRandomSeedFromFlag(GTEST_FLAG_GET(random_seed)) - : 0; + random_seed_ = GetRandomSeedFromFlag(GTEST_FLAG_GET(random_seed)); // True if and only if at least one test has failed. bool failed = false; @@ -5994,8 +5966,7 @@ void WriteToShardStatusFileIfNeeded() { // an error and exits. If in_subprocess_for_death_test, sharding is // disabled because it must only be applied to the original test // process. Otherwise, we could filter out death tests we intended to execute. 
-bool ShouldShard(const char* total_shards_env, - const char* shard_index_env, +bool ShouldShard(const char* total_shards_env, const char* shard_index_env, bool in_subprocess_for_death_test) { if (in_subprocess_for_death_test) { return false; @@ -6007,27 +5978,27 @@ bool ShouldShard(const char* total_shards_env, if (total_shards == -1 && shard_index == -1) { return false; } else if (total_shards == -1 && shard_index != -1) { - const Message msg = Message() - << "Invalid environment variables: you have " - << kTestShardIndex << " = " << shard_index - << ", but have left " << kTestTotalShards << " unset.\n"; + const Message msg = Message() << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards + << " unset.\n"; ColoredPrintf(GTestColor::kRed, "%s", msg.GetString().c_str()); fflush(stdout); exit(EXIT_FAILURE); } else if (total_shards != -1 && shard_index == -1) { const Message msg = Message() - << "Invalid environment variables: you have " - << kTestTotalShards << " = " << total_shards - << ", but have left " << kTestShardIndex << " unset.\n"; + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; ColoredPrintf(GTestColor::kRed, "%s", msg.GetString().c_str()); fflush(stdout); exit(EXIT_FAILURE); } else if (shard_index < 0 || shard_index >= total_shards) { - const Message msg = Message() - << "Invalid environment variables: we require 0 <= " - << kTestShardIndex << " < " << kTestTotalShards - << ", but you have " << kTestShardIndex << "=" << shard_index - << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + const Message msg = + Message() << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; 
ColoredPrintf(GTestColor::kRed, "%s", msg.GetString().c_str()); fflush(stdout); exit(EXIT_FAILURE); @@ -6069,11 +6040,16 @@ bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) { // https://github.com/google/googletest/blob/master/googletest/docs/advanced.md // . Returns the number of tests that should run. int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { - const int32_t total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ? - Int32FromEnvOrDie(kTestTotalShards, -1) : -1; - const int32_t shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? - Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + const int32_t total_shards = shard_tests == HONOR_SHARDING_PROTOCOL + ? Int32FromEnvOrDie(kTestTotalShards, -1) + : -1; + const int32_t shard_index = shard_tests == HONOR_SHARDING_PROTOCOL + ? Int32FromEnvOrDie(kTestShardIndex, -1) + : -1; + const PositiveAndNegativeUnitTestFilter gtest_flag_filter( + GTEST_FLAG_GET(filter)); + const UnitTestFilter disable_test_filter(kDisableTestFilter); // num_runnable_tests are the number of tests that will // run across all shards (i.e., match filter and are not disabled). // num_selected_tests are the number of tests to be run on @@ -6089,14 +6065,13 @@ int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { const std::string test_name(test_info->name()); // A test is disabled if test suite name or test name matches // kDisableTestFilter. 
- const bool is_disabled = internal::UnitTestOptions::MatchesFilter( - test_suite_name, kDisableTestFilter) || - internal::UnitTestOptions::MatchesFilter( - test_name, kDisableTestFilter); + const bool is_disabled = + disable_test_filter.MatchesName(test_suite_name) || + disable_test_filter.MatchesName(test_name); test_info->is_disabled_ = is_disabled; - const bool matches_filter = internal::UnitTestOptions::FilterMatchesTest( - test_suite_name, test_name); + const bool matches_filter = + gtest_flag_filter.MatchesTest(test_suite_name, test_name); test_info->matches_filter_ = matches_filter; const bool is_runnable = @@ -6269,8 +6244,8 @@ void UnitTestImpl::UnshuffleTests() { // For example, if Foo() calls Bar(), which in turn calls // GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in // the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. -std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, - int skip_count) { +GTEST_NO_INLINE_ GTEST_NO_TAIL_CALL_ std::string +GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, int skip_count) { // We pass skip_count + 1 to skip this wrapper function in addition // to what the user really wants to skip. return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); @@ -6280,7 +6255,7 @@ std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, // suppress unreachable code warnings. namespace { class ClassUniqueToAlwaysTrue {}; -} +} // namespace bool IsTrue(bool condition) { return condition; } @@ -6288,8 +6263,7 @@ bool AlwaysTrue() { #if GTEST_HAS_EXCEPTIONS // This condition is always false so AlwaysTrue() never actually throws, // but it makes the compiler think that it may throw. 
- if (IsTrue(false)) - throw ClassUniqueToAlwaysTrue(); + if (IsTrue(false)) throw ClassUniqueToAlwaysTrue(); #endif // GTEST_HAS_EXCEPTIONS return true; } @@ -6401,8 +6375,7 @@ static bool ParseFlag(const char* str, const char* flag_name, String* value) { // GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test // internal flags and do not trigger the help message. static bool HasGoogleTestFlagPrefix(const char* str) { - return (SkipPrefix("--", &str) || - SkipPrefix("-", &str) || + return (SkipPrefix("--", &str) || SkipPrefix("-", &str) || SkipPrefix("/", &str)) && !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) && (SkipPrefix(GTEST_FLAG_PREFIX_, &str) || @@ -6506,18 +6479,18 @@ static const char kColorEncodedHelpMessage[] = " Generate a JSON or XML report in the given directory or with the " "given\n" " file name. @YFILE_PATH@D defaults to @Gtest_detail.xml@D.\n" -# if GTEST_CAN_STREAM_RESULTS_ +#if GTEST_CAN_STREAM_RESULTS_ " @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n" " Stream test results to the given server.\n" -# endif // GTEST_CAN_STREAM_RESULTS_ +#endif // GTEST_CAN_STREAM_RESULTS_ "\n" "Assertion Behavior:\n" -# if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS " @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n" " Set the default death test style.\n" -# endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS " @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n" " Turn assertion failures into debugger break-points.\n" @@ -6594,10 +6567,8 @@ static void LoadFlagsFromFile(const std::string& path) { std::vector lines; SplitString(contents, '\n', &lines); for (size_t i = 0; i < lines.size(); ++i) { - if (lines[i].empty()) - continue; - if (!ParseGoogleTestFlag(lines[i].c_str())) - g_help_flag = true; + if (lines[i].empty()) continue; + if (!ParseGoogleTestFlag(lines[i].c_str())) g_help_flag = true; } } 
#endif // GTEST_USE_OWN_FLAGFILE_FLAG_ @@ -6623,9 +6594,7 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) { LoadFlagsFromFile(flagfile_value); remove_flag = true; #endif // GTEST_USE_OWN_FLAGFILE_FLAG_ - } else if (arg_string == "--help" || arg_string == "-h" || - arg_string == "-?" || arg_string == "/?" || - HasGoogleTestFlagPrefix(arg)) { + } else if (arg_string == "--help" || HasGoogleTestFlagPrefix(arg)) { // Both help flag and unrecognized Google Test flags (excluding // internal ones) trigger help display. g_help_flag = true; @@ -6660,7 +6629,27 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) { // Parses the command line for Google Test flags, without initializing // other parts of Google Test. void ParseGoogleTestFlagsOnly(int* argc, char** argv) { +#if GTEST_HAS_ABSL + if (*argc > 0) { + // absl::ParseCommandLine() requires *argc > 0. + auto positional_args = absl::flags_internal::ParseCommandLineImpl( + *argc, argv, absl::flags_internal::ArgvListAction::kRemoveParsedArgs, + absl::flags_internal::UsageFlagsAction::kHandleUsage, + absl::flags_internal::OnUndefinedFlag::kReportUndefined); + // Any command-line positional arguments not part of any command-line flag + // (or arguments to a flag) are copied back out to argv, with the program + // invocation name at position 0, and argc is resized. This includes + // positional arguments after the flag-terminating delimiter '--'. + // See https://abseil.io/docs/cpp/guides/flags. 
+ std::copy(positional_args.begin(), positional_args.end(), argv); + if (static_cast(positional_args.size()) < *argc) { + argv[positional_args.size()] = nullptr; + *argc = static_cast(positional_args.size()); + } + } +#else ParseGoogleTestFlagsOnlyImpl(argc, argv); +#endif // Fix the value of *_NSGetArgc() on macOS, but if and only if // *_NSGetArgv() == argv @@ -6695,6 +6684,12 @@ void InitGoogleTestImpl(int* argc, CharType** argv) { #if GTEST_HAS_ABSL absl::InitializeSymbolizer(g_argvs[0].c_str()); + + // When using the Abseil Flags library, set the program usage message to the + // help message, but remove the color-encoding from the message first. + absl::SetProgramUsageMessage(absl::StrReplaceAll( + kColorEncodedHelpMessage, + {{"@D", ""}, {"@R", ""}, {"@G", ""}, {"@Y", ""}, {"@@", "@"}})); #endif // GTEST_HAS_ABSL ParseGoogleTestFlagsOnly(argc, argv); @@ -6715,7 +6710,7 @@ void InitGoogleTestImpl(int* argc, CharType** argv) { void InitGoogleTest(int* argc, char** argv) { #if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv); -#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) +#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) internal::InitGoogleTestImpl(argc, argv); #endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) } @@ -6725,7 +6720,7 @@ void InitGoogleTest(int* argc, char** argv) { void InitGoogleTest(int* argc, wchar_t** argv) { #if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv); -#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) +#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) internal::InitGoogleTestImpl(argc, argv); #endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) } @@ -6741,42 +6736,42 @@ void InitGoogleTest() { #if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(&argc, argv); -#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) +#else // 
defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) internal::InitGoogleTestImpl(&argc, argv); #endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) } +#if !defined(GTEST_CUSTOM_TEMPDIR_FUNCTION_) +// Return value of first environment variable that is set and contains +// a non-empty string. If there are none, return the "fallback" string. +// Since we like the temporary directory to have a directory separator suffix, +// add it if not provided in the environment variable value. +static std::string GetTempDirFromEnv( + std::initializer_list environment_variables, + const char* fallback, char separator) { + for (const char* variable_name : environment_variables) { + const char* value = internal::posix::GetEnv(variable_name); + if (value != nullptr && value[0] != '\0') { + if (value[strlen(value) - 1] != separator) { + return std::string(value).append(1, separator); + } + return value; + } + } + return fallback; +} +#endif + std::string TempDir() { #if defined(GTEST_CUSTOM_TEMPDIR_FUNCTION_) return GTEST_CUSTOM_TEMPDIR_FUNCTION_(); -#elif GTEST_OS_WINDOWS_MOBILE - return "\\temp\\"; -#elif GTEST_OS_WINDOWS - const char* temp_dir = internal::posix::GetEnv("TEMP"); - if (temp_dir == nullptr || temp_dir[0] == '\0') { - return "\\temp\\"; - } else if (temp_dir[strlen(temp_dir) - 1] == '\\') { - return temp_dir; - } else { - return std::string(temp_dir) + "\\"; - } +#elif GTEST_OS_WINDOWS || GTEST_OS_WINDOWS_MOBILE + return GetTempDirFromEnv({"TEST_TMPDIR", "TEMP"}, "\\temp\\", '\\'); #elif GTEST_OS_LINUX_ANDROID - const char* temp_dir = internal::posix::GetEnv("TEST_TMPDIR"); - if (temp_dir == nullptr || temp_dir[0] == '\0') { - return "/data/local/tmp/"; - } else { - return temp_dir; - } -#elif GTEST_OS_LINUX - const char* temp_dir = internal::posix::GetEnv("TEST_TMPDIR"); - if (temp_dir == nullptr || temp_dir[0] == '\0') { - return "/tmp/"; - } else { - return temp_dir; - } + return GetTempDirFromEnv({"TEST_TMPDIR", "TMPDIR"}, "/data/local/tmp/", '/'); #else - return 
"/tmp/"; -#endif // GTEST_OS_WINDOWS_MOBILE + return GetTempDirFromEnv({"TEST_TMPDIR", "TMPDIR"}, "/tmp/", '/'); +#endif } // Class ScopedTrace @@ -6793,8 +6788,7 @@ void ScopedTrace::PushTrace(const char* file, int line, std::string message) { } // Pops the info pushed by the c'tor. -ScopedTrace::~ScopedTrace() - GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { +ScopedTrace::~ScopedTrace() GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { UnitTest::GetInstance()->PopGTestTrace(); } diff --git a/ext/googletest/googletest/src/gtest_main.cc b/ext/googletest/googletest/src/gtest_main.cc index 46b27c3d7d..44976375c9 100644 --- a/ext/googletest/googletest/src/gtest_main.cc +++ b/ext/googletest/googletest/src/gtest_main.cc @@ -28,15 +28,14 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include + #include "gtest/gtest.h" #if GTEST_OS_ESP8266 || GTEST_OS_ESP32 #if GTEST_OS_ESP8266 extern "C" { #endif -void setup() { - testing::InitGoogleTest(); -} +void setup() { testing::InitGoogleTest(); } void loop() { RUN_ALL_TESTS(); } diff --git a/ext/googletest/googletest/test/BUILD.bazel b/ext/googletest/googletest/test/BUILD.bazel index 7b78555ed2..7754c1303f 100644 --- a/ext/googletest/googletest/test/BUILD.bazel +++ b/ext/googletest/googletest/test/BUILD.bazel @@ -30,7 +30,6 @@ # # Bazel BUILD for The Google C++ Testing Framework (Google Test) -load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") load("@rules_python//python:defs.bzl", "py_library", "py_test") licenses(["notice"]) @@ -175,6 +174,10 @@ py_test( name = "gtest_help_test", size = "small", srcs = ["gtest_help_test.py"], + args = select({ + "//:has_absl": ["--has_absl_flags"], + "//conditions:default": [], + }), data = [":gtest_help_test_"], deps = [":gtest_test_utils"], ) diff --git a/ext/googletest/googletest/test/googletest-break-on-failure-unittest.py b/ext/googletest/googletest/test/googletest-break-on-failure-unittest.py index a5dfbc693b..4eafba3e6b 100755 --- 
a/ext/googletest/googletest/test/googletest-break-on-failure-unittest.py +++ b/ext/googletest/googletest/test/googletest-break-on-failure-unittest.py @@ -39,7 +39,7 @@ Google Test) with different environments and command line flags. """ import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/ext/googletest/googletest/test/googletest-break-on-failure-unittest_.cc b/ext/googletest/googletest/test/googletest-break-on-failure-unittest_.cc index f84957a2d0..324294f33e 100644 --- a/ext/googletest/googletest/test/googletest-break-on-failure-unittest_.cc +++ b/ext/googletest/googletest/test/googletest-break-on-failure-unittest_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Unit test for Google Test's break-on-failure mode. // // A user can ask Google Test to seg-fault when an assertion fails, using @@ -41,34 +40,32 @@ #include "gtest/gtest.h" #if GTEST_OS_WINDOWS -# include -# include +#include +#include #endif namespace { // A test that's expected to fail. -TEST(Foo, Bar) { - EXPECT_EQ(2, 3); -} +TEST(Foo, Bar) { EXPECT_EQ(2, 3); } #if GTEST_HAS_SEH && !GTEST_OS_WINDOWS_MOBILE // On Windows Mobile global exception handlers are not supported. -LONG WINAPI ExitWithExceptionCode( - struct _EXCEPTION_POINTERS* exception_pointers) { +LONG WINAPI +ExitWithExceptionCode(struct _EXCEPTION_POINTERS* exception_pointers) { exit(exception_pointers->ExceptionRecord->ExceptionCode); } #endif } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { #if GTEST_OS_WINDOWS // Suppresses display of the Windows error dialog upon encountering // a general protection fault (segment violation). 
SetErrorMode(SEM_NOGPFAULTERRORBOX | SEM_FAILCRITICALERRORS); -# if GTEST_HAS_SEH && !GTEST_OS_WINDOWS_MOBILE +#if GTEST_HAS_SEH && !GTEST_OS_WINDOWS_MOBILE // The default unhandled exception filter does not always exit // with the exception code as exit code - for example it exits with @@ -78,7 +75,7 @@ int main(int argc, char **argv) { // exceptions. SetUnhandledExceptionFilter(ExitWithExceptionCode); -# endif +#endif #endif // GTEST_OS_WINDOWS testing::InitGoogleTest(&argc, argv); diff --git a/ext/googletest/googletest/test/googletest-catch-exceptions-test.py b/ext/googletest/googletest/test/googletest-catch-exceptions-test.py index 94a5b33f23..d38d91a62a 100755 --- a/ext/googletest/googletest/test/googletest-catch-exceptions-test.py +++ b/ext/googletest/googletest/test/googletest-catch-exceptions-test.py @@ -35,7 +35,7 @@ googletest-catch-exceptions-ex-test_ (programs written with Google Test) and verifies their output. """ -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. FLAG_PREFIX = '--gtest_' @@ -147,19 +147,19 @@ class CatchCxxExceptionsTest(gtest_test_utils.TestCase): self.assertTrue( 'CxxExceptionInConstructorTest::TearDownTestSuite() ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest constructor ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest::SetUp() ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest test body ' 'called as expected.' 
in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) diff --git a/ext/googletest/googletest/test/googletest-catch-exceptions-test_.cc b/ext/googletest/googletest/test/googletest-catch-exceptions-test_.cc index 8c127d40b1..3c8f4f4b79 100644 --- a/ext/googletest/googletest/test/googletest-catch-exceptions-test_.cc +++ b/ext/googletest/googletest/test/googletest-catch-exceptions-test_.cc @@ -32,18 +32,18 @@ // exceptions, and the output is verified by // googletest-catch-exceptions-test.py. -#include // NOLINT +#include // NOLINT #include // For exit(). #include "gtest/gtest.h" #if GTEST_HAS_SEH -# include +#include #endif #if GTEST_HAS_EXCEPTIONS -# include // For set_terminate(). -# include +#include // For set_terminate(). +#include #endif using testing::Test; @@ -93,9 +93,7 @@ class SehExceptionInTearDownTest : public Test { TEST_F(SehExceptionInTearDownTest, ThrowsExceptionInTearDown) {} -TEST(SehExceptionTest, ThrowsSehException) { - RaiseException(42, 0, 0, NULL); -} +TEST(SehExceptionTest, ThrowsSehException) { RaiseException(42, 0, 0, NULL); } #endif // GTEST_HAS_SEH @@ -269,9 +267,7 @@ TEST_F(CxxExceptionInTestBodyTest, ThrowsStdCxxException) { throw std::runtime_error("Standard C++ exception"); } -TEST(CxxExceptionTest, ThrowsNonStdCxxException) { - throw "C-string"; -} +TEST(CxxExceptionTest, ThrowsNonStdCxxException) { throw "C-string"; } // This terminate handler aborts the program using exit() rather than abort(). 
// This avoids showing pop-ups on Windows systems and core dumps on Unix-like diff --git a/ext/googletest/googletest/test/googletest-color-test.py b/ext/googletest/googletest/test/googletest-color-test.py index f3b7c9990b..c22752db82 100755 --- a/ext/googletest/googletest/test/googletest-color-test.py +++ b/ext/googletest/googletest/test/googletest-color-test.py @@ -32,7 +32,7 @@ """Verifies that Google Test correctly determines whether to use colors.""" import os -import gtest_test_utils +from googletest.test import gtest_test_utils IS_WINDOWS = os.name == 'nt' diff --git a/ext/googletest/googletest/test/googletest-color-test_.cc b/ext/googletest/googletest/test/googletest-color-test_.cc index 220a3a0054..55657b7262 100644 --- a/ext/googletest/googletest/test/googletest-color-test_.cc +++ b/ext/googletest/googletest/test/googletest-color-test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // A helper program for testing how Google Test determines whether to use // colors in the output. It prints "YES" and returns 1 if Google Test // decides to use colors, and prints "NO" and returns 0 otherwise. @@ -43,8 +42,7 @@ using testing::internal::ShouldUseColor; // created before main() is entered, and thus that ShouldUseColor() // works the same way as in a real Google-Test-based test. We don't actual // run the TEST itself. -TEST(GTestColorTest, Dummy) { -} +TEST(GTestColorTest, Dummy) {} int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/ext/googletest/googletest/test/googletest-death-test-test.cc b/ext/googletest/googletest/test/googletest-death-test-test.cc index e7e0cd7016..4737ff9bfb 100644 --- a/ext/googletest/googletest/test/googletest-death-test-test.cc +++ b/ext/googletest/googletest/test/googletest-death-test-test.cc @@ -31,7 +31,6 @@ // Tests for death tests. 
#include "gtest/gtest-death-test.h" - #include "gtest/gtest.h" #include "gtest/internal/gtest-filepath.h" @@ -40,25 +39,25 @@ using testing::internal::AlwaysTrue; #if GTEST_HAS_DEATH_TEST -# if GTEST_OS_WINDOWS -# include // For O_BINARY -# include // For chdir(). -# include -# else -# include -# include // For waitpid. -# endif // GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS +#include // For chdir(). +#include // For O_BINARY +#include +#else +#include // For waitpid. +#include +#endif // GTEST_OS_WINDOWS -# include -# include -# include +#include +#include +#include -# if GTEST_OS_LINUX -# include -# endif // GTEST_OS_LINUX +#if GTEST_OS_LINUX +#include +#endif // GTEST_OS_LINUX -# include "gtest/gtest-spi.h" -# include "src/gtest-internal-inl.h" +#include "gtest/gtest-spi.h" +#include "src/gtest-internal-inl.h" namespace posix = ::testing::internal::posix; @@ -90,6 +89,7 @@ class ReplaceDeathTestFactory { unit_test_impl_->death_test_factory_.release(); unit_test_impl_->death_test_factory_.reset(old_factory_); } + private: // Prevents copying ReplaceDeathTestFactory objects. ReplaceDeathTestFactory(const ReplaceDeathTestFactory&); @@ -116,8 +116,7 @@ void DieWithMessage(const ::std::string& message) { // Some compilers can recognize that _exit() never returns and issue the // 'unreachable code' warning for code following this function, unless // fooled by a fake condition. - if (AlwaysTrue()) - _exit(1); + if (AlwaysTrue()) _exit(1); } void DieInside(const ::std::string& function) { @@ -137,8 +136,7 @@ class TestForDeathTest : public testing::Test { // A method of the test fixture that may die. void MemberFunction() { - if (should_die_) - DieInside("MemberFunction"); + if (should_die_) DieInside("MemberFunction"); } // True if and only if MemberFunction() should die. @@ -153,8 +151,7 @@ class MayDie { // A member function that may die. 
void MemberFunction() const { - if (should_die_) - DieInside("MayDie::MemberFunction"); + if (should_die_) DieInside("MayDie::MemberFunction"); } private: @@ -173,8 +170,7 @@ int NonVoidFunction() { // A unary function that may die. void DieIf(bool should_die) { - if (should_die) - DieInside("DieIf"); + if (should_die) DieInside("DieIf"); } // A binary function that may die. @@ -195,16 +191,16 @@ void DeathTestSubroutine() { int DieInDebugElse12(int* sideeffect) { if (sideeffect) *sideeffect = 12; -# ifndef NDEBUG +#ifndef NDEBUG DieInside("DieInDebugElse12"); -# endif // NDEBUG +#endif // NDEBUG return 12; } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS // Death in dbg due to Windows CRT assertion failure, not opt. int DieInCRTDebugElse12(int* sideeffect) { @@ -224,7 +220,7 @@ int DieInCRTDebugElse12(int* sideeffect) { #endif // GTEST_OS_WINDOWS -# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA // Tests the ExitedWithCode predicate. TEST(ExitStatusPredicateTest, ExitedWithCode) { @@ -237,7 +233,7 @@ TEST(ExitStatusPredicateTest, ExitedWithCode) { EXPECT_FALSE(testing::ExitedWithCode(1)(0)); } -# else +#else // Returns the exit status of a process that calls _exit(2) with a // given exit code. This is a helper function for the @@ -270,14 +266,14 @@ static int KilledExitStatus(int signum) { // Tests the ExitedWithCode predicate. 
TEST(ExitStatusPredicateTest, ExitedWithCode) { - const int status0 = NormalExitStatus(0); - const int status1 = NormalExitStatus(1); + const int status0 = NormalExitStatus(0); + const int status1 = NormalExitStatus(1); const int status42 = NormalExitStatus(42); const testing::ExitedWithCode pred0(0); const testing::ExitedWithCode pred1(1); const testing::ExitedWithCode pred42(42); - EXPECT_PRED1(pred0, status0); - EXPECT_PRED1(pred1, status1); + EXPECT_PRED1(pred0, status0); + EXPECT_PRED1(pred1, status1); EXPECT_PRED1(pred42, status42); EXPECT_FALSE(pred0(status1)); EXPECT_FALSE(pred42(status0)); @@ -296,7 +292,7 @@ TEST(ExitStatusPredicateTest, KilledBySignal) { EXPECT_FALSE(pred_kill(status_segv)); } -# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA +#endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA // The following code intentionally tests a suboptimal syntax. #ifdef __GNUC__ @@ -320,8 +316,7 @@ TEST_F(TestForDeathTest, SingleStatement) { // doesn't expand into an "if" statement without an "else" ; - if (AlwaysFalse()) - ASSERT_DEATH(return, "") << "did not die"; + if (AlwaysFalse()) ASSERT_DEATH(return, "") << "did not die"; if (AlwaysFalse()) ; @@ -332,7 +327,7 @@ TEST_F(TestForDeathTest, SingleStatement) { #pragma GCC diagnostic pop #endif -# if GTEST_USES_PCRE +#if GTEST_USES_PCRE void DieWithEmbeddedNul() { fprintf(stderr, "Hello%cmy null world.\n", '\0'); @@ -347,7 +342,7 @@ TEST_F(TestForDeathTest, EmbeddedNulInMessage) { ASSERT_DEATH(DieWithEmbeddedNul(), "my null world"); } -# endif // GTEST_USES_PCRE +#endif // GTEST_USES_PCRE // Tests that death test macros expand to code which interacts well with switch // statements. 
@@ -357,12 +352,12 @@ TEST_F(TestForDeathTest, SwitchStatement) { GTEST_DISABLE_MSC_WARNINGS_PUSH_(4065) switch (0) - default: - ASSERT_DEATH(_exit(1), "") << "exit in default switch handler"; + default: + ASSERT_DEATH(_exit(1), "") << "exit in default switch handler"; switch (0) - case 0: - EXPECT_DEATH(_exit(1), "") << "exit in switch case"; + case 0: + EXPECT_DEATH(_exit(1), "") << "exit in switch case"; GTEST_DISABLE_MSC_WARNINGS_POP_() } @@ -396,8 +391,9 @@ TEST_F(TestForDeathTest, FastDeathTestInChangedDir) { ASSERT_DEATH(_exit(1), ""); } -# if GTEST_OS_LINUX -void SigprofAction(int, siginfo_t*, void*) { /* no op */ } +#if GTEST_OS_LINUX +void SigprofAction(int, siginfo_t*, void*) { /* no op */ +} // Sets SIGPROF action and ITIMER_PROF timer (interval: 1ms). void SetSigprofActionAndTimer() { @@ -448,7 +444,7 @@ TEST_F(TestForDeathTest, ThreadSafeSigprofActionSet) { DisableSigprofActionAndTimer(&old_signal_action); EXPECT_TRUE(old_signal_action.sa_sigaction == SigprofAction); } -# endif // GTEST_OS_LINUX +#endif // GTEST_OS_LINUX // Repeats a representative sample of death tests in the "threadsafe" style: @@ -487,13 +483,11 @@ TEST_F(TestForDeathTest, MixedStyles) { EXPECT_DEATH(_exit(1), ""); } -# if GTEST_HAS_CLONE && GTEST_HAS_PTHREAD +#if GTEST_HAS_CLONE && GTEST_HAS_PTHREAD bool pthread_flag; -void SetPthreadFlag() { - pthread_flag = true; -} +void SetPthreadFlag() { pthread_flag = true; } TEST_F(TestForDeathTest, DoesNotExecuteAtforkHooks) { if (!GTEST_FLAG_GET(death_test_use_fork)) { @@ -505,7 +499,7 @@ TEST_F(TestForDeathTest, DoesNotExecuteAtforkHooks) { } } -# endif // GTEST_HAS_CLONE && GTEST_HAS_PTHREAD +#endif // GTEST_HAS_CLONE && GTEST_HAS_PTHREAD // Tests that a method of another class can be used in a death test. 
TEST_F(TestForDeathTest, MethodOfAnotherClass) { @@ -527,7 +521,7 @@ TEST_F(TestForDeathTest, AcceptsAnythingConvertibleToRE) { const testing::internal::RE regex(regex_c_str); EXPECT_DEATH(GlobalFunction(), regex); -# if !GTEST_USES_PCRE +#if !GTEST_USES_PCRE const ::std::string regex_std_str(regex_c_str); EXPECT_DEATH(GlobalFunction(), regex_std_str); @@ -536,7 +530,7 @@ TEST_F(TestForDeathTest, AcceptsAnythingConvertibleToRE) { // lifetime extension of the pointer is not sufficient. EXPECT_DEATH(GlobalFunction(), ::std::string(regex_c_str).c_str()); -# endif // !GTEST_USES_PCRE +#endif // !GTEST_USES_PCRE } // Tests that a non-void function can be used in a death test. @@ -551,9 +545,7 @@ TEST_F(TestForDeathTest, FunctionWithParameter) { } // Tests that ASSERT_DEATH can be used outside a TEST, TEST_F, or test fixture. -TEST_F(TestForDeathTest, OutsideFixture) { - DeathTestSubroutine(); -} +TEST_F(TestForDeathTest, OutsideFixture) { DeathTestSubroutine(); } // Tests that death tests can be done inside a loop. TEST_F(TestForDeathTest, InsideLoop) { @@ -564,25 +556,28 @@ TEST_F(TestForDeathTest, InsideLoop) { // Tests that a compound statement can be used in a death test. TEST_F(TestForDeathTest, CompoundStatement) { - EXPECT_DEATH({ // NOLINT - const int x = 2; - const int y = x + 1; - DieIfLessThan(x, y); - }, - "DieIfLessThan"); + EXPECT_DEATH( + { // NOLINT + const int x = 2; + const int y = x + 1; + DieIfLessThan(x, y); + }, + "DieIfLessThan"); } // Tests that code that doesn't die causes a death test to fail. TEST_F(TestForDeathTest, DoesNotDie) { - EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(DieIf(false), "DieIf"), - "failed to die"); + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(DieIf(false), "DieIf"), "failed to die"); } // Tests that a death test fails when the error message isn't expected. 
TEST_F(TestForDeathTest, ErrorMessageMismatch) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_DEATH(DieIf(true), "DieIfLessThan") << "End of death test message."; - }, "died but not with expected error"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_DEATH(DieIf(true), "DieIfLessThan") + << "End of death test message."; + }, + "died but not with expected error"); } // On exit, *aborted will be true if and only if the EXPECT_DEATH() @@ -596,19 +591,20 @@ void ExpectDeathTestHelper(bool* aborted) { // Tests that EXPECT_DEATH doesn't abort the test on failure. TEST_F(TestForDeathTest, EXPECT_DEATH) { bool aborted = true; - EXPECT_NONFATAL_FAILURE(ExpectDeathTestHelper(&aborted), - "failed to die"); + EXPECT_NONFATAL_FAILURE(ExpectDeathTestHelper(&aborted), "failed to die"); EXPECT_FALSE(aborted); } // Tests that ASSERT_DEATH does abort the test on failure. TEST_F(TestForDeathTest, ASSERT_DEATH) { static bool aborted; - EXPECT_FATAL_FAILURE({ // NOLINT - aborted = true; - ASSERT_DEATH(DieIf(false), "DieIf"); // This assertion should fail. - aborted = false; - }, "failed to die"); + EXPECT_FATAL_FAILURE( + { // NOLINT + aborted = true; + ASSERT_DEATH(DieIf(false), "DieIf"); // This assertion should fail. + aborted = false; + }, + "failed to die"); EXPECT_TRUE(aborted); } @@ -653,52 +649,36 @@ TEST_F(TestForDeathTest, TestExpectDebugDeath) { EXPECT_DEBUG_DEATH(DieInDebugElse12(&sideeffect), regex) << "Must accept a streamed message"; -# ifdef NDEBUG +#ifdef NDEBUG // Checks that the assignment occurs in opt mode (sideeffect). EXPECT_EQ(12, sideeffect); -# else +#else // Checks that the assignment does not occur in dbg mode (no sideeffect). EXPECT_EQ(0, sideeffect); -# endif +#endif } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS -// Tests that EXPECT_DEBUG_DEATH works as expected when in debug mode -// the Windows CRT crashes the process with an assertion failure. 
+// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode +// In debug mode, the calls to _CrtSetReportMode and _CrtSetReportFile enable +// the dumping of assertions to stderr. Tests that EXPECT_DEATH works as +// expected when in CRT debug mode (compiled with /MTd or /MDd, which defines +// _DEBUG) the Windows CRT crashes the process with an assertion failure. // 1. Asserts on death. // 2. Has no side effect (doesn't pop up a window or wait for user input). -// -// And in opt mode, it: -// 1. Has side effects but does not assert. +#ifdef _DEBUG TEST_F(TestForDeathTest, CRTDebugDeath) { - int sideeffect = 0; - - // Put the regex in a local variable to make sure we don't get an "unused" - // warning in opt mode. - const char* regex = "dup.* : Assertion failed"; - - EXPECT_DEBUG_DEATH(DieInCRTDebugElse12(&sideeffect), regex) + EXPECT_DEATH(DieInCRTDebugElse12(nullptr), "dup.* : Assertion failed") << "Must accept a streamed message"; - -# ifdef NDEBUG - - // Checks that the assignment occurs in opt mode (sideeffect). - EXPECT_EQ(12, sideeffect); - -# else - - // Checks that the assignment does not occur in dbg mode (no sideeffect). - EXPECT_EQ(0, sideeffect); - -# endif } +#endif // _DEBUG -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS // Tests that ASSERT_DEBUG_DEATH works as expected, that is, you can stream a // message to it, and in debug mode it: @@ -713,20 +693,20 @@ TEST_F(TestForDeathTest, TestAssertDebugDeath) { ASSERT_DEBUG_DEATH(DieInDebugElse12(&sideeffect), "death.*DieInDebugElse12") << "Must accept a streamed message"; -# ifdef NDEBUG +#ifdef NDEBUG // Checks that the assignment occurs in opt mode (sideeffect). EXPECT_EQ(12, sideeffect); -# else +#else // Checks that the assignment does not occur in dbg mode (no sideeffect). 
EXPECT_EQ(0, sideeffect); -# endif +#endif } -# ifndef NDEBUG +#ifndef NDEBUG void ExpectDebugDeathHelper(bool* aborted) { *aborted = true; @@ -734,10 +714,11 @@ void ExpectDebugDeathHelper(bool* aborted) { *aborted = false; } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS TEST(PopUpDeathTest, DoesNotShowPopUpOnAbort) { - printf("This test should be considered failing if it shows " - "any pop-up dialogs.\n"); + printf( + "This test should be considered failing if it shows " + "any pop-up dialogs.\n"); fflush(stdout); EXPECT_DEATH( @@ -747,7 +728,7 @@ TEST(PopUpDeathTest, DoesNotShowPopUpOnAbort) { }, ""); } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS // Tests that EXPECT_DEBUG_DEATH in debug mode does not abort // the function. @@ -838,42 +819,44 @@ TEST_F(TestForDeathTest, AssertDebugDeathAborts10) { EXPECT_TRUE(aborted); } -# endif // _NDEBUG +#endif // _NDEBUG // Tests the *_EXIT family of macros, using a variety of predicates. static void TestExitMacros() { - EXPECT_EXIT(_exit(1), testing::ExitedWithCode(1), ""); + EXPECT_EXIT(_exit(1), testing::ExitedWithCode(1), ""); ASSERT_EXIT(_exit(42), testing::ExitedWithCode(42), ""); -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS // Of all signals effects on the process exit code, only those of SIGABRT // are documented on Windows. // See https://msdn.microsoft.com/en-us/query-bi/m/dwwzkt4c. EXPECT_EXIT(raise(SIGABRT), testing::ExitedWithCode(3), "") << "b_ar"; -# elif !GTEST_OS_FUCHSIA +#elif !GTEST_OS_FUCHSIA // Fuchsia has no unix signals. 
EXPECT_EXIT(raise(SIGKILL), testing::KilledBySignal(SIGKILL), "") << "foo"; ASSERT_EXIT(raise(SIGUSR2), testing::KilledBySignal(SIGUSR2), "") << "bar"; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_EXIT(_exit(0), testing::KilledBySignal(SIGSEGV), "") - << "This failure is expected, too."; - }, "This failure is expected, too."); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_EXIT(_exit(0), testing::KilledBySignal(SIGSEGV), "") + << "This failure is expected, too."; + }, + "This failure is expected, too."); -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_EXIT(raise(SIGSEGV), testing::ExitedWithCode(0), "") - << "This failure is expected."; - }, "This failure is expected."); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_EXIT(raise(SIGSEGV), testing::ExitedWithCode(0), "") + << "This failure is expected."; + }, + "This failure is expected."); } -TEST_F(TestForDeathTest, ExitMacros) { - TestExitMacros(); -} +TEST_F(TestForDeathTest, ExitMacros) { TestExitMacros(); } TEST_F(TestForDeathTest, ExitMacrosUsingFork) { GTEST_FLAG_SET(death_test_use_fork, true); @@ -882,39 +865,40 @@ TEST_F(TestForDeathTest, ExitMacrosUsingFork) { TEST_F(TestForDeathTest, InvalidStyle) { GTEST_FLAG_SET(death_test_style, "rococo"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_DEATH(_exit(0), "") << "This failure is expected."; - }, "This failure is expected."); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_DEATH(_exit(0), "") << "This failure is expected."; + }, + "This failure is expected."); } TEST_F(TestForDeathTest, DeathTestFailedOutput) { GTEST_FLAG_SET(death_test_style, "fast"); EXPECT_NONFATAL_FAILURE( - EXPECT_DEATH(DieWithMessage("death\n"), - "expected message"), + EXPECT_DEATH(DieWithMessage("death\n"), "expected message"), "Actual msg:\n" "[ DEATH ] death\n"); } TEST_F(TestForDeathTest, DeathTestUnexpectedReturnOutput) { GTEST_FLAG_SET(death_test_style, "fast"); - EXPECT_NONFATAL_FAILURE( - EXPECT_DEATH({ - 
fprintf(stderr, "returning\n"); - fflush(stderr); - return; - }, ""), - " Result: illegal return in test statement.\n" - " Error msg:\n" - "[ DEATH ] returning\n"); + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH( + { + fprintf(stderr, "returning\n"); + fflush(stderr); + return; + }, + ""), + " Result: illegal return in test statement.\n" + " Error msg:\n" + "[ DEATH ] returning\n"); } TEST_F(TestForDeathTest, DeathTestBadExitCodeOutput) { GTEST_FLAG_SET(death_test_style, "fast"); EXPECT_NONFATAL_FAILURE( EXPECT_EXIT(DieWithMessage("exiting with rc 1\n"), - testing::ExitedWithCode(3), - "expected message"), + testing::ExitedWithCode(3), "expected message"), " Result: died but not with expected exit code:\n" " Exited with exit status 1\n" "Actual msg:\n" @@ -947,8 +931,8 @@ class MockDeathTestFactory : public DeathTestFactory { int line, DeathTest** test) override; // Sets the parameters for subsequent calls to Create. - void SetParameters(bool create, DeathTest::TestRole role, - int status, bool passed); + void SetParameters(bool create, DeathTest::TestRole role, int status, + bool passed); // Accessors. int AssumeRoleCalls() const { return assume_role_calls_; } @@ -990,17 +974,15 @@ class MockDeathTestFactory : public DeathTestFactory { bool test_deleted_; }; - // A DeathTest implementation useful in testing. It returns values set // at its creation from its various inherited DeathTest methods, and // reports calls to those methods to its parent MockDeathTestFactory // object. 
class MockDeathTest : public DeathTest { public: - MockDeathTest(MockDeathTestFactory *parent, - TestRole role, int status, bool passed) : - parent_(parent), role_(role), status_(status), passed_(passed) { - } + MockDeathTest(MockDeathTestFactory* parent, TestRole role, int status, + bool passed) + : parent_(parent), role_(role), status_(status), passed_(passed) {} ~MockDeathTest() override { parent_->test_deleted_ = true; } TestRole AssumeRole() override { ++parent_->assume_role_calls_; @@ -1025,7 +1007,6 @@ class MockDeathTest : public DeathTest { const bool passed_; }; - // MockDeathTestFactory constructor. MockDeathTestFactory::MockDeathTestFactory() : create_(true), @@ -1035,13 +1016,10 @@ MockDeathTestFactory::MockDeathTestFactory() assume_role_calls_(0), wait_calls_(0), passed_args_(), - abort_args_() { -} - + abort_args_() {} // Sets the parameters for subsequent calls to Create. -void MockDeathTestFactory::SetParameters(bool create, - DeathTest::TestRole role, +void MockDeathTestFactory::SetParameters(bool create, DeathTest::TestRole role, int status, bool passed) { create_ = create; role_ = role; @@ -1054,7 +1032,6 @@ void MockDeathTestFactory::SetParameters(bool create, abort_args_.clear(); } - // Sets test to NULL (if create_ is false) or to the address of a new // MockDeathTest object with parameters taken from the last call // to SetParameters (if create_ is true). Always returns true. @@ -1094,10 +1071,12 @@ class MacroLogicDeathTest : public testing::Test { // test cannot be run directly from a test routine that uses a // MockDeathTest, or the remainder of the routine will not be executed. static void RunReturningDeathTest(bool* flag) { - ASSERT_DEATH({ // NOLINT - *flag = true; - return; - }, ""); + ASSERT_DEATH( + { // NOLINT + *flag = true; + return; + }, + ""); } }; @@ -1182,8 +1161,7 @@ TEST_F(MacroLogicDeathTest, ChildDoesNotDie) { // _exit(2) is called in that case by ForkingDeathTest, but not by // our MockDeathTest. 
ASSERT_EQ(2U, factory_->AbortCalls()); - EXPECT_EQ(DeathTest::TEST_DID_NOT_DIE, - factory_->AbortArgument(0)); + EXPECT_EQ(DeathTest::TEST_DID_NOT_DIE, factory_->AbortArgument(0)); EXPECT_EQ(DeathTest::TEST_ENCOUNTERED_RETURN_STATEMENT, factory_->AbortArgument(1)); EXPECT_TRUE(factory_->TestDeleted()); @@ -1199,12 +1177,16 @@ TEST(SuccessRegistrationDeathTest, NoSuccessPart) { TEST(StreamingAssertionsDeathTest, DeathTest) { EXPECT_DEATH(_exit(1), "") << "unexpected failure"; ASSERT_DEATH(_exit(1), "") << "unexpected failure"; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_DEATH(_exit(0), "") << "expected failure"; - }, "expected failure"); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_DEATH(_exit(0), "") << "expected failure"; - }, "expected failure"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_DEATH(_exit(0), "") << "expected failure"; + }, + "expected failure"); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_DEATH(_exit(0), "") << "expected failure"; + }, + "expected failure"); } // Tests that GetLastErrnoDescription returns an empty string when the @@ -1216,7 +1198,7 @@ TEST(GetLastErrnoDescription, GetLastErrnoDescriptionWorks) { EXPECT_STREQ("", GetLastErrnoDescription().c_str()); } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS TEST(AutoHandleTest, AutoHandleWorks) { HANDLE handle = ::CreateEvent(NULL, FALSE, FALSE, NULL); ASSERT_NE(INVALID_HANDLE_VALUE, handle); @@ -1241,15 +1223,15 @@ TEST(AutoHandleTest, AutoHandleWorks) { testing::internal::AutoHandle auto_handle2; EXPECT_EQ(INVALID_HANDLE_VALUE, auto_handle2.Get()); } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS typedef unsigned __int64 BiggestParsable; typedef signed __int64 BiggestSignedParsable; -# else +#else typedef unsigned long long BiggestParsable; typedef signed long long BiggestSignedParsable; -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS // We cannot use std::numeric_limits::max() as it clashes with the // max() macro 
defined by . @@ -1340,11 +1322,11 @@ TEST(ParseNaturalNumberTest, WorksForShorterIntegers) { EXPECT_EQ(123, char_result); } -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS TEST(EnvironmentTest, HandleFitsIntoSizeT) { ASSERT_TRUE(sizeof(HANDLE) <= sizeof(size_t)); } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS // Tests that EXPECT_DEATH_IF_SUPPORTED/ASSERT_DEATH_IF_SUPPORTED trigger // failures when death tests are available on the system. @@ -1362,21 +1344,25 @@ TEST(ConditionalDeathMacrosDeathTest, ExpectsDeathWhenDeathTestsAvailable) { TEST(InDeathTestChildDeathTest, ReportsDeathTestCorrectlyInFastStyle) { GTEST_FLAG_SET(death_test_style, "fast"); EXPECT_FALSE(InDeathTestChild()); - EXPECT_DEATH({ - fprintf(stderr, InDeathTestChild() ? "Inside" : "Outside"); - fflush(stderr); - _exit(1); - }, "Inside"); + EXPECT_DEATH( + { + fprintf(stderr, InDeathTestChild() ? "Inside" : "Outside"); + fflush(stderr); + _exit(1); + }, + "Inside"); } TEST(InDeathTestChildDeathTest, ReportsDeathTestCorrectlyInThreadSafeStyle) { GTEST_FLAG_SET(death_test_style, "threadsafe"); EXPECT_FALSE(InDeathTestChild()); - EXPECT_DEATH({ - fprintf(stderr, InDeathTestChild() ? "Inside" : "Outside"); - fflush(stderr); - _exit(1); - }, "Inside"); + EXPECT_DEATH( + { + fprintf(stderr, InDeathTestChild() ? 
"Inside" : "Outside"); + fflush(stderr); + _exit(1); + }, + "Inside"); } void DieWithMessage(const char* message) { @@ -1504,8 +1490,7 @@ TEST(ConditionalDeathMacrosSyntaxDeathTest, SingleStatement) { // doesn't expand into an "if" statement without an "else" ; // NOLINT - if (AlwaysFalse()) - ASSERT_DEATH_IF_SUPPORTED(return, "") << "did not die"; + if (AlwaysFalse()) ASSERT_DEATH_IF_SUPPORTED(return, "") << "did not die"; if (AlwaysFalse()) ; // NOLINT @@ -1524,21 +1509,18 @@ TEST(ConditionalDeathMacrosSyntaxDeathTest, SwitchStatement) { GTEST_DISABLE_MSC_WARNINGS_PUSH_(4065) switch (0) - default: - ASSERT_DEATH_IF_SUPPORTED(_exit(1), "") - << "exit in default switch handler"; + default: + ASSERT_DEATH_IF_SUPPORTED(_exit(1), "") << "exit in default switch handler"; switch (0) - case 0: - EXPECT_DEATH_IF_SUPPORTED(_exit(1), "") << "exit in switch case"; + case 0: + EXPECT_DEATH_IF_SUPPORTED(_exit(1), "") << "exit in switch case"; GTEST_DISABLE_MSC_WARNINGS_POP_() } // Tests that a test case whose name ends with "DeathTest" works fine // on Windows. -TEST(NotADeathTest, Test) { - SUCCEED(); -} +TEST(NotADeathTest, Test) { SUCCEED(); } } // namespace diff --git a/ext/googletest/googletest/test/googletest-death-test_ex_test.cc b/ext/googletest/googletest/test/googletest-death-test_ex_test.cc index bbacc8ae88..f2515e377e 100644 --- a/ext/googletest/googletest/test/googletest-death-test_ex_test.cc +++ b/ext/googletest/googletest/test/googletest-death-test_ex_test.cc @@ -35,15 +35,15 @@ #if GTEST_HAS_DEATH_TEST -# if GTEST_HAS_SEH -# include // For RaiseException(). -# endif +#if GTEST_HAS_SEH +#include // For RaiseException(). +#endif -# include "gtest/gtest-spi.h" +#include "gtest/gtest-spi.h" -# if GTEST_HAS_EXCEPTIONS +#if GTEST_HAS_EXCEPTIONS -# include // For std::exception. +#include // For std::exception. // Tests that death tests report thrown exceptions as failures and that the // exceptions do not escape death test macros. 
@@ -67,12 +67,11 @@ TEST(CxxExceptionDeathTest, PrintsMessageForStdExceptions) { EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(throw TestException(), ""), "exceptional message"); // Verifies that the location is mentioned in the failure text. - EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(throw TestException(), ""), - __FILE__); + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(throw TestException(), ""), __FILE__); } -# endif // GTEST_HAS_EXCEPTIONS +#endif // GTEST_HAS_EXCEPTIONS -# if GTEST_HAS_SEH +#if GTEST_HAS_SEH // Tests that enabling interception of SEH exceptions with the // catch_exceptions flag does not interfere with SEH exceptions being // treated as death by death tests. @@ -81,7 +80,7 @@ TEST(SehExceptionDeasTest, CatchExceptionsDoesNotInterfere) { << "with catch_exceptions " << (GTEST_FLAG_GET(catch_exceptions) ? "enabled" : "disabled"); } -# endif +#endif #endif // GTEST_HAS_DEATH_TEST diff --git a/ext/googletest/googletest/test/googletest-env-var-test.py b/ext/googletest/googletest/test/googletest-env-var-test.py index 02c3655c39..bc4d87d938 100755 --- a/ext/googletest/googletest/test/googletest-env-var-test.py +++ b/ext/googletest/googletest/test/googletest-env-var-test.py @@ -32,7 +32,7 @@ """Verifies that Google Test correctly parses environment variables.""" import os -import gtest_test_utils +from googletest.test import gtest_test_utils IS_WINDOWS = os.name == 'nt' diff --git a/ext/googletest/googletest/test/googletest-env-var-test_.cc b/ext/googletest/googletest/test/googletest-env-var-test_.cc index 0ff015228f..3653375080 100644 --- a/ext/googletest/googletest/test/googletest-env-var-test_.cc +++ b/ext/googletest/googletest/test/googletest-env-var-test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // A helper program for testing that Google Test parses the environment // variables correctly. 
@@ -43,8 +42,7 @@ namespace testing { // The purpose of this is to make the test more realistic by ensuring // that the UnitTest singleton is created before main() is entered. // We don't actual run the TEST itself. -TEST(GTestEnvVarTest, Dummy) { -} +TEST(GTestEnvVarTest, Dummy) {} void PrintFlag(const char* flag) { if (strcmp(flag, "break_on_failure") == 0) { diff --git a/ext/googletest/googletest/test/googletest-failfast-unittest.py b/ext/googletest/googletest/test/googletest-failfast-unittest.py index 3aeb2dffea..1356d4f8b5 100755 --- a/ext/googletest/googletest/test/googletest-failfast-unittest.py +++ b/ext/googletest/googletest/test/googletest-failfast-unittest.py @@ -41,7 +41,7 @@ line flags. """ import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/ext/googletest/googletest/test/googletest-failfast-unittest_.cc b/ext/googletest/googletest/test/googletest-failfast-unittest_.cc index 0b2c951bc0..3bd05a8eb2 100644 --- a/ext/googletest/googletest/test/googletest-failfast-unittest_.cc +++ b/ext/googletest/googletest/test/googletest-failfast-unittest_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Unit test for Google Test test filters. 
// // A user can specify which test(s) in a Google Test program to run via @@ -160,7 +159,7 @@ TEST(HasSkipTest, Test4) { FAIL() << "Expected failure."; } } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::UnitTest::GetInstance()->listeners().Append(new MyTestListener()); return RUN_ALL_TESTS(); diff --git a/ext/googletest/googletest/test/googletest-filepath-test.cc b/ext/googletest/googletest/test/googletest-filepath-test.cc index aafad36f3f..fe53f848fc 100644 --- a/ext/googletest/googletest/test/googletest-filepath-test.cc +++ b/ext/googletest/googletest/test/googletest-filepath-test.cc @@ -35,15 +35,15 @@ // This file is #included from gtest-internal.h. // Do not #include this file anywhere else! -#include "gtest/internal/gtest-filepath.h" #include "gtest/gtest.h" +#include "gtest/internal/gtest-filepath.h" #include "src/gtest-internal-inl.h" #if GTEST_OS_WINDOWS_MOBILE -# include // NOLINT +#include // NOLINT #elif GTEST_OS_WINDOWS -# include // NOLINT -#endif // GTEST_OS_WINDOWS_MOBILE +#include // NOLINT +#endif // GTEST_OS_WINDOWS_MOBILE namespace testing { namespace internal { @@ -55,16 +55,16 @@ namespace { int remove(const char* path) { LPCWSTR wpath = String::AnsiToUtf16(path); int ret = DeleteFile(wpath) ? 0 : -1; - delete [] wpath; + delete[] wpath; return ret; } // Windows CE doesn't have the _rmdir C function. int _rmdir(const char* path) { FilePath filepath(path); - LPCWSTR wpath = String::AnsiToUtf16( - filepath.RemoveTrailingPathSeparator().c_str()); + LPCWSTR wpath = + String::AnsiToUtf16(filepath.RemoveTrailingPathSeparator().c_str()); int ret = RemoveDirectory(wpath) ? 
0 : -1; - delete [] wpath; + delete[] wpath; return ret; } @@ -78,18 +78,18 @@ TEST(GetCurrentDirTest, ReturnsCurrentDir) { const FilePath cwd = FilePath::GetCurrentDir(); posix::ChDir(original_dir.c_str()); -# if GTEST_OS_WINDOWS || GTEST_OS_OS2 +#if GTEST_OS_WINDOWS || GTEST_OS_OS2 // Skips the ":". const char* const cwd_without_drive = strchr(cwd.c_str(), ':'); ASSERT_TRUE(cwd_without_drive != NULL); EXPECT_STREQ(GTEST_PATH_SEP_, cwd_without_drive + 1); -# else +#else EXPECT_EQ(GTEST_PATH_SEP_, cwd.string()); -# endif +#endif } #endif // GTEST_OS_WINDOWS_MOBILE @@ -112,33 +112,34 @@ TEST(RemoveDirectoryNameTest, WhenEmptyName) { // RemoveDirectoryName "afile" -> "afile" TEST(RemoveDirectoryNameTest, ButNoDirectory) { - EXPECT_EQ("afile", - FilePath("afile").RemoveDirectoryName().string()); + EXPECT_EQ("afile", FilePath("afile").RemoveDirectoryName().string()); } // RemoveDirectoryName "/afile" -> "afile" TEST(RemoveDirectoryNameTest, RootFileShouldGiveFileName) { EXPECT_EQ("afile", - FilePath(GTEST_PATH_SEP_ "afile").RemoveDirectoryName().string()); + FilePath(GTEST_PATH_SEP_ "afile").RemoveDirectoryName().string()); } // RemoveDirectoryName "adir/" -> "" TEST(RemoveDirectoryNameTest, WhereThereIsNoFileName) { EXPECT_EQ("", - FilePath("adir" GTEST_PATH_SEP_).RemoveDirectoryName().string()); + FilePath("adir" GTEST_PATH_SEP_).RemoveDirectoryName().string()); } // RemoveDirectoryName "adir/afile" -> "afile" TEST(RemoveDirectoryNameTest, ShouldGiveFileName) { - EXPECT_EQ("afile", + EXPECT_EQ( + "afile", FilePath("adir" GTEST_PATH_SEP_ "afile").RemoveDirectoryName().string()); } // RemoveDirectoryName "adir/subdir/afile" -> "afile" TEST(RemoveDirectoryNameTest, ShouldAlsoGiveFileName) { EXPECT_EQ("afile", - FilePath("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_ "afile") - .RemoveDirectoryName().string()); + FilePath("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_ "afile") + .RemoveDirectoryName() + .string()); } #if GTEST_HAS_ALT_PATH_SEP_ @@ -182,7 +183,7 @@ 
TEST(RemoveFileNameTest, EmptyName) { // RemoveFileName "adir/" -> "adir/" TEST(RemoveFileNameTest, ButNoFile) { EXPECT_EQ("adir" GTEST_PATH_SEP_, - FilePath("adir" GTEST_PATH_SEP_).RemoveFileName().string()); + FilePath("adir" GTEST_PATH_SEP_).RemoveFileName().string()); } // RemoveFileName "adir/afile" -> "adir/" @@ -194,14 +195,15 @@ TEST(RemoveFileNameTest, GivesDirName) { // RemoveFileName "adir/subdir/afile" -> "adir/subdir/" TEST(RemoveFileNameTest, GivesDirAndSubDirName) { EXPECT_EQ("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_, - FilePath("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_ "afile") - .RemoveFileName().string()); + FilePath("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_ "afile") + .RemoveFileName() + .string()); } // RemoveFileName "/afile" -> "/" TEST(RemoveFileNameTest, GivesRootDir) { EXPECT_EQ(GTEST_PATH_SEP_, - FilePath(GTEST_PATH_SEP_ "afile").RemoveFileName().string()); + FilePath(GTEST_PATH_SEP_ "afile").RemoveFileName().string()); } #if GTEST_HAS_ALT_PATH_SEP_ @@ -235,44 +237,43 @@ TEST(RemoveFileNameTest, GivesRootDirForAlternateSeparator) { #endif TEST(MakeFileNameTest, GenerateWhenNumberIsZero) { - FilePath actual = FilePath::MakeFileName(FilePath("foo"), FilePath("bar"), - 0, "xml"); + FilePath actual = + FilePath::MakeFileName(FilePath("foo"), FilePath("bar"), 0, "xml"); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar.xml", actual.string()); } TEST(MakeFileNameTest, GenerateFileNameNumberGtZero) { - FilePath actual = FilePath::MakeFileName(FilePath("foo"), FilePath("bar"), - 12, "xml"); + FilePath actual = + FilePath::MakeFileName(FilePath("foo"), FilePath("bar"), 12, "xml"); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar_12.xml", actual.string()); } TEST(MakeFileNameTest, GenerateFileNameWithSlashNumberIsZero) { FilePath actual = FilePath::MakeFileName(FilePath("foo" GTEST_PATH_SEP_), - FilePath("bar"), 0, "xml"); + FilePath("bar"), 0, "xml"); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar.xml", actual.string()); } TEST(MakeFileNameTest, 
GenerateFileNameWithSlashNumberGtZero) { FilePath actual = FilePath::MakeFileName(FilePath("foo" GTEST_PATH_SEP_), - FilePath("bar"), 12, "xml"); + FilePath("bar"), 12, "xml"); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar_12.xml", actual.string()); } TEST(MakeFileNameTest, GenerateWhenNumberIsZeroAndDirIsEmpty) { - FilePath actual = FilePath::MakeFileName(FilePath(""), FilePath("bar"), - 0, "xml"); + FilePath actual = + FilePath::MakeFileName(FilePath(""), FilePath("bar"), 0, "xml"); EXPECT_EQ("bar.xml", actual.string()); } TEST(MakeFileNameTest, GenerateWhenNumberIsNotZeroAndDirIsEmpty) { - FilePath actual = FilePath::MakeFileName(FilePath(""), FilePath("bar"), - 14, "xml"); + FilePath actual = + FilePath::MakeFileName(FilePath(""), FilePath("bar"), 14, "xml"); EXPECT_EQ("bar_14.xml", actual.string()); } TEST(ConcatPathsTest, WorksWhenDirDoesNotEndWithPathSep) { - FilePath actual = FilePath::ConcatPaths(FilePath("foo"), - FilePath("bar.xml")); + FilePath actual = FilePath::ConcatPaths(FilePath("foo"), FilePath("bar.xml")); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar.xml", actual.string()); } @@ -283,8 +284,7 @@ TEST(ConcatPathsTest, WorksWhenPath1EndsWithPathSep) { } TEST(ConcatPathsTest, Path1BeingEmpty) { - FilePath actual = FilePath::ConcatPaths(FilePath(""), - FilePath("bar.xml")); + FilePath actual = FilePath::ConcatPaths(FilePath(""), FilePath("bar.xml")); EXPECT_EQ("bar.xml", actual.string()); } @@ -294,8 +294,7 @@ TEST(ConcatPathsTest, Path2BeingEmpty) { } TEST(ConcatPathsTest, BothPathBeingEmpty) { - FilePath actual = FilePath::ConcatPaths(FilePath(""), - FilePath("")); + FilePath actual = FilePath::ConcatPaths(FilePath(""), FilePath("")); EXPECT_EQ("", actual.string()); } @@ -307,16 +306,16 @@ TEST(ConcatPathsTest, Path1ContainsPathSep) { } TEST(ConcatPathsTest, Path2ContainsPathSep) { - FilePath actual = FilePath::ConcatPaths( - FilePath("foo" GTEST_PATH_SEP_), - FilePath("bar" GTEST_PATH_SEP_ "bar.xml")); + FilePath actual = + FilePath::ConcatPaths(FilePath("foo" 
GTEST_PATH_SEP_), + FilePath("bar" GTEST_PATH_SEP_ "bar.xml")); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_ "bar.xml", actual.string()); } TEST(ConcatPathsTest, Path2EndsWithPathSep) { - FilePath actual = FilePath::ConcatPaths(FilePath("foo"), - FilePath("bar" GTEST_PATH_SEP_)); + FilePath actual = + FilePath::ConcatPaths(FilePath("foo"), FilePath("bar" GTEST_PATH_SEP_)); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_, actual.string()); } @@ -332,7 +331,8 @@ TEST(RemoveTrailingPathSeparatorTest, FileNoSlashString) { // RemoveTrailingPathSeparator "foo/" -> "foo" TEST(RemoveTrailingPathSeparatorTest, ShouldRemoveTrailingSeparator) { - EXPECT_EQ("foo", + EXPECT_EQ( + "foo", FilePath("foo" GTEST_PATH_SEP_).RemoveTrailingPathSeparator().string()); #if GTEST_HAS_ALT_PATH_SEP_ EXPECT_EQ("foo", FilePath("foo/").RemoveTrailingPathSeparator().string()); @@ -343,18 +343,19 @@ TEST(RemoveTrailingPathSeparatorTest, ShouldRemoveTrailingSeparator) { TEST(RemoveTrailingPathSeparatorTest, ShouldRemoveLastSeparator) { EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", FilePath("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_) - .RemoveTrailingPathSeparator().string()); + .RemoveTrailingPathSeparator() + .string()); } // RemoveTrailingPathSeparator "foo/bar" -> "foo/bar" TEST(RemoveTrailingPathSeparatorTest, ShouldReturnUnmodified) { - EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", - FilePath("foo" GTEST_PATH_SEP_ "bar") - .RemoveTrailingPathSeparator().string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", FilePath("foo" GTEST_PATH_SEP_ "bar") + .RemoveTrailingPathSeparator() + .string()); } TEST(DirectoryTest, RootDirectoryExists) { -#if GTEST_OS_WINDOWS // We are on Windows. +#if GTEST_OS_WINDOWS // We are on Windows. 
char current_drive[_MAX_PATH]; // NOLINT current_drive[0] = static_cast(_getdrive() + 'A' - 1); current_drive[1] = ':'; @@ -393,12 +394,12 @@ TEST(DirectoryTest, EmptyPathDirectoryDoesNotExist) { TEST(DirectoryTest, CurrentDirectoryExists) { #if GTEST_OS_WINDOWS // We are on Windows. -# ifndef _WIN32_CE // Windows CE doesn't have a current directory. +#ifndef _WIN32_CE // Windows CE doesn't have a current directory. EXPECT_TRUE(FilePath(".").DirectoryExists()); EXPECT_TRUE(FilePath(".\\").DirectoryExists()); -# endif // _WIN32_CE +#endif // _WIN32_CE #else EXPECT_TRUE(FilePath(".").DirectoryExists()); EXPECT_TRUE(FilePath("./").DirectoryExists()); @@ -406,34 +407,35 @@ TEST(DirectoryTest, CurrentDirectoryExists) { } // "foo/bar" == foo//bar" == "foo///bar" -TEST(NormalizeTest, MultipleConsecutiveSepaparatorsInMidstring) { +TEST(NormalizeTest, MultipleConsecutiveSeparatorsInMidstring) { EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", FilePath("foo" GTEST_PATH_SEP_ "bar").string()); EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); - EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", - FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ - GTEST_PATH_SEP_ "bar").string()); + EXPECT_EQ( + "foo" GTEST_PATH_SEP_ "bar", + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar") + .string()); } // "/bar" == //bar" == "///bar" -TEST(NormalizeTest, MultipleConsecutiveSepaparatorsAtStringStart) { +TEST(NormalizeTest, MultipleConsecutiveSeparatorsAtStringStart) { + EXPECT_EQ(GTEST_PATH_SEP_ "bar", FilePath(GTEST_PATH_SEP_ "bar").string()); EXPECT_EQ(GTEST_PATH_SEP_ "bar", - FilePath(GTEST_PATH_SEP_ "bar").string()); - EXPECT_EQ(GTEST_PATH_SEP_ "bar", - FilePath(GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); - EXPECT_EQ(GTEST_PATH_SEP_ "bar", - FilePath(GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); + FilePath(GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); + EXPECT_EQ( + GTEST_PATH_SEP_ "bar", + 
FilePath(GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); } // "foo/" == foo//" == "foo///" -TEST(NormalizeTest, MultipleConsecutiveSepaparatorsAtStringEnd) { +TEST(NormalizeTest, MultipleConsecutiveSeparatorsAtStringEnd) { + EXPECT_EQ("foo" GTEST_PATH_SEP_, FilePath("foo" GTEST_PATH_SEP_).string()); EXPECT_EQ("foo" GTEST_PATH_SEP_, - FilePath("foo" GTEST_PATH_SEP_).string()); - EXPECT_EQ("foo" GTEST_PATH_SEP_, - FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_).string()); - EXPECT_EQ("foo" GTEST_PATH_SEP_, - FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_).string()); + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_).string()); + EXPECT_EQ( + "foo" GTEST_PATH_SEP_, + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_).string()); } #if GTEST_HAS_ALT_PATH_SEP_ @@ -442,12 +444,10 @@ TEST(NormalizeTest, MultipleConsecutiveSepaparatorsAtStringEnd) { // regardless of their combination (e.g. "foo\" =="foo/\" == // "foo\\/"). TEST(NormalizeTest, MixAlternateSeparatorAtStringEnd) { - EXPECT_EQ("foo" GTEST_PATH_SEP_, - FilePath("foo/").string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_, FilePath("foo/").string()); EXPECT_EQ("foo" GTEST_PATH_SEP_, FilePath("foo" GTEST_PATH_SEP_ "/").string()); - EXPECT_EQ("foo" GTEST_PATH_SEP_, - FilePath("foo//" GTEST_PATH_SEP_).string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_, FilePath("foo//" GTEST_PATH_SEP_).string()); } #endif @@ -478,15 +478,15 @@ TEST(AssignmentOperatorTest, ConstAssignedToNonConst) { class DirectoryCreationTest : public Test { protected: void SetUp() override { - testdata_path_.Set(FilePath( - TempDir() + GetCurrentExecutableName().string() + - "_directory_creation" GTEST_PATH_SEP_ "test" GTEST_PATH_SEP_)); + testdata_path_.Set( + FilePath(TempDir() + GetCurrentExecutableName().string() + + "_directory_creation" GTEST_PATH_SEP_ "test" GTEST_PATH_SEP_)); testdata_file_.Set(testdata_path_.RemoveTrailingPathSeparator()); - unique_file0_.Set(FilePath::MakeFileName(testdata_path_, 
FilePath("unique"), - 0, "txt")); - unique_file1_.Set(FilePath::MakeFileName(testdata_path_, FilePath("unique"), - 1, "txt")); + unique_file0_.Set( + FilePath::MakeFileName(testdata_path_, FilePath("unique"), 0, "txt")); + unique_file1_.Set( + FilePath::MakeFileName(testdata_path_, FilePath("unique"), 1, "txt")); remove(testdata_file_.c_str()); remove(unique_file0_.c_str()); @@ -512,8 +512,8 @@ class DirectoryCreationTest : public Test { // a directory named 'test' from a file named 'test'. Example names: FilePath testdata_path_; // "/tmp/directory_creation/test/" FilePath testdata_file_; // "/tmp/directory_creation/test" - FilePath unique_file0_; // "/tmp/directory_creation/test/unique.txt" - FilePath unique_file1_; // "/tmp/directory_creation/test/unique_1.txt" + FilePath unique_file0_; // "/tmp/directory_creation/test/unique.txt" + FilePath unique_file1_; // "/tmp/directory_creation/test/unique_1.txt" }; TEST_F(DirectoryCreationTest, CreateDirectoriesRecursively) { @@ -530,8 +530,8 @@ TEST_F(DirectoryCreationTest, CreateDirectoriesForAlreadyExistingPath) { } TEST_F(DirectoryCreationTest, CreateDirectoriesAndUniqueFilename) { - FilePath file_path(FilePath::GenerateUniqueFileName(testdata_path_, - FilePath("unique"), "txt")); + FilePath file_path(FilePath::GenerateUniqueFileName( + testdata_path_, FilePath("unique"), "txt")); EXPECT_EQ(unique_file0_.string(), file_path.string()); EXPECT_FALSE(file_path.FileOrDirectoryExists()); // file not there @@ -540,8 +540,8 @@ TEST_F(DirectoryCreationTest, CreateDirectoriesAndUniqueFilename) { CreateTextFile(file_path.c_str()); EXPECT_TRUE(file_path.FileOrDirectoryExists()); - FilePath file_path2(FilePath::GenerateUniqueFileName(testdata_path_, - FilePath("unique"), "txt")); + FilePath file_path2(FilePath::GenerateUniqueFileName( + testdata_path_, FilePath("unique"), "txt")); EXPECT_EQ(unique_file1_.string(), file_path2.string()); EXPECT_FALSE(file_path2.FileOrDirectoryExists()); // file not there 
CreateTextFile(file_path2.c_str()); @@ -614,14 +614,16 @@ TEST(FilePathTest, IsAbsolutePath) { EXPECT_FALSE(FilePath("is" GTEST_PATH_SEP_ "relative").IsAbsolutePath()); EXPECT_FALSE(FilePath("").IsAbsolutePath()); #if GTEST_OS_WINDOWS - EXPECT_TRUE(FilePath("c:\\" GTEST_PATH_SEP_ "is_not" - GTEST_PATH_SEP_ "relative").IsAbsolutePath()); + EXPECT_TRUE( + FilePath("c:\\" GTEST_PATH_SEP_ "is_not" GTEST_PATH_SEP_ "relative") + .IsAbsolutePath()); EXPECT_FALSE(FilePath("c:foo" GTEST_PATH_SEP_ "bar").IsAbsolutePath()); - EXPECT_TRUE(FilePath("c:/" GTEST_PATH_SEP_ "is_not" - GTEST_PATH_SEP_ "relative").IsAbsolutePath()); + EXPECT_TRUE( + FilePath("c:/" GTEST_PATH_SEP_ "is_not" GTEST_PATH_SEP_ "relative") + .IsAbsolutePath()); #else EXPECT_TRUE(FilePath(GTEST_PATH_SEP_ "is_not" GTEST_PATH_SEP_ "relative") - .IsAbsolutePath()); + .IsAbsolutePath()); #endif // GTEST_OS_WINDOWS } diff --git a/ext/googletest/googletest/test/googletest-filter-unittest.py b/ext/googletest/googletest/test/googletest-filter-unittest.py index 6b32f2d219..2c4a1b18a1 100755 --- a/ext/googletest/googletest/test/googletest-filter-unittest.py +++ b/ext/googletest/googletest/test/googletest-filter-unittest.py @@ -47,7 +47,7 @@ try: except ImportError: pass import sys -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. @@ -113,6 +113,9 @@ TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') +# Regex for parsing disabled banner from Google Test's output +DISABLED_BANNER_REGEX = re.compile(r'^\[\s*DISABLED\s*\] (.*)') + # The command line flag to tell Google Test to output the list of tests it # will run. 
LIST_TESTS_FLAG = '--gtest_list_tests' @@ -206,6 +209,17 @@ def RunAndExtractTestList(args = None): return (tests_run, p.exit_code) +def RunAndExtractDisabledBannerList(args=None): + """Runs the test program and returns tests that printed a disabled banner.""" + p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) + banners_printed = [] + for line in p.output.split('\n'): + match = DISABLED_BANNER_REGEX.match(line) + if match is not None: + banners_printed.append(match.group(1)) + return banners_printed + + def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: @@ -613,6 +627,23 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase): self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) + def testDisabledBanner(self): + """Tests that the disabled banner prints only tests that match filter.""" + make_filter = lambda s: ['--%s=%s' % (FILTER_FLAG, s)] + + banners = RunAndExtractDisabledBannerList(make_filter('*')) + self.AssertSetEqual(banners, [ + 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', + 'BazTest.DISABLED_TestC' + ]) + + banners = RunAndExtractDisabledBannerList(make_filter('Bar*')) + self.AssertSetEqual( + banners, ['BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive']) + + banners = RunAndExtractDisabledBannerList(make_filter('*-Bar*')) + self.AssertSetEqual(banners, ['BazTest.DISABLED_TestC']) + if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" diff --git a/ext/googletest/googletest/test/googletest-filter-unittest_.cc b/ext/googletest/googletest/test/googletest-filter-unittest_.cc index d30ec9c78b..bc7aa59408 100644 --- a/ext/googletest/googletest/test/googletest-filter-unittest_.cc +++ b/ext/googletest/googletest/test/googletest-filter-unittest_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF 
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Unit test for Google Test test filters. // // A user can specify which test(s) in a Google Test program to run via @@ -43,87 +42,57 @@ namespace { // Test case FooTest. -class FooTest : public testing::Test { -}; +class FooTest : public testing::Test {}; -TEST_F(FooTest, Abc) { -} +TEST_F(FooTest, Abc) {} -TEST_F(FooTest, Xyz) { - FAIL() << "Expected failure."; -} +TEST_F(FooTest, Xyz) { FAIL() << "Expected failure."; } // Test case BarTest. -TEST(BarTest, TestOne) { -} +TEST(BarTest, TestOne) {} -TEST(BarTest, TestTwo) { -} +TEST(BarTest, TestTwo) {} -TEST(BarTest, TestThree) { -} +TEST(BarTest, TestThree) {} -TEST(BarTest, DISABLED_TestFour) { - FAIL() << "Expected failure."; -} +TEST(BarTest, DISABLED_TestFour) { FAIL() << "Expected failure."; } -TEST(BarTest, DISABLED_TestFive) { - FAIL() << "Expected failure."; -} +TEST(BarTest, DISABLED_TestFive) { FAIL() << "Expected failure."; } // Test case BazTest. -TEST(BazTest, TestOne) { - FAIL() << "Expected failure."; -} +TEST(BazTest, TestOne) { FAIL() << "Expected failure."; } -TEST(BazTest, TestA) { -} +TEST(BazTest, TestA) {} -TEST(BazTest, TestB) { -} +TEST(BazTest, TestB) {} -TEST(BazTest, DISABLED_TestC) { - FAIL() << "Expected failure."; -} +TEST(BazTest, DISABLED_TestC) { FAIL() << "Expected failure."; } // Test case HasDeathTest -TEST(HasDeathTest, Test1) { - EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); -} +TEST(HasDeathTest, Test1) { EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); } // We need at least two death tests to make sure that the all death tests // aren't on the first shard. 
-TEST(HasDeathTest, Test2) { - EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); -} +TEST(HasDeathTest, Test2) { EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); } // Test case FoobarTest -TEST(DISABLED_FoobarTest, Test1) { - FAIL() << "Expected failure."; -} +TEST(DISABLED_FoobarTest, Test1) { FAIL() << "Expected failure."; } -TEST(DISABLED_FoobarTest, DISABLED_Test2) { - FAIL() << "Expected failure."; -} +TEST(DISABLED_FoobarTest, DISABLED_Test2) { FAIL() << "Expected failure."; } // Test case FoobarbazTest -TEST(DISABLED_FoobarbazTest, TestA) { - FAIL() << "Expected failure."; -} +TEST(DISABLED_FoobarbazTest, TestA) { FAIL() << "Expected failure."; } -class ParamTest : public testing::TestWithParam { -}; +class ParamTest : public testing::TestWithParam {}; -TEST_P(ParamTest, TestX) { -} +TEST_P(ParamTest, TestX) {} -TEST_P(ParamTest, TestY) { -} +TEST_P(ParamTest, TestY) {} INSTANTIATE_TEST_SUITE_P(SeqP, ParamTest, testing::Values(1, 2)); INSTANTIATE_TEST_SUITE_P(SeqQ, ParamTest, testing::Values(5, 6)); diff --git a/ext/googletest/googletest/test/googletest-global-environment-unittest.py b/ext/googletest/googletest/test/googletest-global-environment-unittest.py index f3475599be..265793442f 100644 --- a/ext/googletest/googletest/test/googletest-global-environment-unittest.py +++ b/ext/googletest/googletest/test/googletest-global-environment-unittest.py @@ -36,7 +36,7 @@ googletest-global-environment-unittest_ (a program written with Google Test). """ import re -import gtest_test_utils +from googletest.test import gtest_test_utils def RunAndReturnOutput(args=None): @@ -71,10 +71,13 @@ class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase): def testEnvironmentSetUpAndTornDownForEachRepeat(self): """Tests the behavior of test environments and gtest_repeat.""" - txt = RunAndReturnOutput(['--gtest_repeat=2']) + # When --gtest_recreate_environments_when_repeating is true, the global test + # environment should be set up and torn down for each iteration. 
+ txt = RunAndReturnOutput([ + '--gtest_repeat=2', + '--gtest_recreate_environments_when_repeating=true', + ]) - # By default, with gtest_repeat=2, the global test environment should be set - # up and torn down for each iteration. expected_pattern = ('(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' @@ -97,13 +100,12 @@ class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase): def testEnvironmentSetUpAndTornDownOnce(self): """Tests environment and --gtest_recreate_environments_when_repeating.""" + # By default the environment should only be set up and torn down once, at + # the start and end of the test respectively. txt = RunAndReturnOutput([ - '--gtest_repeat=2', '--gtest_recreate_environments_when_repeating=false' + '--gtest_repeat=2', ]) - # When --gtest_recreate_environments_when_repeating is false, the test - # environment should only be set up and torn down once, at the start and - # end of the test respectively. expected_pattern = ('(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' diff --git a/ext/googletest/googletest/test/googletest-json-outfiles-test.py b/ext/googletest/googletest/test/googletest-json-outfiles-test.py index 8ef47b8f97..179283b853 100644 --- a/ext/googletest/googletest/test/googletest-json-outfiles-test.py +++ b/ext/googletest/googletest/test/googletest-json-outfiles-test.py @@ -32,8 +32,8 @@ import json import os -import gtest_json_test_utils -import gtest_test_utils +from googletest.test import gtest_json_test_utils +from googletest.test import gtest_test_utils GTEST_OUTPUT_SUBDIR = 'json_outfiles' GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_' @@ -71,6 +71,8 @@ EXPECTED_1 = { u'*', u'testsuite': [{ u'name': u'TestSomeProperties', + u'file': u'gtest_xml_outfile1_test_.cc', + u'line': 41, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -115,6 +117,8 @@ EXPECTED_2 = { u'*', u'testsuite': [{ u'name': u'TestSomeProperties', + u'file': u'gtest_xml_outfile2_test_.cc', + u'line': 41, u'status': u'RUN', 
u'result': u'COMPLETED', u'timestamp': u'*', diff --git a/ext/googletest/googletest/test/googletest-json-output-unittest.py b/ext/googletest/googletest/test/googletest-json-output-unittest.py index 41c8565144..e0fbe46509 100644 --- a/ext/googletest/googletest/test/googletest-json-output-unittest.py +++ b/ext/googletest/googletest/test/googletest-json-output-unittest.py @@ -37,8 +37,8 @@ import os import re import sys -import gtest_json_test_utils -import gtest_test_utils +from googletest.test import gtest_json_test_utils +from googletest.test import gtest_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' @@ -90,6 +90,8 @@ EXPECTED_NON_EMPTY = { u'*', u'testsuite': [{ u'name': u'Succeeds', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 51, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -114,6 +116,10 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'Fails', + u'file': + u'gtest_xml_output_unittest_.cc', + u'line': + 59, u'status': u'RUN', u'result': @@ -148,6 +154,8 @@ EXPECTED_NON_EMPTY = { u'*', u'testsuite': [{ u'name': u'DISABLED_test_not_run', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 66, u'status': u'NOTRUN', u'result': u'SUPPRESSED', u'time': u'*', @@ -171,6 +179,8 @@ EXPECTED_NON_EMPTY = { u'*', u'testsuite': [{ u'name': u'Skipped', + u'file': 'gtest_xml_output_unittest_.cc', + u'line': 73, u'status': u'RUN', u'result': u'SKIPPED', u'time': u'*', @@ -178,6 +188,8 @@ EXPECTED_NON_EMPTY = { u'classname': u'SkippedTest' }, { u'name': u'SkippedWithMessage', + u'file': 'gtest_xml_output_unittest_.cc', + u'line': 77, u'status': u'RUN', u'result': u'SKIPPED', u'time': u'*', @@ -186,6 +198,10 @@ EXPECTED_NON_EMPTY = { }, { u'name': u'SkippedAfterFailure', + u'file': + 'gtest_xml_output_unittest_.cc', + u'line': + 81, u'status': u'RUN', u'result': @@ -220,6 +236,8 @@ EXPECTED_NON_EMPTY = { u'*', u'testsuite': [{ u'name': u'Succeeds', + u'file': 'gtest_xml_output_unittest_.cc', + 
u'line': 86, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -228,6 +246,10 @@ EXPECTED_NON_EMPTY = { }, { u'name': u'Fails', + u'file': + u'gtest_xml_output_unittest_.cc', + u'line': + 91, u'status': u'RUN', u'result': @@ -251,6 +273,8 @@ EXPECTED_NON_EMPTY = { }] }, { u'name': u'DISABLED_test', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 96, u'status': u'NOTRUN', u'result': u'SUPPRESSED', u'time': u'*', @@ -275,6 +299,10 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'OutputsCData', + u'file': + u'gtest_xml_output_unittest_.cc', + u'line': + 100, u'status': u'RUN', u'result': @@ -311,6 +339,10 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'InvalidCharactersInMessage', + u'file': + u'gtest_xml_output_unittest_.cc', + u'line': + 107, u'status': u'RUN', u'result': @@ -349,6 +381,8 @@ EXPECTED_NON_EMPTY = { u'aye', u'testsuite': [{ u'name': u'OneProperty', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 119, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -357,6 +391,8 @@ EXPECTED_NON_EMPTY = { u'key_1': u'1' }, { u'name': u'IntValuedProperty', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 123, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -365,6 +401,8 @@ EXPECTED_NON_EMPTY = { u'key_int': u'1' }, { u'name': u'ThreeProperties', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 127, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -375,6 +413,8 @@ EXPECTED_NON_EMPTY = { u'key_3': u'3' }, { u'name': u'TwoValuesForOneKeyUsesLastValue', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 133, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -399,6 +439,8 @@ EXPECTED_NON_EMPTY = { u'*', u'testsuite': [{ u'name': u'RecordProperty', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 138, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -407,6 +449,8 @@ EXPECTED_NON_EMPTY = { u'key': u'1' }, { u'name': 
u'ExternalUtilityThatCallsRecordIntValuedProperty', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 151, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -415,6 +459,8 @@ EXPECTED_NON_EMPTY = { u'key_for_utility_int': u'1' }, { u'name': u'ExternalUtilityThatCallsRecordStringValuedProperty', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 155, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -440,6 +486,8 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'int', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 171, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -464,6 +512,8 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'long', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 171, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -488,6 +538,8 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'int', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 178, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -512,6 +564,8 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'long', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 178, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -536,6 +590,8 @@ EXPECTED_NON_EMPTY = { u'testsuite': [{ u'name': u'HasValueParamAttribute/0', u'value_param': u'33', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 162, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -544,6 +600,8 @@ EXPECTED_NON_EMPTY = { }, { u'name': u'HasValueParamAttribute/1', u'value_param': u'42', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 162, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -552,6 +610,8 @@ EXPECTED_NON_EMPTY = { }, { u'name': u'AnotherTestThatHasValueParamAttribute/0', u'value_param': u'33', + u'file': 
u'gtest_xml_output_unittest_.cc', + u'line': 163, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -560,6 +620,8 @@ EXPECTED_NON_EMPTY = { }, { u'name': u'AnotherTestThatHasValueParamAttribute/1', u'value_param': u'42', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 163, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', @@ -603,6 +665,8 @@ EXPECTED_FILTERED = { u'*', u'testsuite': [{ u'name': u'Succeeds', + u'file': u'gtest_xml_output_unittest_.cc', + u'line': 51, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', diff --git a/ext/googletest/googletest/test/googletest-list-tests-unittest.py b/ext/googletest/googletest/test/googletest-list-tests-unittest.py index 81423a339e..9d56883d74 100755 --- a/ext/googletest/googletest/test/googletest-list-tests-unittest.py +++ b/ext/googletest/googletest/test/googletest-list-tests-unittest.py @@ -38,7 +38,7 @@ Google Test) the command line flags. """ import re -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/ext/googletest/googletest/test/googletest-list-tests-unittest_.cc b/ext/googletest/googletest/test/googletest-list-tests-unittest_.cc index 493c6f0046..5577e8961c 100644 --- a/ext/googletest/googletest/test/googletest-list-tests-unittest_.cc +++ b/ext/googletest/googletest/test/googletest-list-tests-unittest_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Unit test for Google Test's --gtest_list_tests flag. // // A user can ask Google Test to list all tests that will run @@ -40,38 +39,27 @@ #include "gtest/gtest.h" // Several different test cases and tests that will be listed. 
-TEST(Foo, Bar1) { -} +TEST(Foo, Bar1) {} -TEST(Foo, Bar2) { -} +TEST(Foo, Bar2) {} -TEST(Foo, DISABLED_Bar3) { -} +TEST(Foo, DISABLED_Bar3) {} -TEST(Abc, Xyz) { -} +TEST(Abc, Xyz) {} -TEST(Abc, Def) { -} +TEST(Abc, Def) {} -TEST(FooBar, Baz) { -} +TEST(FooBar, Baz) {} -class FooTest : public testing::Test { -}; +class FooTest : public testing::Test {}; -TEST_F(FooTest, Test1) { -} +TEST_F(FooTest, Test1) {} -TEST_F(FooTest, DISABLED_Test2) { -} +TEST_F(FooTest, DISABLED_Test2) {} -TEST_F(FooTest, Test3) { -} +TEST_F(FooTest, Test3) {} -TEST(FooDeathTest, Test1) { -} +TEST(FooDeathTest, Test1) {} // A group of value-parameterized tests. @@ -86,70 +74,66 @@ class MyType { }; // Teaches Google Test how to print a MyType. -void PrintTo(const MyType& x, std::ostream* os) { - *os << x.value(); -} +void PrintTo(const MyType& x, std::ostream* os) { *os << x.value(); } -class ValueParamTest : public testing::TestWithParam { -}; +class ValueParamTest : public testing::TestWithParam {}; -TEST_P(ValueParamTest, TestA) { -} +TEST_P(ValueParamTest, TestA) {} -TEST_P(ValueParamTest, TestB) { -} +TEST_P(ValueParamTest, TestB) {} INSTANTIATE_TEST_SUITE_P( MyInstantiation, ValueParamTest, - testing::Values(MyType("one line"), - MyType("two\nlines"), - MyType("a very\nloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong line"))); // NOLINT + testing::Values( + MyType("one line"), MyType("two\nlines"), + MyType("a " + "very\nloooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooong line"))); // NOLINT // A group of typed tests. 
// A deliberately long type name for testing the line-truncating // behavior when printing a type parameter. -class VeryLoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooogName { // NOLINT +class + VeryLoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooogName { // NOLINT }; template -class TypedTest : public testing::Test { -}; +class TypedTest : public testing::Test {}; template -class MyArray { -}; +class MyArray {}; -typedef testing::Types > MyTypes; +typedef testing::Types< + VeryLoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooogName, // NOLINT + int*, MyArray > + MyTypes; TYPED_TEST_SUITE(TypedTest, MyTypes); -TYPED_TEST(TypedTest, TestA) { -} +TYPED_TEST(TypedTest, TestA) {} -TYPED_TEST(TypedTest, TestB) { -} +TYPED_TEST(TypedTest, TestB) {} // A group of type-parameterized tests. 
template -class TypeParamTest : public testing::Test { -}; +class TypeParamTest : public testing::Test {}; TYPED_TEST_SUITE_P(TypeParamTest); -TYPED_TEST_P(TypeParamTest, TestA) { -} +TYPED_TEST_P(TypeParamTest, TestA) {} -TYPED_TEST_P(TypeParamTest, TestB) { -} +TYPED_TEST_P(TypeParamTest, TestB) {} REGISTER_TYPED_TEST_SUITE_P(TypeParamTest, TestA, TestB); INSTANTIATE_TYPED_TEST_SUITE_P(My, TypeParamTest, MyTypes); -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/ext/googletest/googletest/test/googletest-listener-test.cc b/ext/googletest/googletest/test/googletest-listener-test.cc index 9d6c9caba3..89d01b37ff 100644 --- a/ext/googletest/googletest/test/googletest-listener-test.cc +++ b/ext/googletest/googletest/test/googletest-listener-test.cc @@ -41,10 +41,10 @@ using ::testing::AddGlobalTestEnvironment; using ::testing::Environment; using ::testing::InitGoogleTest; using ::testing::Test; -using ::testing::TestSuite; using ::testing::TestEventListener; using ::testing::TestInfo; using ::testing::TestPartResult; +using ::testing::TestSuite; using ::testing::UnitTest; // Used by tests to register their events. 
@@ -65,8 +65,8 @@ class EventRecordingListener : public TestEventListener { void OnTestIterationStart(const UnitTest& /*unit_test*/, int iteration) override { Message message; - message << GetFullMethodName("OnTestIterationStart") - << "(" << iteration << ")"; + message << GetFullMethodName("OnTestIterationStart") << "(" << iteration + << ")"; g_events->push_back(message.GetString()); } @@ -112,8 +112,8 @@ class EventRecordingListener : public TestEventListener { void OnTestIterationEnd(const UnitTest& /*unit_test*/, int iteration) override { Message message; - message << GetFullMethodName("OnTestIterationEnd") - << "(" << iteration << ")"; + message << GetFullMethodName("OnTestIterationEnd") << "(" << iteration + << ")"; g_events->push_back(message.GetString()); } @@ -122,9 +122,7 @@ class EventRecordingListener : public TestEventListener { } private: - std::string GetFullMethodName(const char* name) { - return name_ + "." + name; - } + std::string GetFullMethodName(const char* name) { return name_ + "." + name; } std::string name_; }; @@ -252,22 +250,21 @@ void VerifyResults(const std::vector& data, EXPECT_EQ(expected_data_size, actual_size); // Compares the common prefix. - const size_t shorter_size = expected_data_size <= actual_size ? - expected_data_size : actual_size; + const size_t shorter_size = + expected_data_size <= actual_size ? expected_data_size : actual_size; size_t i = 0; for (; i < shorter_size; ++i) { - ASSERT_STREQ(expected_data[i], data[i].c_str()) - << "at position " << i; + ASSERT_STREQ(expected_data[i], data[i].c_str()) << "at position " << i; } // Prints extra elements in the actual data. 
for (; i < actual_size; ++i) { - printf(" Actual event #%lu: %s\n", - static_cast(i), data[i].c_str()); + printf(" Actual event #%lu: %s\n", static_cast(i), + data[i].c_str()); } } -int main(int argc, char **argv) { +int main(int argc, char** argv) { std::vector events; g_events = &events; InitGoogleTest(&argc, argv); @@ -285,6 +282,7 @@ int main(int argc, char **argv) { << "AddGlobalTestEnvironment should not generate any events itself."; GTEST_FLAG_SET(repeat, 2); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); int ret_val = RUN_ALL_TESTS(); #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ @@ -505,14 +503,12 @@ int main(int argc, char **argv) { "1st.OnTestProgramEnd"}; #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ - VerifyResults(events, - expected_events, - sizeof(expected_events)/sizeof(expected_events[0])); + VerifyResults(events, expected_events, + sizeof(expected_events) / sizeof(expected_events[0])); // We need to check manually for ad hoc test failures that happen after // RUN_ALL_TESTS finishes. - if (UnitTest::GetInstance()->Failed()) - ret_val = 1; + if (UnitTest::GetInstance()->Failed()) ret_val = 1; return ret_val; } diff --git a/ext/googletest/googletest/test/googletest-message-test.cc b/ext/googletest/googletest/test/googletest-message-test.cc index 962d519114..252a861977 100644 --- a/ext/googletest/googletest/test/googletest-message-test.cc +++ b/ext/googletest/googletest/test/googletest-message-test.cc @@ -31,7 +31,6 @@ // Tests for the Message class. #include "gtest/gtest-message.h" - #include "gtest/gtest.h" namespace { @@ -69,8 +68,9 @@ TEST(MessageTest, StreamsFloat) { // Tests streaming a double. TEST(MessageTest, StreamsDouble) { - const std::string s = (Message() << 1260570880.4555497 << " " - << 1260572265.1954534).GetString(); + const std::string s = + (Message() << 1260570880.4555497 << " " << 1260572265.1954534) + .GetString(); // Both numbers should be printed with enough precision. 
EXPECT_PRED_FORMAT2(testing::IsSubstring, "1260570880.45", s.c_str()); EXPECT_PRED_FORMAT2(testing::IsSubstring, " 1260572265.19", s.c_str()); @@ -108,8 +108,7 @@ TEST(MessageTest, StreamsString) { // Tests that we can output strings containing embedded NULs. TEST(MessageTest, StreamsStringWithEmbeddedNUL) { - const char char_array_with_nul[] = - "Here's a NUL\0 and some more string"; + const char char_array_with_nul[] = "Here's a NUL\0 and some more string"; const ::std::string string_with_nul(char_array_with_nul, sizeof(char_array_with_nul) - 1); EXPECT_EQ("Here's a NUL\\0 and some more string", @@ -129,10 +128,11 @@ TEST(MessageTest, StreamsInt) { // Tests that basic IO manipulators (endl, ends, and flush) can be // streamed to Message. TEST(MessageTest, StreamsBasicIoManip) { - EXPECT_EQ("Line 1.\nA NUL char \\0 in line 2.", - (Message() << "Line 1." << std::endl - << "A NUL char " << std::ends << std::flush - << " in line 2.").GetString()); + EXPECT_EQ( + "Line 1.\nA NUL char \\0 in line 2.", + (Message() << "Line 1." 
<< std::endl + << "A NUL char " << std::ends << std::flush << " in line 2.") + .GetString()); } // Tests Message::GetString() diff --git a/ext/googletest/googletest/test/googletest-options-test.cc b/ext/googletest/googletest/test/googletest-options-test.cc index cd386ff23d..1265c22511 100644 --- a/ext/googletest/googletest/test/googletest-options-test.cc +++ b/ext/googletest/googletest/test/googletest-options-test.cc @@ -39,9 +39,9 @@ #include "gtest/gtest.h" #if GTEST_OS_WINDOWS_MOBILE -# include +#include #elif GTEST_OS_WINDOWS -# include +#include #elif GTEST_OS_OS2 // For strcasecmp on OS/2 #include @@ -85,9 +85,9 @@ TEST(XmlOutputTest, GetOutputFileSingleFile) { TEST(XmlOutputTest, GetOutputFileFromDirectoryPath) { GTEST_FLAG_SET(output, "xml:path" GTEST_PATH_SEP_); const std::string expected_output_file = - GetAbsolutePathOf( - FilePath(std::string("path") + GTEST_PATH_SEP_ + - GetCurrentExecutableName().string() + ".xml")).string(); + GetAbsolutePathOf(FilePath(std::string("path") + GTEST_PATH_SEP_ + + GetCurrentExecutableName().string() + ".xml")) + .string(); const std::string& output_file = UnitTestOptions::GetAbsolutePathToOutputFile(); #if GTEST_OS_WINDOWS @@ -115,13 +115,10 @@ TEST(OutputFileHelpersTest, GetCurrentExecutableName) { const bool success = exe_str == "app"; #else const bool success = - exe_str == "googletest-options-test" || - exe_str == "gtest_all_test" || - exe_str == "lt-gtest_all_test" || - exe_str == "gtest_dll_test"; + exe_str == "googletest-options-test" || exe_str == "gtest_all_test" || + exe_str == "lt-gtest_all_test" || exe_str == "gtest_dll_test"; #endif // GTEST_OS_WINDOWS - if (!success) - FAIL() << "GetCurrentExecutableName() returns " << exe_str; + if (!success) FAIL() << "GetCurrentExecutableName() returns " << exe_str; } #if !GTEST_OS_FUCHSIA @@ -145,23 +142,26 @@ class XmlOutputChangeDirTest : public Test { TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithDefault) { GTEST_FLAG_SET(output, ""); - 
EXPECT_EQ(FilePath::ConcatPaths(original_working_dir_, - FilePath("test_detail.xml")).string(), - UnitTestOptions::GetAbsolutePathToOutputFile()); + EXPECT_EQ( + FilePath::ConcatPaths(original_working_dir_, FilePath("test_detail.xml")) + .string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); } TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithDefaultXML) { GTEST_FLAG_SET(output, "xml"); - EXPECT_EQ(FilePath::ConcatPaths(original_working_dir_, - FilePath("test_detail.xml")).string(), - UnitTestOptions::GetAbsolutePathToOutputFile()); + EXPECT_EQ( + FilePath::ConcatPaths(original_working_dir_, FilePath("test_detail.xml")) + .string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); } TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativeFile) { GTEST_FLAG_SET(output, "xml:filename.abc"); - EXPECT_EQ(FilePath::ConcatPaths(original_working_dir_, - FilePath("filename.abc")).string(), - UnitTestOptions::GetAbsolutePathToOutputFile()); + EXPECT_EQ( + FilePath::ConcatPaths(original_working_dir_, FilePath("filename.abc")) + .string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); } TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativePath) { @@ -170,7 +170,8 @@ TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativePath) { FilePath::ConcatPaths( original_working_dir_, FilePath(std::string("path") + GTEST_PATH_SEP_ + - GetCurrentExecutableName().string() + ".xml")).string(); + GetCurrentExecutableName().string() + ".xml")) + .string(); const std::string& output_file = UnitTestOptions::GetAbsolutePathToOutputFile(); #if GTEST_OS_WINDOWS diff --git a/ext/googletest/googletest/test/googletest-output-test-golden-lin.txt b/ext/googletest/googletest/test/googletest-output-test-golden-lin.txt index 3fab3b97d8..1f24fb7915 100644 --- a/ext/googletest/googletest/test/googletest-output-test-golden-lin.txt +++ b/ext/googletest/googletest/test/googletest-output-test-golden-lin.txt @@ -12,7 +12,7 @@ Expected equality 
of these values: 3 Stack trace: (omitted) -[==========] Running 88 tests from 41 test suites. +[==========] Running 89 tests from 42 test suites. [----------] Global test environment set-up. FooEnvironment::SetUp() called. BarEnvironment::SetUp() called. @@ -956,6 +956,17 @@ Stack trace: (omitted) ~DynamicFixture() [ FAILED ] BadDynamicFixture2.Derived DynamicFixture::TearDownTestSuite +[----------] 1 test from TestSuiteThatFailsToSetUp +googletest-output-test_.cc:#: Failure +Value of: false + Actual: false +Expected: true +Stack trace: (omitted) + +[ RUN ] TestSuiteThatFailsToSetUp.ShouldNotRun +googletest-output-test_.cc:#: Skipped + +[ SKIPPED ] TestSuiteThatFailsToSetUp.ShouldNotRun [----------] 1 test from PrintingFailingParams/FailingParamTest [ RUN ] PrintingFailingParams/FailingParamTest.Fails/0 googletest-output-test_.cc:#: Failure @@ -1032,8 +1043,10 @@ Failed Expected fatal failure. Stack trace: (omitted) -[==========] 88 tests from 41 test suites ran. +[==========] 89 tests from 42 test suites ran. [ PASSED ] 31 tests. 
+[ SKIPPED ] 1 test, listed below: +[ SKIPPED ] TestSuiteThatFailsToSetUp.ShouldNotRun [ FAILED ] 57 tests, listed below: [ FAILED ] NonfatalFailureTest.EscapesStringOperands [ FAILED ] NonfatalFailureTest.DiffForLongStrings @@ -1094,6 +1107,9 @@ Stack trace: (omitted) [ FAILED ] GoogleTestVerification.UninstantiatedTypeParameterizedTestSuite 57 FAILED TESTS +[ FAILED ] TestSuiteThatFailsToSetUp: SetUpTestSuite or TearDownTestSuite + + 1 FAILED TEST SUITE  YOU HAVE 1 DISABLED TEST Note: Google Test filter = FatalFailureTest.*:LoggingTest.* diff --git a/ext/googletest/googletest/test/googletest-output-test.py b/ext/googletest/googletest/test/googletest-output-test.py index 09028f66f9..ff44483331 100755 --- a/ext/googletest/googletest/test/googletest-output-test.py +++ b/ext/googletest/googletest/test/googletest-output-test.py @@ -42,7 +42,7 @@ import difflib import os import re import sys -import gtest_test_utils +from googletest.test import gtest_test_utils # The flag for generating the golden file diff --git a/ext/googletest/googletest/test/googletest-output-test_.cc b/ext/googletest/googletest/test/googletest-output-test_.cc index 9e5465c975..c2f96d980d 100644 --- a/ext/googletest/googletest/test/googletest-output-test_.cc +++ b/ext/googletest/googletest/test/googletest-output-test_.cc @@ -33,12 +33,12 @@ // desired messages. Therefore, most tests in this file are MEANT TO // FAIL. +#include + #include "gtest/gtest-spi.h" #include "gtest/gtest.h" #include "src/gtest-internal-inl.h" -#include - #if _MSC_VER GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127 /* conditional expression is constant */) #endif // _MSC_VER @@ -56,9 +56,7 @@ namespace posix = ::testing::internal::posix; // Tests catching fatal failures. // A subroutine used by the following test. -void TestEq1(int x) { - ASSERT_EQ(1, x); -} +void TestEq1(int x) { ASSERT_EQ(1, x); } // This function calls a test subroutine, catches the fatal failure it // generates, and then returns early. 
@@ -76,24 +74,19 @@ void TryTestSubroutine() { FAIL() << "This should never be reached."; } -TEST(PassingTest, PassingTest1) { -} +TEST(PassingTest, PassingTest1) {} -TEST(PassingTest, PassingTest2) { -} +TEST(PassingTest, PassingTest2) {} // Tests that parameters of failing parameterized tests are printed in the // failing test summary. class FailingParamTest : public testing::TestWithParam {}; -TEST_P(FailingParamTest, Fails) { - EXPECT_EQ(1, GetParam()); -} +TEST_P(FailingParamTest, Fails) { EXPECT_EQ(1, GetParam()); } // This generates a test which will fail. Google Test is expected to print // its parameter when it outputs the list of all failed tests. -INSTANTIATE_TEST_SUITE_P(PrintingFailingParams, - FailingParamTest, +INSTANTIATE_TEST_SUITE_P(PrintingFailingParams, FailingParamTest, testing::Values(2)); // Tests that an empty value for the test suite basename yields just @@ -146,18 +139,16 @@ TEST(FatalFailureTest, FatalFailureInNestedSubroutine) { // Tests HasFatalFailure() after a failed EXPECT check. TEST(FatalFailureTest, NonfatalFailureInSubroutine) { printf("(expecting a failure on false)\n"); - EXPECT_TRUE(false); // Generates a nonfatal failure + EXPECT_TRUE(false); // Generates a nonfatal failure ASSERT_FALSE(HasFatalFailure()); // This should succeed. } // Tests interleaving user logging and Google Test assertions. 
TEST(LoggingTest, InterleavingLoggingAndAssertions) { - static const int a[4] = { - 3, 9, 2, 6 - }; + static const int a[4] = {3, 9, 2, 6}; printf("(expecting 2 failures on (3) >= (a[i]))\n"); - for (int i = 0; i < static_cast(sizeof(a)/sizeof(*a)); i++) { + for (int i = 0; i < static_cast(sizeof(a) / sizeof(*a)); i++) { printf("i == %d\n", i); EXPECT_GE(3, a[i]); } @@ -297,16 +288,14 @@ struct CheckPoints { static void ThreadWithScopedTrace(CheckPoints* check_points) { { SCOPED_TRACE("Trace B"); - ADD_FAILURE() - << "Expected failure #1 (in thread B, only trace B alive)."; + ADD_FAILURE() << "Expected failure #1 (in thread B, only trace B alive)."; check_points->n1.Notify(); check_points->n2.WaitForNotification(); ADD_FAILURE() << "Expected failure #3 (in thread B, trace A & B both alive)."; } // Trace B dies here. - ADD_FAILURE() - << "Expected failure #4 (in thread B, only trace A alive)."; + ADD_FAILURE() << "Expected failure #4 (in thread B, only trace A alive)."; check_points->n3.Notify(); } @@ -325,11 +314,9 @@ TEST(SCOPED_TRACETest, WorksConcurrently) { check_points.n2.Notify(); check_points.n3.WaitForNotification(); - ADD_FAILURE() - << "Expected failure #5 (in thread A, only trace A alive)."; + ADD_FAILURE() << "Expected failure #5 (in thread A, only trace A alive)."; } // Trace A dies here. 
- ADD_FAILURE() - << "Expected failure #6 (in thread A, no trace alive)."; + ADD_FAILURE() << "Expected failure #6 (in thread A, no trace alive)."; thread.Join(); } #endif // GTEST_IS_THREADSAFE @@ -412,9 +399,7 @@ class FatalFailureInFixtureConstructorTest : public testing::Test { } private: - void Init() { - FAIL() << "Expected failure #1, in the test fixture c'tor."; - } + void Init() { FAIL() << "Expected failure #1, in the test fixture c'tor."; } }; TEST_F(FatalFailureInFixtureConstructorTest, FailureInConstructor) { @@ -436,9 +421,7 @@ class NonFatalFailureInSetUpTest : public testing::Test { void TearDown() override { FAIL() << "Expected failure #3, in TearDown()."; } private: - void Deinit() { - FAIL() << "Expected failure #4, in the test fixture d'tor."; - } + void Deinit() { FAIL() << "Expected failure #4, in the test fixture d'tor."; } }; TEST_F(NonFatalFailureInSetUpTest, FailureInSetUp) { @@ -458,9 +441,7 @@ class FatalFailureInSetUpTest : public testing::Test { void TearDown() override { FAIL() << "Expected failure #2, in TearDown()."; } private: - void Deinit() { - FAIL() << "Expected failure #3, in the test fixture d'tor."; - } + void Deinit() { FAIL() << "Expected failure #3, in the test fixture d'tor."; } }; TEST_F(FatalFailureInSetUpTest, FailureInSetUp) { @@ -488,14 +469,12 @@ TEST(GtestFailAtTest, MessageContainsSpecifiedFileAndLineNumber) { namespace foo { -class MixedUpTestSuiteTest : public testing::Test { -}; +class MixedUpTestSuiteTest : public testing::Test {}; TEST_F(MixedUpTestSuiteTest, FirstTestFromNamespaceFoo) {} TEST_F(MixedUpTestSuiteTest, SecondTestFromNamespaceFoo) {} -class MixedUpTestSuiteWithSameTestNameTest : public testing::Test { -}; +class MixedUpTestSuiteWithSameTestNameTest : public testing::Test {}; TEST_F(MixedUpTestSuiteWithSameTestNameTest, TheSecondTestWithThisNameShouldFail) {} @@ -504,16 +483,14 @@ TEST_F(MixedUpTestSuiteWithSameTestNameTest, namespace bar { -class MixedUpTestSuiteTest : public testing::Test { 
-}; +class MixedUpTestSuiteTest : public testing::Test {}; // The following two tests are expected to fail. We rely on the // golden file to check that Google Test generates the right error message. TEST_F(MixedUpTestSuiteTest, ThisShouldFail) {} TEST_F(MixedUpTestSuiteTest, ThisShouldFailToo) {} -class MixedUpTestSuiteWithSameTestNameTest : public testing::Test { -}; +class MixedUpTestSuiteWithSameTestNameTest : public testing::Test {}; // Expected to fail. We rely on the golden file to check that Google Test // generates the right error message. @@ -527,8 +504,7 @@ TEST_F(MixedUpTestSuiteWithSameTestNameTest, // test case checks the scenario where TEST_F appears before TEST, and // the second one checks where TEST appears before TEST_F. -class TEST_F_before_TEST_in_same_test_case : public testing::Test { -}; +class TEST_F_before_TEST_in_same_test_case : public testing::Test {}; TEST_F(TEST_F_before_TEST_in_same_test_case, DefinedUsingTEST_F) {} @@ -536,15 +512,13 @@ TEST_F(TEST_F_before_TEST_in_same_test_case, DefinedUsingTEST_F) {} // generates the right error message. TEST(TEST_F_before_TEST_in_same_test_case, DefinedUsingTESTAndShouldFail) {} -class TEST_before_TEST_F_in_same_test_case : public testing::Test { -}; +class TEST_before_TEST_F_in_same_test_case : public testing::Test {}; TEST(TEST_before_TEST_F_in_same_test_case, DefinedUsingTEST) {} // Expected to fail. We rely on the golden file to check that Google Test // generates the right error message. -TEST_F(TEST_before_TEST_F_in_same_test_case, DefinedUsingTEST_FAndShouldFail) { -} +TEST_F(TEST_before_TEST_F_in_same_test_case, DefinedUsingTEST_FAndShouldFail) {} // Used for testing EXPECT_NONFATAL_FAILURE() and EXPECT_FATAL_FAILURE(). int global_integer = 0; @@ -552,9 +526,9 @@ int global_integer = 0; // Tests that EXPECT_NONFATAL_FAILURE() can reference global variables. 
TEST(ExpectNonfatalFailureTest, CanReferenceGlobalVariables) { global_integer = 0; - EXPECT_NONFATAL_FAILURE({ - EXPECT_EQ(1, global_integer) << "Expected non-fatal failure."; - }, "Expected non-fatal failure."); + EXPECT_NONFATAL_FAILURE( + { EXPECT_EQ(1, global_integer) << "Expected non-fatal failure."; }, + "Expected non-fatal failure."); } // Tests that EXPECT_NONFATAL_FAILURE() can reference local variables @@ -563,53 +537,48 @@ TEST(ExpectNonfatalFailureTest, CanReferenceLocalVariables) { int m = 0; static int n; n = 1; - EXPECT_NONFATAL_FAILURE({ - EXPECT_EQ(m, n) << "Expected non-fatal failure."; - }, "Expected non-fatal failure."); + EXPECT_NONFATAL_FAILURE({ EXPECT_EQ(m, n) << "Expected non-fatal failure."; }, + "Expected non-fatal failure."); } // Tests that EXPECT_NONFATAL_FAILURE() succeeds when there is exactly // one non-fatal failure and no fatal failure. TEST(ExpectNonfatalFailureTest, SucceedsWhenThereIsOneNonfatalFailure) { - EXPECT_NONFATAL_FAILURE({ - ADD_FAILURE() << "Expected non-fatal failure."; - }, "Expected non-fatal failure."); + EXPECT_NONFATAL_FAILURE({ ADD_FAILURE() << "Expected non-fatal failure."; }, + "Expected non-fatal failure."); } // Tests that EXPECT_NONFATAL_FAILURE() fails when there is no // non-fatal failure. TEST(ExpectNonfatalFailureTest, FailsWhenThereIsNoNonfatalFailure) { printf("(expecting a failure)\n"); - EXPECT_NONFATAL_FAILURE({ - }, ""); + EXPECT_NONFATAL_FAILURE({}, ""); } // Tests that EXPECT_NONFATAL_FAILURE() fails when there are two // non-fatal failures. 
TEST(ExpectNonfatalFailureTest, FailsWhenThereAreTwoNonfatalFailures) { printf("(expecting a failure)\n"); - EXPECT_NONFATAL_FAILURE({ - ADD_FAILURE() << "Expected non-fatal failure 1."; - ADD_FAILURE() << "Expected non-fatal failure 2."; - }, ""); + EXPECT_NONFATAL_FAILURE( + { + ADD_FAILURE() << "Expected non-fatal failure 1."; + ADD_FAILURE() << "Expected non-fatal failure 2."; + }, + ""); } // Tests that EXPECT_NONFATAL_FAILURE() fails when there is one fatal // failure. TEST(ExpectNonfatalFailureTest, FailsWhenThereIsOneFatalFailure) { printf("(expecting a failure)\n"); - EXPECT_NONFATAL_FAILURE({ - FAIL() << "Expected fatal failure."; - }, ""); + EXPECT_NONFATAL_FAILURE({ FAIL() << "Expected fatal failure."; }, ""); } // Tests that EXPECT_NONFATAL_FAILURE() fails when the statement being // tested returns. TEST(ExpectNonfatalFailureTest, FailsWhenStatementReturns) { printf("(expecting a failure)\n"); - EXPECT_NONFATAL_FAILURE({ - return; - }, ""); + EXPECT_NONFATAL_FAILURE({ return; }, ""); } #if GTEST_HAS_EXCEPTIONS @@ -619,10 +588,8 @@ TEST(ExpectNonfatalFailureTest, FailsWhenStatementReturns) { TEST(ExpectNonfatalFailureTest, FailsWhenStatementThrows) { printf("(expecting a failure)\n"); try { - EXPECT_NONFATAL_FAILURE({ - throw 0; - }, ""); - } catch(int) { // NOLINT + EXPECT_NONFATAL_FAILURE({ throw 0; }, ""); + } catch (int) { // NOLINT } } @@ -631,9 +598,9 @@ TEST(ExpectNonfatalFailureTest, FailsWhenStatementThrows) { // Tests that EXPECT_FATAL_FAILURE() can reference global variables. 
TEST(ExpectFatalFailureTest, CanReferenceGlobalVariables) { global_integer = 0; - EXPECT_FATAL_FAILURE({ - ASSERT_EQ(1, global_integer) << "Expected fatal failure."; - }, "Expected fatal failure."); + EXPECT_FATAL_FAILURE( + { ASSERT_EQ(1, global_integer) << "Expected fatal failure."; }, + "Expected fatal failure."); } // Tests that EXPECT_FATAL_FAILURE() can reference local static @@ -641,58 +608,51 @@ TEST(ExpectFatalFailureTest, CanReferenceGlobalVariables) { TEST(ExpectFatalFailureTest, CanReferenceLocalStaticVariables) { static int n; n = 1; - EXPECT_FATAL_FAILURE({ - ASSERT_EQ(0, n) << "Expected fatal failure."; - }, "Expected fatal failure."); + EXPECT_FATAL_FAILURE({ ASSERT_EQ(0, n) << "Expected fatal failure."; }, + "Expected fatal failure."); } // Tests that EXPECT_FATAL_FAILURE() succeeds when there is exactly // one fatal failure and no non-fatal failure. TEST(ExpectFatalFailureTest, SucceedsWhenThereIsOneFatalFailure) { - EXPECT_FATAL_FAILURE({ - FAIL() << "Expected fatal failure."; - }, "Expected fatal failure."); + EXPECT_FATAL_FAILURE({ FAIL() << "Expected fatal failure."; }, + "Expected fatal failure."); } // Tests that EXPECT_FATAL_FAILURE() fails when there is no fatal // failure. TEST(ExpectFatalFailureTest, FailsWhenThereIsNoFatalFailure) { printf("(expecting a failure)\n"); - EXPECT_FATAL_FAILURE({ - }, ""); + EXPECT_FATAL_FAILURE({}, ""); } // A helper for generating a fatal failure. -void FatalFailure() { - FAIL() << "Expected fatal failure."; -} +void FatalFailure() { FAIL() << "Expected fatal failure."; } // Tests that EXPECT_FATAL_FAILURE() fails when there are two // fatal failures. TEST(ExpectFatalFailureTest, FailsWhenThereAreTwoFatalFailures) { printf("(expecting a failure)\n"); - EXPECT_FATAL_FAILURE({ - FatalFailure(); - FatalFailure(); - }, ""); + EXPECT_FATAL_FAILURE( + { + FatalFailure(); + FatalFailure(); + }, + ""); } // Tests that EXPECT_FATAL_FAILURE() fails when there is one non-fatal // failure. 
TEST(ExpectFatalFailureTest, FailsWhenThereIsOneNonfatalFailure) { printf("(expecting a failure)\n"); - EXPECT_FATAL_FAILURE({ - ADD_FAILURE() << "Expected non-fatal failure."; - }, ""); + EXPECT_FATAL_FAILURE({ ADD_FAILURE() << "Expected non-fatal failure."; }, ""); } // Tests that EXPECT_FATAL_FAILURE() fails when the statement being // tested returns. TEST(ExpectFatalFailureTest, FailsWhenStatementReturns) { printf("(expecting a failure)\n"); - EXPECT_FATAL_FAILURE({ - return; - }, ""); + EXPECT_FATAL_FAILURE({ return; }, ""); } #if GTEST_HAS_EXCEPTIONS @@ -702,10 +662,8 @@ TEST(ExpectFatalFailureTest, FailsWhenStatementReturns) { TEST(ExpectFatalFailureTest, FailsWhenStatementThrows) { printf("(expecting a failure)\n"); try { - EXPECT_FATAL_FAILURE({ - throw 0; - }, ""); - } catch(int) { // NOLINT + EXPECT_FATAL_FAILURE({ throw 0; }, ""); + } catch (int) { // NOLINT } } @@ -717,21 +675,14 @@ std::string ParamNameFunc(const testing::TestParamInfo& info) { return info.param; } -class ParamTest : public testing::TestWithParam { -}; +class ParamTest : public testing::TestWithParam {}; -TEST_P(ParamTest, Success) { - EXPECT_EQ("a", GetParam()); -} +TEST_P(ParamTest, Success) { EXPECT_EQ("a", GetParam()); } -TEST_P(ParamTest, Failure) { - EXPECT_EQ("b", GetParam()) << "Expected failure"; -} +TEST_P(ParamTest, Failure) { EXPECT_EQ("b", GetParam()) << "Expected failure"; } -INSTANTIATE_TEST_SUITE_P(PrintingStrings, - ParamTest, - testing::Values(std::string("a")), - ParamNameFunc); +INSTANTIATE_TEST_SUITE_P(PrintingStrings, ParamTest, + testing::Values(std::string("a")), ParamNameFunc); // The case where a suite has INSTANTIATE_TEST_SUITE_P but not TEST_P. 
using NoTests = ParamTest; @@ -739,20 +690,17 @@ INSTANTIATE_TEST_SUITE_P(ThisIsOdd, NoTests, ::testing::Values("Hello")); // fails under kErrorOnUninstantiatedParameterizedTest=true class DetectNotInstantiatedTest : public testing::TestWithParam {}; -TEST_P(DetectNotInstantiatedTest, Used) { } +TEST_P(DetectNotInstantiatedTest, Used) {} // This would make the test failure from the above go away. // INSTANTIATE_TEST_SUITE_P(Fix, DetectNotInstantiatedTest, testing::Values(1)); template -class TypedTest : public testing::Test { -}; +class TypedTest : public testing::Test {}; TYPED_TEST_SUITE(TypedTest, testing::Types); -TYPED_TEST(TypedTest, Success) { - EXPECT_EQ(0, TypeParam()); -} +TYPED_TEST(TypedTest, Success) { EXPECT_EQ(0, TypeParam()); } TYPED_TEST(TypedTest, Failure) { EXPECT_EQ(1, TypeParam()) << "Expected failure"; @@ -781,14 +729,11 @@ TYPED_TEST(TypedTestWithNames, Success) {} TYPED_TEST(TypedTestWithNames, Failure) { FAIL(); } template -class TypedTestP : public testing::Test { -}; +class TypedTestP : public testing::Test {}; TYPED_TEST_SUITE_P(TypedTestP); -TYPED_TEST_P(TypedTestP, Success) { - EXPECT_EQ(0U, TypeParam()); -} +TYPED_TEST_P(TypedTestP, Success) { EXPECT_EQ(0U, TypeParam()); } TYPED_TEST_P(TypedTestP, Failure) { EXPECT_EQ(1U, TypeParam()) << "Expected failure"; @@ -813,7 +758,7 @@ class TypedTestPNames { }; INSTANTIATE_TYPED_TEST_SUITE_P(UnsignedCustomName, TypedTestP, UnsignedTypes, - TypedTestPNames); + TypedTestPNames); template class DetectNotInstantiatedTypesTest : public testing::Test {}; @@ -835,34 +780,28 @@ REGISTER_TYPED_TEST_SUITE_P(DetectNotInstantiatedTypesTest, Used); // We rely on the golden file to verify that tests whose test case // name ends with DeathTest are run first. -TEST(ADeathTest, ShouldRunFirst) { -} +TEST(ADeathTest, ShouldRunFirst) {} // We rely on the golden file to verify that typed tests whose test // case name ends with DeathTest are run first. 
template -class ATypedDeathTest : public testing::Test { -}; +class ATypedDeathTest : public testing::Test {}; typedef testing::Types NumericTypes; TYPED_TEST_SUITE(ATypedDeathTest, NumericTypes); -TYPED_TEST(ATypedDeathTest, ShouldRunFirst) { -} - +TYPED_TEST(ATypedDeathTest, ShouldRunFirst) {} // We rely on the golden file to verify that type-parameterized tests // whose test case name ends with DeathTest are run first. template -class ATypeParamDeathTest : public testing::Test { -}; +class ATypeParamDeathTest : public testing::Test {}; TYPED_TEST_SUITE_P(ATypeParamDeathTest); -TYPED_TEST_P(ATypeParamDeathTest, ShouldRunFirst) { -} +TYPED_TEST_P(ATypeParamDeathTest, ShouldRunFirst) {} REGISTER_TYPED_TEST_SUITE_P(ATypeParamDeathTest, ShouldRunFirst); @@ -874,10 +813,7 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, ATypeParamDeathTest, NumericTypes); // EXPECT_{,NON}FATAL_FAILURE{,_ON_ALL_THREADS}. class ExpectFailureTest : public testing::Test { public: // Must be public and not protected due to a bug in g++ 3.4.2. - enum FailureMode { - FATAL_FAILURE, - NONFATAL_FAILURE - }; + enum FailureMode { FATAL_FAILURE, NONFATAL_FAILURE }; static void AddFailure(FailureMode failure) { if (failure == FATAL_FAILURE) { FAIL() << "Expected fatal failure."; @@ -893,11 +829,13 @@ TEST_F(ExpectFailureTest, ExpectFatalFailure) { EXPECT_FATAL_FAILURE(SUCCEED(), "Expected fatal failure."); // Expected fatal failure, but got a non-fatal failure. printf("(expecting 1 failure)\n"); - EXPECT_FATAL_FAILURE(AddFailure(NONFATAL_FAILURE), "Expected non-fatal " + EXPECT_FATAL_FAILURE(AddFailure(NONFATAL_FAILURE), + "Expected non-fatal " "failure."); // Wrong message. 
printf("(expecting 1 failure)\n"); - EXPECT_FATAL_FAILURE(AddFailure(FATAL_FAILURE), "Some other fatal failure " + EXPECT_FATAL_FAILURE(AddFailure(FATAL_FAILURE), + "Some other fatal failure " "expected."); } @@ -910,7 +848,8 @@ TEST_F(ExpectFailureTest, ExpectNonFatalFailure) { EXPECT_NONFATAL_FAILURE(AddFailure(FATAL_FAILURE), "Expected fatal failure."); // Wrong message. printf("(expecting 1 failure)\n"); - EXPECT_NONFATAL_FAILURE(AddFailure(NONFATAL_FAILURE), "Some other non-fatal " + EXPECT_NONFATAL_FAILURE(AddFailure(NONFATAL_FAILURE), + "Some other non-fatal " "failure."); } @@ -975,7 +914,8 @@ TEST_F(ExpectFailureTest, ExpectFatalFailureOnAllThreads) { TEST_F(ExpectFailureTest, ExpectNonFatalFailureOnAllThreads) { // Expected non-fatal failure, but succeeds. printf("(expecting 1 failure)\n"); - EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(SUCCEED(), "Expected non-fatal " + EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(SUCCEED(), + "Expected non-fatal " "failure."); // Expected non-fatal failure, but got a fatal failure. printf("(expecting 1 failure)\n"); @@ -1060,12 +1000,18 @@ class BarEnvironment : public testing::Environment { } }; +class TestSuiteThatFailsToSetUp : public testing::Test { + public: + static void SetUpTestSuite() { EXPECT_TRUE(false); } +}; +TEST_F(TestSuiteThatFailsToSetUp, ShouldNotRun) { std::abort(); } + // The main function. // // The idea is to use Google Test to run all the tests we have defined (some // of them are intended to fail), and then compare the test results // with the "golden" file. -int main(int argc, char **argv) { +int main(int argc, char** argv) { GTEST_FLAG_SET(print_time, false); // We just run the tests, knowing some of them are intended to fail. @@ -1073,7 +1019,7 @@ int main(int argc, char **argv) { // this program with the golden file. // It's hard to test InitGoogleTest() directly, as it has many - // global side effects. The following line serves as a sanity test + // global side effects. 
The following line serves as a test // for it. testing::InitGoogleTest(&argc, argv); bool internal_skip_environment_and_ad_hoc_tests = @@ -1084,17 +1030,16 @@ int main(int argc, char **argv) { if (GTEST_FLAG_GET(internal_run_death_test) != "") { // Skip the usual output capturing if we're running as the child // process of an threadsafe-style death test. -# if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS posix::FReopen("nul:", "w", stdout); -# else +#else posix::FReopen("/dev/null", "w", stdout); -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS return RUN_ALL_TESTS(); } #endif // GTEST_HAS_DEATH_TEST - if (internal_skip_environment_and_ad_hoc_tests) - return RUN_ALL_TESTS(); + if (internal_skip_environment_and_ad_hoc_tests) return RUN_ALL_TESTS(); // Registers two global test environments. // The golden file verifies that they are set up in the order they @@ -1102,7 +1047,7 @@ int main(int argc, char **argv) { testing::AddGlobalTestEnvironment(new FooEnvironment); testing::AddGlobalTestEnvironment(new BarEnvironment); #if _MSC_VER -GTEST_DISABLE_MSC_WARNINGS_POP_() // 4127 -#endif // _MSC_VER + GTEST_DISABLE_MSC_WARNINGS_POP_() // 4127 +#endif // _MSC_VER return RunAllTests(); } diff --git a/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test.py b/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test.py index 2a08477a77..b8d609a700 100644 --- a/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test.py +++ b/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test.py @@ -30,7 +30,7 @@ """Verifies that Google Test warns the user when not initialized properly.""" -import gtest_test_utils +from googletest.test import gtest_test_utils binary_name = 'googletest-param-test-invalid-name1-test_' COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name) diff --git a/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test_.cc 
b/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test_.cc index 955d699900..004733a12c 100644 --- a/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test_.cc +++ b/ext/googletest/googletest/test/googletest-param-test-invalid-name1-test_.cc @@ -27,17 +27,14 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gtest/gtest.h" namespace { class DummyTest : public ::testing::TestWithParam {}; -TEST_P(DummyTest, Dummy) { -} +TEST_P(DummyTest, Dummy) {} -INSTANTIATE_TEST_SUITE_P(InvalidTestName, - DummyTest, +INSTANTIATE_TEST_SUITE_P(InvalidTestName, DummyTest, ::testing::Values("InvalidWithQuotes"), ::testing::PrintToStringParamName()); @@ -47,4 +44,3 @@ int main(int argc, char *argv[]) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } - diff --git a/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test.py b/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test.py index ab838f4632..d92fa065ae 100644 --- a/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test.py +++ b/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test.py @@ -30,7 +30,7 @@ """Verifies that Google Test warns the user when not initialized properly.""" -import gtest_test_utils +from googletest.test import gtest_test_utils binary_name = 'googletest-param-test-invalid-name2-test_' COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name) diff --git a/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test_.cc b/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test_.cc index 76371df54f..d0c44da544 100644 --- a/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test_.cc +++ b/ext/googletest/googletest/test/googletest-param-test-invalid-name2-test_.cc @@ -27,22 +27,19 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 
OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gtest/gtest.h" namespace { class DummyTest : public ::testing::TestWithParam {}; std::string StringParamTestSuffix( - const testing::TestParamInfo& info) { + const testing::TestParamInfo &info) { return std::string(info.param); } -TEST_P(DummyTest, Dummy) { -} +TEST_P(DummyTest, Dummy) {} -INSTANTIATE_TEST_SUITE_P(DuplicateTestNames, - DummyTest, +INSTANTIATE_TEST_SUITE_P(DuplicateTestNames, DummyTest, ::testing::Values("a", "b", "a", "c"), StringParamTestSuffix); } // namespace @@ -51,5 +48,3 @@ int main(int argc, char *argv[]) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } - - diff --git a/ext/googletest/googletest/test/googletest-param-test-test.cc b/ext/googletest/googletest/test/googletest-param-test-test.cc index 023aa46d69..848ef9752c 100644 --- a/ext/googletest/googletest/test/googletest-param-test-test.cc +++ b/ext/googletest/googletest/test/googletest-param-test-test.cc @@ -32,21 +32,21 @@ // generators objects produce correct parameter sequences and that // Google Test runtime instantiates correct tests from those sequences. +#include "test/googletest-param-test-test.h" + +#include +#include +#include +#include +#include +#include +#include + #include "gtest/gtest.h" +#include "src/gtest-internal-inl.h" // for UnitTestOptions -# include -# include -# include -# include -# include -# include -# include - -# include "src/gtest-internal-inl.h" // for UnitTestOptions -# include "test/googletest-param-test-test.h" - -using ::std::vector; using ::std::sort; +using ::std::vector; using ::testing::AddGlobalTestEnvironment; using ::testing::Bool; @@ -85,15 +85,14 @@ void VerifyGenerator(const ParamGenerator& generator, // We cannot use EXPECT_EQ() here as the values may be tuples, // which don't support <<. 
EXPECT_TRUE(expected_values[i] == *it) - << "where i is " << i - << ", expected_values[i] is " << PrintValue(expected_values[i]) - << ", *it is " << PrintValue(*it) + << "where i is " << i << ", expected_values[i] is " + << PrintValue(expected_values[i]) << ", *it is " << PrintValue(*it) << ", and 'it' is an iterator created with the copy constructor.\n"; ++it; } EXPECT_TRUE(it == generator.end()) - << "At the presumed end of sequence when accessing via an iterator " - << "created with the copy constructor.\n"; + << "At the presumed end of sequence when accessing via an iterator " + << "created with the copy constructor.\n"; // Test the iterator assignment. The following lines verify that // the sequence accessed via an iterator initialized via the @@ -105,15 +104,14 @@ void VerifyGenerator(const ParamGenerator& generator, << "At element " << i << " when accessing via an iterator " << "created with the assignment operator.\n"; EXPECT_TRUE(expected_values[i] == *it) - << "where i is " << i - << ", expected_values[i] is " << PrintValue(expected_values[i]) - << ", *it is " << PrintValue(*it) + << "where i is " << i << ", expected_values[i] is " + << PrintValue(expected_values[i]) << ", *it is " << PrintValue(*it) << ", and 'it' is an iterator created with the copy constructor.\n"; ++it; } EXPECT_TRUE(it == generator.end()) - << "At the presumed end of sequence when accessing via an iterator " - << "created with the assignment operator.\n"; + << "At the presumed end of sequence when accessing via an iterator " + << "created with the assignment operator.\n"; } template @@ -216,8 +214,7 @@ class DogAdder { DogAdder(const DogAdder& other) : value_(other.value_.c_str()) {} DogAdder operator=(const DogAdder& other) { - if (this != &other) - value_ = other.value_; + if (this != &other) value_ = other.value_; return *this; } DogAdder operator+(const DogAdder& other) const { @@ -225,9 +222,7 @@ class DogAdder { msg << value_.c_str() << other.value_.c_str(); return 
DogAdder(msg.GetString().c_str()); } - bool operator<(const DogAdder& other) const { - return value_ < other.value_; - } + bool operator<(const DogAdder& other) const { return value_ < other.value_; } const std::string& value() const { return value_; } private: @@ -372,19 +367,17 @@ TEST(ValuesTest, ValuesWorksForValuesOfCompatibleTypes) { } TEST(ValuesTest, ValuesWorksForMaxLengthList) { - const ParamGenerator gen = Values( - 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, - 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, - 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, - 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, - 410, 420, 430, 440, 450, 460, 470, 480, 490, 500); + const ParamGenerator gen = + Values(10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, + 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, 280, + 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, + 420, 430, 440, 450, 460, 470, 480, 490, 500); const int expected_values[] = { - 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, - 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, - 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, - 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, - 410, 420, 430, 440, 450, 460, 470, 480, 490, 500}; + 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, + 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, + 270, 280, 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, + 400, 410, 420, 430, 440, 450, 460, 470, 480, 490, 500}; VerifyGenerator(gen, expected_values); } @@ -530,7 +523,6 @@ TEST(CombineTest, NonDefaultConstructAssign) { EXPECT_TRUE(it == gen.end()); } - // Tests that an generator produces correct sequence after being // assigned from another generator. 
TEST(ParamGeneratorTest, AssignmentWorks) { @@ -573,7 +565,7 @@ class TestGenerationEnvironment : public ::testing::Environment { Message msg; msg << "TestsExpandedAndRun/" << i; if (UnitTestOptions::FilterMatchesTest( - "TestExpansionModule/MultipleTestGenerationTest", + "TestExpansionModule/MultipleTestGenerationTest", msg.GetString().c_str())) { perform_check = true; } @@ -595,15 +587,20 @@ class TestGenerationEnvironment : public ::testing::Environment { } private: - TestGenerationEnvironment() : fixture_constructor_count_(0), set_up_count_(0), - tear_down_count_(0), test_body_count_(0) {} + TestGenerationEnvironment() + : fixture_constructor_count_(0), + set_up_count_(0), + tear_down_count_(0), + test_body_count_(0) {} int fixture_constructor_count_; int set_up_count_; int tear_down_count_; int test_body_count_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(TestGenerationEnvironment); + TestGenerationEnvironment(const TestGenerationEnvironment&) = delete; + TestGenerationEnvironment& operator=(const TestGenerationEnvironment&) = + delete; }; const int test_generation_params[] = {36, 42, 72}; @@ -612,7 +609,7 @@ class TestGenerationTest : public TestWithParam { public: enum { PARAMETER_COUNT = - sizeof(test_generation_params)/sizeof(test_generation_params[0]) + sizeof(test_generation_params) / sizeof(test_generation_params[0]) }; typedef TestGenerationEnvironment Environment; @@ -636,9 +633,9 @@ class TestGenerationTest : public TestWithParam { for (int i = 0; i < PARAMETER_COUNT; ++i) { Message test_name; test_name << "TestsExpandedAndRun/" << i; - if ( !UnitTestOptions::FilterMatchesTest( - "TestExpansionModule/MultipleTestGenerationTest", - test_name.GetString())) { + if (!UnitTestOptions::FilterMatchesTest( + "TestExpansionModule/MultipleTestGenerationTest", + test_name.GetString())) { all_tests_in_test_case_selected = false; } } @@ -668,7 +665,8 @@ class TestGenerationTest : public TestWithParam { static vector collected_parameters_; private: - 
GTEST_DISALLOW_COPY_AND_ASSIGN_(TestGenerationTest); + TestGenerationTest(const TestGenerationTest&) = delete; + TestGenerationTest& operator=(const TestGenerationTest&) = delete; }; vector TestGenerationTest::collected_parameters_; @@ -729,8 +727,7 @@ TEST_P(ExternalInstantiationTest, IsMultipleOf33) { // Tests that a parameterized test case can be instantiated with multiple // generators. class MultipleInstantiationTest : public TestWithParam {}; -TEST_P(MultipleInstantiationTest, AllowsMultipleInstances) { -} +TEST_P(MultipleInstantiationTest, AllowsMultipleInstances) {} INSTANTIATE_TEST_SUITE_P(Sequence1, MultipleInstantiationTest, Values(1, 2)); INSTANTIATE_TEST_SUITE_P(Sequence2, MultipleInstantiationTest, Range(3, 5)); @@ -780,7 +777,7 @@ class NamingTest : public TestWithParam {}; TEST_P(NamingTest, TestsReportCorrectNamesAndParameters) { const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); EXPECT_STREQ("ZeroToFiveSequence/NamingTest", test_info->test_suite_name()); @@ -801,7 +798,7 @@ class MacroNamingTest : public TestWithParam {}; TEST_P(PREFIX_WITH_MACRO(NamingTest), PREFIX_WITH_FOO(SomeTestName)) { const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); EXPECT_STREQ("FortyTwo/MacroNamingTest", test_info->test_suite_name()); EXPECT_STREQ("FooSomeTestName/0", test_info->name()); @@ -815,7 +812,7 @@ class MacroNamingTestNonParametrized : public ::testing::Test {}; TEST_F(PREFIX_WITH_MACRO(NamingTestNonParametrized), PREFIX_WITH_FOO(SomeTestName)) { const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); EXPECT_STREQ("MacroNamingTestNonParametrized", test_info->test_suite_name()); EXPECT_STREQ("FooSomeTestName", test_info->name()); @@ 
-839,9 +836,8 @@ TEST(MacroNameing, LookupNames) { EXPECT_NE( // know_suite_names.find("FortyTwo/MacroNamingTest"), know_suite_names.end()); - EXPECT_NE( - know_suite_names.find("MacroNamingTestNonParametrized"), - know_suite_names.end()); + EXPECT_NE(know_suite_names.find("MacroNamingTestNonParametrized"), + know_suite_names.end()); // Check that the expected form of the test name actually exists. EXPECT_NE( // know_test_names.find("FortyTwo/MacroNamingTest.FooSomeTestName/0"), @@ -924,7 +920,7 @@ class CustomIntegerNamingTest : public TestWithParam {}; TEST_P(CustomIntegerNamingTest, TestsReportCorrectNames) { const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); Message test_name_stream; test_name_stream << "TestsReportCorrectNames/" << GetParam(); EXPECT_STREQ(test_name_stream.GetString().c_str(), test_info->name()); @@ -949,7 +945,7 @@ class CustomStructNamingTest : public TestWithParam {}; TEST_P(CustomStructNamingTest, TestsReportCorrectNames) { const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); Message test_name_stream; test_name_stream << "TestsReportCorrectNames/" << GetParam(); EXPECT_STREQ(test_name_stream.GetString().c_str(), test_info->name()); @@ -979,7 +975,7 @@ class StatefulNamingTest : public ::testing::TestWithParam { TEST_P(StatefulNamingTest, TestsReportCorrectNames) { const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); sum_ += GetParam(); Message test_name_stream; test_name_stream << "TestsReportCorrectNames/" << sum_; @@ -1007,7 +1003,7 @@ class CommentTest : public TestWithParam {}; TEST_P(CommentTest, TestsCorrectlyReportUnstreamableParams) { const ::testing::TestInfo* const test_info = - 
::testing::UnitTest::GetInstance()->current_test_info(); + ::testing::UnitTest::GetInstance()->current_test_info(); EXPECT_EQ(::testing::PrintToString(GetParam()), test_info->value_param()); } @@ -1021,7 +1017,8 @@ INSTANTIATE_TEST_SUITE_P(InstantiationWithComments, CommentTest, // perform simple tests on both. class NonParameterizedBaseTest : public ::testing::Test { public: - NonParameterizedBaseTest() : n_(17) { } + NonParameterizedBaseTest() : n_(17) {} + protected: int n_; }; @@ -1029,16 +1026,14 @@ class NonParameterizedBaseTest : public ::testing::Test { class ParameterizedDerivedTest : public NonParameterizedBaseTest, public ::testing::WithParamInterface { protected: - ParameterizedDerivedTest() : count_(0) { } + ParameterizedDerivedTest() : count_(0) {} int count_; static int global_count_; }; int ParameterizedDerivedTest::global_count_ = 0; -TEST_F(NonParameterizedBaseTest, FixtureIsInitialized) { - EXPECT_EQ(17, n_); -} +TEST_F(NonParameterizedBaseTest, FixtureIsInitialized) { EXPECT_EQ(17, n_); } TEST_P(ParameterizedDerivedTest, SeesSequence) { EXPECT_EQ(17, n_); @@ -1046,11 +1041,10 @@ TEST_P(ParameterizedDerivedTest, SeesSequence) { EXPECT_EQ(GetParam(), global_count_++); } -class ParameterizedDeathTest : public ::testing::TestWithParam { }; +class ParameterizedDeathTest : public ::testing::TestWithParam {}; TEST_F(ParameterizedDeathTest, GetParamDiesFromTestF) { - EXPECT_DEATH_IF_SUPPORTED(GetParam(), - ".* value-parameterized test .*"); + EXPECT_DEATH_IF_SUPPORTED(GetParam(), ".* value-parameterized test .*"); } INSTANTIATE_TEST_SUITE_P(RangeZeroToFive, ParameterizedDerivedTest, @@ -1084,11 +1078,11 @@ class NotInstantiatedTest : public testing::TestWithParam {}; // ... we mark is as allowed. 
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(NotInstantiatedTest); -TEST_P(NotInstantiatedTest, Used) { } +TEST_P(NotInstantiatedTest, Used) {} using OtherName = NotInstantiatedTest; GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(OtherName); -TEST_P(OtherName, Used) { } +TEST_P(OtherName, Used) {} // Used but not instantiated, this would fail. but... template @@ -1097,11 +1091,11 @@ TYPED_TEST_SUITE_P(NotInstantiatedTypeTest); // ... we mark is as allowed. GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(NotInstantiatedTypeTest); -TYPED_TEST_P(NotInstantiatedTypeTest, Used) { } +TYPED_TEST_P(NotInstantiatedTypeTest, Used) {} REGISTER_TYPED_TEST_SUITE_P(NotInstantiatedTypeTest, Used); } // namespace works_here -int main(int argc, char **argv) { +int main(int argc, char** argv) { // Used in TestGenerationTest test suite. AddGlobalTestEnvironment(TestGenerationTest::Environment::Instance()); // Used in GeneratorEvaluationTest test suite. Tests that the updated value diff --git a/ext/googletest/googletest/test/googletest-param-test-test.h b/ext/googletest/googletest/test/googletest-param-test-test.h index 891937538d..6d77e1049a 100644 --- a/ext/googletest/googletest/test/googletest-param-test-test.h +++ b/ext/googletest/googletest/test/googletest-param-test-test.h @@ -39,13 +39,11 @@ // Test fixture for testing definition and instantiation of a test // in separate translation units. -class ExternalInstantiationTest : public ::testing::TestWithParam { -}; +class ExternalInstantiationTest : public ::testing::TestWithParam {}; // Test fixture for testing instantiation of a test in multiple // translation units. 
class InstantiationInMultipleTranslationUnitsTest - : public ::testing::TestWithParam { -}; + : public ::testing::TestWithParam {}; #endif // GOOGLETEST_TEST_GOOGLETEST_PARAM_TEST_TEST_H_ diff --git a/ext/googletest/googletest/test/googletest-param-test2-test.cc b/ext/googletest/googletest/test/googletest-param-test2-test.cc index 2a29fb1d06..71727a674f 100644 --- a/ext/googletest/googletest/test/googletest-param-test2-test.cc +++ b/ext/googletest/googletest/test/googletest-param-test2-test.cc @@ -46,8 +46,7 @@ ParamGenerator extern_gen = Values(33); // and instantiated in another. The test is defined in // googletest-param-test-test.cc and ExternalInstantiationTest fixture class is // defined in gtest-param-test_test.h. -INSTANTIATE_TEST_SUITE_P(MultiplesOf33, - ExternalInstantiationTest, +INSTANTIATE_TEST_SUITE_P(MultiplesOf33, ExternalInstantiationTest, Values(33, 66)); // Tests that a parameterized test case can be instantiated @@ -55,7 +54,5 @@ INSTANTIATE_TEST_SUITE_P(MultiplesOf33, // in googletest-param-test-test.cc and // InstantiationInMultipleTranslationUnitsTest fixture is defined in // gtest-param-test_test.h -INSTANTIATE_TEST_SUITE_P(Sequence2, - InstantiationInMultipleTranslationUnitsTest, - Values(42*3, 42*4, 42*5)); - +INSTANTIATE_TEST_SUITE_P(Sequence2, InstantiationInMultipleTranslationUnitsTest, + Values(42 * 3, 42 * 4, 42 * 5)); diff --git a/ext/googletest/googletest/test/googletest-port-test.cc b/ext/googletest/googletest/test/googletest-port-test.cc index 16d30c46b7..c20dfa4c4c 100644 --- a/ext/googletest/googletest/test/googletest-port-test.cc +++ b/ext/googletest/googletest/test/googletest-port-test.cc @@ -33,16 +33,18 @@ #include "gtest/internal/gtest-port.h" #if GTEST_OS_MAC -# include +#include #endif // GTEST_OS_MAC +#include // NOLINT #include #include +#include // NOLINT #include // For std::pair and std::make_pair. 
#include -#include "gtest/gtest.h" #include "gtest/gtest-spi.h" +#include "gtest/gtest.h" #include "src/gtest-internal-inl.h" using std::make_pair; @@ -236,8 +238,8 @@ TEST(GtestCheckSyntaxTest, WorksWithSwitch) { } switch (0) - case 0: - GTEST_CHECK_(true) << "Check failed in switch case"; + case 0: + GTEST_CHECK_(true) << "Check failed in switch case"; } // Verifies behavior of FormatFileLocation. @@ -279,7 +281,7 @@ TEST(FormatCompilerIndependentFileLocationTest, FormatsUknownFileAndLine) { } #if GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_QNX || GTEST_OS_FUCHSIA || \ - GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \ + GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \ GTEST_OS_NETBSD || GTEST_OS_OPENBSD || GTEST_OS_GNU_HURD void* ThreadFunc(void* data) { internal::Mutex* mutex = static_cast(data); @@ -333,7 +335,7 @@ TEST(GetThreadCountTest, ReturnsCorrectValue) { break; } - SleepMilliseconds(100); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); } // Retry if an arbitrary other thread was created or destroyed. 
@@ -355,13 +357,13 @@ TEST(GtestCheckDeathTest, DiesWithCorrectOutputOnFailure) { const bool a_false_condition = false; const char regex[] = #ifdef _MSC_VER - "googletest-port-test\\.cc\\(\\d+\\):" + "googletest-port-test\\.cc\\(\\d+\\):" #elif GTEST_USES_POSIX_RE - "googletest-port-test\\.cc:[0-9]+" + "googletest-port-test\\.cc:[0-9]+" #else - "googletest-port-test\\.cc:\\d+" + "googletest-port-test\\.cc:\\d+" #endif // _MSC_VER - ".*a_false_condition.*Extra info.*"; + ".*a_false_condition.*Extra info.*"; EXPECT_DEATH_IF_SUPPORTED(GTEST_CHECK_(a_false_condition) << "Extra info", regex); @@ -370,10 +372,12 @@ TEST(GtestCheckDeathTest, DiesWithCorrectOutputOnFailure) { #if GTEST_HAS_DEATH_TEST TEST(GtestCheckDeathTest, LivesSilentlyOnSuccess) { - EXPECT_EXIT({ - GTEST_CHECK_(true) << "Extra info"; - ::std::cerr << "Success\n"; - exit(0); }, + EXPECT_EXIT( + { + GTEST_CHECK_(true) << "Extra info"; + ::std::cerr << "Success\n"; + exit(0); + }, ::testing::ExitedWithCode(0), "Success"); } @@ -383,17 +387,13 @@ TEST(GtestCheckDeathTest, LivesSilentlyOnSuccess) { // the platform. The test will produce compiler errors in case of failure. // For simplicity, we only cover the most important platforms here. TEST(RegexEngineSelectionTest, SelectsCorrectRegexEngine) { -#if !GTEST_USES_PCRE -# if GTEST_HAS_POSIX_RE - +#if GTEST_HAS_ABSL + EXPECT_TRUE(GTEST_USES_RE2); +#elif GTEST_HAS_POSIX_RE EXPECT_TRUE(GTEST_USES_POSIX_RE); - -# else - +#else EXPECT_TRUE(GTEST_USES_SIMPLE_RE); - -# endif -#endif // !GTEST_USES_PCRE +#endif } #if GTEST_USES_POSIX_RE @@ -421,9 +421,9 @@ TYPED_TEST(RETest, ImplicitConstructorWorks) { // Tests that RE's constructors reject invalid regular expressions. 
TYPED_TEST(RETest, RejectsInvalidRegex) { - EXPECT_NONFATAL_FAILURE({ - const RE invalid(TypeParam("?")); - }, "\"?\" is not a valid POSIX Extended regular expression."); + EXPECT_NONFATAL_FAILURE( + { const RE invalid(TypeParam("?")); }, + "\"?\" is not a valid POSIX Extended regular expression."); } // Tests RE::FullMatch(). @@ -817,8 +817,7 @@ TEST(MatchRegexAtHeadTest, WorksWhenRegexStartsWithRepetition) { EXPECT_TRUE(MatchRegexAtHead("a?b", "ab")); } -TEST(MatchRegexAtHeadTest, - WorksWhenRegexStartsWithRepetionOfEscapeSequence) { +TEST(MatchRegexAtHeadTest, WorksWhenRegexStartsWithRepetionOfEscapeSequence) { EXPECT_FALSE(MatchRegexAtHead("\\.+a", "abc")); EXPECT_FALSE(MatchRegexAtHead("\\s?b", " b")); @@ -874,17 +873,14 @@ TEST(RETest, ImplicitConstructorWorks) { // Tests that RE's constructors reject invalid regular expressions. TEST(RETest, RejectsInvalidRegex) { - EXPECT_NONFATAL_FAILURE({ - const RE normal(NULL); - }, "NULL is not a valid simple regular expression"); + EXPECT_NONFATAL_FAILURE({ const RE normal(NULL); }, + "NULL is not a valid simple regular expression"); - EXPECT_NONFATAL_FAILURE({ - const RE normal(".*(\\w+"); - }, "'(' is unsupported"); + EXPECT_NONFATAL_FAILURE({ const RE normal(".*(\\w+"); }, + "'(' is unsupported"); - EXPECT_NONFATAL_FAILURE({ - const RE invalid("^?"); - }, "'?' can only follow a repeatable token"); + EXPECT_NONFATAL_FAILURE({ const RE invalid("^?"); }, + "'?' can only follow a repeatable token"); } // Tests RE::FullMatch(). @@ -1026,12 +1022,13 @@ TEST(ThreadWithParamTest, ConstructorExecutesThreadFunc) { TEST(MutexDeathTest, AssertHeldShouldAssertWhenNotLocked) { // AssertHeld() is flaky only in the presence of multiple threads accessing // the lock. In this case, the test is robust. 
- EXPECT_DEATH_IF_SUPPORTED({ - Mutex m; - { MutexLock lock(&m); } - m.AssertHeld(); - }, - "thread .*hold"); + EXPECT_DEATH_IF_SUPPORTED( + { + Mutex m; + { MutexLock lock(&m); } + m.AssertHeld(); + }, + "thread .*hold"); } TEST(MutexTest, AssertHeldShouldNotAssertWhenLocked) { @@ -1042,15 +1039,15 @@ TEST(MutexTest, AssertHeldShouldNotAssertWhenLocked) { class AtomicCounterWithMutex { public: - explicit AtomicCounterWithMutex(Mutex* mutex) : - value_(0), mutex_(mutex), random_(42) {} + explicit AtomicCounterWithMutex(Mutex* mutex) + : value_(0), mutex_(mutex), random_(42) {} void Increment() { MutexLock lock(mutex_); int temp = value_; { // We need to put up a memory barrier to prevent reads and writes to - // value_ rearranged with the call to SleepMilliseconds when observed + // value_ rearranged with the call to sleep_for when observed // from other threads. #if GTEST_HAS_PTHREAD // On POSIX, locking a mutex puts up a memory barrier. We cannot use @@ -1061,7 +1058,8 @@ class AtomicCounterWithMutex { pthread_mutex_init(&memory_barrier_mutex, nullptr)); GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&memory_barrier_mutex)); - SleepMilliseconds(static_cast(random_.Generate(30))); + std::this_thread::sleep_for( + std::chrono::milliseconds(random_.Generate(30))); GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&memory_barrier_mutex)); GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&memory_barrier_mutex)); @@ -1069,10 +1067,11 @@ class AtomicCounterWithMutex { // On Windows, performing an interlocked access puts up a memory barrier. volatile LONG dummy = 0; ::InterlockedIncrement(&dummy); - SleepMilliseconds(static_cast(random_.Generate(30))); + std::this_thread::sleep_for( + std::chrono::milliseconds(random_.Generate(30))); ::InterlockedIncrement(&dummy); #else -# error "Memory barrier not implemented on this platform." +#error "Memory barrier not implemented on this platform." 
#endif // GTEST_HAS_PTHREAD } value_ = temp + 1; @@ -1082,12 +1081,11 @@ class AtomicCounterWithMutex { private: volatile int value_; Mutex* const mutex_; // Protects value_. - Random random_; + Random random_; }; void CountingThreadFunc(pair param) { - for (int i = 0; i < param.second; ++i) - param.first->Increment(); + for (int i = 0; i < param.second; ++i) param.first->Increment(); } // Tests that the mutex only lets one thread at a time to lock it. @@ -1103,14 +1101,12 @@ TEST(MutexTest, OnlyOneThreadCanLockAtATime) { // Creates and runs kThreadCount threads that increment locked_counter // kCycleCount times each. for (int i = 0; i < kThreadCount; ++i) { - counting_threads[i].reset(new ThreadType(&CountingThreadFunc, - make_pair(&locked_counter, - kCycleCount), - &threads_can_start)); + counting_threads[i].reset(new ThreadType( + &CountingThreadFunc, make_pair(&locked_counter, kCycleCount), + &threads_can_start)); } threads_can_start.Notify(); - for (int i = 0; i < kThreadCount; ++i) - counting_threads[i]->Join(); + for (int i = 0; i < kThreadCount; ++i) counting_threads[i]->Join(); // If the mutex lets more than one thread to increment the counter at a // time, they are likely to encounter a race condition and have some @@ -1120,7 +1116,7 @@ TEST(MutexTest, OnlyOneThreadCanLockAtATime) { } template -void RunFromThread(void (func)(T), T param) { +void RunFromThread(void(func)(T), T param) { ThreadWithParam thread(func, param, nullptr); thread.Join(); } @@ -1186,7 +1182,8 @@ class DestructorCall { #endif static std::vector* const list_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(DestructorCall); + DestructorCall(const DestructorCall&) = delete; + DestructorCall& operator=(const DestructorCall&) = delete; }; std::vector* const DestructorCall::list_ = diff --git a/ext/googletest/googletest/test/googletest-printers-test.cc b/ext/googletest/googletest/test/googletest-printers-test.cc index e1e8e1c7a0..acfecf97b8 100644 --- 
a/ext/googletest/googletest/test/googletest-printers-test.cc +++ b/ext/googletest/googletest/test/googletest-printers-test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Test - The Google C++ Testing and Mocking Framework // // This file tests the universal value printer. @@ -56,30 +55,20 @@ // Some user-defined types for testing the universal value printer. // An anonymous enum type. -enum AnonymousEnum { - kAE1 = -1, - kAE2 = 1 -}; +enum AnonymousEnum { kAE1 = -1, kAE2 = 1 }; // An enum without a user-defined printer. -enum EnumWithoutPrinter { - kEWP1 = -2, - kEWP2 = 42 -}; +enum EnumWithoutPrinter { kEWP1 = -2, kEWP2 = 42 }; // An enum with a << operator. -enum EnumWithStreaming { - kEWS1 = 10 -}; +enum EnumWithStreaming { kEWS1 = 10 }; std::ostream& operator<<(std::ostream& os, EnumWithStreaming e) { return os << (e == kEWS1 ? "kEWS1" : "invalid"); } // An enum with a PrintTo() function. -enum EnumWithPrintTo { - kEWPT1 = 1 -}; +enum EnumWithPrintTo { kEWPT1 = 1 }; void PrintTo(EnumWithPrintTo e, std::ostream* os) { *os << (e == kEWPT1 ? "kEWPT1" : "invalid"); @@ -108,6 +97,7 @@ template class UnprintableTemplateInGlobal { public: UnprintableTemplateInGlobal() : value_() {} + private: T value_; }; @@ -133,6 +123,7 @@ class UnprintableInFoo { public: UnprintableInFoo() : z_(0) { memcpy(xy_, "\xEF\x12\x0\x0\x34\xAB\x0\x0", 8); } double z() const { return z_; } + private: char xy_[8]; double z_; @@ -149,8 +140,7 @@ void PrintTo(const PrintableViaPrintTo& x, ::std::ostream* os) { } // A type with a user-defined << for printing its pointer. 
-struct PointerPrintable { -}; +struct PointerPrintable {}; ::std::ostream& operator<<(::std::ostream& os, const PointerPrintable* /* x */) { @@ -164,6 +154,7 @@ class PrintableViaPrintToTemplate { explicit PrintableViaPrintToTemplate(const T& a_value) : value_(a_value) {} const T& value() const { return value_; } + private: T value_; }; @@ -180,6 +171,7 @@ class StreamableTemplateInFoo { StreamableTemplateInFoo() : value_() {} const T& value() const { return value_; } + private: T value_; }; @@ -255,7 +247,6 @@ class UniversalPrinter> { }; } // namespace internal - namespace gtest_printers_test { using ::std::deque; @@ -350,29 +341,21 @@ TEST(PrintCharTest, PlainChar) { // signed char. TEST(PrintCharTest, SignedChar) { EXPECT_EQ("'\\0'", Print(static_cast('\0'))); - EXPECT_EQ("'\\xCE' (-50)", - Print(static_cast(-50))); + EXPECT_EQ("'\\xCE' (-50)", Print(static_cast(-50))); } // unsigned char. TEST(PrintCharTest, UnsignedChar) { EXPECT_EQ("'\\0'", Print(static_cast('\0'))); - EXPECT_EQ("'b' (98, 0x62)", - Print(static_cast('b'))); + EXPECT_EQ("'b' (98, 0x62)", Print(static_cast('b'))); } -TEST(PrintCharTest, Char16) { - EXPECT_EQ("U+0041", Print(u'A')); -} +TEST(PrintCharTest, Char16) { EXPECT_EQ("U+0041", Print(u'A')); } -TEST(PrintCharTest, Char32) { - EXPECT_EQ("U+0041", Print(U'A')); -} +TEST(PrintCharTest, Char32) { EXPECT_EQ("U+0041", Print(U'A')); } #ifdef __cpp_char8_t -TEST(PrintCharTest, Char8) { - EXPECT_EQ("U+0041", Print(u8'A')); -} +TEST(PrintCharTest, Char8) { EXPECT_EQ("U+0041", Print(u8'A')); } #endif // Tests printing other simple, built-in types. 
@@ -414,8 +397,8 @@ TEST(PrintTypeSizeTest, Wchar_t) { TEST(PrintBuiltInTypeTest, Integer) { EXPECT_EQ("'\\xFF' (255)", Print(static_cast(255))); // uint8 EXPECT_EQ("'\\x80' (-128)", Print(static_cast(-128))); // int8 - EXPECT_EQ("65535", Print(std::numeric_limits::max())); // uint16 - EXPECT_EQ("-32768", Print(std::numeric_limits::min())); // int16 + EXPECT_EQ("65535", Print(std::numeric_limits::max())); // uint16 + EXPECT_EQ("-32768", Print(std::numeric_limits::min())); // int16 EXPECT_EQ("4294967295", Print(std::numeric_limits::max())); // uint32 EXPECT_EQ("-2147483648", @@ -446,15 +429,43 @@ TEST(PrintBuiltInTypeTest, Size_t) { #if !GTEST_OS_WINDOWS // Windows has no ssize_t type. EXPECT_EQ("-2", Print(static_cast(-2))); // ssize_t. -#endif // !GTEST_OS_WINDOWS +#endif // !GTEST_OS_WINDOWS } +// gcc/clang __{u,}int128_t values. +#if defined(__SIZEOF_INT128__) +TEST(PrintBuiltInTypeTest, Int128) { + // Small ones + EXPECT_EQ("0", Print(__int128_t{0})); + EXPECT_EQ("0", Print(__uint128_t{0})); + EXPECT_EQ("12345", Print(__int128_t{12345})); + EXPECT_EQ("12345", Print(__uint128_t{12345})); + EXPECT_EQ("-12345", Print(__int128_t{-12345})); + + // Large ones + EXPECT_EQ("340282366920938463463374607431768211455", Print(~__uint128_t{})); + __int128_t max_128 = static_cast<__int128_t>(~__uint128_t{} / 2); + EXPECT_EQ("-170141183460469231731687303715884105728", Print(~max_128)); + EXPECT_EQ("170141183460469231731687303715884105727", Print(max_128)); +} +#endif // __SIZEOF_INT128__ + // Floating-points. TEST(PrintBuiltInTypeTest, FloatingPoints) { EXPECT_EQ("1.5", Print(1.5f)); // float EXPECT_EQ("-2.5", Print(-2.5)); // double } +#if GTEST_HAS_RTTI +TEST(PrintBuiltInTypeTest, TypeInfo) { + struct MyStruct {}; + auto res = Print(typeid(MyStruct{})); + // We can't guarantee that we can demangle the name, but either name should + // contain the substring "MyStruct". 
+ EXPECT_NE(res.find("MyStruct"), res.npos) << res; +} +#endif // GTEST_HAS_RTTI + // Since ::std::stringstream::operator<<(const void *) formats the pointer // output differently with different compilers, we have to create the expected // output first and use it as our expectation. @@ -488,8 +499,9 @@ TEST(PrintCStringTest, Null) { // Tests that C strings are escaped properly. TEST(PrintCStringTest, EscapesProperly) { const char* p = "'\"?\\\a\b\f\n\r\t\v\x7F\xFF a"; - EXPECT_EQ(PrintPointer(p) + " pointing to \"'\\\"?\\\\\\a\\b\\f" - "\\n\\r\\t\\v\\x7F\\xFF a\"", + EXPECT_EQ(PrintPointer(p) + + " pointing to \"'\\\"?\\\\\\a\\b\\f" + "\\n\\r\\t\\v\\x7F\\xFF a\"", Print(p)); } @@ -608,10 +620,12 @@ TEST(PrintWideCStringTest, Null) { // Tests that wide C strings are escaped properly. TEST(PrintWideCStringTest, EscapesProperly) { - const wchar_t s[] = {'\'', '"', '?', '\\', '\a', '\b', '\f', '\n', '\r', - '\t', '\v', 0xD3, 0x576, 0x8D3, 0xC74D, ' ', 'a', '\0'}; - EXPECT_EQ(PrintPointer(s) + " pointing to L\"'\\\"?\\\\\\a\\b\\f" - "\\n\\r\\t\\v\\xD3\\x576\\x8D3\\xC74D a\"", + const wchar_t s[] = {'\'', '"', '?', '\\', '\a', '\b', + '\f', '\n', '\r', '\t', '\v', 0xD3, + 0x576, 0x8D3, 0xC74D, ' ', 'a', '\0'}; + EXPECT_EQ(PrintPointer(s) + + " pointing to L\"'\\\"?\\\\\\a\\b\\f" + "\\n\\r\\t\\v\\xD3\\x576\\x8D3\\xC74D a\"", Print(static_cast(s))); } #endif // native wchar_t @@ -693,10 +707,9 @@ TEST(PrintPointerTest, NonMemberFunctionPointer) { // standard disallows casting between pointers to functions and // pointers to objects, and some compilers (e.g. GCC 3.4) enforce // this limitation. - EXPECT_EQ( - PrintPointer(reinterpret_cast( - reinterpret_cast(&MyFunction))), - Print(&MyFunction)); + EXPECT_EQ(PrintPointer(reinterpret_cast( + reinterpret_cast(&MyFunction))), + Print(&MyFunction)); int (*p)(bool) = NULL; // NOLINT EXPECT_EQ("NULL", Print(p)); } @@ -705,14 +718,13 @@ TEST(PrintPointerTest, NonMemberFunctionPointer) { // another. 
template AssertionResult HasPrefix(const StringType& str, const StringType& prefix) { - if (str.find(prefix, 0) == 0) - return AssertionSuccess(); + if (str.find(prefix, 0) == 0) return AssertionSuccess(); const bool is_wide_string = sizeof(prefix[0]) > 1; const char* const begin_string_quote = is_wide_string ? "L\"" : "\""; return AssertionFailure() - << begin_string_quote << prefix << "\" is not a prefix of " - << begin_string_quote << str << "\"\n"; + << begin_string_quote << prefix << "\" is not a prefix of " + << begin_string_quote << str << "\"\n"; } // Tests printing member variable pointers. Although they are called @@ -733,8 +745,7 @@ TEST(PrintPointerTest, MemberVariablePointer) { EXPECT_TRUE(HasPrefix(Print(&Foo::value), Print(sizeof(&Foo::value)) + "-byte object ")); int Foo::*p = NULL; // NOLINT - EXPECT_TRUE(HasPrefix(Print(p), - Print(sizeof(p)) + "-byte object ")); + EXPECT_TRUE(HasPrefix(Print(p), Print(sizeof(p)) + "-byte object ")); } // Tests printing member function pointers. Although they are called @@ -748,8 +759,7 @@ TEST(PrintPointerTest, MemberFunctionPointer) { HasPrefix(Print(&Foo::MyVirtualMethod), Print(sizeof((&Foo::MyVirtualMethod))) + "-byte object ")); int (Foo::*p)(char) = NULL; // NOLINT - EXPECT_TRUE(HasPrefix(Print(p), - Print(sizeof(p)) + "-byte object ")); + EXPECT_TRUE(HasPrefix(Print(p), Print(sizeof(p)) + "-byte object ")); } // Tests printing C arrays. @@ -763,29 +773,26 @@ std::string PrintArrayHelper(T (&a)[N]) { // One-dimensional array. TEST(PrintArrayTest, OneDimensionalArray) { - int a[5] = { 1, 2, 3, 4, 5 }; + int a[5] = {1, 2, 3, 4, 5}; EXPECT_EQ("{ 1, 2, 3, 4, 5 }", PrintArrayHelper(a)); } // Two-dimensional array. TEST(PrintArrayTest, TwoDimensionalArray) { - int a[2][5] = { - { 1, 2, 3, 4, 5 }, - { 6, 7, 8, 9, 0 } - }; + int a[2][5] = {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}}; EXPECT_EQ("{ { 1, 2, 3, 4, 5 }, { 6, 7, 8, 9, 0 } }", PrintArrayHelper(a)); } // Array of const elements. 
TEST(PrintArrayTest, ConstArray) { - const bool a[1] = { false }; + const bool a[1] = {false}; EXPECT_EQ("{ false }", PrintArrayHelper(a)); } // char array without terminating NUL. TEST(PrintArrayTest, CharArrayWithNoTerminatingNul) { // Array a contains '\0' in the middle and doesn't end with '\0'. - char a[] = { 'H', '\0', 'i' }; + char a[] = {'H', '\0', 'i'}; EXPECT_EQ("\"H\\0i\" (no terminating NUL)", PrintArrayHelper(a)); } @@ -806,9 +813,7 @@ TEST(PrintArrayTest, Char8ArrayWithNoTerminatingNul) { // char8_t array with terminating NUL. TEST(PrintArrayTest, Char8ArrayWithTerminatingNul) { const char8_t a[] = u8"\0世界"; - EXPECT_EQ( - "u8\"\\0\\xE4\\xB8\\x96\\xE7\\x95\\x8C\"", - PrintArrayHelper(a)); + EXPECT_EQ("u8\"\\0\\xE4\\xB8\\x96\\xE7\\x95\\x8C\"", PrintArrayHelper(a)); } #endif @@ -861,7 +866,7 @@ TEST(PrintArrayTest, ObjectArray) { // Array with many elements. TEST(PrintArrayTest, BigArray) { - int a[100] = { 1, 2, 3 }; + int a[100] = {1, 2, 3}; EXPECT_EQ("{ 1, 2, 3, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0 }", PrintArrayHelper(a)); } @@ -881,11 +886,14 @@ TEST(PrintStringTest, StringAmbiguousHex) { // '\x6', '\x6B', or '\x6BA'. 
// a hex escaping sequence following by a decimal digit - EXPECT_EQ("\"0\\x12\" \"3\"", Print(::std::string("0\x12" "3"))); + EXPECT_EQ("\"0\\x12\" \"3\"", Print(::std::string("0\x12" + "3"))); // a hex escaping sequence following by a hex digit (lower-case) - EXPECT_EQ("\"mm\\x6\" \"bananas\"", Print(::std::string("mm\x6" "bananas"))); + EXPECT_EQ("\"mm\\x6\" \"bananas\"", Print(::std::string("mm\x6" + "bananas"))); // a hex escaping sequence following by a hex digit (upper-case) - EXPECT_EQ("\"NOM\\x6\" \"BANANA\"", Print(::std::string("NOM\x6" "BANANA"))); + EXPECT_EQ("\"NOM\\x6\" \"BANANA\"", Print(::std::string("NOM\x6" + "BANANA"))); // a hex escaping sequence following by a non-xdigit EXPECT_EQ("\"!\\x5-!\"", Print(::std::string("!\x5-!"))); } @@ -895,19 +903,21 @@ TEST(PrintStringTest, StringAmbiguousHex) { // ::std::wstring. TEST(PrintWideStringTest, StringInStdNamespace) { const wchar_t s[] = L"'\"?\\\a\b\f\n\0\r\t\v\xD3\x576\x8D3\xC74D a"; - const ::std::wstring str(s, sizeof(s)/sizeof(wchar_t)); - EXPECT_EQ("L\"'\\\"?\\\\\\a\\b\\f\\n\\0\\r\\t\\v" - "\\xD3\\x576\\x8D3\\xC74D a\\0\"", - Print(str)); + const ::std::wstring str(s, sizeof(s) / sizeof(wchar_t)); + EXPECT_EQ( + "L\"'\\\"?\\\\\\a\\b\\f\\n\\0\\r\\t\\v" + "\\xD3\\x576\\x8D3\\xC74D a\\0\"", + Print(str)); } TEST(PrintWideStringTest, StringAmbiguousHex) { // same for wide strings. 
- EXPECT_EQ("L\"0\\x12\" L\"3\"", Print(::std::wstring(L"0\x12" L"3"))); - EXPECT_EQ("L\"mm\\x6\" L\"bananas\"", - Print(::std::wstring(L"mm\x6" L"bananas"))); - EXPECT_EQ("L\"NOM\\x6\" L\"BANANA\"", - Print(::std::wstring(L"NOM\x6" L"BANANA"))); + EXPECT_EQ("L\"0\\x12\" L\"3\"", Print(::std::wstring(L"0\x12" + L"3"))); + EXPECT_EQ("L\"mm\\x6\" L\"bananas\"", Print(::std::wstring(L"mm\x6" + L"bananas"))); + EXPECT_EQ("L\"NOM\\x6\" L\"BANANA\"", Print(::std::wstring(L"NOM\x6" + L"BANANA"))); EXPECT_EQ("L\"!\\x5-!\"", Print(::std::wstring(L"!\x5-!"))); } #endif // GTEST_HAS_STD_WSTRING @@ -1021,7 +1031,6 @@ TEST(PrintStlContainerTest, NonEmptyDeque) { EXPECT_EQ("{ 1, 3 }", Print(non_empty)); } - TEST(PrintStlContainerTest, OneElementHashMap) { ::std::unordered_map map1; map1[1] = 'a'; @@ -1037,11 +1046,9 @@ TEST(PrintStlContainerTest, HashMultiMap) { const std::string result = Print(map1); EXPECT_TRUE(result == "{ (5, true), (5, false) }" || result == "{ (5, false), (5, true) }") - << " where Print(map1) returns \"" << result << "\"."; + << " where Print(map1) returns \"" << result << "\"."; } - - TEST(PrintStlContainerTest, HashSet) { ::std::unordered_set set1; set1.insert(1); @@ -1050,7 +1057,7 @@ TEST(PrintStlContainerTest, HashSet) { TEST(PrintStlContainerTest, HashMultiSet) { const int kSize = 5; - int a[kSize] = { 1, 1, 2, 5, 1 }; + int a[kSize] = {1, 1, 2, 5, 1}; ::std::unordered_multiset set1(a, a + kSize); // Elements of hash_multiset can be printed in any order. 
@@ -1066,8 +1073,8 @@ TEST(PrintStlContainerTest, HashMultiSet) { ASSERT_NE(isdigit(static_cast(result[i])), 0); numbers.push_back(result[i] - '0'); } else { - EXPECT_EQ(expected_pattern[i], result[i]) << " where result is " - << result; + EXPECT_EQ(expected_pattern[i], result[i]) + << " where result is " << result; } } @@ -1077,7 +1084,6 @@ TEST(PrintStlContainerTest, HashMultiSet) { EXPECT_TRUE(std::equal(a, a + kSize, numbers.begin())); } - TEST(PrintStlContainerTest, List) { const std::string a[] = {"hello", "world"}; const list strings(a, a + 2); @@ -1107,20 +1113,19 @@ TEST(PrintStlContainerTest, MultiMap) { } TEST(PrintStlContainerTest, Set) { - const unsigned int a[] = { 3, 0, 5 }; + const unsigned int a[] = {3, 0, 5}; set set1(a, a + 3); EXPECT_EQ("{ 0, 3, 5 }", Print(set1)); } TEST(PrintStlContainerTest, MultiSet) { - const int a[] = { 1, 1, 2, 5, 1 }; + const int a[] = {1, 1, 2, 5, 1}; multiset set1(a, a + 5); EXPECT_EQ("{ 1, 1, 1, 2, 5 }", Print(set1)); } - TEST(PrintStlContainerTest, SinglyLinkedList) { - int a[] = { 9, 2, 8 }; + int a[] = {9, 2, 8}; const std::forward_list ints(a, a + 3); EXPECT_EQ("{ 9, 2, 8 }", Print(ints)); } @@ -1138,32 +1143,34 @@ TEST(PrintStlContainerTest, Vector) { } TEST(PrintStlContainerTest, LongSequence) { - const int a[100] = { 1, 2, 3 }; + const int a[100] = {1, 2, 3}; const vector v(a, a + 100); - EXPECT_EQ("{ 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, " - "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... }", Print(v)); + EXPECT_EQ( + "{ 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, " + "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... 
}", + Print(v)); } TEST(PrintStlContainerTest, NestedContainer) { - const int a1[] = { 1, 2 }; - const int a2[] = { 3, 4, 5 }; + const int a1[] = {1, 2}; + const int a2[] = {3, 4, 5}; const list l1(a1, a1 + 2); const list l2(a2, a2 + 3); - vector > v; + vector> v; v.push_back(l1); v.push_back(l2); EXPECT_EQ("{ { 1, 2 }, { 3, 4, 5 } }", Print(v)); } TEST(PrintStlContainerTest, OneDimensionalNativeArray) { - const int a[3] = { 1, 2, 3 }; + const int a[3] = {1, 2, 3}; NativeArray b(a, 3, RelationToSourceReference()); EXPECT_EQ("{ 1, 2, 3 }", Print(b)); } TEST(PrintStlContainerTest, TwoDimensionalNativeArray) { - const int a[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } }; + const int a[2][3] = {{1, 2, 3}, {4, 5, 6}}; NativeArray b(a, 2, RelationToSourceReference()); EXPECT_EQ("{ { 1, 2, 3 }, { 4, 5, 6 } }", Print(b)); } @@ -1215,20 +1222,18 @@ TEST(PrintStdTupleTest, VariousSizes) { t10(false, 'a', static_cast(3), 4, 5, 1.5F, -2.5, str, // NOLINT nullptr, "10"); EXPECT_EQ("(false, 'a' (97, 0x61), 3, 4, 5, 1.5, -2.5, " + PrintPointer(str) + - " pointing to \"8\", NULL, \"10\")", + " pointing to \"8\", NULL, \"10\")", Print(t10)); } // Nested tuples. TEST(PrintStdTupleTest, NestedTuple) { - ::std::tuple< ::std::tuple, char> nested( - ::std::make_tuple(5, true), 'a'); + ::std::tuple<::std::tuple, char> nested(::std::make_tuple(5, true), + 'a'); EXPECT_EQ("((5, true), 'a' (97, 0x61))", Print(nested)); } -TEST(PrintNullptrT, Basic) { - EXPECT_EQ("(nullptr)", Print(nullptr)); -} +TEST(PrintNullptrT, Basic) { EXPECT_EQ("(nullptr)", Print(nullptr)); } TEST(PrintReferenceWrapper, Printable) { int x = 5; @@ -1252,8 +1257,7 @@ TEST(PrintReferenceWrapper, Unprintable) { // Unprintable types in the global namespace. TEST(PrintUnprintableTypeTest, InGlobalNamespace) { - EXPECT_EQ("1-byte object <00>", - Print(UnprintableTemplateInGlobal())); + EXPECT_EQ("1-byte object <00>", Print(UnprintableTemplateInGlobal())); } // Unprintable types in a user namespace. 
@@ -1270,14 +1274,15 @@ struct Big { }; TEST(PrintUnpritableTypeTest, BigObject) { - EXPECT_EQ("257-byte object <00-00 00-00 00-00 00-00 00-00 00-00 " - "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " - "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " - "00-00 00-00 00-00 00-00 00-00 00-00 ... 00-00 00-00 00-00 " - "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " - "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " - "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00>", - Print(Big())); + EXPECT_EQ( + "257-byte object <00-00 00-00 00-00 00-00 00-00 00-00 " + "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " + "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " + "00-00 00-00 00-00 00-00 00-00 00-00 ... 00-00 00-00 00-00 " + "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " + "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 " + "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00>", + Print(Big())); } // Tests printing user-defined streamable types. @@ -1320,8 +1325,7 @@ TEST(PrintStreamableTypeTest, PathLikeInUserNamespace) { // Tests printing user-defined types that have a PrintTo() function. 
TEST(PrintPrintableTypeTest, InUserNamespace) { - EXPECT_EQ("PrintableViaPrintTo: 0", - Print(::foo::PrintableViaPrintTo())); + EXPECT_EQ("PrintableViaPrintTo: 0", Print(::foo::PrintableViaPrintTo())); } // Tests printing a pointer to a user-defined type that has a << @@ -1343,16 +1347,14 @@ TEST(PrintReferenceTest, PrintsAddressAndValue) { int n = 5; EXPECT_EQ("@" + PrintPointer(&n) + " 5", PrintByRef(n)); - int a[2][3] = { - { 0, 1, 2 }, - { 3, 4, 5 } - }; + int a[2][3] = {{0, 1, 2}, {3, 4, 5}}; EXPECT_EQ("@" + PrintPointer(a) + " { { 0, 1, 2 }, { 3, 4, 5 } }", PrintByRef(a)); const ::foo::UnprintableInFoo x; - EXPECT_EQ("@" + PrintPointer(&x) + " 16-byte object " - "", + EXPECT_EQ("@" + PrintPointer(&x) + + " 16-byte object " + "", PrintByRef(x)); } @@ -1368,33 +1370,29 @@ TEST(PrintReferenceTest, HandlesFunctionPointer) { // this limitation. const std::string fp_string = PrintPointer(reinterpret_cast( reinterpret_cast(fp))); - EXPECT_EQ("@" + fp_pointer_string + " " + fp_string, - PrintByRef(fp)); + EXPECT_EQ("@" + fp_pointer_string + " " + fp_string, PrintByRef(fp)); } // Tests that the universal printer prints a member function pointer // passed by reference. TEST(PrintReferenceTest, HandlesMemberFunctionPointer) { int (Foo::*p)(char ch) = &Foo::MyMethod; - EXPECT_TRUE(HasPrefix( - PrintByRef(p), - "@" + PrintPointer(reinterpret_cast(&p)) + " " + - Print(sizeof(p)) + "-byte object ")); + EXPECT_TRUE(HasPrefix(PrintByRef(p), + "@" + PrintPointer(reinterpret_cast(&p)) + + " " + Print(sizeof(p)) + "-byte object ")); char (Foo::*p2)(int n) = &Foo::MyVirtualMethod; - EXPECT_TRUE(HasPrefix( - PrintByRef(p2), - "@" + PrintPointer(reinterpret_cast(&p2)) + " " + - Print(sizeof(p2)) + "-byte object ")); + EXPECT_TRUE(HasPrefix(PrintByRef(p2), + "@" + PrintPointer(reinterpret_cast(&p2)) + + " " + Print(sizeof(p2)) + "-byte object ")); } // Tests that the universal printer prints a member variable pointer // passed by reference. 
TEST(PrintReferenceTest, HandlesMemberVariablePointer) { int Foo::*p = &Foo::value; // NOLINT - EXPECT_TRUE(HasPrefix( - PrintByRef(p), - "@" + PrintPointer(&p) + " " + Print(sizeof(p)) + "-byte object ")); + EXPECT_TRUE(HasPrefix(PrintByRef(p), "@" + PrintPointer(&p) + " " + + Print(sizeof(p)) + "-byte object ")); } // Tests that FormatForComparisonFailureMessage(), which is used to print @@ -1403,8 +1401,7 @@ TEST(PrintReferenceTest, HandlesMemberVariablePointer) { // scalar TEST(FormatForComparisonFailureMessageTest, WorksForScalar) { - EXPECT_STREQ("123", - FormatForComparisonFailureMessage(123, 124).c_str()); + EXPECT_STREQ("123", FormatForComparisonFailureMessage(123, 124).c_str()); } // non-char pointer @@ -1418,9 +1415,8 @@ TEST(FormatForComparisonFailureMessageTest, WorksForNonCharPointer) { TEST(FormatForComparisonFailureMessageTest, FormatsNonCharArrayAsPointer) { // In expression 'array == x', 'array' is compared by pointer. // Therefore we want to print an array operand as a pointer. - int n[] = { 1, 2, 3 }; - EXPECT_EQ(PrintPointer(n), - FormatForComparisonFailureMessage(n, n).c_str()); + int n[] = {1, 2, 3}; + EXPECT_EQ(PrintPointer(n), FormatForComparisonFailureMessage(n, n).c_str()); } // Tests formatting a char pointer when it's compared with another pointer. 
@@ -1436,8 +1432,7 @@ TEST(FormatForComparisonFailureMessageTest, WorksForCharPointerVsPointer) { // const char* const char* s = "hello"; - EXPECT_EQ(PrintPointer(s), - FormatForComparisonFailureMessage(s, s).c_str()); + EXPECT_EQ(PrintPointer(s), FormatForComparisonFailureMessage(s, s).c_str()); // char* char ch = 'a'; @@ -1454,8 +1449,7 @@ TEST(FormatForComparisonFailureMessageTest, WorksForWCharPointerVsPointer) { // const wchar_t* const wchar_t* s = L"hello"; - EXPECT_EQ(PrintPointer(s), - FormatForComparisonFailureMessage(s, s).c_str()); + EXPECT_EQ(PrintPointer(s), FormatForComparisonFailureMessage(s, s).c_str()); // wchar_t* wchar_t ch = L'a'; @@ -1552,13 +1546,11 @@ TEST(FormatForComparisonFailureMessageTest, WorksForWCharArrayVsStdWString) { // Useful for testing PrintToString(). We cannot use EXPECT_EQ() // there as its implementation uses PrintToString(). The caller must // ensure that 'value' has no side effect. -#define EXPECT_PRINT_TO_STRING_(value, expected_string) \ - EXPECT_TRUE(PrintToString(value) == (expected_string)) \ +#define EXPECT_PRINT_TO_STRING_(value, expected_string) \ + EXPECT_TRUE(PrintToString(value) == (expected_string)) \ << " where " #value " prints as " << (PrintToString(value)) -TEST(PrintToStringTest, WorksForScalar) { - EXPECT_PRINT_TO_STRING_(123, "123"); -} +TEST(PrintToStringTest, WorksForScalar) { EXPECT_PRINT_TO_STRING_(123, "123"); } TEST(PrintToStringTest, WorksForPointerToConstChar) { const char* p = "hello"; @@ -1583,7 +1575,7 @@ TEST(PrintToStringTest, EscapesForPointerToNonConstChar) { } TEST(PrintToStringTest, WorksForArray) { - int n[3] = { 1, 2, 3 }; + int n[3] = {1, 2, 3}; EXPECT_PRINT_TO_STRING_(n, "{ 1, 2, 3 }"); } @@ -1600,8 +1592,8 @@ TEST(PrintToStringTest, WorksForCharArrayWithEmbeddedNul) { EXPECT_PRINT_TO_STRING_(mutable_str_with_nul, "\"hello\\0 world\""); } - TEST(PrintToStringTest, ContainsNonLatin) { - // Sanity test with valid UTF-8. Prints both in hex and as text. 
+TEST(PrintToStringTest, ContainsNonLatin) { + // Test with valid UTF-8. Prints both in hex and as text. std::string non_ascii_str = ::std::string("오전 4:30"); EXPECT_PRINT_TO_STRING_(non_ascii_str, "\"\\xEC\\x98\\xA4\\xEC\\xA0\\x84 4:30\"\n" @@ -1617,57 +1609,58 @@ TEST(IsValidUTF8Test, IllFormedUTF8) { // as hex only (or ASCII, in case of ASCII bytes) because IsValidUTF8() is // expected to fail, thus output does not contain "As Text:". - static const char *const kTestdata[][2] = { - // 2-byte lead byte followed by a single-byte character. - {"\xC3\x74", "\"\\xC3t\""}, - // Valid 2-byte character followed by an orphan trail byte. - {"\xC3\x84\xA4", "\"\\xC3\\x84\\xA4\""}, - // Lead byte without trail byte. - {"abc\xC3", "\"abc\\xC3\""}, - // 3-byte lead byte, single-byte character, orphan trail byte. - {"x\xE2\x70\x94", "\"x\\xE2p\\x94\""}, - // Truncated 3-byte character. - {"\xE2\x80", "\"\\xE2\\x80\""}, - // Truncated 3-byte character followed by valid 2-byte char. - {"\xE2\x80\xC3\x84", "\"\\xE2\\x80\\xC3\\x84\""}, - // Truncated 3-byte character followed by a single-byte character. - {"\xE2\x80\x7A", "\"\\xE2\\x80z\""}, - // 3-byte lead byte followed by valid 3-byte character. - {"\xE2\xE2\x80\x94", "\"\\xE2\\xE2\\x80\\x94\""}, - // 4-byte lead byte followed by valid 3-byte character. - {"\xF0\xE2\x80\x94", "\"\\xF0\\xE2\\x80\\x94\""}, - // Truncated 4-byte character. - {"\xF0\xE2\x80", "\"\\xF0\\xE2\\x80\""}, - // Invalid UTF-8 byte sequences embedded in other chars. - {"abc\xE2\x80\x94\xC3\x74xyc", "\"abc\\xE2\\x80\\x94\\xC3txyc\""}, - {"abc\xC3\x84\xE2\x80\xC3\x84xyz", - "\"abc\\xC3\\x84\\xE2\\x80\\xC3\\x84xyz\""}, - // Non-shortest UTF-8 byte sequences are also ill-formed. - // The classics: xC0, xC1 lead byte. - {"\xC0\x80", "\"\\xC0\\x80\""}, - {"\xC1\x81", "\"\\xC1\\x81\""}, - // Non-shortest sequences. 
- {"\xE0\x80\x80", "\"\\xE0\\x80\\x80\""}, - {"\xf0\x80\x80\x80", "\"\\xF0\\x80\\x80\\x80\""}, - // Last valid code point before surrogate range, should be printed as text, - // too. - {"\xED\x9F\xBF", "\"\\xED\\x9F\\xBF\"\n As Text: \"퟿\""}, - // Start of surrogate lead. Surrogates are not printed as text. - {"\xED\xA0\x80", "\"\\xED\\xA0\\x80\""}, - // Last non-private surrogate lead. - {"\xED\xAD\xBF", "\"\\xED\\xAD\\xBF\""}, - // First private-use surrogate lead. - {"\xED\xAE\x80", "\"\\xED\\xAE\\x80\""}, - // Last private-use surrogate lead. - {"\xED\xAF\xBF", "\"\\xED\\xAF\\xBF\""}, - // Mid-point of surrogate trail. - {"\xED\xB3\xBF", "\"\\xED\\xB3\\xBF\""}, - // First valid code point after surrogate range, should be printed as text, - // too. - {"\xEE\x80\x80", "\"\\xEE\\x80\\x80\"\n As Text: \"\""} - }; + static const char* const kTestdata[][2] = { + // 2-byte lead byte followed by a single-byte character. + {"\xC3\x74", "\"\\xC3t\""}, + // Valid 2-byte character followed by an orphan trail byte. + {"\xC3\x84\xA4", "\"\\xC3\\x84\\xA4\""}, + // Lead byte without trail byte. + {"abc\xC3", "\"abc\\xC3\""}, + // 3-byte lead byte, single-byte character, orphan trail byte. + {"x\xE2\x70\x94", "\"x\\xE2p\\x94\""}, + // Truncated 3-byte character. + {"\xE2\x80", "\"\\xE2\\x80\""}, + // Truncated 3-byte character followed by valid 2-byte char. + {"\xE2\x80\xC3\x84", "\"\\xE2\\x80\\xC3\\x84\""}, + // Truncated 3-byte character followed by a single-byte character. + {"\xE2\x80\x7A", "\"\\xE2\\x80z\""}, + // 3-byte lead byte followed by valid 3-byte character. + {"\xE2\xE2\x80\x94", "\"\\xE2\\xE2\\x80\\x94\""}, + // 4-byte lead byte followed by valid 3-byte character. + {"\xF0\xE2\x80\x94", "\"\\xF0\\xE2\\x80\\x94\""}, + // Truncated 4-byte character. + {"\xF0\xE2\x80", "\"\\xF0\\xE2\\x80\""}, + // Invalid UTF-8 byte sequences embedded in other chars. 
+ {"abc\xE2\x80\x94\xC3\x74xyc", "\"abc\\xE2\\x80\\x94\\xC3txyc\""}, + {"abc\xC3\x84\xE2\x80\xC3\x84xyz", + "\"abc\\xC3\\x84\\xE2\\x80\\xC3\\x84xyz\""}, + // Non-shortest UTF-8 byte sequences are also ill-formed. + // The classics: xC0, xC1 lead byte. + {"\xC0\x80", "\"\\xC0\\x80\""}, + {"\xC1\x81", "\"\\xC1\\x81\""}, + // Non-shortest sequences. + {"\xE0\x80\x80", "\"\\xE0\\x80\\x80\""}, + {"\xf0\x80\x80\x80", "\"\\xF0\\x80\\x80\\x80\""}, + // Last valid code point before surrogate range, should be printed as + // text, + // too. + {"\xED\x9F\xBF", "\"\\xED\\x9F\\xBF\"\n As Text: \"퟿\""}, + // Start of surrogate lead. Surrogates are not printed as text. + {"\xED\xA0\x80", "\"\\xED\\xA0\\x80\""}, + // Last non-private surrogate lead. + {"\xED\xAD\xBF", "\"\\xED\\xAD\\xBF\""}, + // First private-use surrogate lead. + {"\xED\xAE\x80", "\"\\xED\\xAE\\x80\""}, + // Last private-use surrogate lead. + {"\xED\xAF\xBF", "\"\\xED\\xAF\\xBF\""}, + // Mid-point of surrogate trail. + {"\xED\xB3\xBF", "\"\\xED\\xB3\\xBF\""}, + // First valid code point after surrogate range, should be printed as + // text, + // too. 
+ {"\xEE\x80\x80", "\"\\xEE\\x80\\x80\"\n As Text: \"\""}}; - for (int i = 0; i < int(sizeof(kTestdata)/sizeof(kTestdata[0])); ++i) { + for (int i = 0; i < int(sizeof(kTestdata) / sizeof(kTestdata[0])); ++i) { EXPECT_PRINT_TO_STRING_(kTestdata[i][0], kTestdata[i][1]); } } @@ -1816,15 +1809,15 @@ TEST(UniversalTersePrintTupleFieldsToStringsTestWithStd, PrintsEmptyTuple) { } TEST(UniversalTersePrintTupleFieldsToStringsTestWithStd, PrintsOneTuple) { - Strings result = UniversalTersePrintTupleFieldsToStrings( - ::std::make_tuple(1)); + Strings result = + UniversalTersePrintTupleFieldsToStrings(::std::make_tuple(1)); ASSERT_EQ(1u, result.size()); EXPECT_EQ("1", result[0]); } TEST(UniversalTersePrintTupleFieldsToStringsTestWithStd, PrintsTwoTuple) { - Strings result = UniversalTersePrintTupleFieldsToStrings( - ::std::make_tuple(1, 'a')); + Strings result = + UniversalTersePrintTupleFieldsToStrings(::std::make_tuple(1, 'a')); ASSERT_EQ(2u, result.size()); EXPECT_EQ("1", result[0]); EXPECT_EQ("'a' (97, 0x61)", result[1]); @@ -1873,6 +1866,7 @@ TEST_F(PrintAnyTest, NonEmpty) { #if GTEST_INTERNAL_HAS_OPTIONAL TEST(PrintOptionalTest, Basic) { + EXPECT_EQ("(nullopt)", PrintToString(internal::Nullopt())); internal::Optional value; EXPECT_EQ("(nullopt)", PrintToString(value)); value = {7}; diff --git a/ext/googletest/googletest/test/googletest-setuptestsuite-test.py b/ext/googletest/googletest/test/googletest-setuptestsuite-test.py index c82162fc99..9d1fd0295c 100755 --- a/ext/googletest/googletest/test/googletest-setuptestsuite-test.py +++ b/ext/googletest/googletest/test/googletest-setuptestsuite-test.py @@ -31,7 +31,7 @@ """Verifies that SetUpTestSuite and TearDownTestSuite errors are noticed.""" -import gtest_test_utils +from googletest.test import gtest_test_utils COMMAND = gtest_test_utils.GetTestExecutablePath( 'googletest-setuptestsuite-test_') diff --git a/ext/googletest/googletest/test/googletest-setuptestsuite-test_.cc 
b/ext/googletest/googletest/test/googletest-setuptestsuite-test_.cc index a4bc4ef441..d20899f568 100644 --- a/ext/googletest/googletest/test/googletest-setuptestsuite-test_.cc +++ b/ext/googletest/googletest/test/googletest-setuptestsuite-test_.cc @@ -27,23 +27,18 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gtest/gtest.h" class SetupFailTest : public ::testing::Test { protected: - static void SetUpTestSuite() { - ASSERT_EQ("", "SET_UP_FAIL"); - } + static void SetUpTestSuite() { ASSERT_EQ("", "SET_UP_FAIL"); } }; TEST_F(SetupFailTest, NoopPassingTest) {} class TearDownFailTest : public ::testing::Test { protected: - static void TearDownTestSuite() { - ASSERT_EQ("", "TEAR_DOWN_FAIL"); - } + static void TearDownTestSuite() { ASSERT_EQ("", "TEAR_DOWN_FAIL"); } }; TEST_F(TearDownFailTest, NoopPassingTest) {} diff --git a/ext/googletest/googletest/test/googletest-shuffle-test.py b/ext/googletest/googletest/test/googletest-shuffle-test.py index 573cc5eca3..9d2adc1286 100755 --- a/ext/googletest/googletest/test/googletest-shuffle-test.py +++ b/ext/googletest/googletest/test/googletest-shuffle-test.py @@ -31,7 +31,7 @@ """Verifies that test shuffling works.""" import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Command to run the googletest-shuffle-test_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-shuffle-test_') diff --git a/ext/googletest/googletest/test/googletest-shuffle-test_.cc b/ext/googletest/googletest/test/googletest-shuffle-test_.cc index 4505663ae4..a14e22f98c 100644 --- a/ext/googletest/googletest/test/googletest-shuffle-test_.cc +++ b/ext/googletest/googletest/test/googletest-shuffle-test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- // Verifies that test shuffling works. #include "gtest/gtest.h" @@ -88,7 +87,7 @@ class TestNamePrinter : public EmptyTestEventListener { } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { InitGoogleTest(&argc, argv); // Replaces the default printer with TestNamePrinter, which prints diff --git a/ext/googletest/googletest/test/googletest-test-part-test.cc b/ext/googletest/googletest/test/googletest-test-part-test.cc index 44cf7ca044..076e5be2fa 100644 --- a/ext/googletest/googletest/test/googletest-test-part-test.cc +++ b/ext/googletest/googletest/test/googletest-test-part-test.cc @@ -28,7 +28,6 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "gtest/gtest-test-part.h" - #include "gtest/gtest.h" using testing::Message; @@ -52,17 +51,14 @@ class TestPartResultTest : public Test { TestPartResult r1_, r2_, r3_, r4_; }; - TEST_F(TestPartResultTest, ConstructorWorks) { Message message; message << "something is terribly wrong"; message << static_cast(testing::internal::kStackTraceMarker); message << "some unimportant stack trace"; - const TestPartResult result(TestPartResult::kNonFatalFailure, - "some_file.cc", - 42, - message.GetString().c_str()); + const TestPartResult result(TestPartResult::kNonFatalFailure, "some_file.cc", + 42, message.GetString().c_str()); EXPECT_EQ(TestPartResult::kNonFatalFailure, result.type()); EXPECT_STREQ("some_file.cc", result.file_name()); @@ -72,9 +68,7 @@ TEST_F(TestPartResultTest, ConstructorWorks) { } TEST_F(TestPartResultTest, ResultAccessorsWork) { - const TestPartResult success(TestPartResult::kSuccess, - "file.cc", - 42, + const TestPartResult success(TestPartResult::kSuccess, "file.cc", 42, "message"); EXPECT_TRUE(success.passed()); EXPECT_FALSE(success.failed()); @@ -83,19 +77,15 @@ TEST_F(TestPartResultTest, ResultAccessorsWork) { EXPECT_FALSE(success.skipped()); const TestPartResult nonfatal_failure(TestPartResult::kNonFatalFailure, - "file.cc", - 42, - 
"message"); + "file.cc", 42, "message"); EXPECT_FALSE(nonfatal_failure.passed()); EXPECT_TRUE(nonfatal_failure.failed()); EXPECT_TRUE(nonfatal_failure.nonfatally_failed()); EXPECT_FALSE(nonfatal_failure.fatally_failed()); EXPECT_FALSE(nonfatal_failure.skipped()); - const TestPartResult fatal_failure(TestPartResult::kFatalFailure, - "file.cc", - 42, - "message"); + const TestPartResult fatal_failure(TestPartResult::kFatalFailure, "file.cc", + 42, "message"); EXPECT_FALSE(fatal_failure.passed()); EXPECT_TRUE(fatal_failure.failed()); EXPECT_FALSE(fatal_failure.nonfatally_failed()); diff --git a/ext/googletest/googletest/test/googletest-throw-on-failure-test.py b/ext/googletest/googletest/test/googletest-throw-on-failure-test.py index ea627c479d..772bbc5f39 100755 --- a/ext/googletest/googletest/test/googletest-throw-on-failure-test.py +++ b/ext/googletest/googletest/test/googletest-throw-on-failure-test.py @@ -36,7 +36,7 @@ Google Test) with different environments and command line flags. """ import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/ext/googletest/googletest/test/googletest-throw-on-failure-test_.cc b/ext/googletest/googletest/test/googletest-throw-on-failure-test_.cc index 83bb914c7e..3b81a5a1db 100644 --- a/ext/googletest/googletest/test/googletest-throw-on-failure-test_.cc +++ b/ext/googletest/googletest/test/googletest-throw-on-failure-test_.cc @@ -27,18 +27,18 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests Google Test's throw-on-failure mode with exceptions disabled. // // This program must be compiled with exceptions disabled. It will be // invoked by googletest-throw-on-failure-test.py, and is expected to exit // with non-zero in the throw-on-failure mode or 0 otherwise. -#include "gtest/gtest.h" +#include // for fflush, fprintf, NULL, etc. 
+#include // for exit -#include // for fflush, fprintf, NULL, etc. -#include // for exit -#include // for set_terminate +#include // for set_terminate + +#include "gtest/gtest.h" // This terminate handler aborts the program using exit() rather than abort(). // This avoids showing pop-ups on Windows systems and core dumps on Unix-like diff --git a/ext/googletest/googletest/test/googletest-uninitialized-test.py b/ext/googletest/googletest/test/googletest-uninitialized-test.py index 69595a0dde..73c91764a5 100755 --- a/ext/googletest/googletest/test/googletest-uninitialized-test.py +++ b/ext/googletest/googletest/test/googletest-uninitialized-test.py @@ -31,7 +31,7 @@ """Verifies that Google Test warns the user when not initialized properly.""" -import gtest_test_utils +from googletest.test import gtest_test_utils COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-uninitialized-test_') diff --git a/ext/googletest/googletest/test/googletest-uninitialized-test_.cc b/ext/googletest/googletest/test/googletest-uninitialized-test_.cc index b4434d51ee..88b61fcefd 100644 --- a/ext/googletest/googletest/test/googletest-uninitialized-test_.cc +++ b/ext/googletest/googletest/test/googletest-uninitialized-test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "gtest/gtest.h" TEST(DummyTest, Dummy) { @@ -37,6 +36,4 @@ TEST(DummyTest, Dummy) { // testing::InitGoogleTest() being called first. 
} -int main() { - return RUN_ALL_TESTS(); -} +int main() { return RUN_ALL_TESTS(); } diff --git a/ext/googletest/googletest/test/gtest-typed-test2_test.cc b/ext/googletest/googletest/test/gtest-typed-test2_test.cc index e83ca2e11b..f2eae12058 100644 --- a/ext/googletest/googletest/test/gtest-typed-test2_test.cc +++ b/ext/googletest/googletest/test/gtest-typed-test2_test.cc @@ -27,11 +27,10 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include -#include "test/gtest-typed-test_test.h" #include "gtest/gtest.h" +#include "test/gtest-typed-test_test.h" // Tests that the same type-parameterized test case can be // instantiated in different translation units linked together. diff --git a/ext/googletest/googletest/test/gtest-typed-test_test.cc b/ext/googletest/googletest/test/gtest-typed-test_test.cc index 5fc678cb0d..af23f86d14 100644 --- a/ext/googletest/googletest/test/gtest-typed-test_test.cc +++ b/ext/googletest/googletest/test/gtest-typed-test_test.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include "test/gtest-typed-test_test.h" #include @@ -50,9 +49,7 @@ class CommonTest : public Test { // For some technical reason, SetUpTestSuite() and TearDownTestSuite() // must be public. public: - static void SetUpTestSuite() { - shared_ = new T(5); - } + static void SetUpTestSuite() { shared_ = new T(5); } static void TearDownTestSuite() { delete shared_; @@ -130,8 +127,7 @@ TYPED_TEST(CommonTest, ValuesAreStillCorrect) { // translation unit. template -class TypedTest1 : public Test { -}; +class TypedTest1 : public Test {}; // Verifies that the second argument of TYPED_TEST_SUITE can be a // single type. 
@@ -139,8 +135,7 @@ TYPED_TEST_SUITE(TypedTest1, int); TYPED_TEST(TypedTest1, A) {} template -class TypedTest2 : public Test { -}; +class TypedTest2 : public Test {}; // Verifies that the second argument of TYPED_TEST_SUITE can be a // Types<...> type list. @@ -155,15 +150,12 @@ TYPED_TEST(TypedTest2, A) {} namespace library1 { template -class NumericTest : public Test { -}; +class NumericTest : public Test {}; typedef Types NumericTypes; TYPED_TEST_SUITE(NumericTest, NumericTypes); -TYPED_TEST(NumericTest, DefaultIsZero) { - EXPECT_EQ(0, TypeParam()); -} +TYPED_TEST(NumericTest, DefaultIsZero) { EXPECT_EQ(0, TypeParam()); } } // namespace library1 @@ -265,8 +257,7 @@ TEST_F(TypedTestSuitePStateDeathTest, DetectsTestAfterRegistration) { // and SetUp()/TearDown() work correctly in type-parameterized tests. template -class DerivedTest : public CommonTest { -}; +class DerivedTest : public CommonTest {}; TYPED_TEST_SUITE_P(DerivedTest); @@ -290,8 +281,8 @@ TYPED_TEST_P(DerivedTest, ValuesAreStillCorrect) { EXPECT_EQ(2, this->value_); } -REGISTER_TYPED_TEST_SUITE_P(DerivedTest, - ValuesAreCorrect, ValuesAreStillCorrect); +REGISTER_TYPED_TEST_SUITE_P(DerivedTest, ValuesAreCorrect, + ValuesAreStillCorrect); typedef Types MyTwoTypes; INSTANTIATE_TYPED_TEST_SUITE_P(My, DerivedTest, MyTwoTypes); @@ -334,14 +325,13 @@ class TypeParametrizedTestNames { }; INSTANTIATE_TYPED_TEST_SUITE_P(CustomName, TypeParametrizedTestWithNames, - TwoTypes, TypeParametrizedTestNames); + TwoTypes, TypeParametrizedTestNames); // Tests that multiple TYPED_TEST_SUITE_P's can be defined in the same // translation unit. 
template -class TypedTestP1 : public Test { -}; +class TypedTestP1 : public Test {}; TYPED_TEST_SUITE_P(TypedTestP1); @@ -359,8 +349,7 @@ using IntBeforeRegisterTypedTestSuiteP = int; REGISTER_TYPED_TEST_SUITE_P(TypedTestP1, A, B); template -class TypedTestP2 : public Test { -}; +class TypedTestP2 : public Test {}; TYPED_TEST_SUITE_P(TypedTestP2); @@ -396,21 +385,17 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, ContainerTest, MyContainers); namespace library2 { template -class NumericTest : public Test { -}; +class NumericTest : public Test {}; TYPED_TEST_SUITE_P(NumericTest); -TYPED_TEST_P(NumericTest, DefaultIsZero) { - EXPECT_EQ(0, TypeParam()); -} +TYPED_TEST_P(NumericTest, DefaultIsZero) { EXPECT_EQ(0, TypeParam()); } TYPED_TEST_P(NumericTest, ZeroIsLessThanOne) { EXPECT_LT(TypeParam(0), TypeParam(1)); } -REGISTER_TYPED_TEST_SUITE_P(NumericTest, - DefaultIsZero, ZeroIsLessThanOne); +REGISTER_TYPED_TEST_SUITE_P(NumericTest, DefaultIsZero, ZeroIsLessThanOne); typedef Types NumericTypes; INSTANTIATE_TYPED_TEST_SUITE_P(My, NumericTest, NumericTypes); @@ -418,20 +403,20 @@ static const char* GetTestName() { return testing::UnitTest::GetInstance()->current_test_info()->name(); } // Test the stripping of space from test names -template class TrimmedTest : public Test { }; +template +class TrimmedTest : public Test {}; TYPED_TEST_SUITE_P(TrimmedTest); TYPED_TEST_P(TrimmedTest, Test1) { EXPECT_STREQ("Test1", GetTestName()); } TYPED_TEST_P(TrimmedTest, Test2) { EXPECT_STREQ("Test2", GetTestName()); } TYPED_TEST_P(TrimmedTest, Test3) { EXPECT_STREQ("Test3", GetTestName()); } TYPED_TEST_P(TrimmedTest, Test4) { EXPECT_STREQ("Test4", GetTestName()); } TYPED_TEST_P(TrimmedTest, Test5) { EXPECT_STREQ("Test5", GetTestName()); } -REGISTER_TYPED_TEST_SUITE_P( - TrimmedTest, - Test1, Test2,Test3 , Test4 ,Test5 ); // NOLINT -template struct MyPair {}; +REGISTER_TYPED_TEST_SUITE_P(TrimmedTest, Test1, Test2, Test3, Test4, + Test5); // NOLINT +template +struct MyPair {}; // Be sure to try a 
type with a comma in its name just in case it matters. typedef Types > TrimTypes; INSTANTIATE_TYPED_TEST_SUITE_P(My, TrimmedTest, TrimTypes); } // namespace library2 - diff --git a/ext/googletest/googletest/test/gtest-typed-test_test.h b/ext/googletest/googletest/test/gtest-typed-test_test.h index 8ce559c99f..f3ef0a5962 100644 --- a/ext/googletest/googletest/test/gtest-typed-test_test.h +++ b/ext/googletest/googletest/test/gtest-typed-test_test.h @@ -40,21 +40,18 @@ using testing::Test; // and gtest-typed-test2_test.cc. template -class ContainerTest : public Test { -}; +class ContainerTest : public Test {}; TYPED_TEST_SUITE_P(ContainerTest); -TYPED_TEST_P(ContainerTest, CanBeDefaultConstructed) { - TypeParam container; -} +TYPED_TEST_P(ContainerTest, CanBeDefaultConstructed) { TypeParam container; } TYPED_TEST_P(ContainerTest, InitialSizeIsZero) { TypeParam container; EXPECT_EQ(0U, container.size()); } -REGISTER_TYPED_TEST_SUITE_P(ContainerTest, - CanBeDefaultConstructed, InitialSizeIsZero); +REGISTER_TYPED_TEST_SUITE_P(ContainerTest, CanBeDefaultConstructed, + InitialSizeIsZero); #endif // GOOGLETEST_TEST_GTEST_TYPED_TEST_TEST_H_ diff --git a/ext/googletest/googletest/test/gtest-unittest-api_test.cc b/ext/googletest/googletest/test/gtest-unittest-api_test.cc index 8ef505838c..2a13fa32ff 100644 --- a/ext/googletest/googletest/test/gtest-unittest-api_test.cc +++ b/ext/googletest/googletest/test/gtest-unittest-api_test.cc @@ -32,11 +32,12 @@ // This file contains tests verifying correctness of data provided via // UnitTest's public methods. -#include "gtest/gtest.h" - #include // For strcmp. 
+ #include +#include "gtest/gtest.h" + using ::testing::InitGoogleTest; namespace testing { @@ -56,13 +57,12 @@ class UnitTestHelper { static TestSuite const** GetSortedTestSuites() { UnitTest& unit_test = *UnitTest::GetInstance(); auto const** const test_suites = new const TestSuite*[static_cast( - unit_test.total_test_suite_count())]; + unit_test.total_test_suite_count())]; for (int i = 0; i < unit_test.total_test_suite_count(); ++i) test_suites[i] = unit_test.GetTestSuite(i); - std::sort(test_suites, - test_suites + unit_test.total_test_suite_count(), + std::sort(test_suites, test_suites + unit_test.total_test_suite_count(), LessByName()); return test_suites; } @@ -73,8 +73,7 @@ class UnitTestHelper { UnitTest& unit_test = *UnitTest::GetInstance(); for (int i = 0; i < unit_test.total_test_suite_count(); ++i) { const TestSuite* test_suite = unit_test.GetTestSuite(i); - if (0 == strcmp(test_suite->name(), name)) - return test_suite; + if (0 == strcmp(test_suite->name(), name)) return test_suite; } return nullptr; } @@ -84,7 +83,7 @@ class UnitTestHelper { // array. 
static TestInfo const** GetSortedTests(const TestSuite* test_suite) { TestInfo const** const tests = new const TestInfo*[static_cast( - test_suite->total_test_count())]; + test_suite->total_test_count())]; for (int i = 0; i < test_suite->total_test_count(); ++i) tests[i] = test_suite->GetTestInfo(i); @@ -95,7 +94,8 @@ class UnitTestHelper { } }; -template class TestSuiteWithCommentTest : public Test {}; +template +class TestSuiteWithCommentTest : public Test {}; TYPED_TEST_SUITE(TestSuiteWithCommentTest, Types); TYPED_TEST(TestSuiteWithCommentTest, Dummy) {} @@ -319,7 +319,7 @@ class FinalSuccessChecker : public Environment { } // namespace internal } // namespace testing -int main(int argc, char **argv) { +int main(int argc, char** argv) { InitGoogleTest(&argc, argv); AddGlobalTestEnvironment(new testing::internal::FinalSuccessChecker()); diff --git a/ext/googletest/googletest/test/gtest_assert_by_exception_test.cc b/ext/googletest/googletest/test/gtest_assert_by_exception_test.cc index ada4cb30ef..f507eac475 100644 --- a/ext/googletest/googletest/test/gtest_assert_by_exception_test.cc +++ b/ext/googletest/googletest/test/gtest_assert_by_exception_test.cc @@ -27,16 +27,16 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests Google Test's assert-by-exception mode with exceptions enabled. -#include "gtest/gtest.h" - -#include #include +#include #include + #include +#include "gtest/gtest.h" + class ThrowListener : public testing::EmptyTestEventListener { void OnTestPartResult(const testing::TestPartResult& result) override { if (result.type() == testing::TestPartResult::kFatalFailure) { @@ -55,9 +55,7 @@ void Fail(const char* msg) { exit(1); } -static void AssertFalse() { - ASSERT_EQ(2, 3) << "Expected failure"; -} +static void AssertFalse() { ASSERT_EQ(2, 3) << "Expected failure"; } // Tests that an assertion failure throws a subclass of // std::runtime_error. 
@@ -65,21 +63,21 @@ TEST(Test, Test) { // A successful assertion shouldn't throw. try { EXPECT_EQ(3, 3); - } catch(...) { + } catch (...) { Fail("A successful assertion wrongfully threw."); } // A successful assertion shouldn't throw. try { EXPECT_EQ(3, 4); - } catch(...) { + } catch (...) { Fail("A failed non-fatal assertion wrongfully threw."); } // A failed assertion should throw. try { AssertFalse(); - } catch(const testing::AssertionException& e) { + } catch (const testing::AssertionException& e) { if (strstr(e.what(), "Expected failure") != nullptr) throw; printf("%s", @@ -87,7 +85,7 @@ TEST(Test, Test) { "but the message is incorrect. Instead of containing \"Expected " "failure\", it is:\n"); Fail(e.what()); - } catch(...) { + } catch (...) { Fail("A failed assertion threw the wrong type of exception."); } Fail("A failed assertion should've thrown but didn't."); @@ -95,9 +93,7 @@ TEST(Test, Test) { int kTestForContinuingTest = 0; -TEST(Test, Test2) { - kTestForContinuingTest = 1; -} +TEST(Test, Test2) { kTestForContinuingTest = 1; } int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/ext/googletest/googletest/test/gtest_environment_test.cc b/ext/googletest/googletest/test/gtest_environment_test.cc index c7facf5a39..122eaf3ca8 100644 --- a/ext/googletest/googletest/test/gtest_environment_test.cc +++ b/ext/googletest/googletest/test/gtest_environment_test.cc @@ -30,16 +30,15 @@ // // Tests using global test environments. -#include #include +#include + #include "gtest/gtest.h" #include "src/gtest-internal-inl.h" namespace { -enum FailureType { - NO_FAILURE, NON_FATAL_FAILURE, FATAL_FAILURE -}; +enum FailureType { NO_FAILURE, NON_FATAL_FAILURE, FATAL_FAILURE }; // For testing using global test environments. class MyEnvironment : public testing::Environment { @@ -79,9 +78,7 @@ class MyEnvironment : public testing::Environment { // We call this function to set the type of failure SetUp() should // generate. 
- void set_failure_in_set_up(FailureType type) { - failure_in_set_up_ = type; - } + void set_failure_in_set_up(FailureType type) { failure_in_set_up_ = type; } // Was SetUp() run? bool set_up_was_run() const { return set_up_was_run_; } @@ -100,9 +97,7 @@ bool test_was_run; // The sole purpose of this TEST is to enable us to check whether it // was run. -TEST(FooTest, Bar) { - test_was_run = true; -} +TEST(FooTest, Bar) { test_was_run = true; } // Prints the message and aborts the program if condition is false. void Check(bool condition, const char* msg) { @@ -126,7 +121,7 @@ int RunAllTests(MyEnvironment* env, FailureType failure) { } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); // Registers a global test environment, and verifies that the diff --git a/ext/googletest/googletest/test/gtest_help_test.py b/ext/googletest/googletest/test/gtest_help_test.py index 54d450474d..642ab86506 100755 --- a/ext/googletest/googletest/test/gtest_help_test.py +++ b/ext/googletest/googletest/test/gtest_help_test.py @@ -39,28 +39,29 @@ SYNOPSIS import os import re -import gtest_test_utils +import sys +from googletest.test import gtest_test_utils IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' IS_GNUHURD = os.name == 'posix' and os.uname()[0] == 'GNU' IS_GNUKFREEBSD = os.name == 'posix' and os.uname()[0] == 'GNU/kFreeBSD' +IS_OPENBSD = os.name == 'posix' and os.uname()[0] == 'OpenBSD' IS_WINDOWS = os.name == 'nt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_') FLAG_PREFIX = '--gtest_' DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style' STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to' -UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing' +UNKNOWN_GTEST_PREFIXED_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing' LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' -INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG), - re.sub('^--', '/', LIST_TESTS_FLAG), - 
re.sub('_', '-', LIST_TESTS_FLAG)] INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing' SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess( [PROGRAM_PATH, LIST_TESTS_FLAG]).output +HAS_ABSL_FLAGS = '--has_absl_flags' in sys.argv + # The help message must match this regex. HELP_REGEX = re.compile( FLAG_PREFIX + r'list_tests.*' + @@ -110,18 +111,37 @@ class GTestHelpTest(gtest_test_utils.TestCase): """ exit_code, output = RunWithFlag(flag) - self.assertEquals(0, exit_code) - self.assert_(HELP_REGEX.search(output), output) - - if IS_LINUX or IS_GNUHURD or IS_GNUKFREEBSD: - self.assert_(STREAM_RESULT_TO_FLAG in output, output) + if HAS_ABSL_FLAGS: + # The Abseil flags library prints the ProgramUsageMessage() with + # --help and returns 1. + self.assertEqual(1, exit_code) else: - self.assert_(STREAM_RESULT_TO_FLAG not in output, output) + self.assertEqual(0, exit_code) + + self.assertTrue(HELP_REGEX.search(output), output) + + if IS_LINUX or IS_GNUHURD or IS_GNUKFREEBSD or IS_OPENBSD: + self.assertIn(STREAM_RESULT_TO_FLAG, output) + else: + self.assertNotIn(STREAM_RESULT_TO_FLAG, output) if SUPPORTS_DEATH_TESTS and not IS_WINDOWS: - self.assert_(DEATH_TEST_STYLE_FLAG in output, output) + self.assertIn(DEATH_TEST_STYLE_FLAG, output) else: - self.assert_(DEATH_TEST_STYLE_FLAG not in output, output) + self.assertNotIn(DEATH_TEST_STYLE_FLAG, output) + + def TestUnknownFlagWithAbseil(self, flag): + """Verifies correct behavior when an unknown flag is specified. + + The right message must be printed and the tests must + skipped when the given flag is specified. + + Args: + flag: A flag to pass to the binary or None. + """ + exit_code, output = RunWithFlag(flag) + self.assertEqual(1, exit_code) + self.assertIn('ERROR: Unknown command line flag', output) def TestNonHelpFlag(self, flag): """Verifies correct behavior when no help flag is specified. 
@@ -134,27 +154,21 @@ class GTestHelpTest(gtest_test_utils.TestCase): """ exit_code, output = RunWithFlag(flag) - self.assert_(exit_code != 0) - self.assert_(not HELP_REGEX.search(output), output) + self.assertNotEqual(exit_code, 0) + self.assertFalse(HELP_REGEX.search(output), output) def testPrintsHelpWithFullFlag(self): self.TestHelpFlag('--help') - def testPrintsHelpWithShortFlag(self): - self.TestHelpFlag('-h') - - def testPrintsHelpWithQuestionFlag(self): - self.TestHelpFlag('-?') - - def testPrintsHelpWithWindowsStyleQuestionFlag(self): - self.TestHelpFlag('/?') - def testPrintsHelpWithUnrecognizedGoogleTestFlag(self): - self.TestHelpFlag(UNKNOWN_FLAG) - - def testPrintsHelpWithIncorrectFlagStyle(self): - for incorrect_flag in INCORRECT_FLAG_VARIANTS: - self.TestHelpFlag(incorrect_flag) + # The behavior is slightly different when Abseil flags is + # used. Abseil flags rejects all unknown flags, while the builtin + # GTest flags implementation interprets an unknown flag with a + # '--gtest_' prefix as a request for help. + if HAS_ABSL_FLAGS: + self.TestUnknownFlagWithAbseil(UNKNOWN_GTEST_PREFIXED_FLAG) + else: + self.TestHelpFlag(UNKNOWN_GTEST_PREFIXED_FLAG) def testRunsTestsWithoutHelpFlag(self): """Verifies that when no help flag is specified, the tests are run @@ -170,4 +184,6 @@ class GTestHelpTest(gtest_test_utils.TestCase): if __name__ == '__main__': + if '--has_absl_flags' in sys.argv: + sys.argv.remove('--has_absl_flags') gtest_test_utils.Main() diff --git a/ext/googletest/googletest/test/gtest_help_test_.cc b/ext/googletest/googletest/test/gtest_help_test_.cc index 750ae6ce95..da289f05f3 100644 --- a/ext/googletest/googletest/test/gtest_help_test_.cc +++ b/ext/googletest/googletest/test/gtest_help_test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This program is meant to be run by gtest_help_test.py. 
Do not run // it directly. diff --git a/ext/googletest/googletest/test/gtest_json_test_utils.py b/ext/googletest/googletest/test/gtest_json_test_utils.py index 62bbfc288f..f62896c923 100644 --- a/ext/googletest/googletest/test/gtest_json_test_utils.py +++ b/ext/googletest/googletest/test/gtest_json_test_utils.py @@ -50,6 +50,8 @@ def normalize(obj): elif key == 'failure': value = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value) return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', value) + elif key == 'file': + return re.sub(r'^.*[/\\](.*)', '\\1', value) else: return normalize(value) if isinstance(obj, dict): diff --git a/ext/googletest/googletest/test/gtest_list_output_unittest.py b/ext/googletest/googletest/test/gtest_list_output_unittest.py index a442fc169c..faacf103c3 100644 --- a/ext/googletest/googletest/test/gtest_list_output_unittest.py +++ b/ext/googletest/googletest/test/gtest_list_output_unittest.py @@ -40,7 +40,7 @@ This script tests such functionality by invoking gtest_list_output_unittest_ import os import re -import gtest_test_utils +from googletest.test import gtest_test_utils GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' GTEST_OUTPUT_FLAG = '--gtest_output' diff --git a/ext/googletest/googletest/test/gtest_main_unittest.cc b/ext/googletest/googletest/test/gtest_main_unittest.cc index eddedeabe8..29cd5510a4 100644 --- a/ext/googletest/googletest/test/gtest_main_unittest.cc +++ b/ext/googletest/googletest/test/gtest_main_unittest.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- #include "gtest/gtest.h" // Tests that we don't have to define main() when we link to @@ -35,8 +34,7 @@ namespace { -TEST(GTestMainTest, ShouldSucceed) { -} +TEST(GTestMainTest, ShouldSucceed) {} } // namespace diff --git a/ext/googletest/googletest/test/gtest_pred_impl_unittest.cc b/ext/googletest/googletest/test/gtest_pred_impl_unittest.cc index bbef9947d9..3d43665374 100644 --- a/ext/googletest/googletest/test/gtest_pred_impl_unittest.cc +++ b/ext/googletest/googletest/test/gtest_pred_impl_unittest.cc @@ -27,9 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// This file is AUTOMATICALLY GENERATED on 11/05/2019 by command -// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! - // Regression test for gtest_pred_impl.h // // This file is generated by a script and quite long. If you intend to @@ -49,8 +46,8 @@ #include -#include "gtest/gtest.h" #include "gtest/gtest-spi.h" +#include "gtest/gtest.h" // A user-defined data type. struct Bool { @@ -80,12 +77,8 @@ bool PredFunction1(T1 v1) { // The following two functions are needed because a compiler doesn't have // a context yet to know which template function must be instantiated. -bool PredFunction1Int(int v1) { - return v1 > 0; -} -bool PredFunction1Bool(Bool v1) { - return v1 > 0; -} +bool PredFunction1Int(int v1) { return v1 > 0; } +bool PredFunction1Bool(Bool v1) { return v1 > 0; } // A unary predicate functor. struct PredFunctor1 { @@ -97,22 +90,17 @@ struct PredFunctor1 { // A unary predicate-formatter function. 
template -testing::AssertionResult PredFormatFunction1(const char* e1, - const T1& v1) { - if (PredFunction1(v1)) - return testing::AssertionSuccess(); +testing::AssertionResult PredFormatFunction1(const char* e1, const T1& v1) { + if (PredFunction1(v1)) return testing::AssertionSuccess(); return testing::AssertionFailure() - << e1 - << " is expected to be positive, but evaluates to " - << v1 << "."; + << e1 << " is expected to be positive, but evaluates to " << v1 << "."; } // A unary predicate-formatter functor. struct PredFormatFunctor1 { template - testing::AssertionResult operator()(const char* e1, - const T1& v1) const { + testing::AssertionResult operator()(const char* e1, const T1& v1) const { return PredFormatFunction1(e1, v1); } }; @@ -130,13 +118,12 @@ class Predicate1Test : public testing::Test { void TearDown() override { // Verifies that each of the predicate's arguments was evaluated // exactly once. - EXPECT_EQ(1, n1_) << - "The predicate assertion didn't evaluate argument 2 " - "exactly once."; + EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 " + "exactly once."; // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -164,104 +151,100 @@ typedef Predicate1Test ASSERT_PRED1Test; // Tests a successful EXPECT_PRED1 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED1Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED1(PredFunction1Int, - ++n1_); + EXPECT_PRED1(PredFunction1Int, ++n1_); finished_ = true; } // Tests a successful EXPECT_PRED1 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED1Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED1(PredFunction1Bool, - Bool(++n1_)); + EXPECT_PRED1(PredFunction1Bool, Bool(++n1_)); finished_ = true; } // Tests a successful EXPECT_PRED1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED1Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED1(PredFunctor1(), - ++n1_); + EXPECT_PRED1(PredFunctor1(), ++n1_); finished_ = true; } // Tests a successful EXPECT_PRED1 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED1Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED1(PredFunctor1(), - Bool(++n1_)); + EXPECT_PRED1(PredFunctor1(), Bool(++n1_)); finished_ = true; } // Tests a failed EXPECT_PRED1 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED1Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED1(PredFunction1Int, - n1_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED1(PredFunction1Int, n1_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED1 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED1Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED1(PredFunction1Bool, - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED1(PredFunction1Bool, Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED1Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED1(PredFunctor1(), - n1_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED1(PredFunctor1(), n1_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED1 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(EXPECT_PRED1Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED1(PredFunctor1(), - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED1(PredFunctor1(), Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED1 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED1Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED1(PredFunction1Int, - ++n1_); + ASSERT_PRED1(PredFunction1Int, ++n1_); finished_ = true; } // Tests a successful ASSERT_PRED1 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED1Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED1(PredFunction1Bool, - Bool(++n1_)); + ASSERT_PRED1(PredFunction1Bool, Bool(++n1_)); finished_ = true; } // Tests a successful ASSERT_PRED1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED1Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED1(PredFunctor1(), - ++n1_); + ASSERT_PRED1(PredFunctor1(), ++n1_); finished_ = true; } // Tests a successful ASSERT_PRED1 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED1(PredFunctor1(), - Bool(++n1_)); + ASSERT_PRED1(PredFunctor1(), Bool(++n1_)); finished_ = true; } @@ -269,147 +252,147 @@ TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED1Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED1(PredFunction1Int, - n1_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED1(PredFunction1Int, n1_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED1 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED1Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED1(PredFunction1Bool, - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED1(PredFunction1Bool, Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED1Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED1(PredFunctor1(), - n1_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED1(PredFunctor1(), n1_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED1 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED1(PredFunctor1(), - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED1(PredFunctor1(), Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a successful EXPECT_PRED_FORMAT1 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT1(PredFormatFunction1, - ++n1_); + EXPECT_PRED_FORMAT1(PredFormatFunction1, ++n1_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT1 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED_FORMAT1(PredFormatFunction1, - Bool(++n1_)); + EXPECT_PRED_FORMAT1(PredFormatFunction1, Bool(++n1_)); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT1 where the // predicate-formatter is a functor on a built-in type (int). 
TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT1(PredFormatFunctor1(), - ++n1_); + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), ++n1_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT1 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED_FORMAT1(PredFormatFunctor1(), - Bool(++n1_)); + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), Bool(++n1_)); finished_ = true; } // Tests a failed EXPECT_PRED_FORMAT1 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT1(PredFormatFunction1, - n1_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunction1, n1_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT1 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT1(PredFormatFunction1, - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunction1, Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT1(PredFormatFunctor1(), - n1_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), n1_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT1 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT1(PredFormatFunctor1(), - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED_FORMAT1 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT1(PredFormatFunction1, - ++n1_); + ASSERT_PRED_FORMAT1(PredFormatFunction1, ++n1_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT1 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED_FORMAT1(PredFormatFunction1, - Bool(++n1_)); + ASSERT_PRED_FORMAT1(PredFormatFunction1, Bool(++n1_)); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT1(PredFormatFunctor1(), - ++n1_); + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), ++n1_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT1 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED_FORMAT1(PredFormatFunctor1(), - Bool(++n1_)); + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), Bool(++n1_)); finished_ = true; } @@ -417,44 +400,48 @@ TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). 
TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT1(PredFormatFunction1, - n1_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunction1, n1_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT1 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT1(PredFormatFunction1, - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunction1, Bool(n1_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT1 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT1(PredFormatFunctor1(), - n1_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), n1_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT1 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT1(PredFormatFunctor1(), - Bool(n1_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), Bool(n1_++)); + finished_ = true; + }, + ""); } // Sample functions/functors for testing binary predicate assertions. @@ -466,44 +453,33 @@ bool PredFunction2(T1 v1, T2 v2) { // The following two functions are needed because a compiler doesn't have // a context yet to know which template function must be instantiated. 
-bool PredFunction2Int(int v1, int v2) { - return v1 + v2 > 0; -} -bool PredFunction2Bool(Bool v1, Bool v2) { - return v1 + v2 > 0; -} +bool PredFunction2Int(int v1, int v2) { return v1 + v2 > 0; } +bool PredFunction2Bool(Bool v1, Bool v2) { return v1 + v2 > 0; } // A binary predicate functor. struct PredFunctor2 { template - bool operator()(const T1& v1, - const T2& v2) { + bool operator()(const T1& v1, const T2& v2) { return v1 + v2 > 0; } }; // A binary predicate-formatter function. template -testing::AssertionResult PredFormatFunction2(const char* e1, - const char* e2, - const T1& v1, - const T2& v2) { - if (PredFunction2(v1, v2)) - return testing::AssertionSuccess(); +testing::AssertionResult PredFormatFunction2(const char* e1, const char* e2, + const T1& v1, const T2& v2) { + if (PredFunction2(v1, v2)) return testing::AssertionSuccess(); return testing::AssertionFailure() - << e1 << " + " << e2 - << " is expected to be positive, but evaluates to " - << v1 + v2 << "."; + << e1 << " + " << e2 + << " is expected to be positive, but evaluates to " << v1 + v2 << "."; } // A binary predicate-formatter functor. struct PredFormatFunctor2 { template - testing::AssertionResult operator()(const char* e1, - const char* e2, - const T1& v1, - const T2& v2) const { + testing::AssertionResult operator()(const char* e1, const char* e2, + const T1& v1, const T2& v2) const { return PredFormatFunction2(e1, e2, v1, v2); } }; @@ -521,16 +497,14 @@ class Predicate2Test : public testing::Test { void TearDown() override { // Verifies that each of the predicate's arguments was evaluated // exactly once. 
- EXPECT_EQ(1, n1_) << - "The predicate assertion didn't evaluate argument 2 " - "exactly once."; - EXPECT_EQ(1, n2_) << - "The predicate assertion didn't evaluate argument 3 " - "exactly once."; + EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 " + "exactly once."; + EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 " + "exactly once."; // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -560,116 +534,100 @@ typedef Predicate2Test ASSERT_PRED2Test; // Tests a successful EXPECT_PRED2 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED2Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED2(PredFunction2Int, - ++n1_, - ++n2_); + EXPECT_PRED2(PredFunction2Int, ++n1_, ++n2_); finished_ = true; } // Tests a successful EXPECT_PRED2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED2Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED2(PredFunction2Bool, - Bool(++n1_), - Bool(++n2_)); + EXPECT_PRED2(PredFunction2Bool, Bool(++n1_), Bool(++n2_)); finished_ = true; } // Tests a successful EXPECT_PRED2 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED2Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED2(PredFunctor2(), - ++n1_, - ++n2_); + EXPECT_PRED2(PredFunctor2(), ++n1_, ++n2_); finished_ = true; } // Tests a successful EXPECT_PRED2 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(EXPECT_PRED2Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED2(PredFunctor2(), - Bool(++n1_), - Bool(++n2_)); + EXPECT_PRED2(PredFunctor2(), Bool(++n1_), Bool(++n2_)); finished_ = true; } // Tests a failed EXPECT_PRED2 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED2Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED2(PredFunction2Int, - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED2(PredFunction2Int, n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED2Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED2(PredFunction2Bool, - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED2(PredFunction2Bool, Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED2 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED2Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED2(PredFunctor2(), - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED2(PredFunctor2(), n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED2 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED2Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED2(PredFunctor2(), - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED2(PredFunctor2(), Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED2 where the // predicate-formatter is a function on a built-in type (int). 
TEST_F(ASSERT_PRED2Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED2(PredFunction2Int, - ++n1_, - ++n2_); + ASSERT_PRED2(PredFunction2Int, ++n1_, ++n2_); finished_ = true; } // Tests a successful ASSERT_PRED2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED2Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED2(PredFunction2Bool, - Bool(++n1_), - Bool(++n2_)); + ASSERT_PRED2(PredFunction2Bool, Bool(++n1_), Bool(++n2_)); finished_ = true; } // Tests a successful ASSERT_PRED2 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED2Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED2(PredFunctor2(), - ++n1_, - ++n2_); + ASSERT_PRED2(PredFunctor2(), ++n1_, ++n2_); finished_ = true; } // Tests a successful ASSERT_PRED2 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED2(PredFunctor2(), - Bool(++n1_), - Bool(++n2_)); + ASSERT_PRED2(PredFunctor2(), Bool(++n1_), Bool(++n2_)); finished_ = true; } @@ -677,163 +635,147 @@ TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED2Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED2(PredFunction2Int, - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED2(PredFunction2Int, n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED2 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED2Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED2(PredFunction2Bool, - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED2(PredFunction2Bool, Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED2 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED2Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED2(PredFunctor2(), - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED2(PredFunctor2(), n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED2 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED2(PredFunctor2(), - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED2(PredFunctor2(), Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a successful EXPECT_PRED_FORMAT2 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT2(PredFormatFunction2, - ++n1_, - ++n2_); + EXPECT_PRED_FORMAT2(PredFormatFunction2, ++n1_, ++n2_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED_FORMAT2(PredFormatFunction2, - Bool(++n1_), - Bool(++n2_)); + EXPECT_PRED_FORMAT2(PredFormatFunction2, Bool(++n1_), Bool(++n2_)); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT2 where the // predicate-formatter is a functor on a built-in type (int). 
TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT2(PredFormatFunctor2(), - ++n1_, - ++n2_); + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), ++n1_, ++n2_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT2 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED_FORMAT2(PredFormatFunctor2(), - Bool(++n1_), - Bool(++n2_)); + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), Bool(++n1_), Bool(++n2_)); finished_ = true; } // Tests a failed EXPECT_PRED_FORMAT2 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(PredFormatFunction2, - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunction2, n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(PredFormatFunction2, - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunction2, Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT2 where the // predicate-formatter is a functor on a built-in type (int). 
TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(PredFormatFunctor2(), - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT2 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(PredFormatFunctor2(), - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED_FORMAT2 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT2(PredFormatFunction2, - ++n1_, - ++n2_); + ASSERT_PRED_FORMAT2(PredFormatFunction2, ++n1_, ++n2_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED_FORMAT2(PredFormatFunction2, - Bool(++n1_), - Bool(++n2_)); + ASSERT_PRED_FORMAT2(PredFormatFunction2, Bool(++n1_), Bool(++n2_)); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT2 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT2(PredFormatFunctor2(), - ++n1_, - ++n2_); + ASSERT_PRED_FORMAT2(PredFormatFunctor2(), ++n1_, ++n2_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT2 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED_FORMAT2(PredFormatFunctor2(), - Bool(++n1_), - Bool(++n2_)); + ASSERT_PRED_FORMAT2(PredFormatFunctor2(), Bool(++n1_), Bool(++n2_)); finished_ = true; } @@ -841,48 +783,48 @@ TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT2(PredFormatFunction2, - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT2(PredFormatFunction2, n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT2 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT2(PredFormatFunction2, - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT2(PredFormatFunction2, Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT2 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT2(PredFormatFunctor2(), - n1_++, - n2_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT2(PredFormatFunctor2(), n1_++, n2_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT2 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT2(PredFormatFunctor2(), - Bool(n1_++), - Bool(n2_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT2(PredFormatFunctor2(), Bool(n1_++), Bool(n2_++)); + finished_ = true; + }, + ""); } // Sample functions/functors for testing ternary predicate assertions. @@ -894,49 +836,36 @@ bool PredFunction3(T1 v1, T2 v2, T3 v3) { // The following two functions are needed because a compiler doesn't have // a context yet to know which template function must be instantiated. -bool PredFunction3Int(int v1, int v2, int v3) { - return v1 + v2 + v3 > 0; -} -bool PredFunction3Bool(Bool v1, Bool v2, Bool v3) { - return v1 + v2 + v3 > 0; -} +bool PredFunction3Int(int v1, int v2, int v3) { return v1 + v2 + v3 > 0; } +bool PredFunction3Bool(Bool v1, Bool v2, Bool v3) { return v1 + v2 + v3 > 0; } // A ternary predicate functor. struct PredFunctor3 { template - bool operator()(const T1& v1, - const T2& v2, - const T3& v3) { + bool operator()(const T1& v1, const T2& v2, const T3& v3) { return v1 + v2 + v3 > 0; } }; // A ternary predicate-formatter function. template -testing::AssertionResult PredFormatFunction3(const char* e1, - const char* e2, - const char* e3, - const T1& v1, - const T2& v2, - const T3& v3) { - if (PredFunction3(v1, v2, v3)) - return testing::AssertionSuccess(); +testing::AssertionResult PredFormatFunction3(const char* e1, const char* e2, + const char* e3, const T1& v1, + const T2& v2, const T3& v3) { + if (PredFunction3(v1, v2, v3)) return testing::AssertionSuccess(); return testing::AssertionFailure() - << e1 << " + " << e2 << " + " << e3 - << " is expected to be positive, but evaluates to " - << v1 + v2 + v3 << "."; + << e1 << " + " << e2 << " + " << e3 + << " is expected to be positive, but evaluates to " << v1 + v2 + v3 + << "."; } // A ternary predicate-formatter functor. 
struct PredFormatFunctor3 { template - testing::AssertionResult operator()(const char* e1, - const char* e2, - const char* e3, - const T1& v1, - const T2& v2, - const T3& v3) const { + testing::AssertionResult operator()(const char* e1, const char* e2, + const char* e3, const T1& v1, + const T2& v2, const T3& v3) const { return PredFormatFunction3(e1, e2, e3, v1, v2, v3); } }; @@ -954,19 +883,16 @@ class Predicate3Test : public testing::Test { void TearDown() override { // Verifies that each of the predicate's arguments was evaluated // exactly once. - EXPECT_EQ(1, n1_) << - "The predicate assertion didn't evaluate argument 2 " - "exactly once."; - EXPECT_EQ(1, n2_) << - "The predicate assertion didn't evaluate argument 3 " - "exactly once."; - EXPECT_EQ(1, n3_) << - "The predicate assertion didn't evaluate argument 4 " - "exactly once."; + EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 " + "exactly once."; + EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 " + "exactly once."; + EXPECT_EQ(1, n3_) << "The predicate assertion didn't evaluate argument 4 " + "exactly once."; // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -998,128 +924,100 @@ typedef Predicate3Test ASSERT_PRED3Test; // Tests a successful EXPECT_PRED3 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED3Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED3(PredFunction3Int, - ++n1_, - ++n2_, - ++n3_); + EXPECT_PRED3(PredFunction3Int, ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful EXPECT_PRED3 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED3Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED3(PredFunction3Bool, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_)); + EXPECT_PRED3(PredFunction3Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } // Tests a successful EXPECT_PRED3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED3Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED3(PredFunctor3(), - ++n1_, - ++n2_, - ++n3_); + EXPECT_PRED3(PredFunctor3(), ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful EXPECT_PRED3 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED3Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED3(PredFunctor3(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_)); + EXPECT_PRED3(PredFunctor3(), Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } // Tests a failed EXPECT_PRED3 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED3Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED3(PredFunction3Int, - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED3(PredFunction3Int, n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED3Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED3(PredFunction3Bool, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED3(PredFunction3Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED3 where the // predicate-formatter is a functor on a built-in type (int). 
TEST_F(EXPECT_PRED3Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED3(PredFunctor3(), - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED3(PredFunctor3(), n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED3 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED3Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED3(PredFunctor3(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED3(PredFunctor3(), Bool(n1_++), Bool(n2_++), Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED3 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED3Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED3(PredFunction3Int, - ++n1_, - ++n2_, - ++n3_); + ASSERT_PRED3(PredFunction3Int, ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful ASSERT_PRED3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED3Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED3(PredFunction3Bool, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_)); + ASSERT_PRED3(PredFunction3Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } // Tests a successful ASSERT_PRED3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED3Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED3(PredFunctor3(), - ++n1_, - ++n2_, - ++n3_); + ASSERT_PRED3(PredFunctor3(), ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful ASSERT_PRED3 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED3(PredFunctor3(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_)); + ASSERT_PRED3(PredFunctor3(), Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } @@ -1127,70 +1025,61 @@ TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED3Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED3(PredFunction3Int, - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED3(PredFunction3Int, n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED3Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED3(PredFunction3Bool, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED3(PredFunction3Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED3Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED3(PredFunctor3(), - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED3(PredFunctor3(), n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED3 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED3(PredFunctor3(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED3(PredFunctor3(), Bool(n1_++), Bool(n2_++), Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a successful EXPECT_PRED_FORMAT3 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT3(PredFormatFunction3, - ++n1_, - ++n2_, - ++n3_); + EXPECT_PRED_FORMAT3(PredFormatFunction3, ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED_FORMAT3(PredFormatFunction3, - Bool(++n1_), - Bool(++n2_), + EXPECT_PRED_FORMAT3(PredFormatFunction3, Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } @@ -1198,19 +1087,14 @@ TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) { // Tests a successful EXPECT_PRED_FORMAT3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT3(PredFormatFunctor3(), - ++n1_, - ++n2_, - ++n3_); + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT3 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED_FORMAT3(PredFormatFunctor3(), - Bool(++n1_), - Bool(++n2_), + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } @@ -1218,67 +1102,60 @@ TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) { // Tests a failed EXPECT_PRED_FORMAT3 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT3(PredFormatFunction3, - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunction3, n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT3(PredFormatFunction3, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunction3, Bool(n1_++), Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT3(PredFormatFunctor3(), - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT3 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT3(PredFormatFunctor3(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), Bool(n1_++), Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED_FORMAT3 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT3(PredFormatFunction3, - ++n1_, - ++n2_, - ++n3_); + ASSERT_PRED_FORMAT3(PredFormatFunction3, ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED_FORMAT3(PredFormatFunction3, - Bool(++n1_), - Bool(++n2_), + ASSERT_PRED_FORMAT3(PredFormatFunction3, Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } @@ -1286,19 +1163,14 @@ TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) { // Tests a successful ASSERT_PRED_FORMAT3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT3(PredFormatFunctor3(), - ++n1_, - ++n2_, - ++n3_); + ASSERT_PRED_FORMAT3(PredFormatFunctor3(), ++n1_, ++n2_, ++n3_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT3 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED_FORMAT3(PredFormatFunctor3(), - Bool(++n1_), - Bool(++n2_), + ASSERT_PRED_FORMAT3(PredFormatFunctor3(), Bool(++n1_), Bool(++n2_), Bool(++n3_)); finished_ = true; } @@ -1307,52 +1179,50 @@ TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). 
TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT3(PredFormatFunction3, - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT3(PredFormatFunction3, n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT3 where the // predicate-formatter is a function on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT3(PredFormatFunction3, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT3(PredFormatFunction3, Bool(n1_++), Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT3 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT3(PredFormatFunctor3(), - n1_++, - n2_++, - n3_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT3(PredFormatFunctor3(), n1_++, n2_++, n3_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT3 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT3(PredFormatFunctor3(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT3(PredFormatFunctor3(), Bool(n1_++), Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, + ""); } // Sample functions/functors for testing 4-ary predicate assertions. 
@@ -1374,43 +1244,31 @@ bool PredFunction4Bool(Bool v1, Bool v2, Bool v3, Bool v4) { // A 4-ary predicate functor. struct PredFunctor4 { template - bool operator()(const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4) { + bool operator()(const T1& v1, const T2& v2, const T3& v3, const T4& v4) { return v1 + v2 + v3 + v4 > 0; } }; // A 4-ary predicate-formatter function. template -testing::AssertionResult PredFormatFunction4(const char* e1, - const char* e2, - const char* e3, - const char* e4, - const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4) { - if (PredFunction4(v1, v2, v3, v4)) - return testing::AssertionSuccess(); +testing::AssertionResult PredFormatFunction4(const char* e1, const char* e2, + const char* e3, const char* e4, + const T1& v1, const T2& v2, + const T3& v3, const T4& v4) { + if (PredFunction4(v1, v2, v3, v4)) return testing::AssertionSuccess(); return testing::AssertionFailure() - << e1 << " + " << e2 << " + " << e3 << " + " << e4 - << " is expected to be positive, but evaluates to " - << v1 + v2 + v3 + v4 << "."; + << e1 << " + " << e2 << " + " << e3 << " + " << e4 + << " is expected to be positive, but evaluates to " + << v1 + v2 + v3 + v4 << "."; } // A 4-ary predicate-formatter functor. struct PredFormatFunctor4 { template - testing::AssertionResult operator()(const char* e1, - const char* e2, - const char* e3, - const char* e4, - const T1& v1, - const T2& v2, - const T3& v3, + testing::AssertionResult operator()(const char* e1, const char* e2, + const char* e3, const char* e4, + const T1& v1, const T2& v2, const T3& v3, const T4& v4) const { return PredFormatFunction4(e1, e2, e3, e4, v1, v2, v3, v4); } @@ -1429,22 +1287,18 @@ class Predicate4Test : public testing::Test { void TearDown() override { // Verifies that each of the predicate's arguments was evaluated // exactly once. 
- EXPECT_EQ(1, n1_) << - "The predicate assertion didn't evaluate argument 2 " - "exactly once."; - EXPECT_EQ(1, n2_) << - "The predicate assertion didn't evaluate argument 3 " - "exactly once."; - EXPECT_EQ(1, n3_) << - "The predicate assertion didn't evaluate argument 4 " - "exactly once."; - EXPECT_EQ(1, n4_) << - "The predicate assertion didn't evaluate argument 5 " - "exactly once."; + EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 " + "exactly once."; + EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 " + "exactly once."; + EXPECT_EQ(1, n3_) << "The predicate assertion didn't evaluate argument 4 " + "exactly once."; + EXPECT_EQ(1, n4_) << "The predicate assertion didn't evaluate argument 5 " + "exactly once."; // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -1478,21 +1332,14 @@ typedef Predicate4Test ASSERT_PRED4Test; // Tests a successful EXPECT_PRED4 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED4Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED4(PredFunction4Int, - ++n1_, - ++n2_, - ++n3_, - ++n4_); + EXPECT_PRED4(PredFunction4Int, ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful EXPECT_PRED4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED4(PredFunction4Bool, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), + EXPECT_PRED4(PredFunction4Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_), Bool(++n4_)); finished_ = true; } @@ -1500,21 +1347,14 @@ TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeSuccess) { // Tests a successful EXPECT_PRED4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED4Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED4(PredFunctor4(), - ++n1_, - ++n2_, - ++n3_, - ++n4_); + EXPECT_PRED4(PredFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful EXPECT_PRED4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED4(PredFunctor4(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), + EXPECT_PRED4(PredFunctor4(), Bool(++n1_), Bool(++n2_), Bool(++n3_), Bool(++n4_)); finished_ = true; } @@ -1522,73 +1362,60 @@ TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeSuccess) { // Tests a failed EXPECT_PRED4 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED4Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED4(PredFunction4Int, - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED4(PredFunction4Int, n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED4(PredFunction4Bool, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED4(PredFunction4Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED4Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED4(PredFunctor4(), - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED4(PredFunctor4(), n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED4(PredFunctor4(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED4(PredFunctor4(), Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED4 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED4Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED4(PredFunction4Int, - ++n1_, - ++n2_, - ++n3_, - ++n4_); + ASSERT_PRED4(PredFunction4Int, ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful ASSERT_PRED4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED4(PredFunction4Bool, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), + ASSERT_PRED4(PredFunction4Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_), Bool(++n4_)); finished_ = true; } @@ -1596,21 +1423,14 @@ TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeSuccess) { // Tests a successful ASSERT_PRED4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED4Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED4(PredFunctor4(), - ++n1_, - ++n2_, - ++n3_, - ++n4_); + ASSERT_PRED4(PredFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful ASSERT_PRED4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED4(PredFunctor4(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), + ASSERT_PRED4(PredFunctor4(), Bool(++n1_), Bool(++n2_), Bool(++n3_), Bool(++n4_)); finished_ = true; } @@ -1619,195 +1439,155 @@ TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED4Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED4(PredFunction4Int, - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED4(PredFunction4Int, n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED4(PredFunction4Bool, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED4(PredFunction4Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED4Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED4(PredFunctor4(), - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED4(PredFunctor4(), n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED4(PredFunctor4(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED4(PredFunctor4(), Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a successful EXPECT_PRED_FORMAT4 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT4(PredFormatFunction4, - ++n1_, - ++n2_, - ++n3_, - ++n4_); + EXPECT_PRED_FORMAT4(PredFormatFunction4, ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED_FORMAT4(PredFormatFunction4, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_)); + EXPECT_PRED_FORMAT4(PredFormatFunction4, Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_)); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT4(PredFormatFunctor4(), - ++n1_, - ++n2_, - ++n3_, - ++n4_); + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED_FORMAT4(PredFormatFunctor4(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_)); + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_)); finished_ = true; } // Tests a failed EXPECT_PRED_FORMAT4 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT4(PredFormatFunction4, - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunction4, n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT4(PredFormatFunction4, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunction4, Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT4(PredFormatFunctor4(), - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT4(PredFormatFunctor4(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED_FORMAT4 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT4(PredFormatFunction4, - ++n1_, - ++n2_, - ++n3_, - ++n4_); + ASSERT_PRED_FORMAT4(PredFormatFunction4, ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED_FORMAT4(PredFormatFunction4, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_)); + ASSERT_PRED_FORMAT4(PredFormatFunction4, Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_)); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT4(PredFormatFunctor4(), - ++n1_, - ++n2_, - ++n3_, - ++n4_); + ASSERT_PRED_FORMAT4(PredFormatFunctor4(), ++n1_, ++n2_, ++n3_, ++n4_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED_FORMAT4(PredFormatFunctor4(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_)); + ASSERT_PRED_FORMAT4(PredFormatFunctor4(), Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_)); finished_ = true; } @@ -1815,56 +1595,50 @@ TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT4(PredFormatFunction4, - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT4(PredFormatFunction4, n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT4 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT4(PredFormatFunction4, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT4(PredFormatFunction4, Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT4 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT4(PredFormatFunctor4(), - n1_++, - n2_++, - n3_++, - n4_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT4(PredFormatFunctor4(), n1_++, n2_++, n3_++, n4_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT4 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT4(PredFormatFunctor4(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT4(PredFormatFunctor4(), Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++)); + finished_ = true; + }, + ""); } // Sample functions/functors for testing 5-ary predicate assertions. @@ -1886,10 +1660,7 @@ bool PredFunction5Bool(Bool v1, Bool v2, Bool v3, Bool v4, Bool v5) { // A 5-ary predicate functor. struct PredFunctor5 { template - bool operator()(const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4, + bool operator()(const T1& v1, const T2& v2, const T3& v3, const T4& v4, const T5& v5) { return v1 + v2 + v3 + v4 + v5 > 0; } @@ -1897,37 +1668,26 @@ struct PredFunctor5 { // A 5-ary predicate-formatter function. 
template -testing::AssertionResult PredFormatFunction5(const char* e1, - const char* e2, - const char* e3, - const char* e4, - const char* e5, - const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4, - const T5& v5) { - if (PredFunction5(v1, v2, v3, v4, v5)) - return testing::AssertionSuccess(); +testing::AssertionResult PredFormatFunction5(const char* e1, const char* e2, + const char* e3, const char* e4, + const char* e5, const T1& v1, + const T2& v2, const T3& v3, + const T4& v4, const T5& v5) { + if (PredFunction5(v1, v2, v3, v4, v5)) return testing::AssertionSuccess(); return testing::AssertionFailure() - << e1 << " + " << e2 << " + " << e3 << " + " << e4 << " + " << e5 - << " is expected to be positive, but evaluates to " - << v1 + v2 + v3 + v4 + v5 << "."; + << e1 << " + " << e2 << " + " << e3 << " + " << e4 << " + " << e5 + << " is expected to be positive, but evaluates to " + << v1 + v2 + v3 + v4 + v5 << "."; } // A 5-ary predicate-formatter functor. struct PredFormatFunctor5 { template - testing::AssertionResult operator()(const char* e1, - const char* e2, - const char* e3, - const char* e4, - const char* e5, - const T1& v1, - const T2& v2, - const T3& v3, - const T4& v4, + testing::AssertionResult operator()(const char* e1, const char* e2, + const char* e3, const char* e4, + const char* e5, const T1& v1, + const T2& v2, const T3& v3, const T4& v4, const T5& v5) const { return PredFormatFunction5(e1, e2, e3, e4, e5, v1, v2, v3, v4, v5); } @@ -1946,25 +1706,20 @@ class Predicate5Test : public testing::Test { void TearDown() override { // Verifies that each of the predicate's arguments was evaluated // exactly once. 
- EXPECT_EQ(1, n1_) << - "The predicate assertion didn't evaluate argument 2 " - "exactly once."; - EXPECT_EQ(1, n2_) << - "The predicate assertion didn't evaluate argument 3 " - "exactly once."; - EXPECT_EQ(1, n3_) << - "The predicate assertion didn't evaluate argument 4 " - "exactly once."; - EXPECT_EQ(1, n4_) << - "The predicate assertion didn't evaluate argument 5 " - "exactly once."; - EXPECT_EQ(1, n5_) << - "The predicate assertion didn't evaluate argument 6 " - "exactly once."; + EXPECT_EQ(1, n1_) << "The predicate assertion didn't evaluate argument 2 " + "exactly once."; + EXPECT_EQ(1, n2_) << "The predicate assertion didn't evaluate argument 3 " + "exactly once."; + EXPECT_EQ(1, n3_) << "The predicate assertion didn't evaluate argument 4 " + "exactly once."; + EXPECT_EQ(1, n4_) << "The predicate assertion didn't evaluate argument 5 " + "exactly once."; + EXPECT_EQ(1, n5_) << "The predicate assertion didn't evaluate argument 6 " + "exactly once."; // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -2000,152 +1755,106 @@ typedef Predicate5Test ASSERT_PRED5Test; // Tests a successful EXPECT_PRED5 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED5Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED5(PredFunction5Int, - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + EXPECT_PRED5(PredFunction5Int, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful EXPECT_PRED5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED5Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED5(PredFunction5Bool, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + EXPECT_PRED5(PredFunction5Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_), + Bool(++n4_), Bool(++n5_)); finished_ = true; } // Tests a successful EXPECT_PRED5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED5Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED5(PredFunctor5(), - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + EXPECT_PRED5(PredFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful EXPECT_PRED5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED5Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED5(PredFunctor5(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + EXPECT_PRED5(PredFunctor5(), Bool(++n1_), Bool(++n2_), Bool(++n3_), + Bool(++n4_), Bool(++n5_)); finished_ = true; } // Tests a failed EXPECT_PRED5 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED5Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED5(PredFunction5Int, - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED5(PredFunction5Int, n1_++, n2_++, n3_++, n4_++, n5_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED5Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED5(PredFunction5Bool, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED5(PredFunction5Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED5Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED5(PredFunctor5(), - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED5(PredFunctor5(), n1_++, n2_++, n3_++, n4_++, n5_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED5Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED5(PredFunctor5(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED5(PredFunctor5(), Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED5 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED5Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED5(PredFunction5Int, - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + ASSERT_PRED5(PredFunction5Int, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful ASSERT_PRED5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED5Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED5(PredFunction5Bool, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + ASSERT_PRED5(PredFunction5Bool, Bool(++n1_), Bool(++n2_), Bool(++n3_), + Bool(++n4_), Bool(++n5_)); finished_ = true; } // Tests a successful ASSERT_PRED5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED5Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED5(PredFunctor5(), - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + ASSERT_PRED5(PredFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful ASSERT_PRED5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED5(PredFunctor5(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + ASSERT_PRED5(PredFunctor5(), Bool(++n1_), Bool(++n2_), Bool(++n3_), + Bool(++n4_), Bool(++n5_)); finished_ = true; } @@ -2153,211 +1862,157 @@ TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED5Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED5(PredFunction5Int, - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED5(PredFunction5Int, n1_++, n2_++, n3_++, n4_++, n5_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED5Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED5(PredFunction5Bool, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED5(PredFunction5Bool, Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED5Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED5(PredFunctor5(), - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED5(PredFunctor5(), n1_++, n2_++, n3_++, n4_++, n5_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED5(PredFunctor5(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED5(PredFunctor5(), Bool(n1_++), Bool(n2_++), Bool(n3_++), + Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a successful EXPECT_PRED_FORMAT5 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT5(PredFormatFunction5, - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + EXPECT_PRED_FORMAT5(PredFormatFunction5, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnUserTypeSuccess) { - EXPECT_PRED_FORMAT5(PredFormatFunction5, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + EXPECT_PRED_FORMAT5(PredFormatFunction5, Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_), Bool(++n5_)); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnBuiltInTypeSuccess) { - EXPECT_PRED_FORMAT5(PredFormatFunctor5(), - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful EXPECT_PRED_FORMAT5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) { - EXPECT_PRED_FORMAT5(PredFormatFunctor5(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_), Bool(++n5_)); finished_ = true; } // Tests a failed EXPECT_PRED_FORMAT5 where the // predicate-formatter is a function on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT5(PredFormatFunction5, - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunction5, n1_++, n2_++, n3_++, n4_++, + n5_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT5(PredFormatFunction5, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunction5, Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnBuiltInTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT5(PredFormatFunctor5(), - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), n1_++, n2_++, n3_++, n4_++, + n5_++); + finished_ = true; + }, + ""); } // Tests a failed EXPECT_PRED_FORMAT5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnUserTypeFailure) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT5(PredFormatFunctor5(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a successful ASSERT_PRED_FORMAT5 where the // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT5(PredFormatFunction5, - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + ASSERT_PRED_FORMAT5(PredFormatFunction5, ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnUserTypeSuccess) { - ASSERT_PRED_FORMAT5(PredFormatFunction5, - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + ASSERT_PRED_FORMAT5(PredFormatFunction5, Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_), Bool(++n5_)); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnBuiltInTypeSuccess) { - ASSERT_PRED_FORMAT5(PredFormatFunctor5(), - ++n1_, - ++n2_, - ++n3_, - ++n4_, - ++n5_); + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), ++n1_, ++n2_, ++n3_, ++n4_, ++n5_); finished_ = true; } // Tests a successful ASSERT_PRED_FORMAT5 where the // predicate-formatter is a functor on a user-defined type (Bool). TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) { - ASSERT_PRED_FORMAT5(PredFormatFunctor5(), - Bool(++n1_), - Bool(++n2_), - Bool(++n3_), - Bool(++n4_), - Bool(++n5_)); + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), Bool(++n1_), Bool(++n2_), + Bool(++n3_), Bool(++n4_), Bool(++n5_)); finished_ = true; } @@ -2365,58 +2020,50 @@ TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) { // predicate-formatter is a function on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT5(PredFormatFunction5, - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunction5, n1_++, n2_++, n3_++, n4_++, + n5_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT5 where the // predicate-formatter is a function on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT5(PredFormatFunction5, - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunction5, Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT5 where the // predicate-formatter is a functor on a built-in type (int). TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnBuiltInTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT5(PredFormatFunctor5(), - n1_++, - n2_++, - n3_++, - n4_++, - n5_++); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), n1_++, n2_++, n3_++, n4_++, + n5_++); + finished_ = true; + }, + ""); } // Tests a failed ASSERT_PRED_FORMAT5 where the // predicate-formatter is a functor on a user-defined type (Bool). 
TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeFailure) { expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT5(PredFormatFunctor5(), - Bool(n1_++), - Bool(n2_++), - Bool(n3_++), - Bool(n4_++), - Bool(n5_++)); - finished_ = true; - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), Bool(n1_++), Bool(n2_++), + Bool(n3_++), Bool(n4_++), Bool(n5_++)); + finished_ = true; + }, + ""); } diff --git a/ext/googletest/googletest/test/gtest_premature_exit_test.cc b/ext/googletest/googletest/test/gtest_premature_exit_test.cc index 1d1187eff0..1a0c5ea4f0 100644 --- a/ext/googletest/googletest/test/gtest_premature_exit_test.cc +++ b/ext/googletest/googletest/test/gtest_premature_exit_test.cc @@ -81,15 +81,17 @@ TEST_F(PrematureExitDeathTest, FileExistsDuringExecutionOfDeathTest) { return; } - EXPECT_DEATH_IF_SUPPORTED({ - // If the file exists, crash the process such that the main test - // process will catch the (expected) crash and report a success; - // otherwise don't crash, which will cause the main test process - // to report that the death test has failed. - if (PrematureExitFileExists()) { - exit(1); - } - }, ""); + EXPECT_DEATH_IF_SUPPORTED( + { + // If the file exists, crash the process such that the main test + // process will catch the (expected) crash and report a success; + // otherwise don't crash, which will cause the main test process + // to report that the death test has failed. 
+ if (PrematureExitFileExists()) { + exit(1); + } + }, + ""); } // Tests that the premature-exit file exists during the execution of a @@ -106,7 +108,7 @@ TEST_F(PrematureExitTest, PrematureExitFileExistsDuringTestExecution) { } // namespace -int main(int argc, char **argv) { +int main(int argc, char** argv) { InitGoogleTest(&argc, argv); const int exit_code = RUN_ALL_TESTS(); diff --git a/ext/googletest/googletest/test/gtest_repeat_test.cc b/ext/googletest/googletest/test/gtest_repeat_test.cc index c7af3efb6d..73fb8dc906 100644 --- a/ext/googletest/googletest/test/gtest_repeat_test.cc +++ b/ext/googletest/googletest/test/gtest_repeat_test.cc @@ -27,11 +27,12 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests the --gtest_repeat=number flag. #include + #include + #include "gtest/gtest.h" #include "src/gtest-internal-inl.h" @@ -39,20 +40,19 @@ namespace { // We need this when we are testing Google Test itself and therefore // cannot use Google Test assertions. 
-#define GTEST_CHECK_INT_EQ_(expected, actual) \ - do {\ - const int expected_val = (expected);\ - const int actual_val = (actual);\ - if (::testing::internal::IsTrue(expected_val != actual_val)) {\ - ::std::cout << "Value of: " #actual "\n"\ - << " Actual: " << actual_val << "\n"\ - << "Expected: " #expected "\n"\ - << "Which is: " << expected_val << "\n";\ - ::testing::internal::posix::Abort();\ - }\ +#define GTEST_CHECK_INT_EQ_(expected, actual) \ + do { \ + const int expected_val = (expected); \ + const int actual_val = (actual); \ + if (::testing::internal::IsTrue(expected_val != actual_val)) { \ + ::std::cout << "Value of: " #actual "\n" \ + << " Actual: " << actual_val << "\n" \ + << "Expected: " #expected "\n" \ + << "Which is: " << expected_val << "\n"; \ + ::testing::internal::posix::Abort(); \ + } \ } while (::testing::internal::AlwaysFalse()) - // Used for verifying that global environment set-up and tear-down are // inside the --gtest_repeat loop. @@ -79,9 +79,7 @@ TEST(FooTest, ShouldFail) { int g_should_pass_count = 0; -TEST(FooTest, ShouldPass) { - g_should_pass_count++; -} +TEST(FooTest, ShouldPass) { g_should_pass_count++; } // A test that contains a thread-safe death test and a fast death // test. It should pass. @@ -108,8 +106,7 @@ TEST_P(MyParamTest, ShouldPass) { GTEST_CHECK_INT_EQ_(g_param_test_count % kNumberOfParamTests, GetParam()); g_param_test_count++; } -INSTANTIATE_TEST_SUITE_P(MyParamSequence, - MyParamTest, +INSTANTIATE_TEST_SUITE_P(MyParamSequence, MyParamTest, testing::Range(0, kNumberOfParamTests)); // Resets the count for each test. @@ -142,6 +139,7 @@ void TestRepeatUnspecified() { // Tests the behavior of Google Test when --gtest_repeat has the given value. void TestRepeat(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); ResetCounts(); GTEST_CHECK_INT_EQ_(repeat > 0 ? 1 : 0, RUN_ALL_TESTS()); @@ -152,6 +150,7 @@ void TestRepeat(int repeat) { // set of tests. 
void TestRepeatWithEmptyFilter(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); GTEST_FLAG_SET(filter, "None"); ResetCounts(); @@ -163,6 +162,7 @@ void TestRepeatWithEmptyFilter(int repeat) { // successful tests. void TestRepeatWithFilterForSuccessfulTests(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); GTEST_FLAG_SET(filter, "*-*ShouldFail"); ResetCounts(); @@ -179,6 +179,7 @@ void TestRepeatWithFilterForSuccessfulTests(int repeat) { // failed tests. void TestRepeatWithFilterForFailedTests(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); GTEST_FLAG_SET(filter, "*ShouldFail"); ResetCounts(); diff --git a/ext/googletest/googletest/test/gtest_skip_check_output_test.py b/ext/googletest/googletest/test/gtest_skip_check_output_test.py index 14e63ab897..1c87b44f01 100755 --- a/ext/googletest/googletest/test/gtest_skip_check_output_test.py +++ b/ext/googletest/googletest/test/gtest_skip_check_output_test.py @@ -35,7 +35,7 @@ output. import re -import gtest_test_utils +from googletest.test import gtest_test_utils # Path to the gtest_skip_in_environment_setup_test binary EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_skip_test') diff --git a/ext/googletest/googletest/test/gtest_skip_environment_check_output_test.py b/ext/googletest/googletest/test/gtest_skip_environment_check_output_test.py index 6e791556aa..6960b11a58 100755 --- a/ext/googletest/googletest/test/gtest_skip_environment_check_output_test.py +++ b/ext/googletest/googletest/test/gtest_skip_environment_check_output_test.py @@ -33,7 +33,7 @@ This script invokes gtest_skip_in_environment_setup_test_ and verifies its output. 
""" -import gtest_test_utils +from googletest.test import gtest_test_utils # Path to the gtest_skip_in_environment_setup_test binary EXE_PATH = gtest_test_utils.GetTestExecutablePath( diff --git a/ext/googletest/googletest/test/gtest_skip_in_environment_setup_test.cc b/ext/googletest/googletest/test/gtest_skip_in_environment_setup_test.cc index 9372310638..5f21c27dcd 100644 --- a/ext/googletest/googletest/test/gtest_skip_in_environment_setup_test.cc +++ b/ext/googletest/googletest/test/gtest_skip_in_environment_setup_test.cc @@ -31,6 +31,7 @@ // testcases being skipped. #include + #include "gtest/gtest.h" class SetupEnvironment : public testing::Environment { diff --git a/ext/googletest/googletest/test/gtest_skip_test.cc b/ext/googletest/googletest/test/gtest_skip_test.cc index 4a23004cca..e1b8d655ad 100644 --- a/ext/googletest/googletest/test/gtest_skip_test.cc +++ b/ext/googletest/googletest/test/gtest_skip_test.cc @@ -46,10 +46,6 @@ class Fixture : public Test { } }; -TEST_F(Fixture, SkipsOneTest) { - EXPECT_EQ(5, 7); -} +TEST_F(Fixture, SkipsOneTest) { EXPECT_EQ(5, 7); } -TEST_F(Fixture, SkipsAnotherTest) { - EXPECT_EQ(99, 100); -} +TEST_F(Fixture, SkipsAnotherTest) { EXPECT_EQ(99, 100); } diff --git a/ext/googletest/googletest/test/gtest_sole_header_test.cc b/ext/googletest/googletest/test/gtest_sole_header_test.cc index 1d94ac6b3a..e8e22a83c5 100644 --- a/ext/googletest/googletest/test/gtest_sole_header_test.cc +++ b/ext/googletest/googletest/test/gtest_sole_header_test.cc @@ -35,9 +35,7 @@ namespace { -void Subroutine() { - EXPECT_EQ(42, 42); -} +void Subroutine() { EXPECT_EQ(42, 42); } TEST(NoFatalFailureTest, ExpectNoFatalFailure) { EXPECT_NO_FATAL_FAILURE(;); diff --git a/ext/googletest/googletest/test/gtest_stress_test.cc b/ext/googletest/googletest/test/gtest_stress_test.cc index 843481910f..24b173ffd8 100644 --- a/ext/googletest/googletest/test/gtest_stress_test.cc +++ b/ext/googletest/googletest/test/gtest_stress_test.cc @@ -27,14 +27,12 @@ // 
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests that SCOPED_TRACE() and various Google Test assertions can be // used in a large number of threads concurrently. -#include "gtest/gtest.h" - #include +#include "gtest/gtest.h" #include "src/gtest-internal-inl.h" #if GTEST_IS_THREADSAFE @@ -66,8 +64,7 @@ std::string IdToString(int id) { } void ExpectKeyAndValueWereRecordedForId( - const std::vector& properties, - int id, const char* suffix) { + const std::vector& properties, int id, const char* suffix) { TestPropertyKeyIs matches_key(IdToKey(id, suffix).c_str()); const std::vector::const_iterator property = std::find_if(properties.begin(), properties.end(), matches_key); @@ -121,15 +118,13 @@ TEST(StressTest, CanUseScopedTraceAndAssertionsInManyThreads) { std::unique_ptr > threads[kThreadCount]; Notification threads_can_start; for (int i = 0; i != kThreadCount; i++) - threads[i].reset(new ThreadWithParam(&ManyAsserts, - i, - &threads_can_start)); + threads[i].reset( + new ThreadWithParam(&ManyAsserts, i, &threads_can_start)); threads_can_start.Notify(); // Blocks until all the threads are done. - for (int i = 0; i != kThreadCount; i++) - threads[i]->Join(); + for (int i = 0; i != kThreadCount; i++) threads[i]->Join(); } // Ensures that kThreadCount*kThreadCount failures have been reported. 
@@ -149,7 +144,7 @@ TEST(StressTest, CanUseScopedTraceAndAssertionsInManyThreads) { ExpectKeyAndValueWereRecordedForId(properties, i, "string"); ExpectKeyAndValueWereRecordedForId(properties, i, "int"); } - CheckTestFailureCount(kThreadCount*kThreadCount); + CheckTestFailureCount(kThreadCount * kThreadCount); } void FailingThread(bool is_fatal) { @@ -196,8 +191,8 @@ TEST(FatalFailureTest, ExpectFatalFailureIgnoresFailuresInOtherThreads) { TEST(FatalFailureOnAllThreadsTest, ExpectFatalFailureOnAllThreads) { // This statement should succeed, because failures in all threads are // considered. - EXPECT_FATAL_FAILURE_ON_ALL_THREADS( - GenerateFatalFailureInAnotherThread(true), "expected"); + EXPECT_FATAL_FAILURE_ON_ALL_THREADS(GenerateFatalFailureInAnotherThread(true), + "expected"); CheckTestFailureCount(0); // We need to add a failure, because main() checks that there are failures. // But when only this test is run, we shouldn't have any failures. @@ -226,7 +221,7 @@ TEST(NonFatalFailureOnAllThreadsTest, ExpectNonFatalFailureOnAllThreads) { } // namespace } // namespace testing -int main(int argc, char **argv) { +int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); const int result = RUN_ALL_TESTS(); // Expected to fail. @@ -238,8 +233,7 @@ int main(int argc, char **argv) { #else TEST(StressTest, - DISABLED_ThreadSafetyTestsAreSkippedWhenGoogleTestIsNotThreadSafe) { -} + DISABLED_ThreadSafetyTestsAreSkippedWhenGoogleTestIsNotThreadSafe) {} int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); diff --git a/ext/googletest/googletest/test/gtest_test_macro_stack_footprint_test.cc b/ext/googletest/googletest/test/gtest_test_macro_stack_footprint_test.cc index a48db05012..45f368b3a5 100644 --- a/ext/googletest/googletest/test/gtest_test_macro_stack_footprint_test.cc +++ b/ext/googletest/googletest/test/gtest_test_macro_stack_footprint_test.cc @@ -39,42 +39,42 @@ // This macro defines 10 dummy tests. 
#define TEN_TESTS_(test_case_name) \ - TEST(test_case_name, T0) {} \ - TEST(test_case_name, T1) {} \ - TEST(test_case_name, T2) {} \ - TEST(test_case_name, T3) {} \ - TEST(test_case_name, T4) {} \ - TEST(test_case_name, T5) {} \ - TEST(test_case_name, T6) {} \ - TEST(test_case_name, T7) {} \ - TEST(test_case_name, T8) {} \ + TEST(test_case_name, T0) {} \ + TEST(test_case_name, T1) {} \ + TEST(test_case_name, T2) {} \ + TEST(test_case_name, T3) {} \ + TEST(test_case_name, T4) {} \ + TEST(test_case_name, T5) {} \ + TEST(test_case_name, T6) {} \ + TEST(test_case_name, T7) {} \ + TEST(test_case_name, T8) {} \ TEST(test_case_name, T9) {} // This macro defines 100 dummy tests. #define HUNDRED_TESTS_(test_case_name_prefix) \ - TEN_TESTS_(test_case_name_prefix ## 0) \ - TEN_TESTS_(test_case_name_prefix ## 1) \ - TEN_TESTS_(test_case_name_prefix ## 2) \ - TEN_TESTS_(test_case_name_prefix ## 3) \ - TEN_TESTS_(test_case_name_prefix ## 4) \ - TEN_TESTS_(test_case_name_prefix ## 5) \ - TEN_TESTS_(test_case_name_prefix ## 6) \ - TEN_TESTS_(test_case_name_prefix ## 7) \ - TEN_TESTS_(test_case_name_prefix ## 8) \ - TEN_TESTS_(test_case_name_prefix ## 9) + TEN_TESTS_(test_case_name_prefix##0) \ + TEN_TESTS_(test_case_name_prefix##1) \ + TEN_TESTS_(test_case_name_prefix##2) \ + TEN_TESTS_(test_case_name_prefix##3) \ + TEN_TESTS_(test_case_name_prefix##4) \ + TEN_TESTS_(test_case_name_prefix##5) \ + TEN_TESTS_(test_case_name_prefix##6) \ + TEN_TESTS_(test_case_name_prefix##7) \ + TEN_TESTS_(test_case_name_prefix##8) \ + TEN_TESTS_(test_case_name_prefix##9) // This macro defines 1000 dummy tests. 
#define THOUSAND_TESTS_(test_case_name_prefix) \ - HUNDRED_TESTS_(test_case_name_prefix ## 0) \ - HUNDRED_TESTS_(test_case_name_prefix ## 1) \ - HUNDRED_TESTS_(test_case_name_prefix ## 2) \ - HUNDRED_TESTS_(test_case_name_prefix ## 3) \ - HUNDRED_TESTS_(test_case_name_prefix ## 4) \ - HUNDRED_TESTS_(test_case_name_prefix ## 5) \ - HUNDRED_TESTS_(test_case_name_prefix ## 6) \ - HUNDRED_TESTS_(test_case_name_prefix ## 7) \ - HUNDRED_TESTS_(test_case_name_prefix ## 8) \ - HUNDRED_TESTS_(test_case_name_prefix ## 9) + HUNDRED_TESTS_(test_case_name_prefix##0) \ + HUNDRED_TESTS_(test_case_name_prefix##1) \ + HUNDRED_TESTS_(test_case_name_prefix##2) \ + HUNDRED_TESTS_(test_case_name_prefix##3) \ + HUNDRED_TESTS_(test_case_name_prefix##4) \ + HUNDRED_TESTS_(test_case_name_prefix##5) \ + HUNDRED_TESTS_(test_case_name_prefix##6) \ + HUNDRED_TESTS_(test_case_name_prefix##7) \ + HUNDRED_TESTS_(test_case_name_prefix##8) \ + HUNDRED_TESTS_(test_case_name_prefix##9) // Ensures that we can define 1000 TEST()s in the same translation // unit. diff --git a/ext/googletest/googletest/test/gtest_test_utils.py b/ext/googletest/googletest/test/gtest_test_utils.py index d0c24466a4..eecc53346c 100755 --- a/ext/googletest/googletest/test/gtest_test_utils.py +++ b/ext/googletest/googletest/test/gtest_test_utils.py @@ -32,6 +32,7 @@ # pylint: disable-msg=C6204 import os +import subprocess import sys IS_WINDOWS = os.name == 'nt' @@ -42,13 +43,6 @@ import atexit import shutil import tempfile import unittest as _test_module - -try: - import subprocess - _SUBPROCESS_MODULE_AVAILABLE = True -except: - import popen2 - _SUBPROCESS_MODULE_AVAILABLE = False # pylint: enable-msg=C6204 GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT' @@ -173,7 +167,7 @@ def GetTestExecutablePath(executable_name, build_dir=None): 'Unable to find the test binary "%s". Please make sure to provide\n' 'a path to the binary via the --build_dir flag or the BUILD_DIR\n' 'environment variable.' 
% path) - print >> sys.stderr, message + print(message, file=sys.stderr) sys.exit(1) return path @@ -224,69 +218,18 @@ class Subprocess: combined in a string. """ - # The subprocess module is the preferrable way of running programs - # since it is available and behaves consistently on all platforms, - # including Windows. But it is only available starting in python 2.4. - # In earlier python versions, we revert to the popen2 module, which is - # available in python 2.0 and later but doesn't provide required - # functionality (Popen4) under Windows. This allows us to support Mac - # OS X 10.4 Tiger, which has python 2.3 installed. - if _SUBPROCESS_MODULE_AVAILABLE: - if capture_stderr: - stderr = subprocess.STDOUT - else: - stderr = subprocess.PIPE - - p = subprocess.Popen(command, - stdout=subprocess.PIPE, stderr=stderr, - cwd=working_dir, universal_newlines=True, env=env) - # communicate returns a tuple with the file object for the child's - # output. - self.output = p.communicate()[0] - self._return_code = p.returncode + if capture_stderr: + stderr = subprocess.STDOUT else: - old_dir = os.getcwd() + stderr = subprocess.PIPE - def _ReplaceEnvDict(dest, src): - # Changes made by os.environ.clear are not inheritable by child - # processes until Python 2.6. To produce inheritable changes we have - # to delete environment items with the del statement. - for key in dest.keys(): - del dest[key] - dest.update(src) - - # When 'env' is not None, backup the environment variables and replace - # them with the passed 'env'. When 'env' is None, we simply use the - # current 'os.environ' for compatibility with the subprocess.Popen - # semantics used above. 
- if env is not None: - old_environ = os.environ.copy() - _ReplaceEnvDict(os.environ, env) - - try: - if working_dir is not None: - os.chdir(working_dir) - if capture_stderr: - p = popen2.Popen4(command) - else: - p = popen2.Popen3(command) - p.tochild.close() - self.output = p.fromchild.read() - ret_code = p.wait() - finally: - os.chdir(old_dir) - - # Restore the old environment variables - # if they were replaced. - if env is not None: - _ReplaceEnvDict(os.environ, old_environ) - - # Converts ret_code to match the semantics of - # subprocess.Popen.returncode. - if os.WIFSIGNALED(ret_code): - self._return_code = -os.WTERMSIG(ret_code) - else: # os.WIFEXITED(ret_code) should return True here. - self._return_code = os.WEXITSTATUS(ret_code) + p = subprocess.Popen(command, + stdout=subprocess.PIPE, stderr=stderr, + cwd=working_dir, universal_newlines=True, env=env) + # communicate returns a tuple with the file object for the child's + # output. + self.output = p.communicate()[0] + self._return_code = p.returncode if bool(self._return_code & 0x80000000): self.terminated_by_signal = True diff --git a/ext/googletest/googletest/test/gtest_testbridge_test.py b/ext/googletest/googletest/test/gtest_testbridge_test.py index 87ffad73d4..1c2a303a88 100755 --- a/ext/googletest/googletest/test/gtest_testbridge_test.py +++ b/ext/googletest/googletest/test/gtest_testbridge_test.py @@ -31,7 +31,7 @@ import os -import gtest_test_utils +from googletest.test import gtest_test_utils binary_name = 'gtest_testbridge_test_' COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name) diff --git a/ext/googletest/googletest/test/gtest_testbridge_test_.cc b/ext/googletest/googletest/test/gtest_testbridge_test_.cc index 24617b209e..c2c000dca5 100644 --- a/ext/googletest/googletest/test/gtest_testbridge_test_.cc +++ b/ext/googletest/googletest/test/gtest_testbridge_test_.cc @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF 
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // This program is meant to be run by gtest_test_filter_test.py. Do not run // it directly. diff --git a/ext/googletest/googletest/test/gtest_throw_on_failure_ex_test.cc b/ext/googletest/googletest/test/gtest_throw_on_failure_ex_test.cc index aeead13feb..25d7c797ed 100644 --- a/ext/googletest/googletest/test/gtest_throw_on_failure_ex_test.cc +++ b/ext/googletest/googletest/test/gtest_throw_on_failure_ex_test.cc @@ -27,16 +27,16 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Tests Google Test's throw-on-failure mode with exceptions enabled. -#include "gtest/gtest.h" - -#include #include +#include #include + #include +#include "gtest/gtest.h" + // Prints the given failure message and exits the program with // non-zero. We use this instead of a Google Test assertion to // indicate a failure, as the latter is been tested and cannot be @@ -55,14 +55,14 @@ void TestFailureThrowsRuntimeError() { // A successful assertion shouldn't throw. try { EXPECT_EQ(3, 3); - } catch(...) { + } catch (...) { Fail("A successful assertion wrongfully threw."); } // A failed assertion should throw a subclass of std::runtime_error. try { EXPECT_EQ(2, 3) << "Expected failure"; - } catch(const std::runtime_error& e) { + } catch (const std::runtime_error& e) { if (strstr(e.what(), "Expected failure") != nullptr) return; printf("%s", @@ -70,7 +70,7 @@ void TestFailureThrowsRuntimeError() { "but the message is incorrect. Instead of containing \"Expected " "failure\", it is:\n"); Fail(e.what()); - } catch(...) { + } catch (...) 
{ Fail("A failed assertion threw the wrong type of exception."); } Fail("A failed assertion should've thrown but didn't."); diff --git a/ext/googletest/googletest/test/gtest_unittest.cc b/ext/googletest/googletest/test/gtest_unittest.cc index 3f2f082fe7..cdfdc6c42f 100644 --- a/ext/googletest/googletest/test/gtest_unittest.cc +++ b/ext/googletest/googletest/test/gtest_unittest.cc @@ -111,15 +111,15 @@ TEST_F(StreamingListenerTest, OnTestIterationEnd) { EXPECT_EQ("event=TestIterationEnd&passed=1&elapsed_time=0ms\n", *output()); } -TEST_F(StreamingListenerTest, OnTestCaseStart) { +TEST_F(StreamingListenerTest, OnTestSuiteStart) { *output() = ""; - streamer_.OnTestCaseStart(TestCase("FooTest", "Bar", nullptr, nullptr)); + streamer_.OnTestSuiteStart(TestSuite("FooTest", "Bar", nullptr, nullptr)); EXPECT_EQ("event=TestCaseStart&name=FooTest\n", *output()); } -TEST_F(StreamingListenerTest, OnTestCaseEnd) { +TEST_F(StreamingListenerTest, OnTestSuiteEnd) { *output() = ""; - streamer_.OnTestCaseEnd(TestCase("FooTest", "Bar", nullptr, nullptr)); + streamer_.OnTestSuiteEnd(TestSuite("FooTest", "Bar", nullptr, nullptr)); EXPECT_EQ("event=TestCaseEnd&passed=1&elapsed_time=0ms\n", *output()); } @@ -137,8 +137,8 @@ TEST_F(StreamingListenerTest, OnTestEnd) { TEST_F(StreamingListenerTest, OnTestPartResult) { *output() = ""; - streamer_.OnTestPartResult(TestPartResult( - TestPartResult::kFatalFailure, "foo.cc", 42, "failed=\n&%")); + streamer_.OnTestPartResult(TestPartResult(TestPartResult::kFatalFailure, + "foo.cc", 42, "failed=\n&%")); // Meta characters in the failure message should be properly escaped. 
EXPECT_EQ( @@ -272,11 +272,9 @@ using testing::internal::GetCapturedStdout; using testing::internal::ThreadWithParam; #endif -class TestingVector : public std::vector { -}; +class TestingVector : public std::vector {}; -::std::ostream& operator<<(::std::ostream& os, - const TestingVector& vector) { +::std::ostream& operator<<(::std::ostream& os, const TestingVector& vector) { os << "{ "; for (size_t i = 0; i < vector.size(); i++) { os << vector[i] << " "; @@ -404,7 +402,7 @@ TEST(FormatTimeInMillisAsSecondsTest, FormatsNegativeNumber) { // Tests FormatEpochTimeInMillisAsIso8601(). The correctness of conversion // for particular dates below was verified in Python using -// datetime.datetime.fromutctimestamp(/1000). +// datetime.datetime.fromutctimestamp(/1000). // FormatEpochTimeInMillisAsIso8601 depends on the current timezone, so we // have to set up a particular timezone to obtain predictable results. @@ -420,8 +418,7 @@ class FormatEpochTimeInMillisAsIso8601Test : public Test { saved_tz_ = nullptr; GTEST_DISABLE_MSC_DEPRECATED_PUSH_(/* getenv, strdup: deprecated */) - if (getenv("TZ")) - saved_tz_ = strdup(getenv("TZ")); + if (getenv("TZ")) saved_tz_ = strdup(getenv("TZ")); GTEST_DISABLE_MSC_DEPRECATED_POP_() // Set up the time zone for FormatEpochTimeInMillisAsIso8601 to use. We @@ -450,6 +447,12 @@ class FormatEpochTimeInMillisAsIso8601Test : public Test { tzset(); GTEST_DISABLE_MSC_WARNINGS_POP_() #else +#if GTEST_OS_LINUX_ANDROID && __ANDROID_API__ < 21 + // Work around KitKat bug in tzset by setting "UTC" before setting "UTC+00". + // See https://github.com/android/ndk/issues/1604. 
+ setenv("TZ", "UTC", 1); + tzset(); +#endif if (time_zone) { setenv(("TZ"), time_zone, 1); } else { @@ -470,9 +473,8 @@ TEST_F(FormatEpochTimeInMillisAsIso8601Test, PrintsTwoDigitSegments) { } TEST_F(FormatEpochTimeInMillisAsIso8601Test, IncludesMillisecondsAfterDot) { - EXPECT_EQ( - "2011-10-31T18:52:42.234", - FormatEpochTimeInMillisAsIso8601(1320087162 * kMillisPerSec + 234)); + EXPECT_EQ("2011-10-31T18:52:42.234", + FormatEpochTimeInMillisAsIso8601(1320087162 * kMillisPerSec + 234)); } TEST_F(FormatEpochTimeInMillisAsIso8601Test, PrintsLeadingZeroes) { @@ -489,10 +491,10 @@ TEST_F(FormatEpochTimeInMillisAsIso8601Test, PrintsEpochStart) { EXPECT_EQ("1970-01-01T00:00:00.000", FormatEpochTimeInMillisAsIso8601(0)); } -# ifdef __BORLANDC__ +#ifdef __BORLANDC__ // Silences warnings: "Condition is always true", "Unreachable code" -# pragma option push -w-ccc -w-rch -# endif +#pragma option push -w-ccc -w-rch +#endif // Tests that the LHS of EXPECT_EQ or ASSERT_EQ can be used as a null literal // when the RHS is a pointer type. @@ -566,10 +568,10 @@ TEST(NullLiteralTest, NoConversionNoWarning) { #pragma clang diagnostic pop #endif -# ifdef __BORLANDC__ +#ifdef __BORLANDC__ // Restores warnings after previous "#pragma option push" suppressed them. -# pragma option pop -# endif +#pragma option pop +#endif // // Tests CodePointToUtf8(). @@ -597,20 +599,17 @@ TEST(CodePointToUtf8Test, CanEncode8To11Bits) { // Some compilers (e.g., GCC on MinGW) cannot handle non-ASCII codepoints // in wide strings and wide chars. In order to accommodate them, we have to // introduce such character constants as integers. - EXPECT_EQ("\xD5\xB6", - CodePointToUtf8(static_cast(0x576))); + EXPECT_EQ("\xD5\xB6", CodePointToUtf8(static_cast(0x576))); } // Tests that Unicode code-points that have 12 to 16 bits are encoded // as 1110xxxx 10xxxxxx 10xxxxxx. 
TEST(CodePointToUtf8Test, CanEncode12To16Bits) { // 0000 1000 1101 0011 => 1110-0000 10-100011 10-010011 - EXPECT_EQ("\xE0\xA3\x93", - CodePointToUtf8(static_cast(0x8D3))); + EXPECT_EQ("\xE0\xA3\x93", CodePointToUtf8(static_cast(0x8D3))); // 1100 0111 0100 1101 => 1110-1100 10-011101 10-001101 - EXPECT_EQ("\xEC\x9D\x8D", - CodePointToUtf8(static_cast(0xC74D))); + EXPECT_EQ("\xEC\x9D\x8D", CodePointToUtf8(static_cast(0xC74D))); } #if !GTEST_WIDE_STRING_USES_UTF16_ @@ -662,7 +661,7 @@ TEST(WideStringToUtf8Test, CanEncode8To11Bits) { EXPECT_STREQ("\xC3\x93", WideStringToUtf8(L"\xD3", -1).c_str()); // 101 0111 0110 => 110-10101 10-110110 - const wchar_t s[] = { 0x576, '\0' }; + const wchar_t s[] = {0x576, '\0'}; EXPECT_STREQ("\xD5\xB6", WideStringToUtf8(s, 1).c_str()); EXPECT_STREQ("\xD5\xB6", WideStringToUtf8(s, -1).c_str()); } @@ -671,12 +670,12 @@ TEST(WideStringToUtf8Test, CanEncode8To11Bits) { // as 1110xxxx 10xxxxxx 10xxxxxx. TEST(WideStringToUtf8Test, CanEncode12To16Bits) { // 0000 1000 1101 0011 => 1110-0000 10-100011 10-010011 - const wchar_t s1[] = { 0x8D3, '\0' }; + const wchar_t s1[] = {0x8D3, '\0'}; EXPECT_STREQ("\xE0\xA3\x93", WideStringToUtf8(s1, 1).c_str()); EXPECT_STREQ("\xE0\xA3\x93", WideStringToUtf8(s1, -1).c_str()); // 1100 0111 0100 1101 => 1110-1100 10-011101 10-001101 - const wchar_t s2[] = { 0xC74D, '\0' }; + const wchar_t s2[] = {0xC74D, '\0'}; EXPECT_STREQ("\xEC\x9D\x8D", WideStringToUtf8(s2, 1).c_str()); EXPECT_STREQ("\xEC\x9D\x8D", WideStringToUtf8(s2, -1).c_str()); } @@ -711,11 +710,11 @@ TEST(WideStringToUtf8Test, CanEncodeInvalidCodePoint) { EXPECT_STREQ("(Invalid Unicode 0xABCDFF)", WideStringToUtf8(L"\xABCDFF", -1).c_str()); } -#else // !GTEST_WIDE_STRING_USES_UTF16_ +#else // !GTEST_WIDE_STRING_USES_UTF16_ // Tests that surrogate pairs are encoded correctly on the systems using // UTF-16 encoding in the wide strings. 
TEST(WideStringToUtf8Test, CanEncodeValidUtf16SUrrogatePairs) { - const wchar_t s[] = { 0xD801, 0xDC00, '\0' }; + const wchar_t s[] = {0xD801, 0xDC00, '\0'}; EXPECT_STREQ("\xF0\x90\x90\x80", WideStringToUtf8(s, -1).c_str()); } @@ -723,13 +722,13 @@ TEST(WideStringToUtf8Test, CanEncodeValidUtf16SUrrogatePairs) { // generates the expected result. TEST(WideStringToUtf8Test, CanEncodeInvalidUtf16SurrogatePair) { // Leading surrogate is at the end of the string. - const wchar_t s1[] = { 0xD800, '\0' }; + const wchar_t s1[] = {0xD800, '\0'}; EXPECT_STREQ("\xED\xA0\x80", WideStringToUtf8(s1, -1).c_str()); // Leading surrogate is not followed by the trailing surrogate. - const wchar_t s2[] = { 0xD800, 'M', '\0' }; + const wchar_t s2[] = {0xD800, 'M', '\0'}; EXPECT_STREQ("\xED\xA0\x80M", WideStringToUtf8(s2, -1).c_str()); // Trailing surrogate appearas without a leading surrogate. - const wchar_t s3[] = { 0xDC00, 'P', 'Q', 'R', '\0' }; + const wchar_t s3[] = {0xDC00, 'P', 'Q', 'R', '\0'}; EXPECT_STREQ("\xED\xB0\x80PQR", WideStringToUtf8(s3, -1).c_str()); } #endif // !GTEST_WIDE_STRING_USES_UTF16_ @@ -737,21 +736,24 @@ TEST(WideStringToUtf8Test, CanEncodeInvalidUtf16SurrogatePair) { // Tests that codepoint concatenation works correctly. 
#if !GTEST_WIDE_STRING_USES_UTF16_ TEST(WideStringToUtf8Test, ConcatenatesCodepointsCorrectly) { - const wchar_t s[] = { 0x108634, 0xC74D, '\n', 0x576, 0x8D3, 0x108634, '\0'}; + const wchar_t s[] = {0x108634, 0xC74D, '\n', 0x576, 0x8D3, 0x108634, '\0'}; EXPECT_STREQ( "\xF4\x88\x98\xB4" - "\xEC\x9D\x8D" - "\n" - "\xD5\xB6" - "\xE0\xA3\x93" - "\xF4\x88\x98\xB4", + "\xEC\x9D\x8D" + "\n" + "\xD5\xB6" + "\xE0\xA3\x93" + "\xF4\x88\x98\xB4", WideStringToUtf8(s, -1).c_str()); } #else TEST(WideStringToUtf8Test, ConcatenatesCodepointsCorrectly) { - const wchar_t s[] = { 0xC74D, '\n', 0x576, 0x8D3, '\0'}; + const wchar_t s[] = {0xC74D, '\n', 0x576, 0x8D3, '\0'}; EXPECT_STREQ( - "\xEC\x9D\x8D" "\n" "\xD5\xB6" "\xE0\xA3\x93", + "\xEC\x9D\x8D" + "\n" + "\xD5\xB6" + "\xE0\xA3\x93", WideStringToUtf8(s, -1).c_str()); } #endif // !GTEST_WIDE_STRING_USES_UTF16_ @@ -760,9 +762,8 @@ TEST(WideStringToUtf8Test, ConcatenatesCodepointsCorrectly) { TEST(RandomDeathTest, GeneratesCrashesOnInvalidRange) { testing::internal::Random random(42); - EXPECT_DEATH_IF_SUPPORTED( - random.Generate(0), - "Cannot generate a number in the range \\[0, 0\\)"); + EXPECT_DEATH_IF_SUPPORTED(random.Generate(0), + "Cannot generate a number in the range \\[0, 0\\)"); EXPECT_DEATH_IF_SUPPORTED( random.Generate(testing::internal::Random::kMaxRange + 1), "Generation of a number in \\[0, 2147483649\\) was requested, " @@ -891,7 +892,7 @@ class VectorShuffleTest : public Test { return true; } - bool found_in_vector[kVectorSize] = { false }; + bool found_in_vector[kVectorSize] = {false}; for (size_t i = 0; i < vector.size(); i++) { const int e = vector[i]; if (e < 0 || e >= static_cast(kVectorSize) || found_in_vector[e]) { @@ -918,8 +919,8 @@ class VectorShuffleTest : public Test { return false; } - static bool RangeIsUnshuffled( - const TestingVector& vector, int begin, int end) { + static bool RangeIsUnshuffled(const TestingVector& vector, int begin, + int end) { return !RangeIsShuffled(vector, begin, end); } @@ 
-944,7 +945,7 @@ TEST_F(VectorShuffleTest, HandlesEmptyRange) { ASSERT_PRED1(VectorIsUnshuffled, vector_); // ...in the middle... - ShuffleRange(&random_, kVectorSize/2, kVectorSize/2, &vector_); + ShuffleRange(&random_, kVectorSize / 2, kVectorSize / 2, &vector_); ASSERT_PRED1(VectorIsNotCorrupt, vector_); ASSERT_PRED1(VectorIsUnshuffled, vector_); @@ -966,7 +967,7 @@ TEST_F(VectorShuffleTest, HandlesRangeOfSizeOne) { ASSERT_PRED1(VectorIsUnshuffled, vector_); // ...in the middle... - ShuffleRange(&random_, kVectorSize/2, kVectorSize/2 + 1, &vector_); + ShuffleRange(&random_, kVectorSize / 2, kVectorSize / 2 + 1, &vector_); ASSERT_PRED1(VectorIsNotCorrupt, vector_); ASSERT_PRED1(VectorIsUnshuffled, vector_); @@ -991,7 +992,7 @@ TEST_F(VectorShuffleTest, ShufflesEntireVector) { } TEST_F(VectorShuffleTest, ShufflesStartOfVector) { - const int kRangeSize = kVectorSize/2; + const int kRangeSize = kVectorSize / 2; ShuffleRange(&random_, 0, kRangeSize, &vector_); @@ -1013,11 +1014,11 @@ TEST_F(VectorShuffleTest, ShufflesEndOfVector) { TEST_F(VectorShuffleTest, ShufflesMiddleOfVector) { const int kRangeSize = static_cast(kVectorSize) / 3; - ShuffleRange(&random_, kRangeSize, 2*kRangeSize, &vector_); + ShuffleRange(&random_, kRangeSize, 2 * kRangeSize, &vector_); ASSERT_PRED1(VectorIsNotCorrupt, vector_); EXPECT_PRED3(RangeIsUnshuffled, vector_, 0, kRangeSize); - EXPECT_PRED3(RangeIsShuffled, vector_, kRangeSize, 2*kRangeSize); + EXPECT_PRED3(RangeIsShuffled, vector_, kRangeSize, 2 * kRangeSize); EXPECT_PRED3(RangeIsUnshuffled, vector_, 2 * kRangeSize, static_cast(kVectorSize)); } @@ -1082,13 +1083,12 @@ TEST(StringTest, CaseInsensitiveWideCStringEquals) { // Tests String::ShowWideCString(). 
TEST(StringTest, ShowWideCString) { - EXPECT_STREQ("(null)", - String::ShowWideCString(NULL).c_str()); + EXPECT_STREQ("(null)", String::ShowWideCString(NULL).c_str()); EXPECT_STREQ("", String::ShowWideCString(L"").c_str()); EXPECT_STREQ("foo", String::ShowWideCString(L"foo").c_str()); } -# if GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS_MOBILE TEST(StringTest, AnsiAndUtf16Null) { EXPECT_EQ(NULL, String::AnsiToUtf16(NULL)); EXPECT_EQ(NULL, String::Utf16ToAnsi(NULL)); @@ -1097,21 +1097,21 @@ TEST(StringTest, AnsiAndUtf16Null) { TEST(StringTest, AnsiAndUtf16ConvertBasic) { const char* ansi = String::Utf16ToAnsi(L"str"); EXPECT_STREQ("str", ansi); - delete [] ansi; + delete[] ansi; const WCHAR* utf16 = String::AnsiToUtf16("str"); EXPECT_EQ(0, wcsncmp(L"str", utf16, 3)); - delete [] utf16; + delete[] utf16; } TEST(StringTest, AnsiAndUtf16ConvertPathChars) { const char* ansi = String::Utf16ToAnsi(L".:\\ \"*?"); EXPECT_STREQ(".:\\ \"*?", ansi); - delete [] ansi; + delete[] ansi; const WCHAR* utf16 = String::AnsiToUtf16(".:\\ \"*?"); EXPECT_EQ(0, wcsncmp(L".:\\ \"*?", utf16, 3)); - delete [] utf16; + delete[] utf16; } -# endif // GTEST_OS_WINDOWS_MOBILE +#endif // GTEST_OS_WINDOWS_MOBILE #endif // GTEST_OS_WINDOWS @@ -1133,9 +1133,7 @@ TEST(TestPropertyTest, ReplaceStringValue) { // AddFatalFailure() and AddNonfatalFailure() must be stand-alone // functions (i.e. their definitions cannot be inlined at the call // sites), or C++Builder won't compile the code. -static void AddFatalFailure() { - FAIL() << "Expected fatal failure."; -} +static void AddFatalFailure() { FAIL() << "Expected fatal failure."; } static void AddNonfatalFailure() { ADD_FAILURE() << "Expected non-fatal failure."; @@ -1143,10 +1141,7 @@ static void AddNonfatalFailure() { class ScopedFakeTestPartResultReporterTest : public Test { public: // Must be public and not protected due to a bug in g++ 3.4.2. 
- enum FailureMode { - FATAL_FAILURE, - NONFATAL_FAILURE - }; + enum FailureMode { FATAL_FAILURE, NONFATAL_FAILURE }; static void AddFailure(FailureMode failure) { if (failure == FATAL_FAILURE) { AddFatalFailure(); @@ -1186,7 +1181,7 @@ TEST_F(ScopedFakeTestPartResultReporterTest, DeprecatedConstructor) { #if GTEST_IS_THREADSAFE class ScopedFakeTestPartResultReporterWithThreadsTest - : public ScopedFakeTestPartResultReporterTest { + : public ScopedFakeTestPartResultReporterTest { protected: static void AddFailureInOtherThread(FailureMode failure) { ThreadWithParam thread(&AddFailure, failure, nullptr); @@ -1239,7 +1234,7 @@ TEST_F(ExpectFatalFailureTest, CatchesFatalFailureOnAllThreads) { #ifdef __BORLANDC__ // Silences warnings: "Condition is always true" -# pragma option push -w-ccc +#pragma option push -w-ccc #endif // Tests that EXPECT_FATAL_FAILURE() can be used in a non-void @@ -1267,7 +1262,7 @@ void DoesNotAbortHelper(bool* aborted) { #ifdef __BORLANDC__ // Restores warnings after previous "#pragma option push" suppressed them. -# pragma option pop +#pragma option pop #endif TEST_F(ExpectFatalFailureTest, DoesNotAbort) { @@ -1286,16 +1281,20 @@ static int global_var = 0; TEST_F(ExpectFatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) { #ifndef __BORLANDC__ // ICE's in C++Builder. - EXPECT_FATAL_FAILURE({ - GTEST_USE_UNPROTECTED_COMMA_; - AddFatalFailure(); - }, ""); + EXPECT_FATAL_FAILURE( + { + GTEST_USE_UNPROTECTED_COMMA_; + AddFatalFailure(); + }, + ""); #endif - EXPECT_FATAL_FAILURE_ON_ALL_THREADS({ - GTEST_USE_UNPROTECTED_COMMA_; - AddFatalFailure(); - }, ""); + EXPECT_FATAL_FAILURE_ON_ALL_THREADS( + { + GTEST_USE_UNPROTECTED_COMMA_; + AddFatalFailure(); + }, + ""); } // Tests EXPECT_NONFATAL_FAILURE{,ON_ALL_THREADS}. 
@@ -1303,8 +1302,7 @@ TEST_F(ExpectFatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) { typedef ScopedFakeTestPartResultReporterTest ExpectNonfatalFailureTest; TEST_F(ExpectNonfatalFailureTest, CatchesNonfatalFailure) { - EXPECT_NONFATAL_FAILURE(AddNonfatalFailure(), - "Expected non-fatal failure."); + EXPECT_NONFATAL_FAILURE(AddNonfatalFailure(), "Expected non-fatal failure."); } TEST_F(ExpectNonfatalFailureTest, AcceptsStdStringObject) { @@ -1323,15 +1321,19 @@ TEST_F(ExpectNonfatalFailureTest, CatchesNonfatalFailureOnAllThreads) { // statement that contains a macro which expands to code containing an // unprotected comma. TEST_F(ExpectNonfatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) { - EXPECT_NONFATAL_FAILURE({ - GTEST_USE_UNPROTECTED_COMMA_; - AddNonfatalFailure(); - }, ""); + EXPECT_NONFATAL_FAILURE( + { + GTEST_USE_UNPROTECTED_COMMA_; + AddNonfatalFailure(); + }, + ""); - EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS({ - GTEST_USE_UNPROTECTED_COMMA_; - AddNonfatalFailure(); - }, ""); + EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS( + { + GTEST_USE_UNPROTECTED_COMMA_; + AddNonfatalFailure(); + }, + ""); } #if GTEST_IS_THREADSAFE @@ -1375,21 +1377,18 @@ class TestResultTest : public Test { typedef std::vector TPRVector; // We make use of 2 TestPartResult objects, - TestPartResult * pr1, * pr2; + TestPartResult *pr1, *pr2; // ... and 3 TestResult objects. - TestResult * r0, * r1, * r2; + TestResult *r0, *r1, *r2; void SetUp() override { // pr1 is for success. - pr1 = new TestPartResult(TestPartResult::kSuccess, - "foo/bar.cc", - 10, + pr1 = new TestPartResult(TestPartResult::kSuccess, "foo/bar.cc", 10, "Success!"); // pr2 is for fatal failure. 
- pr2 = new TestPartResult(TestPartResult::kFatalFailure, - "foo/bar.cc", + pr2 = new TestPartResult(TestPartResult::kFatalFailure, "foo/bar.cc", -1, // This line number means "unknown" "Failure!"); @@ -1402,10 +1401,10 @@ class TestResultTest : public Test { // state, in particular the TestPartResult vector it holds. // test_part_results() returns a const reference to this vector. // We cast it to a non-const object s.t. it can be modified - TPRVector* results1 = const_cast( - &TestResultAccessor::test_part_results(*r1)); - TPRVector* results2 = const_cast( - &TestResultAccessor::test_part_results(*r2)); + TPRVector* results1 = + const_cast(&TestResultAccessor::test_part_results(*r1)); + TPRVector* results2 = + const_cast(&TestResultAccessor::test_part_results(*r2)); // r0 is an empty TestResult. @@ -1656,15 +1655,11 @@ GTestFlagSaver* GTestFlagSaverTest::saver_ = nullptr; // tests are designed to work regardless of their order. // Modifies the Google Test flags in the test body. -TEST_F(GTestFlagSaverTest, ModifyGTestFlags) { - VerifyAndModifyFlags(); -} +TEST_F(GTestFlagSaverTest, ModifyGTestFlags) { VerifyAndModifyFlags(); } // Verifies that the Google Test flags in the body of the previous test were // restored to their original values. -TEST_F(GTestFlagSaverTest, VerifyGTestFlags) { - VerifyAndModifyFlags(); -} +TEST_F(GTestFlagSaverTest, VerifyGTestFlags) { VerifyAndModifyFlags(); } // Sets an environment variable with the given name to the given // value. If the value argument is "", unsets the environment @@ -1681,12 +1676,12 @@ static void SetEnv(const char* name, const char* value) { // Because putenv stores a pointer to the string buffer, we can't delete the // previous string (if present) until after it's replaced. 
- std::string *prev_env = NULL; + std::string* prev_env = NULL; if (added_env.find(name) != added_env.end()) { prev_env = added_env[name]; } - added_env[name] = new std::string( - (Message() << name << "=" << value).GetString()); + added_env[name] = + new std::string((Message() << name << "=" << value).GetString()); // The standard signature of putenv accepts a 'char*' argument. Other // implementations, like C++Builder's, accept a 'const char*'. @@ -1718,7 +1713,7 @@ TEST(Int32FromGTestEnvTest, ReturnsDefaultWhenVariableIsNotSet) { EXPECT_EQ(10, Int32FromGTestEnv("temp", 10)); } -# if !defined(GTEST_GET_INT32_FROM_ENV_) +#if !defined(GTEST_GET_INT32_FROM_ENV_) // Tests that Int32FromGTestEnv() returns the default value when the // environment variable overflows as an Int32. @@ -1744,7 +1739,7 @@ TEST(Int32FromGTestEnvTest, ReturnsDefaultWhenValueIsInvalid) { EXPECT_EQ(50, Int32FromGTestEnv("temp", 50)); } -# endif // !defined(GTEST_GET_INT32_FROM_ENV_) +#endif // !defined(GTEST_GET_INT32_FROM_ENV_) // Tests that Int32FromGTestEnv() parses and returns the value of the // environment variable when it represents a valid decimal integer in @@ -1828,8 +1823,7 @@ TEST(Int32FromEnvOrDieTest, ParsesAndReturnsValidValue) { TEST(Int32FromEnvOrDieDeathTest, AbortsOnFailure) { SetEnv(GTEST_FLAG_PREFIX_UPPER_ "VAR", "xxx"); EXPECT_DEATH_IF_SUPPORTED( - Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "VAR", 123), - ".*"); + Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "VAR", 123), ".*"); } // Tests that Int32FromEnvOrDie() aborts with an error message @@ -1837,8 +1831,7 @@ TEST(Int32FromEnvOrDieDeathTest, AbortsOnFailure) { TEST(Int32FromEnvOrDieDeathTest, AbortsOnInt32Overflow) { SetEnv(GTEST_FLAG_PREFIX_UPPER_ "VAR", "1234567891234567891234"); EXPECT_DEATH_IF_SUPPORTED( - Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "VAR", 123), - ".*"); + Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "VAR", 123), ".*"); } // Tests that ShouldRunTestOnShard() selects all tests @@ -1945,7 +1938,8 @@ 
TEST(ShouldRunTestOnShardTest, IsPartitionWhenThereAreFiveShards) { prev_selected_shard_index = shard_index; } else { ADD_FAILURE() << "Shard " << prev_selected_shard_index << " and " - << shard_index << " are both selected to run test " << test_id; + << shard_index << " are both selected to run test " + << test_id; } } } @@ -1957,7 +1951,7 @@ TEST(ShouldRunTestOnShardTest, IsPartitionWhenThereAreFiveShards) { int num_tests_on_shard = 0; for (int test_id = 0; test_id < num_tests; test_id++) { num_tests_on_shard += - ShouldRunTestOnShard(num_shards, shard_index, test_id); + ShouldRunTestOnShard(num_shards, shard_index, test_id); } EXPECT_GE(num_tests_on_shard, num_tests / num_shards); } @@ -1989,8 +1983,8 @@ TEST(UnitTestTest, ReturnsPlausibleTimestamp) { void ExpectNonFatalFailureRecordingPropertyWithReservedKey( const TestResult& test_result, const char* key) { EXPECT_NONFATAL_FAILURE(Test::RecordProperty(key, "1"), "Reserved key"); - ASSERT_EQ(0, test_result.test_property_count()) << "Property for key '" << key - << "' recorded unexpectedly."; + ASSERT_EQ(0, test_result.test_property_count()) + << "Property for key '" << key << "' recorded unexpectedly."; } void ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( @@ -2017,10 +2011,10 @@ void ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestSuite( } // Tests that property recording functions in UnitTest outside of tests -// functions correcly. Creating a separate instance of UnitTest ensures it +// functions correctly. Creating a separate instance of UnitTest ensures it // is in a state similar to the UnitTest's singleton's between tests. 
-class UnitTestRecordPropertyTest : - public testing::internal::UnitTestRecordPropertyTestHelper { +class UnitTestRecordPropertyTest + : public testing::internal::UnitTestRecordPropertyTestHelper { public: static void SetUpTestSuite() { ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestSuite( @@ -2059,8 +2053,7 @@ TEST_F(UnitTestRecordPropertyTest, OnePropertyFoundWhenAdded) { EXPECT_STREQ("key_1", unit_test_.ad_hoc_test_result().GetTestProperty(0).key()); - EXPECT_STREQ("1", - unit_test_.ad_hoc_test_result().GetTestProperty(0).value()); + EXPECT_STREQ("1", unit_test_.ad_hoc_test_result().GetTestProperty(0).value()); } // Tests TestResult has multiple properties when added. @@ -2101,16 +2094,13 @@ TEST_F(UnitTestRecordPropertyTest, OverridesValuesForDuplicateKeys) { TEST_F(UnitTestRecordPropertyTest, AddFailureInsideTestsWhenUsingTestSuiteReservedKeys) { - ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( - "name"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest("name"); ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( "value_param"); ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( "type_param"); - ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( - "status"); - ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( - "time"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest("status"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest("time"); ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( "classname"); } @@ -2158,9 +2148,7 @@ static Environment* record_property_env GTEST_ATTRIBUTE_UNUSED_ = // First, some predicates and predicate-formatters needed by the tests. // Returns true if and only if the argument is an even number. 
-bool IsEven(int n) { - return (n % 2) == 0; -} +bool IsEven(int n) { return (n % 2) == 0; } // A functor that returns true if and only if the argument is an even number. struct IsEvenFunctor { @@ -2207,41 +2195,37 @@ struct AssertIsEvenFunctor { }; // Returns true if and only if the sum of the arguments is an even number. -bool SumIsEven2(int n1, int n2) { - return IsEven(n1 + n2); -} +bool SumIsEven2(int n1, int n2) { return IsEven(n1 + n2); } // A functor that returns true if and only if the sum of the arguments is an // even number. struct SumIsEven3Functor { - bool operator()(int n1, int n2, int n3) { - return IsEven(n1 + n2 + n3); - } + bool operator()(int n1, int n2, int n3) { return IsEven(n1 + n2 + n3); } }; // A predicate-formatter function that asserts the sum of the // arguments is an even number. -AssertionResult AssertSumIsEven4( - const char* e1, const char* e2, const char* e3, const char* e4, - int n1, int n2, int n3, int n4) { +AssertionResult AssertSumIsEven4(const char* e1, const char* e2, const char* e3, + const char* e4, int n1, int n2, int n3, + int n4) { const int sum = n1 + n2 + n3 + n4; if (IsEven(sum)) { return AssertionSuccess(); } Message msg; - msg << e1 << " + " << e2 << " + " << e3 << " + " << e4 - << " (" << n1 << " + " << n2 << " + " << n3 << " + " << n4 - << ") evaluates to " << sum << ", which is not even."; + msg << e1 << " + " << e2 << " + " << e3 << " + " << e4 << " (" << n1 << " + " + << n2 << " + " << n3 << " + " << n4 << ") evaluates to " << sum + << ", which is not even."; return AssertionFailure(msg); } // A predicate-formatter functor that asserts the sum of the arguments // is an even number. 
struct AssertSumIsEven5Functor { - AssertionResult operator()( - const char* e1, const char* e2, const char* e3, const char* e4, - const char* e5, int n1, int n2, int n3, int n4, int n5) { + AssertionResult operator()(const char* e1, const char* e2, const char* e3, + const char* e4, const char* e5, int n1, int n2, + int n3, int n4, int n5) { const int sum = n1 + n2 + n3 + n4 + n5; if (IsEven(sum)) { return AssertionSuccess(); @@ -2249,14 +2233,12 @@ struct AssertSumIsEven5Functor { Message msg; msg << e1 << " + " << e2 << " + " << e3 << " + " << e4 << " + " << e5 - << " (" - << n1 << " + " << n2 << " + " << n3 << " + " << n4 << " + " << n5 - << ") evaluates to " << sum << ", which is not even."; + << " (" << n1 << " + " << n2 << " + " << n3 << " + " << n4 << " + " + << n5 << ") evaluates to " << sum << ", which is not even."; return AssertionFailure(msg); } }; - // Tests unary predicate assertions. // Tests unary predicate assertions that don't use a custom formatter. @@ -2266,11 +2248,12 @@ TEST(Pred1Test, WithoutFormat) { ASSERT_PRED1(IsEven, 4); // Failure cases. - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED1(IsEven, 5) << "This failure is expected."; - }, "This failure is expected."); - EXPECT_FATAL_FAILURE(ASSERT_PRED1(IsEvenFunctor(), 5), - "evaluates to false"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED1(IsEven, 5) << "This failure is expected."; + }, + "This failure is expected."); + EXPECT_FATAL_FAILURE(ASSERT_PRED1(IsEvenFunctor(), 5), "evaluates to false"); } // Tests unary predicate assertions that use a custom formatter. @@ -2278,15 +2261,17 @@ TEST(Pred1Test, WithFormat) { // Success cases. EXPECT_PRED_FORMAT1(AssertIsEven, 2); ASSERT_PRED_FORMAT1(AssertIsEvenFunctor(), 4) - << "This failure is UNEXPECTED!"; + << "This failure is UNEXPECTED!"; // Failure cases. 
const int n = 5; EXPECT_NONFATAL_FAILURE(EXPECT_PRED_FORMAT1(AssertIsEvenFunctor(), n), "n evaluates to 5, which is not even."); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT1(AssertIsEven, 5) << "This failure is expected."; - }, "This failure is expected."); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT1(AssertIsEven, 5) << "This failure is expected."; + }, + "This failure is expected."); } // Tests that unary predicate assertions evaluates their arguments @@ -2298,14 +2283,15 @@ TEST(Pred1Test, SingleEvaluationOnFailure) { EXPECT_EQ(1, n) << "The argument is not evaluated exactly once."; // A failure case. - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT1(AssertIsEvenFunctor(), n++) - << "This failure is expected."; - }, "This failure is expected."); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT1(AssertIsEvenFunctor(), n++) + << "This failure is expected."; + }, + "This failure is expected."); EXPECT_EQ(2, n) << "The argument is not evaluated exactly once."; } - // Tests predicate assertions whose arity is >= 2. // Tests predicate assertions that don't use a custom formatter. @@ -2317,19 +2303,23 @@ TEST(PredTest, WithoutFormat) { // Failure cases. const int n1 = 1; const int n2 = 2; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED2(SumIsEven2, n1, n2) << "This failure is expected."; - }, "This failure is expected."); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED3(SumIsEven3Functor(), 1, 2, 4); - }, "evaluates to false"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED2(SumIsEven2, n1, n2) << "This failure is expected."; + }, + "This failure is expected."); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED3(SumIsEven3Functor(), 1, 2, 4); + }, + "evaluates to false"); } // Tests predicate assertions that use a custom formatter. TEST(PredTest, WithFormat) { // Success cases. 
- ASSERT_PRED_FORMAT4(AssertSumIsEven4, 4, 6, 8, 10) << - "This failure is UNEXPECTED!"; + ASSERT_PRED_FORMAT4(AssertSumIsEven4, 4, 6, 8, 10) + << "This failure is UNEXPECTED!"; EXPECT_PRED_FORMAT5(AssertSumIsEven5Functor(), 2, 4, 6, 8, 10); // Failure cases. @@ -2337,13 +2327,17 @@ TEST(PredTest, WithFormat) { const int n2 = 2; const int n3 = 4; const int n4 = 6; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT4(AssertSumIsEven4, n1, n2, n3, n4); - }, "evaluates to 13, which is not even."); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT5(AssertSumIsEven5Functor(), 1, 2, 4, 6, 8) - << "This failure is expected."; - }, "This failure is expected."); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT4(AssertSumIsEven4, n1, n2, n3, n4); + }, + "evaluates to 13, which is not even."); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT5(AssertSumIsEven5Functor(), 1, 2, 4, 6, 8) + << "This failure is expected."; + }, + "This failure is expected."); } // Tests that predicate assertions evaluates their arguments @@ -2361,9 +2355,8 @@ TEST(PredTest, SingleEvaluationOnFailure) { int n3 = 0; int n4 = 0; int n5 = 0; - ASSERT_PRED_FORMAT5(AssertSumIsEven5Functor(), - n1++, n2++, n3++, n4++, n5++) - << "This failure is UNEXPECTED!"; + ASSERT_PRED_FORMAT5(AssertSumIsEven5Functor(), n1++, n2++, n3++, n4++, n5++) + << "This failure is UNEXPECTED!"; EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once."; EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once."; EXPECT_EQ(1, n3) << "Argument 3 is not evaluated exactly once."; @@ -2372,19 +2365,23 @@ TEST(PredTest, SingleEvaluationOnFailure) { // A failure case. 
n1 = n2 = n3 = 0; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED3(SumIsEven3Functor(), ++n1, n2++, n3++) - << "This failure is expected."; - }, "This failure is expected."); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED3(SumIsEven3Functor(), ++n1, n2++, n3++) + << "This failure is expected."; + }, + "This failure is expected."); EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once."; EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once."; EXPECT_EQ(1, n3) << "Argument 3 is not evaluated exactly once."; // Another failure case. n1 = n2 = n3 = n4 = 0; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT4(AssertSumIsEven4, ++n1, n2++, n3++, n4++); - }, "evaluates to 1, which is not even."); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT4(AssertSumIsEven4, ++n1, n2++, n3++, n4++); + }, + "evaluates to 1, which is not even."); EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once."; EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once."; EXPECT_EQ(1, n3) << "Argument 3 is not evaluated exactly once."; @@ -2395,7 +2392,7 @@ TEST(PredTest, SingleEvaluationOnFailure) { TEST(PredTest, ExpectPredEvalFailure) { std::set set_a = {2, 1, 3, 4, 5}; std::set set_b = {0, 4, 8}; - const auto compare_sets = [] (std::set, std::set) { return false; }; + const auto compare_sets = [](std::set, std::set) { return false; }; EXPECT_NONFATAL_FAILURE( EXPECT_PRED2(compare_sets, set_a, set_b), "compare_sets(set_a, set_b) evaluates to false, where\nset_a evaluates " @@ -2405,9 +2402,7 @@ TEST(PredTest, ExpectPredEvalFailure) { // Some helper functions for testing using overloaded/template // functions with ASSERT_PREDn and EXPECT_PREDn. -bool IsPositive(double x) { - return x > 0; -} +bool IsPositive(double x) { return x > 0; } template bool IsNegative(T x) { @@ -2423,7 +2418,7 @@ bool GreaterThan(T1 x1, T2 x2) { // their types are explicitly specified. 
TEST(PredicateAssertionTest, AcceptsOverloadedFunction) { // C++Builder requires C-style casts rather than static_cast. - EXPECT_PRED1((bool (*)(int))(IsPositive), 5); // NOLINT + EXPECT_PRED1((bool (*)(int))(IsPositive), 5); // NOLINT ASSERT_PRED1((bool (*)(double))(IsPositive), 6.0); // NOLINT } @@ -2436,31 +2431,27 @@ TEST(PredicateAssertionTest, AcceptsTemplateFunction) { ASSERT_PRED2((GreaterThan), 5, 0); } - // Some helper functions for testing using overloaded/template // functions with ASSERT_PRED_FORMATn and EXPECT_PRED_FORMATn. AssertionResult IsPositiveFormat(const char* /* expr */, int n) { - return n > 0 ? AssertionSuccess() : - AssertionFailure(Message() << "Failure"); + return n > 0 ? AssertionSuccess() : AssertionFailure(Message() << "Failure"); } AssertionResult IsPositiveFormat(const char* /* expr */, double x) { - return x > 0 ? AssertionSuccess() : - AssertionFailure(Message() << "Failure"); + return x > 0 ? AssertionSuccess() : AssertionFailure(Message() << "Failure"); } template AssertionResult IsNegativeFormat(const char* /* expr */, T x) { - return x < 0 ? AssertionSuccess() : - AssertionFailure(Message() << "Failure"); + return x < 0 ? AssertionSuccess() : AssertionFailure(Message() << "Failure"); } template AssertionResult EqualsFormat(const char* /* expr1 */, const char* /* expr2 */, const T1& x1, const T2& x2) { - return x1 == x2 ? AssertionSuccess() : - AssertionFailure(Message() << "Failure"); + return x1 == x2 ? AssertionSuccess() + : AssertionFailure(Message() << "Failure"); } // Tests that overloaded functions can be used in *_PRED_FORMAT* @@ -2477,20 +2468,18 @@ TEST(PredicateFormatAssertionTest, AcceptsTemplateFunction) { ASSERT_PRED_FORMAT2(EqualsFormat, 3, 3); } - // Tests string assertions. // Tests ASSERT_STREQ with non-NULL arguments. 
TEST(StringAssertionTest, ASSERT_STREQ) { - const char * const p1 = "good"; + const char* const p1 = "good"; ASSERT_STREQ(p1, p1); // Let p2 have the same content as p1, but be at a different address. const char p2[] = "good"; ASSERT_STREQ(p1, p2); - EXPECT_FATAL_FAILURE(ASSERT_STREQ("bad", "good"), - " \"bad\"\n \"good\""); + EXPECT_FATAL_FAILURE(ASSERT_STREQ("bad", "good"), " \"bad\"\n \"good\""); } // Tests ASSERT_STREQ with NULL arguments. @@ -2513,8 +2502,7 @@ TEST(StringAssertionTest, ASSERT_STRNE) { ASSERT_STRNE(nullptr, ""); ASSERT_STRNE("", "Hi"); ASSERT_STRNE("Hi", ""); - EXPECT_FATAL_FAILURE(ASSERT_STRNE("Hi", "Hi"), - "\"Hi\" vs \"Hi\""); + EXPECT_FATAL_FAILURE(ASSERT_STRNE("Hi", "Hi"), "\"Hi\" vs \"Hi\""); } // Tests ASSERT_STRCASEEQ. @@ -2523,8 +2511,7 @@ TEST(StringAssertionTest, ASSERT_STRCASEEQ) { ASSERT_STRCASEEQ(static_cast(nullptr), nullptr); ASSERT_STRCASEEQ("", ""); - EXPECT_FATAL_FAILURE(ASSERT_STRCASEEQ("Hi", "hi2"), - "Ignoring case"); + EXPECT_FATAL_FAILURE(ASSERT_STRCASEEQ("Hi", "hi2"), "Ignoring case"); } // Tests ASSERT_STRCASENE. @@ -2536,8 +2523,7 @@ TEST(StringAssertionTest, ASSERT_STRCASENE) { ASSERT_STRCASENE(nullptr, ""); ASSERT_STRCASENE("", "Hi"); ASSERT_STRCASENE("Hi", ""); - EXPECT_FATAL_FAILURE(ASSERT_STRCASENE("Hi", "hi"), - "(ignoring case)"); + EXPECT_FATAL_FAILURE(ASSERT_STRCASENE("Hi", "hi"), "(ignoring case)"); } // Tests *_STREQ on wide strings. @@ -2555,17 +2541,17 @@ TEST(StringAssertionTest, STREQ_Wide) { EXPECT_STREQ(L"Hi", L"Hi"); // Unequal strings. - EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"abc", L"Abc"), - "Abc"); + EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"abc", L"Abc"), "Abc"); // Strings containing wide characters. - EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"abc\x8119", L"abc\x8120"), - "abc"); + EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"abc\x8119", L"abc\x8120"), "abc"); // The streaming variation. 
- EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_STREQ(L"abc\x8119", L"abc\x8121") << "Expected failure"; - }, "Expected failure"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_STREQ(L"abc\x8119", L"abc\x8121") << "Expected failure"; + }, + "Expected failure"); } // Tests *_STRNE on wide strings. @@ -2578,22 +2564,19 @@ TEST(StringAssertionTest, STRNE_Wide) { ""); // Empty strings. - EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"", L""), - "L\"\""); + EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"", L""), "L\"\""); // Non-null vs NULL. ASSERT_STRNE(L"non-null", nullptr); // Equal strings. - EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"Hi", L"Hi"), - "L\"Hi\""); + EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"Hi", L"Hi"), "L\"Hi\""); // Unequal strings. EXPECT_STRNE(L"abc", L"Abc"); // Strings containing wide characters. - EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"abc\x8119", L"abc\x8119"), - "abc"); + EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"abc\x8119", L"abc\x8119"), "abc"); // The streaming variation. ASSERT_STRNE(L"abc\x8119", L"abc\x8120") << "This shouldn't happen"; @@ -2627,12 +2610,13 @@ TEST(IsSubstringTest, ReturnsCorrectResultForWideCString) { // Tests that IsSubstring() generates the correct message when the input // argument type is const char*. 
TEST(IsSubstringTest, GeneratesCorrectMessageForCString) { - EXPECT_STREQ("Value of: needle_expr\n" - " Actual: \"needle\"\n" - "Expected: a substring of haystack_expr\n" - "Which is: \"haystack\"", - IsSubstring("needle_expr", "haystack_expr", - "needle", "haystack").failure_message()); + EXPECT_STREQ( + "Value of: needle_expr\n" + " Actual: \"needle\"\n" + "Expected: a substring of haystack_expr\n" + "Which is: \"haystack\"", + IsSubstring("needle_expr", "haystack_expr", "needle", "haystack") + .failure_message()); } // Tests that IsSubstring returns the correct result when the input @@ -2653,13 +2637,14 @@ TEST(IsSubstringTest, ReturnsCorrectResultForStdWstring) { // Tests that IsSubstring() generates the correct message when the input // argument type is ::std::wstring. TEST(IsSubstringTest, GeneratesCorrectMessageForWstring) { - EXPECT_STREQ("Value of: needle_expr\n" - " Actual: L\"needle\"\n" - "Expected: a substring of haystack_expr\n" - "Which is: L\"haystack\"", - IsSubstring( - "needle_expr", "haystack_expr", - ::std::wstring(L"needle"), L"haystack").failure_message()); + EXPECT_STREQ( + "Value of: needle_expr\n" + " Actual: L\"needle\"\n" + "Expected: a substring of haystack_expr\n" + "Which is: L\"haystack\"", + IsSubstring("needle_expr", "haystack_expr", ::std::wstring(L"needle"), + L"haystack") + .failure_message()); } #endif // GTEST_HAS_STD_WSTRING @@ -2683,13 +2668,13 @@ TEST(IsNotSubstringTest, ReturnsCorrectResultForWideCString) { // Tests that IsNotSubstring() generates the correct message when the input // argument type is const wchar_t*. 
TEST(IsNotSubstringTest, GeneratesCorrectMessageForWideCString) { - EXPECT_STREQ("Value of: needle_expr\n" - " Actual: L\"needle\"\n" - "Expected: not a substring of haystack_expr\n" - "Which is: L\"two needles\"", - IsNotSubstring( - "needle_expr", "haystack_expr", - L"needle", L"two needles").failure_message()); + EXPECT_STREQ( + "Value of: needle_expr\n" + " Actual: L\"needle\"\n" + "Expected: not a substring of haystack_expr\n" + "Which is: L\"two needles\"", + IsNotSubstring("needle_expr", "haystack_expr", L"needle", L"two needles") + .failure_message()); } // Tests that IsNotSubstring returns the correct result when the input @@ -2702,13 +2687,14 @@ TEST(IsNotSubstringTest, ReturnsCorrectResultsForStdString) { // Tests that IsNotSubstring() generates the correct message when the input // argument type is ::std::string. TEST(IsNotSubstringTest, GeneratesCorrectMessageForStdString) { - EXPECT_STREQ("Value of: needle_expr\n" - " Actual: \"needle\"\n" - "Expected: not a substring of haystack_expr\n" - "Which is: \"two needles\"", - IsNotSubstring( - "needle_expr", "haystack_expr", - ::std::string("needle"), "two needles").failure_message()); + EXPECT_STREQ( + "Value of: needle_expr\n" + " Actual: \"needle\"\n" + "Expected: not a substring of haystack_expr\n" + "Which is: \"two needles\"", + IsNotSubstring("needle_expr", "haystack_expr", ::std::string("needle"), + "two needles") + .failure_message()); } #if GTEST_HAS_STD_WSTRING @@ -2755,20 +2741,20 @@ class FloatingPointTest : public Test { const Bits zero_bits = Floating(0).bits(); // Makes some numbers close to 0.0. 
- values_.close_to_positive_zero = Floating::ReinterpretBits( - zero_bits + max_ulps/2); - values_.close_to_negative_zero = -Floating::ReinterpretBits( - zero_bits + max_ulps - max_ulps/2); - values_.further_from_negative_zero = -Floating::ReinterpretBits( - zero_bits + max_ulps + 1 - max_ulps/2); + values_.close_to_positive_zero = + Floating::ReinterpretBits(zero_bits + max_ulps / 2); + values_.close_to_negative_zero = + -Floating::ReinterpretBits(zero_bits + max_ulps - max_ulps / 2); + values_.further_from_negative_zero = + -Floating::ReinterpretBits(zero_bits + max_ulps + 1 - max_ulps / 2); // The bits that represent 1.0. const Bits one_bits = Floating(1).bits(); // Makes some numbers close to 1.0. values_.close_to_one = Floating::ReinterpretBits(one_bits + max_ulps); - values_.further_from_one = Floating::ReinterpretBits( - one_bits + max_ulps + 1); + values_.further_from_one = + Floating::ReinterpretBits(one_bits + max_ulps + 1); // +infinity. values_.infinity = Floating::Infinity(); @@ -2777,23 +2763,23 @@ class FloatingPointTest : public Test { const Bits infinity_bits = Floating(values_.infinity).bits(); // Makes some numbers close to infinity. - values_.close_to_infinity = Floating::ReinterpretBits( - infinity_bits - max_ulps); - values_.further_from_infinity = Floating::ReinterpretBits( - infinity_bits - max_ulps - 1); + values_.close_to_infinity = + Floating::ReinterpretBits(infinity_bits - max_ulps); + values_.further_from_infinity = + Floating::ReinterpretBits(infinity_bits - max_ulps - 1); // Makes some NAN's. Sets the most significant bit of the fraction so that // our NaN's are quiet; trying to process a signaling NaN would raise an // exception if our environment enables floating point exceptions. 
- values_.nan1 = Floating::ReinterpretBits(Floating::kExponentBitMask - | (static_cast(1) << (Floating::kFractionBitCount - 1)) | 1); - values_.nan2 = Floating::ReinterpretBits(Floating::kExponentBitMask - | (static_cast(1) << (Floating::kFractionBitCount - 1)) | 200); + values_.nan1 = Floating::ReinterpretBits( + Floating::kExponentBitMask | + (static_cast(1) << (Floating::kFractionBitCount - 1)) | 1); + values_.nan2 = Floating::ReinterpretBits( + Floating::kExponentBitMask | + (static_cast(1) << (Floating::kFractionBitCount - 1)) | 200); } - void TestSize() { - EXPECT_EQ(sizeof(RawType), sizeof(Bits)); - } + void TestSize() { EXPECT_EQ(sizeof(RawType), sizeof(Bits)); } static TestValues values_; }; @@ -2806,17 +2792,13 @@ typename FloatingPointTest::TestValues typedef FloatingPointTest FloatTest; // Tests that the size of Float::Bits matches the size of float. -TEST_F(FloatTest, Size) { - TestSize(); -} +TEST_F(FloatTest, Size) { TestSize(); } // Tests comparing with +0 and -0. TEST_F(FloatTest, Zeros) { EXPECT_FLOAT_EQ(0.0, -0.0); - EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(-0.0, 1.0), - "1.0"); - EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(0.0, 1.5), - "1.5"); + EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(-0.0, 1.0), "1.0"); + EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(0.0, 1.5), "1.5"); } // Tests comparing numbers close to 0. @@ -2837,10 +2819,11 @@ TEST_F(FloatTest, AlmostZeros) { EXPECT_FLOAT_EQ(-0.0, v.close_to_negative_zero); EXPECT_FLOAT_EQ(v.close_to_positive_zero, v.close_to_negative_zero); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_FLOAT_EQ(v.close_to_positive_zero, - v.further_from_negative_zero); - }, "v.further_from_negative_zero"); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_FLOAT_EQ(v.close_to_positive_zero, v.further_from_negative_zero); + }, + "v.further_from_negative_zero"); } // Tests comparing numbers close to each other. @@ -2852,8 +2835,7 @@ TEST_F(FloatTest, SmallDiff) { // Tests comparing numbers far apart. 
TEST_F(FloatTest, LargeDiff) { - EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(2.5, 3.0), - "3.0"); + EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(2.5, 3.0), "3.0"); } // Tests comparing with infinity. @@ -2882,15 +2864,11 @@ TEST_F(FloatTest, NaN) { // (parentheses). static const FloatTest::TestValues& v = this->values_; - EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan1), - "v.nan1"); - EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan2), - "v.nan2"); - EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(1.0, v.nan1), - "v.nan1"); + EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan1), "v.nan1"); + EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan2), "v.nan2"); + EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(1.0, v.nan1), "v.nan1"); - EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(v.nan1, v.infinity), - "v.infinity"); + EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(v.nan1, v.infinity), "v.infinity"); } // Tests that *_FLOAT_EQ are reflexive. @@ -2944,36 +2922,40 @@ TEST_F(FloatTest, FloatLEFails) { "(2.0f) <= (1.0f)"); // or by a small yet non-negligible margin, - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(FloatLE, values_.further_from_one, 1.0f); - }, "(values_.further_from_one) <= (1.0f)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(FloatLE, values_.further_from_one, 1.0f); + }, + "(values_.further_from_one) <= (1.0f)"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(FloatLE, values_.nan1, values_.infinity); - }, "(values_.nan1) <= (values_.infinity)"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(FloatLE, -values_.infinity, values_.nan1); - }, "(-values_.infinity) <= (values_.nan1)"); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT2(FloatLE, values_.nan1, values_.nan1); - }, "(values_.nan1) <= (values_.nan1)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(FloatLE, values_.nan1, values_.infinity); + }, + "(values_.nan1) <= (values_.infinity)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + 
EXPECT_PRED_FORMAT2(FloatLE, -values_.infinity, values_.nan1); + }, + "(-values_.infinity) <= (values_.nan1)"); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT2(FloatLE, values_.nan1, values_.nan1); + }, + "(values_.nan1) <= (values_.nan1)"); } // Instantiates FloatingPointTest for testing *_DOUBLE_EQ. typedef FloatingPointTest DoubleTest; // Tests that the size of Double::Bits matches the size of double. -TEST_F(DoubleTest, Size) { - TestSize(); -} +TEST_F(DoubleTest, Size) { TestSize(); } // Tests comparing with +0 and -0. TEST_F(DoubleTest, Zeros) { EXPECT_DOUBLE_EQ(0.0, -0.0); - EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(-0.0, 1.0), - "1.0"); - EXPECT_FATAL_FAILURE(ASSERT_DOUBLE_EQ(0.0, 1.0), - "1.0"); + EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(-0.0, 1.0), "1.0"); + EXPECT_FATAL_FAILURE(ASSERT_DOUBLE_EQ(0.0, 1.0), "1.0"); } // Tests comparing numbers close to 0. @@ -2994,10 +2976,12 @@ TEST_F(DoubleTest, AlmostZeros) { EXPECT_DOUBLE_EQ(-0.0, v.close_to_negative_zero); EXPECT_DOUBLE_EQ(v.close_to_positive_zero, v.close_to_negative_zero); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_DOUBLE_EQ(v.close_to_positive_zero, - v.further_from_negative_zero); - }, "v.further_from_negative_zero"); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_DOUBLE_EQ(v.close_to_positive_zero, + v.further_from_negative_zero); + }, + "v.further_from_negative_zero"); } // Tests comparing numbers close to each other. @@ -3009,8 +2993,7 @@ TEST_F(DoubleTest, SmallDiff) { // Tests comparing numbers far apart. TEST_F(DoubleTest, LargeDiff) { - EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(2.0, 3.0), - "3.0"); + EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(2.0, 3.0), "3.0"); } // Tests comparing with infinity. @@ -3034,12 +3017,10 @@ TEST_F(DoubleTest, NaN) { static const DoubleTest::TestValues& v = this->values_; // Nokia's STLport crashes if we try to output infinity or NaN. 
- EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(v.nan1, v.nan1), - "v.nan1"); + EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(v.nan1, v.nan1), "v.nan1"); EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(v.nan1, v.nan2), "v.nan2"); EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(1.0, v.nan1), "v.nan1"); - EXPECT_FATAL_FAILURE(ASSERT_DOUBLE_EQ(v.nan1, v.infinity), - "v.infinity"); + EXPECT_FATAL_FAILURE(ASSERT_DOUBLE_EQ(v.nan1, v.infinity), "v.infinity"); } // Tests that *_DOUBLE_EQ are reflexive. @@ -3100,22 +3081,29 @@ TEST_F(DoubleTest, DoubleLEFails) { "(2.0) <= (1.0)"); // or by a small yet non-negligible margin, - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(DoubleLE, values_.further_from_one, 1.0); - }, "(values_.further_from_one) <= (1.0)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(DoubleLE, values_.further_from_one, 1.0); + }, + "(values_.further_from_one) <= (1.0)"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(DoubleLE, values_.nan1, values_.infinity); - }, "(values_.nan1) <= (values_.infinity)"); - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_PRED_FORMAT2(DoubleLE, -values_.infinity, values_.nan1); - }, " (-values_.infinity) <= (values_.nan1)"); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_PRED_FORMAT2(DoubleLE, values_.nan1, values_.nan1); - }, "(values_.nan1) <= (values_.nan1)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(DoubleLE, values_.nan1, values_.infinity); + }, + "(values_.nan1) <= (values_.infinity)"); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_PRED_FORMAT2(DoubleLE, -values_.infinity, values_.nan1); + }, + " (-values_.infinity) <= (values_.nan1)"); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_PRED_FORMAT2(DoubleLE, values_.nan1, values_.nan1); + }, + "(values_.nan1) <= (values_.nan1)"); } - // Verifies that a test or test case whose name starts with DISABLED_ is // not run. 
@@ -3127,9 +3115,7 @@ TEST(DisabledTest, DISABLED_TestShouldNotRun) { // A test whose name does not start with DISABLED_. // Should run. -TEST(DisabledTest, NotDISABLED_TestShouldRun) { - EXPECT_EQ(1, 1); -} +TEST(DisabledTest, NotDISABLED_TestShouldRun) { EXPECT_EQ(1, 1); } // A test case whose name starts with DISABLED_. // Should not run. @@ -3169,8 +3155,7 @@ TEST_F(DisabledTestsTest, DISABLED_TestShouldNotRun_2) { // Tests that disabled typed tests aren't run. template -class TypedTest : public Test { -}; +class TypedTest : public Test {}; typedef testing::Types NumericTypes; TYPED_TEST_SUITE(TypedTest, NumericTypes); @@ -3180,8 +3165,7 @@ TYPED_TEST(TypedTest, DISABLED_ShouldNotRun) { } template -class DISABLED_TypedTest : public Test { -}; +class DISABLED_TypedTest : public Test {}; TYPED_TEST_SUITE(DISABLED_TypedTest, NumericTypes); @@ -3192,8 +3176,7 @@ TYPED_TEST(DISABLED_TypedTest, ShouldNotRun) { // Tests that disabled type-parameterized tests aren't run. template -class TypedTestP : public Test { -}; +class TypedTestP : public Test {}; TYPED_TEST_SUITE_P(TypedTestP); @@ -3207,8 +3190,7 @@ REGISTER_TYPED_TEST_SUITE_P(TypedTestP, DISABLED_ShouldNotRun); INSTANTIATE_TYPED_TEST_SUITE_P(My, TypedTestP, NumericTypes); template -class DISABLED_TypedTestP : public Test { -}; +class DISABLED_TypedTestP : public Test {}; TYPED_TEST_SUITE_P(DISABLED_TypedTestP); @@ -3228,15 +3210,11 @@ class SingleEvaluationTest : public Test { // This helper function is needed by the FailedASSERT_STREQ test // below. It's public to work around C++Builder's bug with scoping local // classes. - static void CompareAndIncrementCharPtrs() { - ASSERT_STREQ(p1_++, p2_++); - } + static void CompareAndIncrementCharPtrs() { ASSERT_STREQ(p1_++, p2_++); } // This helper function is needed by the FailedASSERT_NE test below. It's // public to work around C++Builder's bug with scoping local classes. 
- static void CompareAndIncrementInts() { - ASSERT_NE(a_++, b_++); - } + static void CompareAndIncrementInts() { ASSERT_NE(a_++, b_++); } protected: SingleEvaluationTest() { @@ -3279,8 +3257,7 @@ TEST_F(SingleEvaluationTest, ASSERT_STR) { EXPECT_EQ(s2_ + 1, p2_); // failed EXPECT_STRCASEEQ - EXPECT_NONFATAL_FAILURE(EXPECT_STRCASEEQ(p1_++, p2_++), - "Ignoring case"); + EXPECT_NONFATAL_FAILURE(EXPECT_STRCASEEQ(p1_++, p2_++), "Ignoring case"); EXPECT_EQ(s1_ + 2, p1_); EXPECT_EQ(s2_ + 2, p2_); } @@ -3340,34 +3317,39 @@ TEST_F(SingleEvaluationTest, OtherCases) { #endif // GTEST_HAS_RTTI -void ThrowAnInteger() { - throw 1; -} -void ThrowRuntimeError(const char* what) { - throw std::runtime_error(what); -} +void ThrowAnInteger() { throw 1; } +void ThrowRuntimeError(const char* what) { throw std::runtime_error(what); } // Tests that assertion arguments are evaluated exactly once. TEST_F(SingleEvaluationTest, ExceptionTests) { // successful EXPECT_THROW - EXPECT_THROW({ // NOLINT - a_++; - ThrowAnInteger(); - }, int); + EXPECT_THROW( + { // NOLINT + a_++; + ThrowAnInteger(); + }, + int); EXPECT_EQ(1, a_); // failed EXPECT_THROW, throws different - EXPECT_NONFATAL_FAILURE(EXPECT_THROW({ // NOLINT - a_++; - ThrowAnInteger(); - }, bool), "throws a different type"); + EXPECT_NONFATAL_FAILURE(EXPECT_THROW( + { // NOLINT + a_++; + ThrowAnInteger(); + }, + bool), + "throws a different type"); EXPECT_EQ(2, a_); // failed EXPECT_THROW, throws runtime error - EXPECT_NONFATAL_FAILURE(EXPECT_THROW({ // NOLINT - a_++; - ThrowRuntimeError("A description"); - }, bool), "throws " ERROR_DESC " with description \"A description\""); + EXPECT_NONFATAL_FAILURE(EXPECT_THROW( + { // NOLINT + a_++; + ThrowRuntimeError("A description"); + }, + bool), + "throws " ERROR_DESC + " with description \"A description\""); EXPECT_EQ(3, a_); // failed EXPECT_THROW, throws nothing @@ -3380,9 +3362,10 @@ TEST_F(SingleEvaluationTest, ExceptionTests) { // failed EXPECT_NO_THROW 
EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW({ // NOLINT - a_++; - ThrowAnInteger(); - }), "it throws"); + a_++; + ThrowAnInteger(); + }), + "it throws"); EXPECT_EQ(6, a_); // successful EXPECT_ANY_THROW @@ -3403,12 +3386,8 @@ TEST_F(SingleEvaluationTest, ExceptionTests) { class NoFatalFailureTest : public Test { protected: void Succeeds() {} - void FailsNonFatal() { - ADD_FAILURE() << "some non-fatal failure"; - } - void Fails() { - FAIL() << "some fatal failure"; - } + void FailsNonFatal() { ADD_FAILURE() << "some non-fatal failure"; } + void Fails() { FAIL() << "some fatal failure"; } void DoAssertNoFatalFailureOnFails() { ASSERT_NO_FATAL_FAILURE(Fails()); @@ -3427,12 +3406,10 @@ TEST_F(NoFatalFailureTest, NoFailure) { } TEST_F(NoFatalFailureTest, NonFatalIsNoFailure) { - EXPECT_NONFATAL_FAILURE( - EXPECT_NO_FATAL_FAILURE(FailsNonFatal()), - "some non-fatal failure"); - EXPECT_NONFATAL_FAILURE( - ASSERT_NO_FATAL_FAILURE(FailsNonFatal()), - "some non-fatal failure"); + EXPECT_NONFATAL_FAILURE(EXPECT_NO_FATAL_FAILURE(FailsNonFatal()), + "some non-fatal failure"); + EXPECT_NONFATAL_FAILURE(ASSERT_NO_FATAL_FAILURE(FailsNonFatal()), + "some non-fatal failure"); } TEST_F(NoFatalFailureTest, AssertNoFatalFailureOnFatalFailure) { @@ -3555,8 +3532,9 @@ TEST(EditDistance, TestSuites) { EditsToString(CalculateOptimalEdits(CharsToIndices(c->left), CharsToIndices(c->right)))) << "Left <" << c->left << "> Right <" << c->right << "> Edits <" - << EditsToString(CalculateOptimalEdits( - CharsToIndices(c->left), CharsToIndices(c->right))) << ">"; + << EditsToString(CalculateOptimalEdits(CharsToIndices(c->left), + CharsToIndices(c->right))) + << ">"; EXPECT_TRUE(c->expected_diff == CreateUnifiedDiff(CharsToLines(c->left), CharsToLines(c->right))) << "Left <" << c->left << "> Right <" << c->right << "> Diff <" @@ -3569,8 +3547,7 @@ TEST(EditDistance, TestSuites) { TEST(AssertionTest, EqFailure) { const std::string foo_val("5"), bar_val("6"); const std::string msg1( - EqFailure("foo", 
"bar", foo_val, bar_val, false) - .failure_message()); + EqFailure("foo", "bar", foo_val, bar_val, false).failure_message()); EXPECT_STREQ( "Expected equality of these values:\n" " foo\n" @@ -3580,8 +3557,7 @@ TEST(AssertionTest, EqFailure) { msg1.c_str()); const std::string msg2( - EqFailure("foo", "6", foo_val, bar_val, false) - .failure_message()); + EqFailure("foo", "6", foo_val, bar_val, false).failure_message()); EXPECT_STREQ( "Expected equality of these values:\n" " foo\n" @@ -3590,8 +3566,7 @@ TEST(AssertionTest, EqFailure) { msg2.c_str()); const std::string msg3( - EqFailure("5", "bar", foo_val, bar_val, false) - .failure_message()); + EqFailure("5", "bar", foo_val, bar_val, false).failure_message()); EXPECT_STREQ( "Expected equality of these values:\n" " 5\n" @@ -3608,9 +3583,8 @@ TEST(AssertionTest, EqFailure) { msg4.c_str()); const std::string msg5( - EqFailure("foo", "bar", - std::string("\"x\""), std::string("\"y\""), - true).failure_message()); + EqFailure("foo", "bar", std::string("\"x\""), std::string("\"y\""), true) + .failure_message()); EXPECT_STREQ( "Expected equality of these values:\n" " foo\n" @@ -3645,24 +3619,21 @@ TEST(AssertionTest, AppendUserMessage) { const std::string foo("foo"); Message msg; - EXPECT_STREQ("foo", - AppendUserMessage(foo, msg).c_str()); + EXPECT_STREQ("foo", AppendUserMessage(foo, msg).c_str()); msg << "bar"; - EXPECT_STREQ("foo\nbar", - AppendUserMessage(foo, msg).c_str()); + EXPECT_STREQ("foo\nbar", AppendUserMessage(foo, msg).c_str()); } #ifdef __BORLANDC__ // Silences warnings: "Condition is always true", "Unreachable code" -# pragma option push -w-ccc -w-rch +#pragma option push -w-ccc -w-rch #endif // Tests ASSERT_TRUE. TEST(AssertionTest, ASSERT_TRUE) { ASSERT_TRUE(2 > 1); // NOLINT - EXPECT_FATAL_FAILURE(ASSERT_TRUE(2 < 1), - "2 < 1"); + EXPECT_FATAL_FAILURE(ASSERT_TRUE(2 < 1), "2 < 1"); } // Tests ASSERT_TRUE(predicate) for predicates returning AssertionResult. 
@@ -3710,7 +3681,7 @@ TEST(AssertionTest, AssertFalseWithAssertionResult) { #ifdef __BORLANDC__ // Restores warnings after previous "#pragma option push" suppressed them -# pragma option pop +#pragma option pop #endif // Tests using ASSERT_EQ on double values. The purpose is to make @@ -3721,18 +3692,19 @@ TEST(ExpectTest, ASSERT_EQ_Double) { ASSERT_EQ(5.6, 5.6); // A failure. - EXPECT_FATAL_FAILURE(ASSERT_EQ(5.1, 5.2), - "5.1"); + EXPECT_FATAL_FAILURE(ASSERT_EQ(5.1, 5.2), "5.1"); } // Tests ASSERT_EQ. TEST(AssertionTest, ASSERT_EQ) { ASSERT_EQ(5, 2 + 3); + // clang-format off EXPECT_FATAL_FAILURE(ASSERT_EQ(5, 2*3), "Expected equality of these values:\n" " 5\n" " 2*3\n" " Which is: 6"); + // clang-format on } // Tests ASSERT_EQ(NULL, pointer). @@ -3757,8 +3729,7 @@ TEST(ExpectTest, ASSERT_EQ_0) { ASSERT_EQ(0, n); // A failure. - EXPECT_FATAL_FAILURE(ASSERT_EQ(0, 5.6), - " 0\n 5.6"); + EXPECT_FATAL_FAILURE(ASSERT_EQ(0, 5.6), " 0\n 5.6"); } // Tests ASSERT_NE. @@ -3773,30 +3744,26 @@ TEST(AssertionTest, ASSERT_NE) { TEST(AssertionTest, ASSERT_LE) { ASSERT_LE(2, 3); ASSERT_LE(2, 2); - EXPECT_FATAL_FAILURE(ASSERT_LE(2, 0), - "Expected: (2) <= (0), actual: 2 vs 0"); + EXPECT_FATAL_FAILURE(ASSERT_LE(2, 0), "Expected: (2) <= (0), actual: 2 vs 0"); } // Tests ASSERT_LT. TEST(AssertionTest, ASSERT_LT) { ASSERT_LT(2, 3); - EXPECT_FATAL_FAILURE(ASSERT_LT(2, 2), - "Expected: (2) < (2), actual: 2 vs 2"); + EXPECT_FATAL_FAILURE(ASSERT_LT(2, 2), "Expected: (2) < (2), actual: 2 vs 2"); } // Tests ASSERT_GE. TEST(AssertionTest, ASSERT_GE) { ASSERT_GE(2, 1); ASSERT_GE(2, 2); - EXPECT_FATAL_FAILURE(ASSERT_GE(2, 3), - "Expected: (2) >= (3), actual: 2 vs 3"); + EXPECT_FATAL_FAILURE(ASSERT_GE(2, 3), "Expected: (2) >= (3), actual: 2 vs 3"); } // Tests ASSERT_GT. 
TEST(AssertionTest, ASSERT_GT) { ASSERT_GT(2, 1); - EXPECT_FATAL_FAILURE(ASSERT_GT(2, 2), - "Expected: (2) > (2), actual: 2 vs 2"); + EXPECT_FATAL_FAILURE(ASSERT_GT(2, 2), "Expected: (2) > (2), actual: 2 vs 2"); } #if GTEST_HAS_EXCEPTIONS @@ -3807,7 +3774,7 @@ void ThrowNothing() {} TEST(AssertionTest, ASSERT_THROW) { ASSERT_THROW(ThrowAnInteger(), int); -# ifndef __BORLANDC__ +#ifndef __BORLANDC__ // ICE's in C++Builder 2007 and 2009. EXPECT_FATAL_FAILURE( @@ -3818,9 +3785,10 @@ TEST(AssertionTest, ASSERT_THROW) { ASSERT_THROW(ThrowRuntimeError("A description"), std::logic_error), "Expected: ThrowRuntimeError(\"A description\") " "throws an exception of type std::logic_error.\n " - "Actual: it throws " ERROR_DESC " " + "Actual: it throws " ERROR_DESC + " " "with description \"A description\"."); -# endif +#endif EXPECT_FATAL_FAILURE( ASSERT_THROW(ThrowNothing(), bool), @@ -3837,17 +3805,17 @@ TEST(AssertionTest, ASSERT_NO_THROW) { EXPECT_FATAL_FAILURE(ASSERT_NO_THROW(ThrowRuntimeError("A description")), "Expected: ThrowRuntimeError(\"A description\") " "doesn't throw an exception.\n " - "Actual: it throws " ERROR_DESC " " + "Actual: it throws " ERROR_DESC + " " "with description \"A description\"."); } // Tests ASSERT_ANY_THROW. TEST(AssertionTest, ASSERT_ANY_THROW) { ASSERT_ANY_THROW(ThrowAnInteger()); - EXPECT_FATAL_FAILURE( - ASSERT_ANY_THROW(ThrowNothing()), - "Expected: ThrowNothing() throws an exception.\n" - " Actual: it doesn't."); + EXPECT_FATAL_FAILURE(ASSERT_ANY_THROW(ThrowNothing()), + "Expected: ThrowNothing() throws an exception.\n" + " Actual: it doesn't."); } #endif // GTEST_HAS_EXCEPTIONS @@ -3861,14 +3829,11 @@ TEST(AssertionTest, AssertPrecedence) { } // A subroutine used by the following test. -void TestEq1(int x) { - ASSERT_EQ(1, x); -} +void TestEq1(int x) { ASSERT_EQ(1, x); } // Tests calling a test subroutine that's not part of a fixture. 
TEST(AssertionTest, NonFixtureSubroutine) { - EXPECT_FATAL_FAILURE(TestEq1(2), - " x\n Which is: 2"); + EXPECT_FATAL_FAILURE(TestEq1(2), " x\n Which is: 2"); } // An uncopyable class. @@ -3880,6 +3845,7 @@ class Uncopyable { bool operator==(const Uncopyable& rhs) const { return value() == rhs.value(); } + private: // This constructor deliberately has no implementation, as we don't // want this class to be copyable. @@ -3892,10 +3858,7 @@ class Uncopyable { return os << value.value(); } - -bool IsPositiveUncopyable(const Uncopyable& x) { - return x.value() > 0; -} +bool IsPositiveUncopyable(const Uncopyable& x) { return x.value() > 0; } // A subroutine used by the following test. void TestAssertNonPositive() { @@ -3914,8 +3877,9 @@ TEST(AssertionTest, AssertWorksWithUncopyableObject) { Uncopyable x(5); ASSERT_PRED1(IsPositiveUncopyable, x); ASSERT_EQ(x, x); - EXPECT_FATAL_FAILURE(TestAssertNonPositive(), - "IsPositiveUncopyable(y) evaluates to false, where\ny evaluates to -1"); + EXPECT_FATAL_FAILURE( + TestAssertNonPositive(), + "IsPositiveUncopyable(y) evaluates to false, where\ny evaluates to -1"); EXPECT_FATAL_FAILURE(TestAssertEqualsUncopyable(), "Expected equality of these values:\n" " x\n Which is: 5\n y\n Which is: -1"); @@ -3926,18 +3890,16 @@ TEST(AssertionTest, ExpectWorksWithUncopyableObject) { Uncopyable x(5); EXPECT_PRED1(IsPositiveUncopyable, x); Uncopyable y(-1); - EXPECT_NONFATAL_FAILURE(EXPECT_PRED1(IsPositiveUncopyable, y), - "IsPositiveUncopyable(y) evaluates to false, where\ny evaluates to -1"); + EXPECT_NONFATAL_FAILURE( + EXPECT_PRED1(IsPositiveUncopyable, y), + "IsPositiveUncopyable(y) evaluates to false, where\ny evaluates to -1"); EXPECT_EQ(x, x); EXPECT_NONFATAL_FAILURE(EXPECT_EQ(x, y), "Expected equality of these values:\n" " x\n Which is: 5\n y\n Which is: -1"); } -enum NamedEnum { - kE1 = 0, - kE2 = 1 -}; +enum NamedEnum { kE1 = 0, kE2 = 1 }; TEST(AssertionTest, NamedEnum) { EXPECT_EQ(kE1, kE1); @@ -3953,7 +3915,7 @@ TEST(AssertionTest, 
NamedEnum) { enum { kCaseA = -1, -# if GTEST_OS_LINUX +#if GTEST_OS_LINUX // We want to test the case where the size of the anonymous enum is // larger than sizeof(int), to make sure our implementation of the @@ -3966,21 +3928,21 @@ enum { // assertions. kCaseB = testing::internal::kMaxBiggestInt, -# else +#else kCaseB = INT_MAX, -# endif // GTEST_OS_LINUX +#endif // GTEST_OS_LINUX kCaseC = 42 }; TEST(AssertionTest, AnonymousEnum) { -# if GTEST_OS_LINUX +#if GTEST_OS_LINUX EXPECT_EQ(static_cast(kCaseA), static_cast(kCaseB)); -# endif // GTEST_OS_LINUX +#endif // GTEST_OS_LINUX EXPECT_EQ(kCaseA, kCaseA); EXPECT_NE(kCaseA, kCaseB); @@ -3988,10 +3950,8 @@ TEST(AssertionTest, AnonymousEnum) { EXPECT_LE(kCaseA, kCaseB); EXPECT_GT(kCaseB, kCaseA); EXPECT_GE(kCaseA, kCaseA); - EXPECT_NONFATAL_FAILURE(EXPECT_GE(kCaseA, kCaseB), - "(kCaseA) >= (kCaseB)"); - EXPECT_NONFATAL_FAILURE(EXPECT_GE(kCaseA, kCaseC), - "-1 vs 42"); + EXPECT_NONFATAL_FAILURE(EXPECT_GE(kCaseA, kCaseB), "(kCaseA) >= (kCaseB)"); + EXPECT_NONFATAL_FAILURE(EXPECT_GE(kCaseA, kCaseC), "-1 vs 42"); ASSERT_EQ(kCaseA, kCaseA); ASSERT_NE(kCaseA, kCaseB); @@ -4000,34 +3960,25 @@ TEST(AssertionTest, AnonymousEnum) { ASSERT_GT(kCaseB, kCaseA); ASSERT_GE(kCaseA, kCaseA); -# ifndef __BORLANDC__ +#ifndef __BORLANDC__ // ICE's in C++Builder. 
- EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseB), - " kCaseB\n Which is: "); - EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseC), - "\n Which is: 42"); -# endif + EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseB), " kCaseB\n Which is: "); + EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseC), "\n Which is: 42"); +#endif - EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseC), - "\n Which is: -1"); + EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseC), "\n Which is: -1"); } #endif // !GTEST_OS_MAC && !defined(__SUNPRO_CC) #if GTEST_OS_WINDOWS -static HRESULT UnexpectedHRESULTFailure() { - return E_UNEXPECTED; -} +static HRESULT UnexpectedHRESULTFailure() { return E_UNEXPECTED; } -static HRESULT OkHRESULTSuccess() { - return S_OK; -} +static HRESULT OkHRESULTSuccess() { return S_OK; } -static HRESULT FalseHRESULTSuccess() { - return S_FALSE; -} +static HRESULT FalseHRESULTSuccess() { return S_FALSE; } // HRESULT assertion tests test both zero and non-zero // success codes as well as failure message for each. @@ -4038,8 +3989,8 @@ TEST(HRESULTAssertionTest, EXPECT_HRESULT_SUCCEEDED) { EXPECT_HRESULT_SUCCEEDED(S_FALSE); EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_SUCCEEDED(UnexpectedHRESULTFailure()), - "Expected: (UnexpectedHRESULTFailure()) succeeds.\n" - " Actual: 0x8000FFFF"); + "Expected: (UnexpectedHRESULTFailure()) succeeds.\n" + " Actual: 0x8000FFFF"); } TEST(HRESULTAssertionTest, ASSERT_HRESULT_SUCCEEDED) { @@ -4047,35 +3998,35 @@ TEST(HRESULTAssertionTest, ASSERT_HRESULT_SUCCEEDED) { ASSERT_HRESULT_SUCCEEDED(S_FALSE); EXPECT_FATAL_FAILURE(ASSERT_HRESULT_SUCCEEDED(UnexpectedHRESULTFailure()), - "Expected: (UnexpectedHRESULTFailure()) succeeds.\n" - " Actual: 0x8000FFFF"); + "Expected: (UnexpectedHRESULTFailure()) succeeds.\n" + " Actual: 0x8000FFFF"); } TEST(HRESULTAssertionTest, EXPECT_HRESULT_FAILED) { EXPECT_HRESULT_FAILED(E_UNEXPECTED); EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_FAILED(OkHRESULTSuccess()), - "Expected: (OkHRESULTSuccess()) fails.\n" - " Actual: 0x0"); + "Expected: 
(OkHRESULTSuccess()) fails.\n" + " Actual: 0x0"); EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_FAILED(FalseHRESULTSuccess()), - "Expected: (FalseHRESULTSuccess()) fails.\n" - " Actual: 0x1"); + "Expected: (FalseHRESULTSuccess()) fails.\n" + " Actual: 0x1"); } TEST(HRESULTAssertionTest, ASSERT_HRESULT_FAILED) { ASSERT_HRESULT_FAILED(E_UNEXPECTED); -# ifndef __BORLANDC__ +#ifndef __BORLANDC__ // ICE's in C++Builder 2007 and 2009. EXPECT_FATAL_FAILURE(ASSERT_HRESULT_FAILED(OkHRESULTSuccess()), - "Expected: (OkHRESULTSuccess()) fails.\n" - " Actual: 0x0"); -# endif + "Expected: (OkHRESULTSuccess()) fails.\n" + " Actual: 0x0"); +#endif EXPECT_FATAL_FAILURE(ASSERT_HRESULT_FAILED(FalseHRESULTSuccess()), - "Expected: (FalseHRESULTSuccess()) fails.\n" - " Actual: 0x1"); + "Expected: (FalseHRESULTSuccess()) fails.\n" + " Actual: 0x1"); } // Tests that streaming to the HRESULT macros works. @@ -4085,25 +4036,23 @@ TEST(HRESULTAssertionTest, Streaming) { EXPECT_HRESULT_FAILED(E_UNEXPECTED) << "unexpected failure"; ASSERT_HRESULT_FAILED(E_UNEXPECTED) << "unexpected failure"; - EXPECT_NONFATAL_FAILURE( - EXPECT_HRESULT_SUCCEEDED(E_UNEXPECTED) << "expected failure", - "expected failure"); + EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_SUCCEEDED(E_UNEXPECTED) + << "expected failure", + "expected failure"); -# ifndef __BORLANDC__ +#ifndef __BORLANDC__ // ICE's in C++Builder 2007 and 2009. 
- EXPECT_FATAL_FAILURE( - ASSERT_HRESULT_SUCCEEDED(E_UNEXPECTED) << "expected failure", - "expected failure"); -# endif + EXPECT_FATAL_FAILURE(ASSERT_HRESULT_SUCCEEDED(E_UNEXPECTED) + << "expected failure", + "expected failure"); +#endif - EXPECT_NONFATAL_FAILURE( - EXPECT_HRESULT_FAILED(S_OK) << "expected failure", - "expected failure"); + EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_FAILED(S_OK) << "expected failure", + "expected failure"); - EXPECT_FATAL_FAILURE( - ASSERT_HRESULT_FAILED(S_OK) << "expected failure", - "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_HRESULT_FAILED(S_OK) << "expected failure", + "expected failure"); } #endif // GTEST_OS_WINDOWS @@ -4126,8 +4075,7 @@ TEST(AssertionSyntaxTest, BasicAssertionsBehavesLikeSingleStatement) { else ; // NOLINT - if (AlwaysFalse()) - ASSERT_LT(1, 3); + if (AlwaysFalse()) ASSERT_LT(1, 3); if (AlwaysFalse()) ; // NOLINT @@ -4165,24 +4113,21 @@ TEST(ExpectThrowTest, DoesNotGenerateDuplicateCatchClauseWarning) { #pragma GCC diagnostic ignored "-Wpragmas" #endif TEST(AssertionSyntaxTest, ExceptionAssertionsBehavesLikeSingleStatement) { - if (AlwaysFalse()) - EXPECT_THROW(ThrowNothing(), bool); + if (AlwaysFalse()) EXPECT_THROW(ThrowNothing(), bool); if (AlwaysTrue()) EXPECT_THROW(ThrowAnInteger(), int); else ; // NOLINT - if (AlwaysFalse()) - EXPECT_NO_THROW(ThrowAnInteger()); + if (AlwaysFalse()) EXPECT_NO_THROW(ThrowAnInteger()); if (AlwaysTrue()) EXPECT_NO_THROW(ThrowNothing()); else ; // NOLINT - if (AlwaysFalse()) - EXPECT_ANY_THROW(ThrowNothing()); + if (AlwaysFalse()) EXPECT_ANY_THROW(ThrowNothing()); if (AlwaysTrue()) EXPECT_ANY_THROW(ThrowAnInteger()); @@ -4238,8 +4183,8 @@ TEST(AssertionSyntaxTest, WorksWithSwitch) { } switch (0) - case 0: - EXPECT_FALSE(false) << "EXPECT_FALSE failed in switch case"; + case 0: + EXPECT_FALSE(false) << "EXPECT_FALSE failed in switch case"; // Binary assertions are implemented using a different code path // than the Boolean assertions. Hence we test them separately. 
@@ -4250,22 +4195,20 @@ TEST(AssertionSyntaxTest, WorksWithSwitch) { } switch (0) - case 0: - EXPECT_NE(1, 2); + case 0: + EXPECT_NE(1, 2); } #if GTEST_HAS_EXCEPTIONS -void ThrowAString() { - throw "std::string"; -} +void ThrowAString() { throw "std::string"; } // Test that the exception assertion macros compile and work with const // type qualifier. TEST(AssertionSyntaxTest, WorksWithConst) { - ASSERT_THROW(ThrowAString(), const char*); + ASSERT_THROW(ThrowAString(), const char*); - EXPECT_THROW(ThrowAString(), const char*); + EXPECT_THROW(ThrowAString(), const char*); } #endif // GTEST_HAS_EXCEPTIONS @@ -4363,22 +4306,19 @@ TEST(AssertionWithMessageTest, ASSERT_FLOATING) { // Tests using ASSERT_FALSE with a streamed message. TEST(AssertionWithMessageTest, ASSERT_FALSE) { ASSERT_FALSE(false) << "This shouldn't fail."; - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_FALSE(true) << "Expected failure: " << 2 << " > " << 1 - << " evaluates to " << true; - }, "Expected failure"); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_FALSE(true) << "Expected failure: " << 2 << " > " << 1 + << " evaluates to " << true; + }, + "Expected failure"); } // Tests using FAIL with a streamed message. -TEST(AssertionWithMessageTest, FAIL) { - EXPECT_FATAL_FAILURE(FAIL() << 0, - "0"); -} +TEST(AssertionWithMessageTest, FAIL) { EXPECT_FATAL_FAILURE(FAIL() << 0, "0"); } // Tests using SUCCEED with a streamed message. -TEST(AssertionWithMessageTest, SUCCEED) { - SUCCEED() << "Success == " << 1; -} +TEST(AssertionWithMessageTest, SUCCEED) { SUCCEED() << "Success == " << 1; } // Tests using ASSERT_TRUE with a streamed message. TEST(AssertionWithMessageTest, ASSERT_TRUE) { @@ -4395,13 +4335,16 @@ TEST(AssertionWithMessageTest, ASSERT_TRUE) { #if GTEST_OS_WINDOWS // Tests using wide strings in assertion messages. 
TEST(AssertionWithMessageTest, WideStringMessage) { - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_TRUE(false) << L"This failure is expected.\x8119"; - }, "This failure is expected."); - EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_EQ(1, 2) << "This failure is " - << L"expected too.\x8120"; - }, "This failure is expected too."); + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_TRUE(false) << L"This failure is expected.\x8119"; + }, + "This failure is expected."); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_EQ(1, 2) << "This failure is " << L"expected too.\x8120"; + }, + "This failure is expected too."); } #endif // GTEST_OS_WINDOWS @@ -4417,8 +4360,7 @@ TEST(ExpectTest, EXPECT_TRUE) { "Value of: 2 < 1\n" " Actual: false\n" "Expected: true"); - EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(2 > 3), - "2 > 3"); + EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(2 > 3), "2 > 3"); } // Tests EXPECT_TRUE(predicate) for predicates returning AssertionResult. @@ -4447,8 +4389,7 @@ TEST(ExpectTest, EXPECT_FALSE) { "Value of: 2 > 1\n" " Actual: true\n" "Expected: false"); - EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(2 < 3), - "2 < 3"); + EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(2 < 3), "2 < 3"); } // Tests EXPECT_FALSE(predicate) for predicates returning AssertionResult. @@ -4467,19 +4408,20 @@ TEST(ExpectTest, ExpectFalseWithAssertionResult) { #ifdef __BORLANDC__ // Restores warnings after previous "#pragma option push" suppressed them -# pragma option pop +#pragma option pop #endif // Tests EXPECT_EQ. TEST(ExpectTest, EXPECT_EQ) { EXPECT_EQ(5, 2 + 3); + // clang-format off EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5, 2*3), "Expected equality of these values:\n" " 5\n" " 2*3\n" " Which is: 6"); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5, 2 - 3), - "2 - 3"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5, 2 - 3), "2 - 3"); + // clang-format on } // Tests using EXPECT_EQ on double values. The purpose is to make @@ -4490,8 +4432,7 @@ TEST(ExpectTest, EXPECT_EQ_Double) { EXPECT_EQ(5.6, 5.6); // A failure. 
- EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5.1, 5.2), - "5.1"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5.1, 5.2), "5.1"); } // Tests EXPECT_EQ(NULL, pointer). @@ -4516,8 +4457,7 @@ TEST(ExpectTest, EXPECT_EQ_0) { EXPECT_EQ(0, n); // A failure. - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(0, 5.6), - " 0\n 5.6"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(0, 5.6), " 0\n 5.6"); } // Tests EXPECT_NE. @@ -4527,19 +4467,16 @@ TEST(ExpectTest, EXPECT_NE) { EXPECT_NONFATAL_FAILURE(EXPECT_NE('a', 'a'), "Expected: ('a') != ('a'), " "actual: 'a' (97, 0x61) vs 'a' (97, 0x61)"); - EXPECT_NONFATAL_FAILURE(EXPECT_NE(2, 2), - "2"); + EXPECT_NONFATAL_FAILURE(EXPECT_NE(2, 2), "2"); char* const p0 = nullptr; - EXPECT_NONFATAL_FAILURE(EXPECT_NE(p0, p0), - "p0"); + EXPECT_NONFATAL_FAILURE(EXPECT_NE(p0, p0), "p0"); // Only way to get the Nokia compiler to compile the cast // is to have a separate void* variable first. Putting // the two casts on the same line doesn't work, neither does // a direct C-style to char*. void* pv1 = (void*)0x1234; // NOLINT char* const p1 = reinterpret_cast(pv1); - EXPECT_NONFATAL_FAILURE(EXPECT_NE(p1, p1), - "p1"); + EXPECT_NONFATAL_FAILURE(EXPECT_NE(p1, p1), "p1"); } // Tests EXPECT_LE. @@ -4548,8 +4485,7 @@ TEST(ExpectTest, EXPECT_LE) { EXPECT_LE(2, 2); EXPECT_NONFATAL_FAILURE(EXPECT_LE(2, 0), "Expected: (2) <= (0), actual: 2 vs 0"); - EXPECT_NONFATAL_FAILURE(EXPECT_LE(1.1, 0.9), - "(1.1) <= (0.9)"); + EXPECT_NONFATAL_FAILURE(EXPECT_LE(1.1, 0.9), "(1.1) <= (0.9)"); } // Tests EXPECT_LT. @@ -4557,8 +4493,7 @@ TEST(ExpectTest, EXPECT_LT) { EXPECT_LT(2, 3); EXPECT_NONFATAL_FAILURE(EXPECT_LT(2, 2), "Expected: (2) < (2), actual: 2 vs 2"); - EXPECT_NONFATAL_FAILURE(EXPECT_LT(2, 1), - "(2) < (1)"); + EXPECT_NONFATAL_FAILURE(EXPECT_LT(2, 1), "(2) < (1)"); } // Tests EXPECT_GE. 
@@ -4567,8 +4502,7 @@ TEST(ExpectTest, EXPECT_GE) { EXPECT_GE(2, 2); EXPECT_NONFATAL_FAILURE(EXPECT_GE(2, 3), "Expected: (2) >= (3), actual: 2 vs 3"); - EXPECT_NONFATAL_FAILURE(EXPECT_GE(0.9, 1.1), - "(0.9) >= (1.1)"); + EXPECT_NONFATAL_FAILURE(EXPECT_GE(0.9, 1.1), "(0.9) >= (1.1)"); } // Tests EXPECT_GT. @@ -4576,8 +4510,7 @@ TEST(ExpectTest, EXPECT_GT) { EXPECT_GT(2, 1); EXPECT_NONFATAL_FAILURE(EXPECT_GT(2, 2), "Expected: (2) > (2), actual: 2 vs 2"); - EXPECT_NONFATAL_FAILURE(EXPECT_GT(2, 3), - "(2) > (3)"); + EXPECT_NONFATAL_FAILURE(EXPECT_GT(2, 3), "(2) > (3)"); } #if GTEST_HAS_EXCEPTIONS @@ -4588,12 +4521,13 @@ TEST(ExpectTest, EXPECT_THROW) { EXPECT_NONFATAL_FAILURE(EXPECT_THROW(ThrowAnInteger(), bool), "Expected: ThrowAnInteger() throws an exception of " "type bool.\n Actual: it throws a different type."); - EXPECT_NONFATAL_FAILURE(EXPECT_THROW(ThrowRuntimeError("A description"), - std::logic_error), - "Expected: ThrowRuntimeError(\"A description\") " - "throws an exception of type std::logic_error.\n " - "Actual: it throws " ERROR_DESC " " - "with description \"A description\"."); + EXPECT_NONFATAL_FAILURE( + EXPECT_THROW(ThrowRuntimeError("A description"), std::logic_error), + "Expected: ThrowRuntimeError(\"A description\") " + "throws an exception of type std::logic_error.\n " + "Actual: it throws " ERROR_DESC + " " + "with description \"A description\"."); EXPECT_NONFATAL_FAILURE( EXPECT_THROW(ThrowNothing(), bool), "Expected: ThrowNothing() throws an exception of type bool.\n" @@ -4609,17 +4543,17 @@ TEST(ExpectTest, EXPECT_NO_THROW) { EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW(ThrowRuntimeError("A description")), "Expected: ThrowRuntimeError(\"A description\") " "doesn't throw an exception.\n " - "Actual: it throws " ERROR_DESC " " + "Actual: it throws " ERROR_DESC + " " "with description \"A description\"."); } // Tests EXPECT_ANY_THROW. 
TEST(ExpectTest, EXPECT_ANY_THROW) { EXPECT_ANY_THROW(ThrowAnInteger()); - EXPECT_NONFATAL_FAILURE( - EXPECT_ANY_THROW(ThrowNothing()), - "Expected: ThrowNothing() throws an exception.\n" - " Actual: it doesn't."); + EXPECT_NONFATAL_FAILURE(EXPECT_ANY_THROW(ThrowNothing()), + "Expected: ThrowNothing() throws an exception.\n" + " Actual: it doesn't."); } #endif // GTEST_HAS_EXCEPTIONS @@ -4631,7 +4565,6 @@ TEST(ExpectTest, ExpectPrecedence) { " true && false\n Which is: false"); } - // Tests the StreamableToString() function. // Tests using StreamableToString() on a scalar. @@ -4669,8 +4602,7 @@ TEST(StreamableToStringTest, NullCString) { TEST(StreamableTest, string) { static const std::string str( "This failure message is a std::string, and is expected."); - EXPECT_FATAL_FAILURE(FAIL() << str, - str.c_str()); + EXPECT_FATAL_FAILURE(FAIL() << str, str.c_str()); } // Tests that we can output strings containing embedded NULs. @@ -4678,25 +4610,24 @@ TEST(StreamableTest, string) { TEST(StreamableTest, stringWithEmbeddedNUL) { static const char char_array_with_nul[] = "Here's a NUL\0 and some more string"; - static const std::string string_with_nul(char_array_with_nul, - sizeof(char_array_with_nul) - - 1); // drops the trailing NUL + static const std::string string_with_nul( + char_array_with_nul, + sizeof(char_array_with_nul) - 1); // drops the trailing NUL EXPECT_FATAL_FAILURE(FAIL() << string_with_nul, "Here's a NUL\\0 and some more string"); } // Tests that we can output a NUL char. TEST(StreamableTest, NULChar) { - EXPECT_FATAL_FAILURE({ // NOLINT - FAIL() << "A NUL" << '\0' << " and some more string"; - }, "A NUL\\0 and some more string"); + EXPECT_FATAL_FAILURE( + { // NOLINT + FAIL() << "A NUL" << '\0' << " and some more string"; + }, + "A NUL\\0 and some more string"); } // Tests using int as an assertion message. 
-TEST(StreamableTest, int) { - EXPECT_FATAL_FAILURE(FAIL() << 900913, - "900913"); -} +TEST(StreamableTest, int) { EXPECT_FATAL_FAILURE(FAIL() << 900913, "900913"); } // Tests using NULL char pointer as an assertion message. // @@ -4710,10 +4641,12 @@ TEST(StreamableTest, NullCharPtr) { // Tests that basic IO manipulators (endl, ends, and flush) can be // streamed to testing::Message. TEST(StreamableTest, BasicIoManip) { - EXPECT_FATAL_FAILURE({ // NOLINT - FAIL() << "Line 1." << std::endl - << "A NUL char " << std::ends << std::flush << " in line 2."; - }, "Line 1.\nA NUL char \\0 in line 2."); + EXPECT_FATAL_FAILURE( + { // NOLINT + FAIL() << "Line 1." << std::endl + << "A NUL char " << std::ends << std::flush << " in line 2."; + }, + "Line 1.\nA NUL char \\0 in line 2."); } // Tests the macros that haven't been covered so far. @@ -4727,8 +4660,7 @@ void AddFailureHelper(bool* aborted) { // Tests ADD_FAILURE. TEST(MacroTest, ADD_FAILURE) { bool aborted = true; - EXPECT_NONFATAL_FAILURE(AddFailureHelper(&aborted), - "Intentional failure."); + EXPECT_NONFATAL_FAILURE(AddFailureHelper(&aborted), "Intentional failure."); EXPECT_FALSE(aborted); } @@ -4749,8 +4681,7 @@ TEST(MacroTest, ADD_FAILURE_AT) { // Tests FAIL. TEST(MacroTest, FAIL) { - EXPECT_FATAL_FAILURE(FAIL(), - "Failed"); + EXPECT_FATAL_FAILURE(FAIL(), "Failed"); EXPECT_FATAL_FAILURE(FAIL() << "Intentional failure.", "Intentional failure."); } @@ -4783,37 +4714,34 @@ TEST(MacroTest, SUCCEED) { // Tests using bool values in {EXPECT|ASSERT}_EQ. TEST(EqAssertionTest, Bool) { - EXPECT_EQ(true, true); - EXPECT_FATAL_FAILURE({ - bool false_value = false; - ASSERT_EQ(false_value, true); - }, " false_value\n Which is: false\n true"); + EXPECT_EQ(true, true); + EXPECT_FATAL_FAILURE( + { + bool false_value = false; + ASSERT_EQ(false_value, true); + }, + " false_value\n Which is: false\n true"); } // Tests using int values in {EXPECT|ASSERT}_EQ. 
TEST(EqAssertionTest, Int) { ASSERT_EQ(32, 32); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(32, 33), - " 32\n 33"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(32, 33), " 32\n 33"); } // Tests using time_t values in {EXPECT|ASSERT}_EQ. TEST(EqAssertionTest, Time_T) { - EXPECT_EQ(static_cast(0), - static_cast(0)); - EXPECT_FATAL_FAILURE(ASSERT_EQ(static_cast(0), - static_cast(1234)), - "1234"); + EXPECT_EQ(static_cast(0), static_cast(0)); + EXPECT_FATAL_FAILURE( + ASSERT_EQ(static_cast(0), static_cast(1234)), "1234"); } // Tests using char values in {EXPECT|ASSERT}_EQ. TEST(EqAssertionTest, Char) { ASSERT_EQ('z', 'z'); const char ch = 'b'; - EXPECT_NONFATAL_FAILURE(EXPECT_EQ('\0', ch), - " ch\n Which is: 'b'"); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ('a', ch), - " ch\n Which is: 'b'"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ('\0', ch), " ch\n Which is: 'b'"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ('a', ch), " ch\n Which is: 'b'"); } // Tests using wchar_t values in {EXPECT|ASSERT}_EQ. @@ -4829,8 +4757,7 @@ TEST(EqAssertionTest, WideChar) { static wchar_t wchar; wchar = L'b'; - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(L'a', wchar), - "wchar"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(L'a', wchar), "wchar"); wchar = 0x8119; EXPECT_FATAL_FAILURE(ASSERT_EQ(static_cast(0x8120), wchar), " wchar\n Which is: L'"); @@ -4849,13 +4776,11 @@ TEST(EqAssertionTest, StdString) { // Compares a const char* to an std::string that has different // content - EXPECT_NONFATAL_FAILURE(EXPECT_EQ("Test", ::std::string("test")), - "\"test\""); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ("Test", ::std::string("test")), "\"test\""); // Compares an std::string to a char* that has different content. char* const p1 = const_cast("foo"); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(::std::string("bar"), p1), - "p1"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(::std::string("bar"), p1), "p1"); // Compares two std::strings that have different contents, one of // which having a NUL character in the middle. This should fail. 
@@ -4876,28 +4801,31 @@ TEST(EqAssertionTest, StdWideString) { // Compares an std::wstring to a const wchar_t* that has identical // content. - const wchar_t kTestX8119[] = { 'T', 'e', 's', 't', 0x8119, '\0' }; + const wchar_t kTestX8119[] = {'T', 'e', 's', 't', 0x8119, '\0'}; EXPECT_EQ(::std::wstring(kTestX8119), kTestX8119); // Compares an std::wstring to a const wchar_t* that has different // content. - const wchar_t kTestX8120[] = { 'T', 'e', 's', 't', 0x8120, '\0' }; - EXPECT_NONFATAL_FAILURE({ // NOLINT - EXPECT_EQ(::std::wstring(kTestX8119), kTestX8120); - }, "kTestX8120"); + const wchar_t kTestX8120[] = {'T', 'e', 's', 't', 0x8120, '\0'}; + EXPECT_NONFATAL_FAILURE( + { // NOLINT + EXPECT_EQ(::std::wstring(kTestX8119), kTestX8120); + }, + "kTestX8120"); // Compares two std::wstrings that have different contents, one of // which having a NUL character in the middle. ::std::wstring wstr3(wstr1); wstr3.at(2) = L'\0'; - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(wstr1, wstr3), - "wstr3"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(wstr1, wstr3), "wstr3"); // Compares a wchar_t* to an std::wstring that has different // content. 
- EXPECT_FATAL_FAILURE({ // NOLINT - ASSERT_EQ(const_cast(L"foo"), ::std::wstring(L"bar")); - }, ""); + EXPECT_FATAL_FAILURE( + { // NOLINT + ASSERT_EQ(const_cast(L"foo"), ::std::wstring(L"bar")); + }, + ""); } #endif // GTEST_HAS_STD_WSTRING @@ -4915,10 +4843,8 @@ TEST(EqAssertionTest, CharPointer) { char* const p2 = reinterpret_cast(pv2); ASSERT_EQ(p1, p1); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p0, p2), - " p2\n Which is:"); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, p2), - " p2\n Which is:"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p0, p2), " p2\n Which is:"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, p2), " p2\n Which is:"); EXPECT_FATAL_FAILURE(ASSERT_EQ(reinterpret_cast(0x1234), reinterpret_cast(0xABC0)), "ABC0"); @@ -4937,16 +4863,13 @@ TEST(EqAssertionTest, WideCharPointer) { wchar_t* const p2 = reinterpret_cast(pv2); EXPECT_EQ(p0, p0); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p0, p2), - " p2\n Which is:"); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, p2), - " p2\n Which is:"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p0, p2), " p2\n Which is:"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, p2), " p2\n Which is:"); void* pv3 = (void*)0x1234; // NOLINT void* pv4 = (void*)0xABC0; // NOLINT const wchar_t* p3 = reinterpret_cast(pv3); const wchar_t* p4 = reinterpret_cast(pv4); - EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p3, p4), - "p4"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p3, p4), "p4"); } // Tests using other types of pointers in {EXPECT|ASSERT}_EQ. 
@@ -4968,15 +4891,11 @@ class UnprintableChar { bool operator!=(const UnprintableChar& rhs) const { return char_ != rhs.char_; } - bool operator<(const UnprintableChar& rhs) const { - return char_ < rhs.char_; - } + bool operator<(const UnprintableChar& rhs) const { return char_ < rhs.char_; } bool operator<=(const UnprintableChar& rhs) const { return char_ <= rhs.char_; } - bool operator>(const UnprintableChar& rhs) const { - return char_ > rhs.char_; - } + bool operator>(const UnprintableChar& rhs) const { return char_ > rhs.char_; } bool operator>=(const UnprintableChar& rhs) const { return char_ >= rhs.char_; } @@ -5038,9 +4957,7 @@ class Foo { // Tests that the FRIEND_TEST declaration allows a TEST to access a // class's private members. This should compile. -TEST(FRIEND_TEST_Test, TEST) { - ASSERT_EQ(1, Foo().Bar()); -} +TEST(FRIEND_TEST_Test, TEST) { ASSERT_EQ(1, Foo().Bar()); } // The fixture needed to test using FRIEND_TEST with TEST_F. class FRIEND_TEST_Test2 : public Test { @@ -5050,9 +4967,7 @@ class FRIEND_TEST_Test2 : public Test { // Tests that the FRIEND_TEST declaration allows a TEST_F to access a // class's private members. This should compile. -TEST_F(FRIEND_TEST_Test2, TEST_F) { - ASSERT_EQ(1, foo.Bar()); -} +TEST_F(FRIEND_TEST_Test2, TEST_F) { ASSERT_EQ(1, foo.Bar()); } // Tests the life cycle of Test objects. 
@@ -5187,15 +5102,14 @@ class Base { public: explicit Base(int an_x) : x_(an_x) {} int x() const { return x_; } + private: int x_; }; -std::ostream& operator<<(std::ostream& os, - const Base& val) { +std::ostream& operator<<(std::ostream& os, const Base& val) { return os << val.x(); } -std::ostream& operator<<(std::ostream& os, - const Base* pointer) { +std::ostream& operator<<(std::ostream& os, const Base* pointer) { return os << "(" << pointer->x() << ")"; } @@ -5212,7 +5126,7 @@ TEST(MessageTest, CanStreamUserTypeInGlobalNameSpace) { namespace { class MyTypeInUnnamedNameSpace : public Base { public: - explicit MyTypeInUnnamedNameSpace(int an_x): Base(an_x) {} + explicit MyTypeInUnnamedNameSpace(int an_x) : Base(an_x) {} }; std::ostream& operator<<(std::ostream& os, const MyTypeInUnnamedNameSpace& val) { @@ -5237,14 +5151,12 @@ TEST(MessageTest, CanStreamUserTypeInUnnamedNameSpace) { namespace namespace1 { class MyTypeInNameSpace1 : public Base { public: - explicit MyTypeInNameSpace1(int an_x): Base(an_x) {} + explicit MyTypeInNameSpace1(int an_x) : Base(an_x) {} }; -std::ostream& operator<<(std::ostream& os, - const MyTypeInNameSpace1& val) { +std::ostream& operator<<(std::ostream& os, const MyTypeInNameSpace1& val) { return os << val.x(); } -std::ostream& operator<<(std::ostream& os, - const MyTypeInNameSpace1* pointer) { +std::ostream& operator<<(std::ostream& os, const MyTypeInNameSpace1* pointer) { return os << "(" << pointer->x() << ")"; } } // namespace namespace1 @@ -5262,7 +5174,7 @@ TEST(MessageTest, CanStreamUserTypeInUserNameSpace) { namespace namespace2 { class MyTypeInNameSpace2 : public ::Base { public: - explicit MyTypeInNameSpace2(int an_x): Base(an_x) {} + explicit MyTypeInNameSpace2(int an_x) : Base(an_x) {} }; } // namespace namespace2 std::ostream& operator<<(std::ostream& os, @@ -5293,21 +5205,18 @@ TEST(MessageTest, NullPointers) { Message* p6 = nullptr; msg << p1 << p2 << p3 << p4 << p5 << p6; - 
ASSERT_STREQ("(null)(null)(null)(null)(null)(null)", - msg.GetString().c_str()); + ASSERT_STREQ("(null)(null)(null)(null)(null)(null)", msg.GetString().c_str()); } // Tests streaming wide strings to testing::Message. TEST(MessageTest, WideStrings) { // Streams a NULL of type const wchar_t*. const wchar_t* const_wstr = nullptr; - EXPECT_STREQ("(null)", - (Message() << const_wstr).GetString().c_str()); + EXPECT_STREQ("(null)", (Message() << const_wstr).GetString().c_str()); // Streams a NULL of type wchar_t*. wchar_t* wstr = nullptr; - EXPECT_STREQ("(null)", - (Message() << wstr).GetString().c_str()); + EXPECT_STREQ("(null)", (Message() << wstr).GetString().c_str()); // Streams a non-NULL of type const wchar_t*. const_wstr = L"abc\x8119"; @@ -5316,11 +5225,9 @@ TEST(MessageTest, WideStrings) { // Streams a non-NULL of type wchar_t*. wstr = const_cast(const_wstr); - EXPECT_STREQ("abc\xe8\x84\x99", - (Message() << wstr).GetString().c_str()); + EXPECT_STREQ("abc\xe8\x84\x99", (Message() << wstr).GetString().c_str()); } - // This line tests that we can define tests in the testing namespace. 
namespace testing { @@ -5334,14 +5241,12 @@ class TestInfoTest : public Test { for (int i = 0; i < test_suite->total_test_count(); ++i) { const TestInfo* const test_info = test_suite->GetTestInfo(i); - if (strcmp(test_name, test_info->name()) == 0) - return test_info; + if (strcmp(test_name, test_info->name()) == 0) return test_info; } return nullptr; } - static const TestResult* GetTestResult( - const TestInfo* test_info) { + static const TestResult* GetTestResult(const TestInfo* test_info) { return test_info->result(); } }; @@ -5365,26 +5270,25 @@ TEST_F(TestInfoTest, result) { ASSERT_EQ(0, GetTestResult(test_info)->total_part_count()); } -#define VERIFY_CODE_LOCATION \ - const int expected_line = __LINE__ - 1; \ +#define VERIFY_CODE_LOCATION \ + const int expected_line = __LINE__ - 1; \ const TestInfo* const test_info = GetUnitTestImpl()->current_test_info(); \ - ASSERT_TRUE(test_info); \ - EXPECT_STREQ(__FILE__, test_info->file()); \ + ASSERT_TRUE(test_info); \ + EXPECT_STREQ(__FILE__, test_info->file()); \ EXPECT_EQ(expected_line, test_info->line()) +// clang-format off TEST(CodeLocationForTEST, Verify) { VERIFY_CODE_LOCATION; } -class CodeLocationForTESTF : public Test { -}; +class CodeLocationForTESTF : public Test {}; TEST_F(CodeLocationForTESTF, Verify) { VERIFY_CODE_LOCATION; } -class CodeLocationForTESTP : public TestWithParam { -}; +class CodeLocationForTESTP : public TestWithParam {}; TEST_P(CodeLocationForTESTP, Verify) { VERIFY_CODE_LOCATION; @@ -5393,8 +5297,7 @@ TEST_P(CodeLocationForTESTP, Verify) { INSTANTIATE_TEST_SUITE_P(, CodeLocationForTESTP, Values(0)); template -class CodeLocationForTYPEDTEST : public Test { -}; +class CodeLocationForTYPEDTEST : public Test {}; TYPED_TEST_SUITE(CodeLocationForTYPEDTEST, int); @@ -5403,8 +5306,7 @@ TYPED_TEST(CodeLocationForTYPEDTEST, Verify) { } template -class CodeLocationForTYPEDTESTP : public Test { -}; +class CodeLocationForTYPEDTESTP : public Test {}; TYPED_TEST_SUITE_P(CodeLocationForTYPEDTESTP); @@ 
-5417,6 +5319,7 @@ REGISTER_TYPED_TEST_SUITE_P(CodeLocationForTYPEDTESTP, Verify); INSTANTIATE_TYPED_TEST_SUITE_P(My, CodeLocationForTYPEDTESTP, int); #undef VERIFY_CODE_LOCATION +// clang-format on // Tests setting up and tearing down a test case. // Legacy API is deprecated but still available @@ -5476,9 +5379,7 @@ const char* SetUpTestCaseTest::shared_resource_ = nullptr; TEST_F(SetUpTestCaseTest, Test1) { EXPECT_STRNE(nullptr, shared_resource_); } // Another test that uses the shared resource. -TEST_F(SetUpTestCaseTest, Test2) { - EXPECT_STREQ("123", shared_resource_); -} +TEST_F(SetUpTestCaseTest, Test2) { EXPECT_STREQ("123", shared_resource_); } #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ // Tests SetupTestSuite/TearDown TestSuite @@ -5791,22 +5692,22 @@ class ParseFlagsTest : public Test { // verifies that the flag values are expected and that the // recognized flags are removed from the command line. template - static void TestParsingFlags(int argc1, const CharType** argv1, - int argc2, const CharType** argv2, - const Flags& expected, bool should_print_help) { + static void TestParsingFlags(int argc1, const CharType** argv1, int argc2, + const CharType** argv2, const Flags& expected, + bool should_print_help) { const bool saved_help_flag = ::testing::internal::g_help_flag; ::testing::internal::g_help_flag = false; -# if GTEST_HAS_STREAM_REDIRECTION +#if GTEST_HAS_STREAM_REDIRECTION CaptureStdout(); -# endif +#endif // Parses the command line. internal::ParseGoogleTestFlagsOnly(&argc1, const_cast(argv1)); -# if GTEST_HAS_STREAM_REDIRECTION +#if GTEST_HAS_STREAM_REDIRECTION const std::string captured_stdout = GetCapturedStdout(); -# endif +#endif // Verifies the flag values. CheckFlags(expected); @@ -5819,16 +5720,16 @@ class ParseFlagsTest : public Test { // help message for the flags it recognizes. 
EXPECT_EQ(should_print_help, ::testing::internal::g_help_flag); -# if GTEST_HAS_STREAM_REDIRECTION +#if GTEST_HAS_STREAM_REDIRECTION const char* const expected_help_fragment = "This program contains tests written using"; if (should_print_help) { EXPECT_PRED_FORMAT2(IsSubstring, expected_help_fragment, captured_stdout); } else { - EXPECT_PRED_FORMAT2(IsNotSubstring, - expected_help_fragment, captured_stdout); + EXPECT_PRED_FORMAT2(IsNotSubstring, expected_help_fragment, + captured_stdout); } -# endif // GTEST_HAS_STREAM_REDIRECTION +#endif // GTEST_HAS_STREAM_REDIRECTION ::testing::internal::g_help_flag = saved_help_flag; } @@ -5836,10 +5737,10 @@ class ParseFlagsTest : public Test { // This macro wraps TestParsingFlags s.t. the user doesn't need // to specify the array sizes. -# define GTEST_TEST_PARSING_FLAGS_(argv1, argv2, expected, should_print_help) \ - TestParsingFlags(sizeof(argv1)/sizeof(*argv1) - 1, argv1, \ - sizeof(argv2)/sizeof(*argv2) - 1, argv2, \ - expected, should_print_help) +#define GTEST_TEST_PARSING_FLAGS_(argv1, argv2, expected, should_print_help) \ + TestParsingFlags(sizeof(argv1) / sizeof(*argv1) - 1, argv1, \ + sizeof(argv2) / sizeof(*argv2) - 1, argv2, expected, \ + should_print_help) }; // Tests parsing an empty command line. @@ -5869,15 +5770,6 @@ TEST_F(ParseFlagsTest, FailFast) { GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::FailFast(true), false); } -// Tests parsing a bad --gtest_filter flag. -TEST_F(ParseFlagsTest, FilterBad) { - const char* argv[] = {"foo.exe", "--gtest_filter", nullptr}; - - const char* argv2[] = {"foo.exe", "--gtest_filter", nullptr}; - - GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), true); -} - // Tests parsing an empty --gtest_filter flag. 
TEST_F(ParseFlagsTest, FilterEmpty) { const char* argv[] = {"foo.exe", "--gtest_filter=", nullptr}; @@ -6030,15 +5922,6 @@ TEST_F(ParseFlagsTest, ListTestsFalse_F) { GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false); } -// Tests parsing --gtest_output (invalid). -TEST_F(ParseFlagsTest, OutputEmpty) { - const char* argv[] = {"foo.exe", "--gtest_output", nullptr}; - - const char* argv2[] = {"foo.exe", "--gtest_output", nullptr}; - - GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), true); -} - // Tests parsing --gtest_output=xml TEST_F(ParseFlagsTest, OutputXml) { const char* argv[] = {"foo.exe", "--gtest_output=xml", nullptr}; @@ -6064,8 +5947,8 @@ TEST_F(ParseFlagsTest, OutputXmlDirectory) { const char* argv2[] = {"foo.exe", nullptr}; - GTEST_TEST_PARSING_FLAGS_(argv, argv2, - Flags::Output("xml:directory/path/"), false); + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml:directory/path/"), + false); } // Tests having a --gtest_brief flag @@ -6246,8 +6129,8 @@ TEST_F(ParseFlagsTest, StreamResultTo) { const char* argv2[] = {"foo.exe", nullptr}; - GTEST_TEST_PARSING_FLAGS_( - argv, argv2, Flags::StreamResultTo("localhost:1234"), false); + GTEST_TEST_PARSING_FLAGS_(argv, argv2, + Flags::StreamResultTo("localhost:1234"), false); } // Tests parsing --gtest_throw_on_failure. @@ -6278,23 +6161,69 @@ TEST_F(ParseFlagsTest, ThrowOnFailureTrue) { GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true), false); } -# if GTEST_OS_WINDOWS +// Tests parsing a bad --gtest_filter flag. +TEST_F(ParseFlagsTest, FilterBad) { + const char* argv[] = {"foo.exe", "--gtest_filter", nullptr}; + + const char* argv2[] = {"foo.exe", "--gtest_filter", nullptr}; + +#if GTEST_HAS_ABSL && GTEST_HAS_DEATH_TEST + // Invalid flag arguments are a fatal error when using the Abseil Flags. 
+ EXPECT_EXIT(GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), true), + testing::ExitedWithCode(1), + "ERROR: Missing the value for the flag 'gtest_filter'"); +#elif !GTEST_HAS_ABSL + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), true); +#else + static_cast(argv); + static_cast(argv2); +#endif +} + +// Tests parsing --gtest_output (invalid). +TEST_F(ParseFlagsTest, OutputEmpty) { + const char* argv[] = {"foo.exe", "--gtest_output", nullptr}; + + const char* argv2[] = {"foo.exe", "--gtest_output", nullptr}; + +#if GTEST_HAS_ABSL && GTEST_HAS_DEATH_TEST + // Invalid flag arguments are a fatal error when using the Abseil Flags. + EXPECT_EXIT(GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), true), + testing::ExitedWithCode(1), + "ERROR: Missing the value for the flag 'gtest_output'"); +#elif !GTEST_HAS_ABSL + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), true); +#else + static_cast(argv); + static_cast(argv2); +#endif +} + +#if GTEST_HAS_ABSL +TEST_F(ParseFlagsTest, AbseilPositionalFlags) { + const char* argv[] = {"foo.exe", "--gtest_throw_on_failure=1", "--", + "--other_flag", nullptr}; + + // When using Abseil flags, it should be possible to pass flags not recognized + // using "--" to delimit positional arguments. These flags should be returned + // though argv. + const char* argv2[] = {"foo.exe", "--other_flag", nullptr}; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true), false); +} +#endif + +#if GTEST_OS_WINDOWS // Tests parsing wide strings. 
TEST_F(ParseFlagsTest, WideStrings) { - const wchar_t* argv[] = { - L"foo.exe", - L"--gtest_filter=Foo*", - L"--gtest_list_tests=1", - L"--gtest_break_on_failure", - L"--non_gtest_flag", - NULL - }; + const wchar_t* argv[] = {L"foo.exe", + L"--gtest_filter=Foo*", + L"--gtest_list_tests=1", + L"--gtest_break_on_failure", + L"--non_gtest_flag", + NULL}; - const wchar_t* argv2[] = { - L"foo.exe", - L"--non_gtest_flag", - NULL - }; + const wchar_t* argv2[] = {L"foo.exe", L"--non_gtest_flag", NULL}; Flags expected_flags; expected_flags.break_on_failure = true; @@ -6303,7 +6232,7 @@ TEST_F(ParseFlagsTest, WideStrings) { GTEST_TEST_PARSING_FLAGS_(argv, argv2, expected_flags, false); } -# endif // GTEST_OS_WINDOWS +#endif // GTEST_OS_WINDOWS #if GTEST_USE_OWN_FLAGFILE_FLAG_ class FlagfileTest : public ParseFlagsTest { @@ -6351,8 +6280,8 @@ TEST_F(FlagfileTest, Empty) { // Tests passing a non-empty --gtest_filter flag via --gtest_flagfile. TEST_F(FlagfileTest, FilterNonEmpty) { - internal::FilePath flagfile_path(CreateFlagfile( - "--" GTEST_FLAG_PREFIX_ "filter=abc")); + internal::FilePath flagfile_path( + CreateFlagfile("--" GTEST_FLAG_PREFIX_ "filter=abc")); std::string flagfile_flag = std::string("--" GTEST_FLAG_PREFIX_ "flagfile=") + flagfile_path.c_str(); @@ -6365,10 +6294,10 @@ TEST_F(FlagfileTest, FilterNonEmpty) { // Tests passing several flags via --gtest_flagfile. TEST_F(FlagfileTest, SeveralFlags) { - internal::FilePath flagfile_path(CreateFlagfile( - "--" GTEST_FLAG_PREFIX_ "filter=abc\n" - "--" GTEST_FLAG_PREFIX_ "break_on_failure\n" - "--" GTEST_FLAG_PREFIX_ "list_tests")); + internal::FilePath flagfile_path( + CreateFlagfile("--" GTEST_FLAG_PREFIX_ "filter=abc\n" + "--" GTEST_FLAG_PREFIX_ "break_on_failure\n" + "--" GTEST_FLAG_PREFIX_ "list_tests")); std::string flagfile_flag = std::string("--" GTEST_FLAG_PREFIX_ "flagfile=") + flagfile_path.c_str(); @@ -6392,8 +6321,7 @@ class CurrentTestInfoTest : public Test { // the test case is run. 
static void SetUpTestSuite() { // There should be no tests running at this point. - const TestInfo* test_info = - UnitTest::GetInstance()->current_test_info(); + const TestInfo* test_info = UnitTest::GetInstance()->current_test_info(); EXPECT_TRUE(test_info == nullptr) << "There should be no tests running at this point."; } @@ -6401,8 +6329,7 @@ class CurrentTestInfoTest : public Test { // Tests that current_test_info() returns NULL after the last test in // the test case has run. static void TearDownTestSuite() { - const TestInfo* test_info = - UnitTest::GetInstance()->current_test_info(); + const TestInfo* test_info = UnitTest::GetInstance()->current_test_info(); EXPECT_TRUE(test_info == nullptr) << "There should be no tests running at this point."; } @@ -6411,8 +6338,7 @@ class CurrentTestInfoTest : public Test { // Tests that current_test_info() returns TestInfo for currently running // test by checking the expected test name against the actual one. TEST_F(CurrentTestInfoTest, WorksForFirstTestInATestSuite) { - const TestInfo* test_info = - UnitTest::GetInstance()->current_test_info(); + const TestInfo* test_info = UnitTest::GetInstance()->current_test_info(); ASSERT_TRUE(nullptr != test_info) << "There is a test running so we should have a valid TestInfo."; EXPECT_STREQ("CurrentTestInfoTest", test_info->test_suite_name()) @@ -6426,8 +6352,7 @@ TEST_F(CurrentTestInfoTest, WorksForFirstTestInATestSuite) { // use this test to see that the TestInfo object actually changed from // the previous invocation. 
TEST_F(CurrentTestInfoTest, WorksForSecondTestInATestSuite) { - const TestInfo* test_info = - UnitTest::GetInstance()->current_test_info(); + const TestInfo* test_info = UnitTest::GetInstance()->current_test_info(); ASSERT_TRUE(nullptr != test_info) << "There is a test running so we should have a valid TestInfo."; EXPECT_STREQ("CurrentTestInfoTest", test_info->test_suite_name()) @@ -6438,7 +6363,6 @@ TEST_F(CurrentTestInfoTest, WorksForSecondTestInATestSuite) { } // namespace testing - // These two lines test that we can define tests in a namespace that // has the name "testing" and is nested in another namespace. namespace my_namespace { @@ -6487,13 +6411,12 @@ TEST(StreamingAssertionsTest, Unconditional) { SUCCEED() << "expected success"; EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "expected failure", "expected failure"); - EXPECT_FATAL_FAILURE(FAIL() << "expected failure", - "expected failure"); + EXPECT_FATAL_FAILURE(FAIL() << "expected failure", "expected failure"); } #ifdef __BORLANDC__ // Silences warnings: "Condition is always true", "Unreachable code" -# pragma option push -w-ccc -w-rch +#pragma option push -w-ccc -w-rch #endif TEST(StreamingAssertionsTest, Truth) { @@ -6516,7 +6439,7 @@ TEST(StreamingAssertionsTest, Truth2) { #ifdef __BORLANDC__ // Restores warnings after previous "#pragma option push" suppressed them -# pragma option pop +#pragma option pop #endif TEST(StreamingAssertionsTest, IntegerEquals) { @@ -6587,28 +6510,32 @@ TEST(StreamingAssertionsTest, FloatingPointEquals) { TEST(StreamingAssertionsTest, Throw) { EXPECT_THROW(ThrowAnInteger(), int) << "unexpected failure"; ASSERT_THROW(ThrowAnInteger(), int) << "unexpected failure"; - EXPECT_NONFATAL_FAILURE(EXPECT_THROW(ThrowAnInteger(), bool) << - "expected failure", "expected failure"); - EXPECT_FATAL_FAILURE(ASSERT_THROW(ThrowAnInteger(), bool) << - "expected failure", "expected failure"); + EXPECT_NONFATAL_FAILURE(EXPECT_THROW(ThrowAnInteger(), bool) + << "expected failure", + "expected 
failure"); + EXPECT_FATAL_FAILURE(ASSERT_THROW(ThrowAnInteger(), bool) + << "expected failure", + "expected failure"); } TEST(StreamingAssertionsTest, NoThrow) { EXPECT_NO_THROW(ThrowNothing()) << "unexpected failure"; ASSERT_NO_THROW(ThrowNothing()) << "unexpected failure"; - EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW(ThrowAnInteger()) << - "expected failure", "expected failure"); - EXPECT_FATAL_FAILURE(ASSERT_NO_THROW(ThrowAnInteger()) << - "expected failure", "expected failure"); + EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW(ThrowAnInteger()) + << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_NO_THROW(ThrowAnInteger()) << "expected failure", + "expected failure"); } TEST(StreamingAssertionsTest, AnyThrow) { EXPECT_ANY_THROW(ThrowAnInteger()) << "unexpected failure"; ASSERT_ANY_THROW(ThrowAnInteger()) << "unexpected failure"; - EXPECT_NONFATAL_FAILURE(EXPECT_ANY_THROW(ThrowNothing()) << - "expected failure", "expected failure"); - EXPECT_FATAL_FAILURE(ASSERT_ANY_THROW(ThrowNothing()) << - "expected failure", "expected failure"); + EXPECT_NONFATAL_FAILURE(EXPECT_ANY_THROW(ThrowNothing()) + << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_ANY_THROW(ThrowNothing()) << "expected failure", + "expected failure"); } #endif // GTEST_HAS_EXCEPTIONS @@ -6618,12 +6545,12 @@ TEST(StreamingAssertionsTest, AnyThrow) { TEST(ColoredOutputTest, UsesColorsWhenGTestColorFlagIsYes) { GTEST_FLAG_SET(color, "yes"); - SetEnv("TERM", "xterm"); // TERM supports colors. - EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. + SetEnv("TERM", "xterm"); // TERM supports colors. + EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. - SetEnv("TERM", "dumb"); // TERM doesn't support colors. - EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. + SetEnv("TERM", "dumb"); // TERM doesn't support colors. + EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. 
EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. } @@ -6643,12 +6570,12 @@ TEST(ColoredOutputTest, UsesColorsWhenGTestColorFlagIsAliasOfYes) { TEST(ColoredOutputTest, UsesNoColorWhenGTestColorFlagIsNo) { GTEST_FLAG_SET(color, "no"); - SetEnv("TERM", "xterm"); // TERM supports colors. - EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. + SetEnv("TERM", "xterm"); // TERM supports colors. + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. EXPECT_FALSE(ShouldUseColor(false)); // Stdout is not a TTY. - SetEnv("TERM", "dumb"); // TERM doesn't support colors. - EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. + SetEnv("TERM", "dumb"); // TERM doesn't support colors. + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. EXPECT_FALSE(ShouldUseColor(false)); // Stdout is not a TTY. } @@ -6668,7 +6595,7 @@ TEST(ColoredOutputTest, UsesNoColorWhenGTestColorFlagIsInvalid) { TEST(ColoredOutputTest, UsesColorsWhenStdoutIsTty) { GTEST_FLAG_SET(color, "auto"); - SetEnv("TERM", "xterm"); // TERM supports colors. + SetEnv("TERM", "xterm"); // TERM supports colors. EXPECT_FALSE(ShouldUseColor(false)); // Stdout is not a TTY. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. } @@ -6691,49 +6618,49 @@ TEST(ColoredOutputTest, UsesColorsWhenTermSupportsColors) { // On non-Windows platforms, we rely on TERM to determine if the // terminal supports colors. - SetEnv("TERM", "dumb"); // TERM doesn't support colors. + SetEnv("TERM", "dumb"); // TERM doesn't support colors. EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "emacs"); // TERM doesn't support colors. + SetEnv("TERM", "emacs"); // TERM doesn't support colors. EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "vt100"); // TERM doesn't support colors. + SetEnv("TERM", "vt100"); // TERM doesn't support colors. EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "xterm-mono"); // TERM doesn't support colors. 
+ SetEnv("TERM", "xterm-mono"); // TERM doesn't support colors. EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "xterm"); // TERM supports colors. + SetEnv("TERM", "xterm"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "xterm-color"); // TERM supports colors. + SetEnv("TERM", "xterm-color"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "xterm-256color"); // TERM supports colors. + SetEnv("TERM", "xterm-256color"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "screen"); // TERM supports colors. + SetEnv("TERM", "screen"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. SetEnv("TERM", "screen-256color"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "tmux"); // TERM supports colors. + SetEnv("TERM", "tmux"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "tmux-256color"); // TERM supports colors. + SetEnv("TERM", "tmux-256color"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "rxvt-unicode"); // TERM supports colors. + SetEnv("TERM", "rxvt-unicode"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. SetEnv("TERM", "rxvt-unicode-256color"); // TERM supports colors. + EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. + + SetEnv("TERM", "linux"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - SetEnv("TERM", "linux"); // TERM supports colors. - EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. - - SetEnv("TERM", "cygwin"); // TERM supports colors. + SetEnv("TERM", "cygwin"); // TERM supports colors. EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. 
#endif // GTEST_OS_WINDOWS } @@ -6853,12 +6780,10 @@ class TestListener : public EmptyTestEventListener { public: TestListener() : on_start_counter_(nullptr), is_destroyed_(nullptr) {} TestListener(int* on_start_counter, bool* is_destroyed) - : on_start_counter_(on_start_counter), - is_destroyed_(is_destroyed) {} + : on_start_counter_(on_start_counter), is_destroyed_(is_destroyed) {} ~TestListener() override { - if (is_destroyed_) - *is_destroyed_ = true; + if (is_destroyed_) *is_destroyed_ = true; } protected: @@ -6915,8 +6840,8 @@ TEST(TestEventListenersTest, Append) { { TestEventListeners listeners; listeners.Append(listener); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(1, on_start_counter); } EXPECT_TRUE(is_destroyed); @@ -6959,7 +6884,8 @@ class SequenceTestingListener : public EmptyTestEventListener { std::vector* vector_; const char* const id_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(SequenceTestingListener); + SequenceTestingListener(const SequenceTestingListener&) = delete; + SequenceTestingListener& operator=(const SequenceTestingListener&) = delete; }; TEST(EventListenerTest, AppendKeepsOrder) { @@ -6969,32 +6895,32 @@ TEST(EventListenerTest, AppendKeepsOrder) { listeners.Append(new SequenceTestingListener(&vec, "2nd")); listeners.Append(new SequenceTestingListener(&vec, "3rd")); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); ASSERT_EQ(3U, vec.size()); EXPECT_STREQ("1st.OnTestProgramStart", vec[0].c_str()); EXPECT_STREQ("2nd.OnTestProgramStart", vec[1].c_str()); EXPECT_STREQ("3rd.OnTestProgramStart", vec[2].c_str()); vec.clear(); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramEnd( - *UnitTest::GetInstance()); + 
TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramEnd(*UnitTest::GetInstance()); ASSERT_EQ(3U, vec.size()); EXPECT_STREQ("3rd.OnTestProgramEnd", vec[0].c_str()); EXPECT_STREQ("2nd.OnTestProgramEnd", vec[1].c_str()); EXPECT_STREQ("1st.OnTestProgramEnd", vec[2].c_str()); vec.clear(); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestIterationStart( - *UnitTest::GetInstance(), 0); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestIterationStart(*UnitTest::GetInstance(), 0); ASSERT_EQ(3U, vec.size()); EXPECT_STREQ("1st.OnTestIterationStart", vec[0].c_str()); EXPECT_STREQ("2nd.OnTestIterationStart", vec[1].c_str()); EXPECT_STREQ("3rd.OnTestIterationStart", vec[2].c_str()); vec.clear(); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestIterationEnd( - *UnitTest::GetInstance(), 0); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestIterationEnd(*UnitTest::GetInstance(), 0); ASSERT_EQ(3U, vec.size()); EXPECT_STREQ("3rd.OnTestIterationEnd", vec[0].c_str()); EXPECT_STREQ("2nd.OnTestIterationEnd", vec[1].c_str()); @@ -7014,8 +6940,8 @@ TEST(TestEventListenersTest, Release) { TestEventListeners listeners; listeners.Append(listener); EXPECT_EQ(listener, listeners.Release(listener)); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_TRUE(listeners.Release(listener) == nullptr); } EXPECT_EQ(0, on_start_counter); @@ -7033,17 +6959,20 @@ TEST(EventListenerTest, SuppressEventForwarding) { ASSERT_TRUE(TestEventListenersAccessor::EventForwardingEnabled(listeners)); TestEventListenersAccessor::SuppressEventForwarding(&listeners); ASSERT_FALSE(TestEventListenersAccessor::EventForwardingEnabled(listeners)); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + 
TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(0, on_start_counter); } // Tests that events generated by Google Test are not forwarded in // death test subprocesses. TEST(EventListenerDeathTest, EventsNotForwardedInDeathTestSubprecesses) { - EXPECT_DEATH_IF_SUPPORTED({ - GTEST_CHECK_(TestEventListenersAccessor::EventForwardingEnabled( - *GetUnitTestImpl()->listeners())) << "expected failure";}, + EXPECT_DEATH_IF_SUPPORTED( + { + GTEST_CHECK_(TestEventListenersAccessor::EventForwardingEnabled( + *GetUnitTestImpl()->listeners())) + << "expected failure"; + }, "expected failure"); } @@ -7060,8 +6989,8 @@ TEST(EventListenerTest, default_result_printer) { EXPECT_EQ(listener, listeners.default_result_printer()); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(1, on_start_counter); @@ -7074,8 +7003,8 @@ TEST(EventListenerTest, default_result_printer) { // After broadcasting an event the counter is still the same, indicating // the listener is not in the list anymore. - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(1, on_start_counter); } @@ -7097,8 +7026,8 @@ TEST(EventListenerTest, RemovingDefaultResultPrinterWorks) { EXPECT_FALSE(is_destroyed); // Broadcasting events now should not affect default_result_printer. - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(0, on_start_counter); } // Destroying the list should not affect the listener now, too. 
@@ -7119,8 +7048,8 @@ TEST(EventListenerTest, default_xml_generator) { EXPECT_EQ(listener, listeners.default_xml_generator()); - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(1, on_start_counter); @@ -7133,8 +7062,8 @@ TEST(EventListenerTest, default_xml_generator) { // After broadcasting an event the counter is still the same, indicating // the listener is not in the list anymore. - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(1, on_start_counter); } @@ -7156,8 +7085,8 @@ TEST(EventListenerTest, RemovingDefaultXmlGeneratorWorks) { EXPECT_FALSE(is_destroyed); // Broadcasting events now should not affect default_xml_generator. - TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( - *UnitTest::GetInstance()); + TestEventListenersAccessor::GetRepeater(&listeners) + ->OnTestProgramStart(*UnitTest::GetInstance()); EXPECT_EQ(0, on_start_counter); } // Destroying the list should not affect the listener now, too. @@ -7165,7 +7094,7 @@ TEST(EventListenerTest, RemovingDefaultXmlGeneratorWorks) { delete listener; } -// Sanity tests to ensure that the alternative, verbose spellings of +// Tests to ensure that the alternative, verbose spellings of // some of the macros work. We don't test them thoroughly as that // would be quite involved. Since their implementations are // straightforward, and they are rarely used, we'll just rely on the @@ -7245,28 +7174,26 @@ struct IncompleteType; // Tests that HasDebugStringAndShortDebugString::value is a compile-time // constant. 
TEST(HasDebugStringAndShortDebugStringTest, ValueIsCompileTimeConstant) { - GTEST_COMPILE_ASSERT_( - HasDebugStringAndShortDebugString::value, - const_true); - GTEST_COMPILE_ASSERT_( + static_assert(HasDebugStringAndShortDebugString::value, + "const_true"); + static_assert( HasDebugStringAndShortDebugString::value, - const_true); - GTEST_COMPILE_ASSERT_(HasDebugStringAndShortDebugString< - const InheritsDebugStringMethods>::value, - const_true); - GTEST_COMPILE_ASSERT_( + "const_true"); + static_assert(HasDebugStringAndShortDebugString< + const InheritsDebugStringMethods>::value, + "const_true"); + static_assert( !HasDebugStringAndShortDebugString::value, - const_false); - GTEST_COMPILE_ASSERT_( + "const_false"); + static_assert( !HasDebugStringAndShortDebugString::value, - const_false); - GTEST_COMPILE_ASSERT_( + "const_false"); + static_assert( !HasDebugStringAndShortDebugString::value, - const_false); - GTEST_COMPILE_ASSERT_( - !HasDebugStringAndShortDebugString::value, const_false); - GTEST_COMPILE_ASSERT_(!HasDebugStringAndShortDebugString::value, - const_false); + "const_false"); + static_assert(!HasDebugStringAndShortDebugString::value, + "const_false"); + static_assert(!HasDebugStringAndShortDebugString::value, "const_false"); } // Tests that HasDebugStringAndShortDebugString::value is true when T has @@ -7317,7 +7244,6 @@ TEST(GTestReferenceToConstTest, Works) { TestGTestReferenceToConst(); } - // Tests IsContainerTest. 
class NonContainer {}; @@ -7329,10 +7255,9 @@ TEST(IsContainerTestTest, WorksForNonContainer) { } TEST(IsContainerTestTest, WorksForContainer) { + EXPECT_EQ(sizeof(IsContainer), sizeof(IsContainerTest>(0))); EXPECT_EQ(sizeof(IsContainer), - sizeof(IsContainerTest >(0))); - EXPECT_EQ(sizeof(IsContainer), - sizeof(IsContainerTest >(0))); + sizeof(IsContainerTest>(0))); } struct ConstOnlyContainerWithPointerIterator { @@ -7381,8 +7306,8 @@ TEST(ArrayEqTest, WorksForDegeneratedArrays) { TEST(ArrayEqTest, WorksForOneDimensionalArrays) { // Note that a and b are distinct but compatible types. - const int a[] = { 0, 1 }; - long b[] = { 0, 1 }; + const int a[] = {0, 1}; + long b[] = {0, 1}; EXPECT_TRUE(ArrayEq(a, b)); EXPECT_TRUE(ArrayEq(a, 2, b)); @@ -7392,9 +7317,9 @@ TEST(ArrayEqTest, WorksForOneDimensionalArrays) { } TEST(ArrayEqTest, WorksForTwoDimensionalArrays) { - const char a[][3] = { "hi", "lo" }; - const char b[][3] = { "hi", "lo" }; - const char c[][3] = { "hi", "li" }; + const char a[][3] = {"hi", "lo"}; + const char b[][3] = {"hi", "lo"}; + const char c[][3] = {"hi", "li"}; EXPECT_TRUE(ArrayEq(a, b)); EXPECT_TRUE(ArrayEq(a, 2, b)); @@ -7412,11 +7337,11 @@ TEST(ArrayAwareFindTest, WorksForOneDimensionalArray) { } TEST(ArrayAwareFindTest, WorksForTwoDimensionalArray) { - int a[][2] = { { 0, 1 }, { 2, 3 }, { 4, 5 } }; - const int b[2] = { 2, 3 }; + int a[][2] = {{0, 1}, {2, 3}, {4, 5}}; + const int b[2] = {2, 3}; EXPECT_EQ(a + 1, ArrayAwareFind(a, a + 3, b)); - const int c[2] = { 6, 7 }; + const int c[2] = {6, 7}; EXPECT_EQ(a + 3, ArrayAwareFind(a, a + 3, c)); } @@ -7442,7 +7367,7 @@ TEST(CopyArrayTest, WorksForOneDimensionalArrays) { } TEST(CopyArrayTest, WorksForTwoDimensionalArrays) { - const int a[2][3] = { { 0, 1, 2 }, { 3, 4, 5 } }; + const int a[2][3] = {{0, 1, 2}, {3, 4, 5}}; int b[2][3]; #ifndef __BORLANDC__ // C++Builder cannot compile some array size deductions. 
CopyArray(a, &b); @@ -7457,7 +7382,7 @@ TEST(CopyArrayTest, WorksForTwoDimensionalArrays) { // Tests NativeArray. TEST(NativeArrayTest, ConstructorFromArrayWorks) { - const int a[3] = { 0, 1, 2 }; + const int a[3] = {0, 1, 2}; NativeArray na(a, 3, RelationToSourceReference()); EXPECT_EQ(3U, na.size()); EXPECT_EQ(a, na.begin()); @@ -7487,7 +7412,7 @@ TEST(NativeArrayTest, TypeMembersAreCorrect) { } TEST(NativeArrayTest, MethodsWork) { - const int a[3] = { 0, 1, 2 }; + const int a[3] = {0, 1, 2}; NativeArray na(a, 3, RelationToSourceCopy()); ASSERT_EQ(3U, na.size()); EXPECT_EQ(3, na.end() - na.begin()); @@ -7506,14 +7431,14 @@ TEST(NativeArrayTest, MethodsWork) { NativeArray na2(a, 3, RelationToSourceReference()); EXPECT_TRUE(na == na2); - const int b1[3] = { 0, 1, 1 }; - const int b2[4] = { 0, 1, 2, 3 }; + const int b1[3] = {0, 1, 1}; + const int b2[4] = {0, 1, 2, 3}; EXPECT_FALSE(na == NativeArray(b1, 3, RelationToSourceReference())); EXPECT_FALSE(na == NativeArray(b2, 4, RelationToSourceCopy())); } TEST(NativeArrayTest, WorksForTwoDimensionalArray) { - const char a[2][3] = { "hi", "lo" }; + const char a[2][3] = {"hi", "lo"}; NativeArray na(a, 2, RelationToSourceReference()); ASSERT_EQ(2U, na.size()); EXPECT_EQ(a, na.begin()); @@ -7793,3 +7718,35 @@ TEST(RegisterTest, WasRegistered) { FAIL() << "Didn't find the test!"; } + +// Test that the pattern globbing algorithm is linear. If not, this test should +// time out. 
+TEST(PatternGlobbingTest, MatchesFilterLinearRuntime) { + std::string name(100, 'a'); // Construct the string (a^100)b + name.push_back('b'); + + std::string pattern; // Construct the string ((a*)^100)b + for (int i = 0; i < 100; ++i) { + pattern.append("a*"); + } + pattern.push_back('b'); + + EXPECT_TRUE( + testing::internal::UnitTestOptions::MatchesFilter(name, pattern.c_str())); +} + +TEST(PatternGlobbingTest, MatchesFilterWithMultiplePatterns) { + const std::string name = "aaaa"; + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter(name, "a*")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter(name, "a*:")); + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter(name, "ab")); + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter(name, "ab:")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter(name, "ab:a*")); +} + +TEST(PatternGlobbingTest, MatchesFilterEdgeCases) { + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter("", "*a")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter("", "*")); + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter("a", "")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter("", "")); +} diff --git a/ext/googletest/googletest/test/gtest_xml_outfiles_test.py b/ext/googletest/googletest/test/gtest_xml_outfiles_test.py index ac66feb667..c129e64b90 100755 --- a/ext/googletest/googletest/test/gtest_xml_outfiles_test.py +++ b/ext/googletest/googletest/test/gtest_xml_outfiles_test.py @@ -33,8 +33,8 @@ import os from xml.dom import minidom, Node -import gtest_test_utils -import gtest_xml_test_utils +from googletest.test import gtest_test_utils +from googletest.test import gtest_xml_test_utils GTEST_OUTPUT_SUBDIR = "xml_outfiles" GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_" @@ -43,7 +43,7 @@ GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_" EXPECTED_XML_1 = """ - + @@ -57,7 +57,7 @@ EXPECTED_XML_1 = """ EXPECTED_XML_2 = """ - + 
diff --git a/ext/googletest/googletest/test/gtest_xml_output_unittest.py b/ext/googletest/googletest/test/gtest_xml_output_unittest.py index eade7aac88..e1b7f1fc83 100755 --- a/ext/googletest/googletest/test/gtest_xml_output_unittest.py +++ b/ext/googletest/googletest/test/gtest_xml_output_unittest.py @@ -38,8 +38,8 @@ import re import sys from xml.dom import minidom, Node -import gtest_test_utils -import gtest_xml_test_utils +from googletest.test import gtest_test_utils +from googletest.test import gtest_xml_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' @@ -67,10 +67,10 @@ else: EXPECTED_NON_EMPTY_XML = """ - + - + - - + + - + - + ]]>%(stack)s]]> - + - + - + - + - + - + - + - + - + - + - + - + - - - - + + + + - + - + - + - + """ % { 'stack': STACK_TRACE_TEMPLATE @@ -195,24 +195,24 @@ EXPECTED_FILTERED_TEST_XML = """ timestamp="*" name="AllTests" ad_hoc_property="42"> - + """ EXPECTED_SHARDED_TEST_XML = """ - + - + - + """ diff --git a/ext/googletest/googletest/test/gtest_xml_output_unittest_.cc b/ext/googletest/googletest/test/gtest_xml_output_unittest_.cc index c0036aaef9..4bdb0c7e8a 100644 --- a/ext/googletest/googletest/test/gtest_xml_output_unittest_.cc +++ b/ext/googletest/googletest/test/gtest_xml_output_unittest_.cc @@ -35,18 +35,18 @@ // // This program will be invoked from a Python unit test. Don't run it // directly. 
+// clang-format off #include "gtest/gtest.h" using ::testing::InitGoogleTest; +using ::testing::Test; using ::testing::TestEventListeners; using ::testing::TestWithParam; using ::testing::UnitTest; -using ::testing::Test; using ::testing::Values; -class SuccessfulTest : public Test { -}; +class SuccessfulTest : public Test {}; TEST_F(SuccessfulTest, Succeeds) { SUCCEED() << "This is a success."; @@ -191,3 +191,5 @@ int main(int argc, char** argv) { testing::Test::RecordProperty("ad_hoc_property", "42"); return RUN_ALL_TESTS(); } + +// clang-format on diff --git a/ext/googletest/googletest/test/gtest_xml_test_utils.py b/ext/googletest/googletest/test/gtest_xml_test_utils.py index ec42c62c3b..c6fb9f4438 100755 --- a/ext/googletest/googletest/test/gtest_xml_test_utils.py +++ b/ext/googletest/googletest/test/gtest_xml_test_utils.py @@ -31,7 +31,7 @@ import re from xml.dom import minidom, Node -import gtest_test_utils +from googletest.test import gtest_test_utils GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' @@ -170,6 +170,10 @@ class GTestXMLTestCase(gtest_test_utils.TestCase): * The stack traces are removed. 
""" + if element.tagName == 'testcase': + source_file = element.getAttributeNode('file') + if source_file: + source_file.value = re.sub(r'^.*[/\\](.*)', '\\1', source_file.value) if element.tagName in ('testsuites', 'testsuite', 'testcase'): timestamp = element.getAttributeNode('timestamp') timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d\d\d$', diff --git a/ext/googletest/googletest/test/production.h b/ext/googletest/googletest/test/production.h index 41a5472254..4dec8d46e2 100644 --- a/ext/googletest/googletest/test/production.h +++ b/ext/googletest/googletest/test/production.h @@ -46,6 +46,7 @@ class PrivateCode { PrivateCode(); int x() const { return x_; } + private: void set_x(int an_x) { x_ = an_x; } int x_; diff --git a/ext/sst/README.md b/ext/sst/README.md index d9bbe85e1b..49f56349bb 100644 --- a/ext/sst/README.md +++ b/ext/sst/README.md @@ -62,7 +62,7 @@ See `INSTALL.md`. Downloading the built bootloader containing a Linux Kernel and a workload, ```sh -wget http://dist.gem5.org/dist/v22-0/misc/riscv/bbl-busybox-boot-exit +wget http://dist.gem5.org/dist/v22-1/misc/riscv/bbl-busybox-boot-exit ``` Running the simulation @@ -87,7 +87,7 @@ extract them under the $M5_PATH directory (make sure M5_PATH points to a valid directory): ```sh -wget http://dist.gem5.org/dist/v22-0/arm/aarch-sst-20211207.tar.bz2 +wget http://dist.gem5.org/dist/v22-1/arm/aarch-sst-20211207.tar.bz2 tar -xf aarch-sst-20211207.tar.bz2 # copying bootloaders diff --git a/ext/testlib/configuration.py b/ext/testlib/configuration.py index cc40b0d17f..97c637687d 100644 --- a/ext/testlib/configuration.py +++ b/ext/testlib/configuration.py @@ -213,7 +213,7 @@ def define_defaults(defaults): os.pardir, os.pardir)) defaults.result_path = os.path.join(os.getcwd(), 'testing-results') - defaults.resource_url = 'http://dist.gem5.org/dist/v22-0' + defaults.resource_url = 'http://dist.gem5.org/dist/v22-1' defaults.resource_path = os.path.abspath(os.path.join(defaults.base_dir, 'tests', 'gem5', 
diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..a06d464de7 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[tool.black] +line-length = 79 +include = '\.pyi?$' diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..561cab79cf --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +pre-commit==2.20.0 diff --git a/site_scons/gem5_python_paths.py b/site_scons/gem5_python_paths.py index 2e51e2f8ed..26e9594706 100644 --- a/site_scons/gem5_python_paths.py +++ b/site_scons/gem5_python_paths.py @@ -41,13 +41,13 @@ import SCons.Node.FS fs = SCons.Node.FS.get_default_fs() -root = fs.Dir('#') +root = fs.Dir("#") extra_python_nodes = [ - root.Dir('src').Dir('python').srcnode(), # gem5 includes - root.Dir('ext').Dir('ply').srcnode(), # ply is used by several files - root.Dir('ext').Dir('Kconfiglib').Dir('import').srcnode(), # kconfiglib + root.Dir("src").Dir("python").srcnode(), # gem5 includes + root.Dir("ext").Dir("ply").srcnode(), # ply is used by several files + root.Dir("ext").Dir("Kconfiglib").Dir("import").srcnode(), # kconfiglib ] -extra_python_paths = [ node.abspath for node in extra_python_nodes ] +extra_python_paths = [node.abspath for node in extra_python_nodes] -__all__ = ['extra_python_paths'] +__all__ = ["extra_python_paths"] diff --git a/site_scons/gem5_scons/__init__.py b/site_scons/gem5_scons/__init__.py index bf7b85d35d..721487656b 100644 --- a/site_scons/gem5_scons/__init__.py +++ b/site_scons/gem5_scons/__init__.py @@ -52,34 +52,38 @@ import SCons.Script termcap = get_termcap() + def strip_build_path(path, env): path = str(path) - build_base = 'build/' - variant_base = env['BUILDROOT'] + os.path.sep + build_base = "build/" + variant_base = env["BUILDROOT"] + os.path.sep if path.startswith(variant_base): - path = path[len(variant_base):] + path = path[len(variant_base) :] elif path.startswith(build_base): - path = path[len(build_base):] + path = path[len(build_base) :] return path + def 
TempFileSpawn(scons_env): - old_pspawn = scons_env['PSPAWN'] - old_spawn = scons_env['SPAWN'] + old_pspawn = scons_env["PSPAWN"] + old_spawn = scons_env["SPAWN"] def wrapper(old, sh, esc, cmd, sh_args, *py_args): with tempfile.NamedTemporaryFile() as temp: - temp.write(' '.join(sh_args).encode()) + temp.write(" ".join(sh_args).encode()) temp.flush() sh_args = [sh, esc(temp.name)] return old(sh, esc, sh, sh_args, *py_args) def new_pspawn(sh, esc, cmd, args, sh_env, stdout, stderr): return wrapper(old_pspawn, sh, esc, cmd, args, sh_env, stdout, stderr) + def new_spawn(sh, esc, cmd, args, sh_env): return wrapper(old_spawn, sh, esc, cmd, args, sh_env) - scons_env['PSPAWN'] = new_pspawn - scons_env['SPAWN'] = new_spawn + scons_env["PSPAWN"] = new_pspawn + scons_env["SPAWN"] = new_spawn + # Generate a string of the form: # common/path/prefix/src1, src2 -> tgt1, tgt2 @@ -93,23 +97,32 @@ class Transform(object): tgts_color = termcap.Yellow + termcap.Bold def __init__(self, tool, max_sources=99): - self.format = self.tool_color + (" [%8s] " % tool) \ - + self.pfx_color + "%s" \ - + self.srcs_color + "%s" \ - + self.arrow_color + " -> " \ - + self.tgts_color + "%s" \ - + termcap.Normal + self.format = ( + self.tool_color + + (" [%8s] " % tool) + + self.pfx_color + + "%s" + + self.srcs_color + + "%s" + + self.arrow_color + + " -> " + + self.tgts_color + + "%s" + + termcap.Normal + ) self.max_sources = max_sources def __call__(self, target, source, env, for_signature=None): # truncate source list according to max_sources param - source = source[0:self.max_sources] + source = source[0 : self.max_sources] + def strip(f): return strip_build_path(str(f), env) + if len(source) > 0: srcs = list(map(strip, source)) else: - srcs = [''] + srcs = [""] tgts = list(map(strip, target)) # surprisingly, os.path.commonprefix is a dumb char-by-char string # operation that has nothing to do with paths. 
@@ -137,20 +150,23 @@ class Transform(object): if sep_idx != -1: com_pfx = com_pfx[0:sep_idx] else: - com_pfx = '' + com_pfx = "" elif src0_len > com_pfx_len and srcs[0][com_pfx_len] == ".": # still splitting at file extension: ok pass else: # probably a fluke; ignore it - com_pfx = '' + com_pfx = "" # recalculate length in case com_pfx was modified com_pfx_len = len(com_pfx) + def fmt(files): f = list(map(lambda s: s[com_pfx_len:], files)) - return ', '.join(f) + return ", ".join(f) + return self.format % (com_pfx, fmt(srcs), fmt(tgts)) + # The width warning and error messages should be wrapped at. text_width = None @@ -162,6 +178,7 @@ if not sys.stdout.isatty(): if text_width is None: try: import shutil + text_width = shutil.get_terminal_size().columns except: pass @@ -170,6 +187,7 @@ if text_width is None: if text_width is None: try: import curses + try: _, text_width = curses.initscr().getmaxyx() finally: @@ -181,21 +199,22 @@ if text_width is None: if text_width is None: text_width = 80 + def print_message(prefix, color, message, **kwargs): prefix_len = len(prefix) if text_width > prefix_len: wrap_width = text_width - prefix_len - padding = ' ' * prefix_len + padding = " " * prefix_len # First split on newlines. - lines = message.split('\n') + lines = message.split("\n") # Then wrap each line to the required width. wrapped_lines = [] for line in lines: wrapped_lines.extend(textwrap.wrap(line, wrap_width)) # Finally add the prefix and padding on extra lines, and glue it all # back together. - message = prefix + ('\n' + padding).join(wrapped_lines) + message = prefix + ("\n" + padding).join(wrapped_lines) else: # We have very small terminal, indent formatting doesn't help. 
message = prefix + message @@ -205,27 +224,36 @@ def print_message(prefix, color, message, **kwargs): print(message, **kwargs) return message + all_warnings = [] + + def summarize_warnings(): if not all_warnings: return - print(termcap.Yellow + termcap.Bold + - '*** Summary of Warnings ***' + - termcap.Normal) + print( + termcap.Yellow + + termcap.Bold + + "*** Summary of Warnings ***" + + termcap.Normal + ) list(map(print, all_warnings)) + def warning(*args, **kwargs): - message = ' '.join(args) - printed = print_message('Warning: ', termcap.Yellow, message, **kwargs) + message = " ".join(args) + printed = print_message("Warning: ", termcap.Yellow, message, **kwargs) all_warnings.append(printed) + def error(*args, **kwargs): - message = ' '.join(args) - print_message('Error: ', termcap.Red, message, **kwargs) + message = " ".join(args) + print_message("Error: ", termcap.Red, message, **kwargs) SCons.Script.Exit(1) + def parse_build_path(target): - path_dirs = target.split('/') + path_dirs = target.split("/") # Pop off the target file. path_dirs.pop() @@ -233,40 +261,55 @@ def parse_build_path(target): # Search backwards for the "build" directory. Whatever was just before it # was the name of the variant. variant_dir = path_dirs.pop() - while path_dirs and path_dirs[-1] != 'build': + while path_dirs and path_dirs[-1] != "build": variant_dir = path_dirs.pop() if not path_dirs: - error("No non-leaf 'build' dir found on target path.", t) + error("No non-leaf 'build' dir found on target path.", target) + + return os.path.join("/", *path_dirs), variant_dir - return os.path.join('/', *path_dirs), variant_dir # The MakeAction wrapper, and a SCons tool to set up the *COMSTR variables. 
-if SCons.Script.GetOption('verbose'): +if SCons.Script.GetOption("verbose"): + def MakeAction(action, string, *args, **kwargs): return SCons.Script.Action(action, *args, **kwargs) def MakeActionTool(env): pass + else: MakeAction = SCons.Script.Action def MakeActionTool(env): - env['CCCOMSTR'] = Transform("CC") - env['CXXCOMSTR'] = Transform("CXX") - env['ASCOMSTR'] = Transform("AS") - env['ARCOMSTR'] = Transform("AR", 0) - env['LINKCOMSTR'] = Transform("LINK", 0) - env['SHLINKCOMSTR'] = Transform("SHLINK", 0) - env['RANLIBCOMSTR'] = Transform("RANLIB", 0) - env['M4COMSTR'] = Transform("M4") - env['SHCCCOMSTR'] = Transform("SHCC") - env['SHCXXCOMSTR'] = Transform("SHCXX") + env["CCCOMSTR"] = Transform("CC") + env["CXXCOMSTR"] = Transform("CXX") + env["ASCOMSTR"] = Transform("AS") + env["ARCOMSTR"] = Transform("AR", 0) + env["LINKCOMSTR"] = Transform("LINK", 0) + env["SHLINKCOMSTR"] = Transform("SHLINK", 0) + env["RANLIBCOMSTR"] = Transform("RANLIB", 0) + env["M4COMSTR"] = Transform("M4") + env["SHCCCOMSTR"] = Transform("SHCC") + env["SHCXXCOMSTR"] = Transform("SHCXX") + def ToValue(obj): return SCons.Node.Python.Value(pickle.dumps(obj)) + def FromValue(node): return pickle.loads(node.read()) -__all__ = ['Configure', 'EnvDefaults', 'Transform', 'warning', 'error', - 'MakeAction', 'MakeActionTool', 'ToValue', 'FromValue'] + +__all__ = [ + "Configure", + "EnvDefaults", + "Transform", + "warning", + "error", + "MakeAction", + "MakeActionTool", + "ToValue", + "FromValue", +] diff --git a/site_scons/gem5_scons/builders/add_local_rpath.py b/site_scons/gem5_scons/builders/add_local_rpath.py index ce26575614..fb7ffb88f6 100755 --- a/site_scons/gem5_scons/builders/add_local_rpath.py +++ b/site_scons/gem5_scons/builders/add_local_rpath.py @@ -43,26 +43,23 @@ import sys import SCons.Node.FS + def AddLocalRPATH(env): def add_local_rpath(env, *targets): - '''Set up an RPATH for a library which lives in the build directory. 
+ """Set up an RPATH for a library which lives in the build directory. The construction environment variable BIN_RPATH_PREFIX should be set to the relative path of the build directory starting from the location - of the binary.''' + of the binary.""" for target in targets: target = env.Entry(target) if not isinstance(target, SCons.Node.FS.Dir): target = target.dir - relpath = os.path.relpath(target.abspath, env['BUILDDIR']) - components = [ - '\\$$ORIGIN', - '${BIN_RPATH_PREFIX}', - relpath - ] + relpath = os.path.relpath(target.abspath, env["BUILDDIR"]) + components = ["\\$$ORIGIN", "${BIN_RPATH_PREFIX}", relpath] env.Append(RPATH=[env.Literal(os.path.join(*components))]) if sys.platform != "darwin": - env.Append(LINKFLAGS=env.Split('-z origin')) + env.Append(LINKFLAGS=env.Split("-z origin")) - env.AddMethod(add_local_rpath, 'AddLocalRPATH') + env.AddMethod(add_local_rpath, "AddLocalRPATH") diff --git a/site_scons/gem5_scons/builders/blob.py b/site_scons/gem5_scons/builders/blob.py index 20d804a2e9..f4e6d3a0ea 100644 --- a/site_scons/gem5_scons/builders/blob.py +++ b/site_scons/gem5_scons/builders/blob.py @@ -46,19 +46,21 @@ from code_formatter import code_formatter import SCons.Node.Python + def build_blob(target, source, env): - ''' + """ Embed an arbitrary blob into the gem5 executable, and make it accessible to C++ as a byte array. 
- ''' + """ - with open(str(source[0]), 'rb') as f: + with open(str(source[0]), "rb") as f: data = f.read() symbol = str(source[1]) cc, hh = target hh_code = code_formatter() - hh_code('''\ + hh_code( + """\ #include #include @@ -72,13 +74,15 @@ extern const std::uint8_t ${symbol}[]; } // namespace Blobs } // namespace gem5 -''') +""" + ) hh_code.write(str(hh)) - include_path = os.path.relpath(hh.abspath, env['BUILDDIR']) + include_path = os.path.relpath(hh.abspath, env["BUILDDIR"]) cc_code = code_formatter() - cc_code('''\ + cc_code( + """\ #include "${include_path}" namespace gem5 @@ -87,22 +91,28 @@ namespace Blobs { const std::size_t ${symbol}_len = ${{len(data)}}; -''') +""" + ) bytesToCppArray(cc_code, symbol, data) - cc_code(''' + cc_code( + """ } // namespace Blobs } // namespace gem5 -''') +""" + ) cc_code.write(str(cc)) + blob_action = MakeAction(build_blob, Transform("EMBED BLOB")) + def blob_emitter(target, source, env): symbol = str(target[0]) - cc_file = env.File(symbol + '.cc') - hh_file = env.File(symbol + '.hh') + cc_file = env.File(symbol + ".cc") + hh_file = env.File(symbol + ".hh") return [cc_file, hh_file], [source, SCons.Node.Python.Value(symbol)] + def Blob(env): blob_builder = env.Builder(action=blob_action, emitter=blob_emitter) - env.Append(BUILDERS={'Blob': blob_builder}) + env.Append(BUILDERS={"Blob": blob_builder}) diff --git a/site_scons/gem5_scons/builders/config_file.py b/site_scons/gem5_scons/builders/config_file.py index 85820b9fb7..2ab7bf87b4 100755 --- a/site_scons/gem5_scons/builders/config_file.py +++ b/site_scons/gem5_scons/builders/config_file.py @@ -46,15 +46,16 @@ from gem5_scons import Transform, MakeAction # ################################################### + def ConfigFile(env): # This function generates a config header file that #defines the # variable symbol to the current variable setting (0 or 1). The source # operands are the name of the variable and a Value node containing the # value of the variable. 
def build_config_file(target, source, env): - (variable, value) = [s.get_contents().decode('utf-8') for s in source] - with open(str(target[0].abspath), 'w') as f: - print('#define', variable, value, file=f) + (variable, value) = [s.get_contents().decode("utf-8") for s in source] + with open(str(target[0].abspath), "w") as f: + print("#define", variable, value, file=f) return None # Combine the two functions into a scons Action object. @@ -66,8 +67,8 @@ def ConfigFile(env): # extract variable name from Builder arg variable = str(target[0]) # True target is config header file - target = env.Dir('config').File(variable.lower() + '.hh') - val = env['CONF'][variable] + target = env.Dir("config").File(variable.lower() + ".hh") + val = env["CONF"][variable] if isinstance(val, bool): # Force value to 0/1 val = str(int(val)) @@ -79,4 +80,4 @@ def ConfigFile(env): config_builder = env.Builder(emitter=config_emitter, action=config_action) - env.Append(BUILDERS = { 'ConfigFile' : config_builder }) + env.Append(BUILDERS={"ConfigFile": config_builder}) diff --git a/site_scons/gem5_scons/builders/switching_headers.py b/site_scons/gem5_scons/builders/switching_headers.py index 881d568cd1..a56ab51c86 100755 --- a/site_scons/gem5_scons/builders/switching_headers.py +++ b/site_scons/gem5_scons/builders/switching_headers.py @@ -51,27 +51,32 @@ from gem5_scons import Transform, MakeAction # ################################################### + def SwitchingHeaders(env): def build_switching_header(target, source, env): path = str(target[0]) subdir = str(source[0]) dp, fp = os.path.split(path) - dp = os.path.relpath(os.path.realpath(dp), - os.path.realpath(env['BUILDDIR'])) - with open(path, 'w') as hdr: + dp = os.path.relpath( + os.path.realpath(dp), os.path.realpath(env["BUILDDIR"]) + ) + with open(path, "w") as hdr: print('#include "%s/%s/%s"' % (dp, subdir, fp), file=hdr) - switching_header_action = MakeAction(build_switching_header, - Transform('GENERATE')) + 
switching_header_action = MakeAction( + build_switching_header, Transform("GENERATE") + ) - switching_header_builder = env.Builder(action=switching_header_action, - source_factory=env.Value, - single_source=True) + switching_header_builder = env.Builder( + action=switching_header_action, + source_factory=env.Value, + single_source=True, + ) - env.Append(BUILDERS = { 'SwitchingHeader': switching_header_builder }) + env.Append(BUILDERS={"SwitchingHeader": switching_header_builder}) def switching_headers(self, headers, source): for header in headers: self.SwitchingHeader(header, source) - env.AddMethod(switching_headers, 'SwitchingHeaders') + env.AddMethod(switching_headers, "SwitchingHeaders") diff --git a/site_scons/gem5_scons/configure.py b/site_scons/gem5_scons/configure.py index 53ee14a103..55a0d7d399 100644 --- a/site_scons/gem5_scons/configure.py +++ b/site_scons/gem5_scons/configure.py @@ -44,39 +44,41 @@ import os import SCons.Script import SCons.Util + def CheckCxxFlag(context, flag, autoadd=True): context.Message("Checking for compiler %s support... " % flag) - last_cxxflags = context.env['CXXFLAGS'] + last_cxxflags = context.env["CXXFLAGS"] context.env.Append(CXXFLAGS=[flag]) - pre_werror = context.env['CXXFLAGS'] - context.env.Append(CXXFLAGS=['-Werror']) - ret = context.TryCompile('// CheckCxxFlag DO NOTHING', '.cc') - context.env['CXXFLAGS'] = pre_werror + pre_werror = context.env["CXXFLAGS"] + context.env.Append(CXXFLAGS=["-Werror"]) + ret = context.TryCompile("// CheckCxxFlag DO NOTHING", ".cc") + context.env["CXXFLAGS"] = pre_werror if not (ret and autoadd): - context.env['CXXFLAGS'] = last_cxxflags + context.env["CXXFLAGS"] = last_cxxflags context.Result(ret) return ret + def CheckLinkFlag(context, flag, autoadd=True, set_for_shared=True): context.Message("Checking for linker %s support... 
" % flag) - last_linkflags = context.env['LINKFLAGS'] + last_linkflags = context.env["LINKFLAGS"] context.env.Append(LINKFLAGS=[flag]) - pre_werror = context.env['LINKFLAGS'] - context.env.Append(LINKFLAGS=['-Werror']) - ret = context.TryLink('int main(int, char *[]) { return 0; }', '.cc') - context.env['LINKFLAGS'] = pre_werror + pre_werror = context.env["LINKFLAGS"] + context.env.Append(LINKFLAGS=["-Werror"]) + ret = context.TryLink("int main(int, char *[]) { return 0; }", ".cc") + context.env["LINKFLAGS"] = pre_werror if not (ret and autoadd): - context.env['LINKFLAGS'] = last_linkflags - if (ret and set_for_shared): - assert(autoadd) + context.env["LINKFLAGS"] = last_linkflags + if ret and set_for_shared: + assert autoadd context.env.Append(SHLINKFLAGS=[flag]) context.Result(ret) return ret + # Add a custom Check function to test for structure members. def CheckMember(context, include, decl, member, include_quotes="<>"): - context.Message("Checking for member %s in %s..." % - (member, decl)) + context.Message("Checking for member %s in %s..." % (member, decl)) text = """ #include %(header)s int main(){ @@ -84,18 +86,21 @@ int main(){ (void)test.%(member)s; return 0; }; -""" % { "header" : include_quotes[0] + include + include_quotes[1], - "decl" : decl, - "member" : member, - } +""" % { + "header": include_quotes[0] + include + include_quotes[1], + "decl": decl, + "member": member, + } ret = context.TryCompile(text, extension=".cc") context.Result(ret) return ret + def CheckPythonLib(context): - context.Message('Checking Python version... ') - ret = context.TryRun(r""" + context.Message("Checking Python version... 
") + ret = context.TryRun( + r""" #include int @@ -107,21 +112,24 @@ main(int argc, char **argv) { "sys.stdout.write('%i.%i.%i' % (vi.major, vi.minor, vi.micro));\n"); return 0; } - """, extension=".cc") + """, + extension=".cc", + ) context.Result(ret[1] if ret[0] == 1 else 0) if ret[0] == 0: return None else: return tuple(map(int, ret[1].split("."))) + def CheckPkgConfig(context, pkgs, *args): if not SCons.Util.is_List(pkgs): pkgs = [pkgs] - assert(pkgs) + assert pkgs for pkg in pkgs: - context.Message('Checking for pkg-config package %s... ' % pkg) - ret = context.TryAction('pkg-config %s' % pkg)[0] + context.Message("Checking for pkg-config package %s... " % pkg) + ret = context.TryAction("pkg-config %s" % pkg)[0] if not ret: context.Result(ret) continue @@ -129,7 +137,7 @@ def CheckPkgConfig(context, pkgs, *args): if len(args) == 0: break - cmd = ' '.join(['pkg-config'] + list(args) + [pkg]) + cmd = " ".join(["pkg-config"] + list(args) + [pkg]) try: context.env.ParseConfig(cmd) ret = 1 @@ -141,20 +149,25 @@ def CheckPkgConfig(context, pkgs, *args): return ret + @contextlib.contextmanager def Configure(env, *args, **kwargs): - kwargs.setdefault('conf_dir', - os.path.join(env['GEM5BUILD'], 'scons_config')) - kwargs.setdefault('log_file', - os.path.join(env['GEM5BUILD'], 'scons_config.log')) - kwargs.setdefault('custom_tests', {}) - kwargs['custom_tests'].update({ - 'CheckCxxFlag' : CheckCxxFlag, - 'CheckLinkFlag' : CheckLinkFlag, - 'CheckMember' : CheckMember, - 'CheckPkgConfig' : CheckPkgConfig, - 'CheckPythonLib' : CheckPythonLib, - }) + kwargs.setdefault( + "conf_dir", os.path.join(env["GEM5BUILD"], "scons_config") + ) + kwargs.setdefault( + "log_file", os.path.join(env["GEM5BUILD"], "scons_config.log") + ) + kwargs.setdefault("custom_tests", {}) + kwargs["custom_tests"].update( + { + "CheckCxxFlag": CheckCxxFlag, + "CheckLinkFlag": CheckLinkFlag, + "CheckMember": CheckMember, + "CheckPkgConfig": CheckPkgConfig, + "CheckPythonLib": CheckPythonLib, + } + ) 
conf = SCons.Script.Configure(env, *args, **kwargs) # Recent versions of scons substitute a "Null" object for Configure() @@ -163,14 +176,17 @@ def Configure(env, *args, **kwargs): # breaking all our configuration checks. We replace it with our own # more optimistic null object that returns True instead. if not conf: + def NullCheck(*args, **kwargs): return True class NullConf: def __init__(self, env): self.env = env + def Finish(self): return self.env + def __getattr__(self, mname): return NullCheck diff --git a/site_scons/gem5_scons/defaults.py b/site_scons/gem5_scons/defaults.py index 7a245892f4..a07b7ffa4b 100644 --- a/site_scons/gem5_scons/defaults.py +++ b/site_scons/gem5_scons/defaults.py @@ -42,56 +42,77 @@ import os from gem5_python_paths import extra_python_paths + def EnvDefaults(env): # export TERM so that clang reports errors in color - use_vars = set([ 'AS', 'AR', 'CC', 'CXX', 'HOME', 'LD_LIBRARY_PATH', - 'LIBRARY_PATH', 'PATH', 'PKG_CONFIG_PATH', 'PROTOC', - 'PYTHONPATH', 'RANLIB', 'TERM', 'PYTHON_CONFIG', - 'CCFLAGS_EXTRA', 'GEM5PY_CCFLAGS_EXTRA', - 'GEM5PY_LINKFLAGS_EXTRA', 'LINKFLAGS_EXTRA', 'LANG']) + use_vars = set( + [ + "AS", + "AR", + "CC", + "CXX", + "HOME", + "LD_LIBRARY_PATH", + "LIBRARY_PATH", + "PATH", + "PKG_CONFIG_PATH", + "PROTOC", + "PYTHONPATH", + "RANLIB", + "TERM", + "PYTHON_CONFIG", + "CCFLAGS_EXTRA", + "GEM5PY_CCFLAGS_EXTRA", + "GEM5PY_LINKFLAGS_EXTRA", + "LINKFLAGS_EXTRA", + "LANG", + ] + ) use_prefixes = [ - "ASAN_", # address sanitizer symbolizer path and settings - "CCACHE_", # ccache (caching compiler wrapper) configuration - "CCC_", # clang static analyzer configuration - "DISTCC_", # distcc (distributed compiler wrapper) config - "INCLUDE_SERVER_", # distcc pump server settings - "M5", # M5 configuration (e.g., path to kernels) - "NIX_", # wrapped binaries if using nix package manager - ] + "ASAN_", # address sanitizer symbolizer path and settings + "CCACHE_", # ccache (caching compiler wrapper) configuration + "CCC_", # 
clang static analyzer configuration + "DISTCC_", # distcc (distributed compiler wrapper) config + "INCLUDE_SERVER_", # distcc pump server settings + "M5", # M5 configuration (e.g., path to kernels) + "NIX_", # wrapped binaries if using nix package manager + "TMPDIR", # default temporary directory + ] - for key,val in sorted(os.environ.items()): - if key in use_vars or \ - any([key.startswith(prefix) for prefix in use_prefixes]): - env['ENV'][key] = val + for key, val in sorted(os.environ.items()): + if key in use_vars or any( + [key.startswith(prefix) for prefix in use_prefixes] + ): + env["ENV"][key] = val # These variables from the environment override/become SCons variables, # with a default if they weren't in the host environment. var_overrides = { - 'CC': env['CC'], - 'CXX': env['CXX'], - 'PROTOC': 'protoc', - 'PYTHON_CONFIG': [ 'python3-config', 'python-config' ], - 'CCFLAGS_EXTRA': '', - 'GEM5PY_CCFLAGS_EXTRA': '', - 'GEM5PY_LINKFLAGS_EXTRA': '', - 'LINKFLAGS_EXTRA': '', + "CC": env["CC"], + "CXX": env["CXX"], + "PROTOC": "protoc", + "PYTHON_CONFIG": ["python3-config", "python-config"], + "CCFLAGS_EXTRA": "", + "GEM5PY_CCFLAGS_EXTRA": "", + "GEM5PY_LINKFLAGS_EXTRA": "", + "LINKFLAGS_EXTRA": "", } - for key,default in var_overrides.items(): - env[key] = env['ENV'].get(key, default) + for key, default in var_overrides.items(): + env[key] = env["ENV"].get(key, default) # Tell scons to avoid implicit command dependencies to avoid issues # with the param wrappes being compiled twice (see # https://github.com/SCons/scons/issues/2811 - env['IMPLICIT_COMMAND_DEPENDENCIES'] = 0 - env.Decider('MD5-timestamp') + env["IMPLICIT_COMMAND_DEPENDENCIES"] = 0 + env.Decider("MD5-timestamp") # add useful python code PYTHONPATH so it can be used by subprocesses # as well - env.AppendENVPath('PYTHONPATH', extra_python_paths) + env.AppendENVPath("PYTHONPATH", extra_python_paths) # Default duplicate option is to use hard links, but this messes up # when you use emacs to edit a file 
in the target dir, as emacs moves # file to file~ then copies to file, breaking the link. Symbolic # (soft) links work better. - env.SetOption('duplicate', 'soft-copy') + env.SetOption("duplicate", "soft-copy") diff --git a/site_scons/gem5_scons/sources.py b/site_scons/gem5_scons/sources.py index 85b0b4e453..548e9386ea 100644 --- a/site_scons/gem5_scons/sources.py +++ b/site_scons/gem5_scons/sources.py @@ -47,8 +47,9 @@ import SCons.Script # When specifying a source file of some type, a set of tags can be # specified for that file. + def tag_implies(env, tag, tag_list): - ''' + """ Associates a tag X to a list of tags which are implied by X. For example, assume: @@ -72,10 +73,10 @@ def tag_implies(env, tag, tag_list): So that any use of a tag will automatically include its transitive tags after being resolved. - ''' + """ env.SetDefault(_tag_implies={}) - implications = env['_tag_implies'] + implications = env["_tag_implies"] if isinstance(tag_list, str): tag_list = frozenset([tag_list]) @@ -95,21 +96,23 @@ def tag_implies(env, tag, tag_list): # Check if another tag depends on this tag. If so, add this tag's # implications to that tag. - for t,implied in implications.items(): + for t, implied in implications.items(): if tag in implied: implications[t] |= implications[tag] + def TagImpliesTool(env): - env.AddMethod(tag_implies, 'TagImplies') + env.AddMethod(tag_implies, "TagImplies") + def resolve_tags(env, tags): - ''' + """ Returns the complete set of tags implied (dependencies) by the supplied tags. 
- ''' + """ implications = env.SetDefault(_tag_implies={}) - implications = env['_tag_implies'] + implications = env["_tag_implies"] if isinstance(tags, str): tags = frozenset([tags]) @@ -122,53 +125,71 @@ def resolve_tags(env, tags): tags |= implications[tag] return tags + class SourceFilter(object): factories = {} + def __init__(self, predicate): self.predicate = predicate def __or__(self, other): - return SourceFilter(lambda env, tags: self.predicate(env, tags) or - other.predicate(env, tags)) + return SourceFilter( + lambda env, tags: self.predicate(env, tags) + or other.predicate(env, tags) + ) def __and__(self, other): - return SourceFilter(lambda env, tags: self.predicate(env, tags) and - other.predicate(env, tags)) + return SourceFilter( + lambda env, tags: self.predicate(env, tags) + and other.predicate(env, tags) + ) + def with_any_tags(*tags): - '''Return a list of sources with any of the supplied tags.''' - return SourceFilter(lambda env, stags: \ - len(resolve_tags(env, tags) & stags) > 0) + """Return a list of sources with any of the supplied tags.""" + return SourceFilter( + lambda env, stags: len(resolve_tags(env, tags) & stags) > 0 + ) + def with_all_tags(*tags): - '''Return a list of sources with all of the supplied tags.''' + """Return a list of sources with all of the supplied tags.""" return SourceFilter(lambda env, stags: resolve_tags(env, tags) <= stags) + def with_tag(tag): - '''Return a list of sources with the supplied tag.''' + """Return a list of sources with the supplied tag.""" return with_any_tags(*[tag]) + def without_tags(*tags): - '''Return a list of sources without any of the supplied tags.''' - return SourceFilter(lambda env, stags: \ - len(resolve_tags(env, tags) & stags) == 0) + """Return a list of sources without any of the supplied tags.""" + return SourceFilter( + lambda env, stags: len(resolve_tags(env, tags) & stags) == 0 + ) + def without_tag(tag): - '''Return a list of sources without the supplied tag.''' + """Return a 
list of sources without the supplied tag.""" return without_tags(*[tag]) -SourceFilter.factories.update({ - 'with_any_tags': with_any_tags, - 'with_all_tags': with_all_tags, - 'with_tag': with_tag, - 'without_tags': without_tags, - 'without_tag': without_tag, -}) + +SourceFilter.factories.update( + { + "with_any_tags": with_any_tags, + "with_all_tags": with_all_tags, + "with_tag": with_tag, + "without_tags": without_tags, + "without_tag": without_tag, + } +) + class SourceList(list): def apply_filter(self, env, f): def match(source): return f.predicate(env, resolve_tags(env, source.tags)) + return SourceList(filter(match, self)) def __getattr__(self, name): @@ -179,33 +200,38 @@ class SourceList(list): @functools.wraps(func) def wrapper(env, *args, **kwargs): return self.apply_filter(env, func(*args, **kwargs)) + return wrapper + class SourceMeta(type): - '''Meta class for source files that keeps track of all files of a - particular type.''' + """Meta class for source files that keeps track of all files of a + particular type.""" + def __init__(cls, name, bases, dict): super(SourceMeta, cls).__init__(name, bases, dict) cls.all = SourceList() + class SourceItem(object, metaclass=SourceMeta): - '''Base object that encapsulates the notion of a source component for + """Base object that encapsulates the notion of a source component for gem5. 
This specifies a set of tags which help group components into groups - based on arbitrary properties.''' + based on arbitrary properties.""" + def __init__(self, source, tags=None, add_tags=None, append=None): self.source = source if tags is None: - tags='gem5 lib' + tags = "gem5 lib" if isinstance(tags, str): - tags = { tags } + tags = {tags} if not isinstance(tags, set): tags = set(tags) self.tags = tags.copy() if add_tags: if isinstance(add_tags, str): - add_tags = { add_tags } + add_tags = {add_tags} if not isinstance(add_tags, set): add_tags = set(add_tags) self.tags |= add_tags @@ -216,10 +242,11 @@ class SourceItem(object, metaclass=SourceMeta): if issubclass(base, SourceItem): base.all.append(self) + class SourceFile(SourceItem): - '''Base object that encapsulates the notion of a source file. + """Base object that encapsulates the notion of a source file. This includes, the source node, target node, various manipulations - of those.''' + of those.""" def __init__(self, source, tags=None, add_tags=None, append=None): super().__init__(source, tags=tags, add_tags=add_tags, append=append) @@ -243,6 +270,15 @@ class SourceFile(SourceItem): return env.SharedObject(self.tnode) -__all__ = ['TagImpliesTool', 'SourceFilter', 'SourceList', 'SourceFile', - 'SourceItem', 'with_any_tags', 'with_all_tags', 'with_tag', - 'without_tags', 'without_tag'] +__all__ = [ + "TagImpliesTool", + "SourceFilter", + "SourceList", + "SourceFile", + "SourceItem", + "with_any_tags", + "with_all_tags", + "with_tag", + "without_tags", + "without_tag", +] diff --git a/site_scons/gem5_scons/util.py b/site_scons/gem5_scons/util.py index b62cc0164e..045fd4ef32 100644 --- a/site_scons/gem5_scons/util.py +++ b/site_scons/gem5_scons/util.py @@ -46,12 +46,15 @@ import SCons.Script import m5.util.terminal + def ignore_style(): """Determine whether we should ignore style checks""" - return SCons.Script.GetOption('ignore_style') or not sys.stdin.isatty() + return SCons.Script.GetOption("ignore_style") 
or not sys.stdin.isatty() + def get_termcap(): - return m5.util.terminal.get_termcap(SCons.Script.GetOption('use_colors')) + return m5.util.terminal.get_termcap(SCons.Script.GetOption("use_colors")) + def readCommand(cmd, **kwargs): """ @@ -68,13 +71,13 @@ def readCommand(cmd, **kwargs): if isinstance(cmd, str): cmd = cmd.split() - no_exception = 'exception' in kwargs - exception = kwargs.pop('exception', None) + no_exception = "exception" in kwargs + exception = kwargs.pop("exception", None) - kwargs.setdefault('shell', False) - kwargs.setdefault('stdout', PIPE) - kwargs.setdefault('stderr', STDOUT) - kwargs.setdefault('close_fds', True) + kwargs.setdefault("shell", False) + kwargs.setdefault("stdout", PIPE) + kwargs.setdefault("stderr", STDOUT) + kwargs.setdefault("close_fds", True) try: subp = Popen(cmd, **kwargs) except Exception as e: @@ -82,20 +85,23 @@ def readCommand(cmd, **kwargs): return -1, exception raise - output = subp.communicate()[0].decode('utf-8') + output = subp.communicate()[0].decode("utf-8") return output + def compareVersions(v1, v2): """helper function: compare arrays or strings of version numbers. 
E.g., compare_version((1,3,25), (1,4,1)') returns -1, 0, 1 if v1 is <, ==, > v2 """ + def make_version_list(v): - if isinstance(v, (list,tuple)): + if isinstance(v, (list, tuple)): return v elif isinstance(v, str): - return list(map(lambda x: int(re.match('\d+', x).group()), - v.split('.'))) + return list( + map(lambda x: int(re.match("\d+", x).group()), v.split(".")) + ) else: raise TypeError() @@ -104,8 +110,10 @@ def compareVersions(v1, v2): # Compare corresponding elements of lists # The shorter list is filled with 0 till the lists have the same length - for n1,n2 in itertools.zip_longest(v1, v2, fillvalue=0): - if n1 < n2: return -1 - if n1 > n2: return 1 + for n1, n2 in itertools.zip_longest(v1, v2, fillvalue=0): + if n1 < n2: + return -1 + if n1 > n2: + return 1 return 0 diff --git a/site_scons/site_init.py b/site_scons/site_init.py index 5eeb29012a..480dfa74da 100644 --- a/site_scons/site_init.py +++ b/site_scons/site_init.py @@ -44,10 +44,12 @@ from __future__ import print_function try: EnsureSConsVersion(3, 0, 0) except SystemExit as e: - print(""" + print( + """ For more details, see: http://gem5.org/documentation/general_docs/building -""") +""" + ) raise @@ -55,7 +57,8 @@ For more details, see: try: EnsurePythonVersion(3, 6) except SystemExit as e: - print("""\033[93m + print( + """\033[93m Python 3 is now required. The following are steps to compile gem5 in Python 3 environment, @@ -81,7 +84,8 @@ Python 3 environment, (Optional) For convenience reasons, you can set up an alias for the Python3 \ scons phrase in your environment. \033[0m -""") +""" + ) raise from gem5_python_paths import extra_python_paths diff --git a/site_scons/site_tools/git.py b/site_scons/site_tools/git.py index fe083b7566..b47de77612 100644 --- a/site_scons/site_tools/git.py +++ b/site_scons/site_tools/git.py @@ -38,69 +38,45 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+from asyncio import subprocess import os import sys +import subprocess import gem5_scons.util import SCons.Script git_style_message = """ -You're missing the gem5 style or commit message hook. These hooks help -to ensure that your code follows gem5's style rules on git commit. -This script will now install the hook in your .git/hooks/ directory. -Press enter to continue, or ctrl-c to abort: """ +You're missing the pre-commit/commit-msg hooks. These hook help to ensure your +code follows gem5's style rules on git commit and your commit messages follow +our commit message requirements. This script will now install these hooks in +your .git/hooks/ directory. +Press enter to continue, or ctrl-c to abort: +""" + def install_style_hooks(env): try: - gitdir = env.Dir(gem5_scons.util.readCommand( - ["git", "rev-parse", "--git-dir"]).strip("\n")) + gitdir = env.Dir( + gem5_scons.util.readCommand( + ["git", "rev-parse", "--git-dir"] + ).strip("\n") + ) except Exception as e: print("Warning: Failed to find git repo directory: %s" % e) return git_hooks = gitdir.Dir("hooks") + def hook_exists(hook_name): hook = git_hooks.File(hook_name) return hook.exists() - def hook_install(hook_name, script): - hook = git_hooks.File(hook_name) - if hook.exists(): - print("Warning: Can't install %s, hook already exists." % - hook_name) - return - - if hook.islink(): - print("Warning: Removing broken symlink for hook %s." % hook_name) - os.unlink(hook.get_abspath()) - - if not git_hooks.exists(): - os.mkdir(git_hooks.get_abspath()) - git_hooks.clear() - - abs_symlink_hooks = git_hooks.islink() and \ - os.path.isabs(os.readlink(git_hooks.get_abspath())) - - # Use a relative symlink if the hooks live in the source directory, - # and the hooks directory is not a symlink to an absolute path. 
- if hook.is_under(env.Dir("#")) and not abs_symlink_hooks: - script_path = os.path.relpath( - os.path.realpath(script.get_abspath()), - os.path.realpath(hook.Dir(".").get_abspath())) - else: - script_path = script.get_abspath() - - try: - os.symlink(script_path, hook.get_abspath()) - except: - print("Error updating git %s hook" % hook_name) - raise - if hook_exists("pre-commit") and hook_exists("commit-msg"): return - print(git_style_message, end=' ') - if SCons.Script.GetOption('install_hooks'): + print(git_style_message, end=" ") + if SCons.Script.GetOption("install_hooks"): print("Installing revision control hooks automatically.") else: try: @@ -109,15 +85,32 @@ def install_style_hooks(env): print("Input exception, exiting scons.\n") sys.exit(1) - git_style_script = env.Dir("#util").File("git-pre-commit.py") - git_msg_script = env.Dir("#ext").File("git-commit-msg") + pre_commit_install = env.Dir("#util").File("pre-commit-install.sh") + + ret = subprocess.call(str(pre_commit_install), shell=True) + if ret != 0: + print( + "It is strongly recommended you install the pre-commit hooks " + "before working with gem5. Do you want to continue compilation " + "(y/n)?" + ) + while True: + response = input().lower().strip() + if response in {"yes", "ye", "y"}: + return + elif response in {"no", "n"}: + sys.exit(1) + else: + print( + f"Could not parse answer '{response}'. Do you want to " + "continue compilation (y/n)?" 
+ ) - hook_install("pre-commit", git_style_script) - hook_install("commit-msg", git_msg_script) def generate(env): if exists(env) and not gem5_scons.util.ignore_style(): install_style_hooks(env) + def exists(env): - return env.Entry('#.git').exists() + return env.Entry("#.git").exists() diff --git a/src/Doxyfile b/src/Doxyfile index c8bde04cbb..4d14b7ccb7 100644 --- a/src/Doxyfile +++ b/src/Doxyfile @@ -22,20 +22,20 @@ DOXYFILE_ENCODING = UTF-8 -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = gem5 -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or # if some version control system is used. -PROJECT_NUMBER = v22.0.0.2 +PROJECT_NUMBER = v22.1.0.0 -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = doxygen @@ -49,20 +49,20 @@ OUTPUT_DIRECTORY = doxygen CREATE_SUBDIRS = NO -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: -# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, -# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en -# (Japanese with English messages), Korean, Korean-en, Norwegian, Polish, Portuguese, +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, +# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en +# (Japanese with English messages), Korean, Korean-en, Norwegian, Polish, Portuguese, # Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian. OUTPUT_LANGUAGE = English -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES @@ -86,8 +86,8 @@ REPEAT_BRIEF = YES ABBREVIATE_BRIEF = -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief # description. 
ALWAYS_DETAILED_SEC = YES @@ -99,17 +99,17 @@ ALWAYS_DETAILED_SEC = YES INLINE_INHERITED_MEMB = NO -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. It is allowed to use relative paths in the argument list. -# If left blank the directory from which doxygen is run is used as the +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. It is allowed to use relative paths in the argument list. +# If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = . @@ -123,16 +123,16 @@ STRIP_FROM_PATH = . STRIP_FROM_INC_PATH = -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. 
If set to NO, the JavaDoc -# comments will behave just like the Qt-style comments (thus requiring an +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = YES @@ -145,44 +145,44 @@ JAVADOC_AUTOBRIEF = YES QT_AUTOBRIEF = YES -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. 
By default +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO -# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. -ALIASES = +ALIASES = -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. 
OPTIMIZE_OUTPUT_FOR_C = NO @@ -253,10 +253,10 @@ IDL_PROPERTY_SUPPORT = YES DISTRIBUTE_GROUP_DOC = NO -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES @@ -292,25 +292,25 @@ SYMBOL_CACHE_SIZE = 0 # Build related configuration options #--------------------------------------------------------------------------- -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES -# If the EXTRACT_STATIC tag is set to YES all static members of a file +# If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. 
EXTRACT_STATIC = YES -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES @@ -330,77 +330,77 @@ EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = YES -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows # users are advised to set this option to NO. 
CASE_SENSE_NAMES = YES -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in # declaration order. 
SORT_BRIEF_DOCS = NO @@ -415,57 +415,57 @@ SORT_MEMBERS_CTORS_1ST = NO SORT_GROUP_NAMES = NO -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the +# Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. 
This list is created by putting +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES -# The ENABLED_SECTIONS tag can be used to enable conditional +# The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. -ENABLED_SECTIONS = +ENABLED_SECTIONS = -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. 
SHOW_USED_FILES = YES @@ -513,26 +513,26 @@ LAYOUT_FILE = # configuration options related to warning and progress messages #--------------------------------------------------------------------------- -# The QUIET tag can be used to turn on/off the messages that are generated +# The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES @@ -545,26 +545,26 @@ WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = NO -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. 
The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the # warning originated and the warning text. WARN_FORMAT = "$file:$line: $text" -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written # to stderr. -WARN_LOGFILE = +WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . @@ -577,11 +577,11 @@ INPUT = . INPUT_ENCODING = UTF-8 -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. 
If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp # *.h++ *.idl *.odl *.cs *.php *.php3 *.inc FILE_PATTERNS = *.c \ @@ -590,14 +590,14 @@ FILE_PATTERNS = *.c \ *.hh \ *.doxygen -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = build \ @@ -606,13 +606,13 @@ EXCLUDE = build \ PENDING \ RESYNC -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories # that are symbolic links (a Unix filesystem feature) are excluded from the input. 
EXCLUDE_SYMLINKS = YES -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. EXCLUDE_PATTERNS = */BitKeeper/* \ @@ -627,40 +627,40 @@ EXCLUDE_PATTERNS = */BitKeeper/* \ EXCLUDE_SYMBOLS = -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see # the \include command). -EXAMPLE_PATH = +EXAMPLE_PATH = -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left # blank all files are included. -EXAMPLE_PATTERNS = +EXAMPLE_PATTERNS = -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. 
EXAMPLE_RECURSIVE = NO -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = doxygen/images -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes # to standard output. -INPUT_FILTER = +INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. @@ -673,8 +673,8 @@ INPUT_FILTER = FILTER_PATTERNS = -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO @@ -683,32 +683,32 @@ FILTER_SOURCE_FILES = NO # configuration options related to source browsing #--------------------------------------------------------------------------- -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. 
-# Note: To get rid of all source code in the generated output, make sure also +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES -# Setting the INLINE_SOURCES tag to YES will include the body +# Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES -# If the REFERENCED_BY_RELATION tag is set to YES (the default) -# then for each documented function all documented +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES -# If the REFERENCES_RELATION tag is set to YES (the default) -# then for each documented function all documented entities +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES @@ -729,8 +729,8 @@ REFERENCES_LINK_SOURCE = YES USE_HTAGS = NO -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. 
VERBATIM_HEADERS = YES @@ -739,66 +739,66 @@ VERBATIM_HEADERS = YES # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 3 -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. -IGNORE_PREFIX = +IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a # standard header. -HTML_HEADER = +HTML_HEADER = -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = doxygen/footer.html -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet. Note that doxygen will try to copy -# the style sheet file to the HTML output directory, so don't put your own +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. 
If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! -HTML_STYLESHEET = +HTML_STYLESHEET = # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting @@ -806,8 +806,8 @@ HTML_STYLESHEET = HTML_TIMESTAMP = YES -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES @@ -846,29 +846,29 @@ DOCSET_FEEDNAME = "Doxygen generated docs" DOCSET_BUNDLE_ID = org.doxygen.Project -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be # written to the html output directory. 
-CHM_FILE = +CHM_FILE = -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. -HHC_LOCATION = +HHC_LOCATION = -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO @@ -879,13 +879,13 @@ GENERATE_CHI = NO CHM_INDEX_ENCODING = -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO -# The TOC_EXPAND flag can be set to YES to add extra items for group members +# The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO @@ -955,22 +955,22 @@ GENERATE_ECLIPSEHELP = NO ECLIPSE_DOC_ID = org.doxygen.Project -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. 
DISABLE_INDEX = NO -# This tag can be used to set the number of enum values (range [1..20]) +# This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be -# generated containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, -# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO @@ -980,8 +980,8 @@ GENERATE_TREEVIEW = NO USE_INLINE_TREES = NO -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 @@ -1011,74 +1011,74 @@ SERVER_BASED_SEARCH = NO # configuration options related to the LaTeX output #--------------------------------------------------------------------------- -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. -EXTRA_PACKAGES = +EXTRA_PACKAGES = -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. 
If it is left blank doxygen will generate a +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! -LATEX_HEADER = +LATEX_HEADER = -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) 
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO @@ -1091,68 +1091,68 @@ LATEX_SOURCE_CODE = NO # configuration options related to the RTF output #--------------------------------------------------------------------------- -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. -RTF_STYLESHEET_FILE = +RTF_STYLESHEET_FILE = -# Set optional variables used in the generation of an rtf document. +# Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. -RTF_EXTENSIONS_FILE = +RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man -# The MAN_EXTENSION tag determines the extension that is added to +# The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). 
These additional files -# only source the real man page, but without them the man command +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO @@ -1161,33 +1161,33 @@ MAN_LINKS = NO # configuration options related to the XML output #--------------------------------------------------------------------------- -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the # syntax of the XML files. -XML_SCHEMA = +XML_SCHEMA = -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the # syntax of the XML files. 
-XML_DTD = +XML_DTD = -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES @@ -1196,10 +1196,10 @@ XML_PROGRAMLISTING = YES # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO @@ -1208,91 +1208,90 @@ GENERATE_AUTOGEN_DEF = NO # configuration options related to the Perl module output #--------------------------------------------------------------------------- -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the # moment. 
GENERATE_PERLMOD = NO -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. This is useful -# if you want to understand what is going on. On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. 
-PERLMOD_MAKEVAR_PREFIX = +PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- -# Configuration options related to the preprocessor +# Configuration options related to the preprocessor #--------------------------------------------------------------------------- -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_PREDEFINED tags. EXPAND_ONLY_PREDEF = NO -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. 
SEARCH_INCLUDES = YES -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by # the preprocessor. -INCLUDE_PATH = +INCLUDE_PATH = -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. -INCLUDE_FILE_PATTERNS = +INCLUDE_FILE_PATTERNS = -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. -PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS \ - IS_NULL_ISA +PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. -EXPAND_AS_DEFINED = +EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone @@ -1303,50 +1302,50 @@ EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- -# Configuration::additions related to external references +# Configuration::additions related to external references #--------------------------------------------------------------------------- -# The TAGFILES option can be used to specify one or more tagfiles. -# Optionally an initial location of the external documentation -# can be added for each tagfile. The format of a tag file without -# this location is as follows: -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths or -# URLs. If a location is present for each tag, the installdox tool +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) -# If a tag file is not located in the directory in which doxygen +# If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. 
-TAGFILES = +TAGFILES = -# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. -GENERATE_TAGFILE = +GENERATE_TAGFILE = -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES -# The PERL_PATH should be the absolute path and name of the perl script +# The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- -# Configuration options related to the dot tool +# Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will @@ -1367,15 +1366,15 @@ CLASS_DIAGRAMS = YES MSCGEN_PATH = -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. 
This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO @@ -1403,16 +1402,16 @@ DOT_FONTSIZE = 10 DOT_FONTPATH = -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES @@ -1422,35 +1421,35 @@ COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. 
UML_LOOK = NO -# If set to YES, the inheritance and collaboration graphs will show the +# If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES -# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will -# generate a call dependency graph for every global function or class method. -# Note that enabling this option will significantly increase the time of a run. -# So in most cases it will be better to enable call graphs for selected +# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a call dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. 
CALL_GRAPH = YES @@ -1463,7 +1462,7 @@ CALL_GRAPH = YES CALLER_GRAPH = NO -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES @@ -1475,22 +1474,22 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png -# The tag DOT_PATH can be used to specify the path where the dot tool can be +# The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found on the path. -DOT_PATH = +DOT_PATH = -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the # \dotfile command). -DOTFILE_DIRS = +DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is @@ -1526,14 +1525,14 @@ DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = YES -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. 
GENERATE_LEGEND = YES -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES diff --git a/src/SConscript b/src/SConscript index 2ddf4bf7af..4e7139c064 100644 --- a/src/SConscript +++ b/src/SConscript @@ -48,7 +48,7 @@ import sys import SCons -from gem5_scons import Transform, warning, error, ToValue, FromValue +from gem5_scons import Configure, error, FromValue, ToValue, Transform, warning from gem5_scons.sources import * Export(SourceFilter.factories) @@ -250,9 +250,10 @@ def protoc_emitter(target, source, env): root, ext = os.path.splitext(source[0].get_abspath()) return [root + '.pb.cc', root + '.pb.h'], source -protoc_action = MakeAction('${PROTOC} --cpp_out ${BUILDDIR} ' - '--proto_path ${BUILDDIR} --proto_path ${SOURCE.dir} ' - '${SOURCE.get_abspath()}', +# To understand these variables and attributes, see: +# https://scons.org/doc/production/HTML/scons-man.html#variable_substitution +protoc_action = MakeAction('${PROTOC} --cpp_out ${TARGET.dir.abspath} ' + '--proto_path ${SOURCE.dir.abspath} ${SOURCE.abspath}', Transform("PROTOC")) protobuf_builder = Builder(action=protoc_action, emitter=protoc_emitter, src_suffix='.proto') @@ -268,7 +269,10 @@ def ProtoBuf(source, tags=None, add_tags=None): env['PROTOC_GRPC'] = distutils.spawn.find_executable('grpc_cpp_plugin') if env['PROTOC_GRPC']: - env.Append(LIBS=['grpc++']) + with Configure(env) as conf: + if (not env['HAVE_PKG_CONFIG'] or + not conf.CheckPkgConfig(['grpc++'], '--libs')): + env.Append(LIBS=['grpc++']) def protoc_grpc_emitter(target, source, env): root, ext = os.path.splitext(source[0].get_abspath()) @@ -559,30 +563,6 @@ for extra_dir in extras_dir_list: for opt in env['CONF'].keys(): env.ConfigFile(opt) -def makeTheISA(source, target, env): - isas = 
sorted(set(env.Split('${ALL_ISAS}'))) - target_isa = env['CONF']['TARGET_ISA'] - is_null_isa = '1' if (target_isa.lower() == 'null') else '0' - - def namespace(isa): - return isa[0].upper() + isa[1:].lower() + 'ISA' - - - code = code_formatter() - code('''\ -#ifndef __CONFIG_THE_ISA_HH__ -#define __CONFIG_THE_ISA_HH__ - -#define IS_NULL_ISA ${{is_null_isa}} -#define TheISA ${{namespace(target_isa)}} - -#endif // __CONFIG_THE_ISA_HH__''') - - code.write(str(target[0])) - -env.Command('config/the_isa.hh', [], - MakeAction(makeTheISA, Transform("CFG ISA", 0))) - def makeTheGPUISA(source, target, env): gpu_isa = env['CONF']['TARGET_GPU_ISA'] diff --git a/src/arch/SConscript b/src/arch/SConscript index 1fe981508e..90d7ad7700 100644 --- a/src/arch/SConscript +++ b/src/arch/SConscript @@ -51,19 +51,25 @@ Import('*') # # ISA "switch header" generation. # -# Auto-generate arch headers that include the right ISA-specific -# header based on the setting of TARGET_ISA setting. +# Auto-generate gpu headers that include the right GPU-specific +# header based on the setting of TARGET_GPU_ISA setting. 
# ################################################################# -env.TagImplies(env.subst('${CONF["TARGET_ISA"]} isa'), 'gem5 lib') - -env.SwitchingHeaders( - Split(''' - isa.hh - vecregs.hh - '''), - env.subst('${CONF["TARGET_ISA"]}')) +if env['CONF']['USE_ARM_ISA']: + isa = 'arm' +elif env['CONF']['USE_MIPS_ISA']: + isa = 'mips' +elif env['CONF']['USE_POWER_ISA']: + isa = 'power' +elif env['CONF']['USE_RISCV_ISA']: + isa = 'riscv' +elif env['CONF']['USE_SPARC_ISA']: + isa = 'sparc' +elif env['CONF']['USE_X86_ISA']: + isa = 'x86' +elif env['CONF']['USE_NULL_ISA']: + isa = 'null' amdgpu_isa = ['gcn3', 'vega'] diff --git a/src/arch/SConsopts b/src/arch/SConsopts index 048814e995..f05bdef14b 100644 --- a/src/arch/SConsopts +++ b/src/arch/SConsopts @@ -27,8 +27,6 @@ Import('*') def add_isa_lists(): sticky_vars.AddVariables( - EnumVariable('TARGET_ISA', 'Target ISA', 'null', - sorted(set(main.Split('${ALL_ISAS}')))), EnumVariable('TARGET_GPU_ISA', 'Target GPU ISA', 'gcn3', sorted(set(main.Split('${ALL_GPU_ISAS}')))), ) diff --git a/src/arch/amdgpu/common/X86GPUTLB.py b/src/arch/amdgpu/common/X86GPUTLB.py index 557ace6f9b..59cc549d17 100644 --- a/src/arch/amdgpu/common/X86GPUTLB.py +++ b/src/arch/amdgpu/common/X86GPUTLB.py @@ -34,41 +34,48 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject from m5.SimObject import SimObject + class X86GPUTLB(ClockedObject): - type = 'X86GPUTLB' - cxx_class = 'gem5::X86ISA::GpuTLB' - cxx_header = 'arch/amdgpu/common/tlb.hh' + type = "X86GPUTLB" + cxx_class = "gem5::X86ISA::GpuTLB" + cxx_header = "arch/amdgpu/common/tlb.hh" size = Param.Int(64, "TLB size (number of entries)") assoc = Param.Int(64, "TLB associativity") - if buildEnv.get('FULL_SYSTEM', False): - walker = Param.X86PagetableWalker(X86PagetableWalker(), - "page table walker") + if buildEnv.get("FULL_SYSTEM", False): + walker = Param.X86PagetableWalker( + X86PagetableWalker(), "page table walker" + ) hitLatency = Param.Int(2, "Latency of a TLB 
hit") missLatency1 = Param.Int(5, "Latency #1 of a TLB miss") missLatency2 = Param.Int(100, "Latency #2 of a TLB miss") maxOutstandingReqs = Param.Int(64, "# of maximum outstanding requests") cpu_side_ports = VectorResponsePort("Ports on side closer to CPU/CU") - slave = DeprecatedParam(cpu_side_ports, - '`slave` is now called `cpu_side_ports`') + slave = DeprecatedParam( + cpu_side_ports, "`slave` is now called `cpu_side_ports`" + ) mem_side_ports = VectorRequestPort("Ports on side closer to memory") - master = DeprecatedParam(mem_side_ports, - '`master` is now called `mem_side_ports`') + master = DeprecatedParam( + mem_side_ports, "`master` is now called `mem_side_ports`" + ) allocationPolicy = Param.Bool(True, "Allocate on an access") accessDistance = Param.Bool(False, "print accessDistance stats") + class TLBCoalescer(ClockedObject): - type = 'TLBCoalescer' - cxx_class = 'gem5::TLBCoalescer' - cxx_header = 'arch/amdgpu/common/tlb_coalescer.hh' + type = "TLBCoalescer" + cxx_class = "gem5::TLBCoalescer" + cxx_header = "arch/amdgpu/common/tlb_coalescer.hh" probesPerCycle = Param.Int(2, "Number of TLB probes per cycle") coalescingWindow = Param.Int(1, "Permit coalescing across that many ticks") cpu_side_ports = VectorResponsePort("Port on side closer to CPU/CU") - slave = DeprecatedParam(cpu_side_ports, - '`slave` is now called `cpu_side_ports`') + slave = DeprecatedParam( + cpu_side_ports, "`slave` is now called `cpu_side_ports`" + ) mem_side_ports = VectorRequestPort("Port on side closer to memory") - master = DeprecatedParam(mem_side_ports, - '`master` is now called `mem_side_ports`') - disableCoalescing = Param.Bool(False,"Dispable Coalescing") + master = DeprecatedParam( + mem_side_ports, "`master` is now called `mem_side_ports`" + ) + disableCoalescing = Param.Bool(False, "Dispable Coalescing") diff --git a/src/arch/amdgpu/common/tlb_coalescer.cc b/src/arch/amdgpu/common/tlb_coalescer.cc index 1279ee3d6e..0be1387977 100644 --- 
a/src/arch/amdgpu/common/tlb_coalescer.cc +++ b/src/arch/amdgpu/common/tlb_coalescer.cc @@ -194,7 +194,7 @@ TLBCoalescer::updatePhysAddresses(PacketPtr pkt) // the correct TLBEentry in the TLBs above. auto p = sender_state->tc->getProcessPtr(); sender_state->tlbEntry = - new TheISA::TlbEntry(p->pid(), first_entry_vaddr, + new X86ISA::TlbEntry(p->pid(), first_entry_vaddr, first_entry_paddr, false, false); // update the hitLevel for all uncoalesced reqs diff --git a/src/arch/amdgpu/gcn3/insts/instructions.cc b/src/arch/amdgpu/gcn3/insts/instructions.cc index 56a541c52f..8c51af5187 100644 --- a/src/arch/amdgpu/gcn3/insts/instructions.cc +++ b/src/arch/amdgpu/gcn3/insts/instructions.cc @@ -5316,6 +5316,8 @@ namespace Gcn3ISA Inst_SMEM__S_MEMTIME::Inst_SMEM__S_MEMTIME(InFmt_SMEM *iFmt) : Inst_SMEM(iFmt, "s_memtime") { + // s_memtime does not issue a memory request + setFlag(ALU); } // Inst_SMEM__S_MEMTIME Inst_SMEM__S_MEMTIME::~Inst_SMEM__S_MEMTIME() @@ -5326,7 +5328,9 @@ namespace Gcn3ISA void Inst_SMEM__S_MEMTIME::execute(GPUDynInstPtr gpuDynInst) { - panicUnimplemented(); + ScalarOperandU64 sdst(gpuDynInst, instData.SDATA); + sdst = (ScalarRegU64)gpuDynInst->computeUnit()->curCycle(); + sdst.write(); } Inst_SMEM__S_MEMREALTIME::Inst_SMEM__S_MEMREALTIME(InFmt_SMEM *iFmt) diff --git a/src/arch/amdgpu/gcn3/operand.hh b/src/arch/amdgpu/gcn3/operand.hh index 6b51307f44..769f28a8a8 100644 --- a/src/arch/amdgpu/gcn3/operand.hh +++ b/src/arch/amdgpu/gcn3/operand.hh @@ -654,7 +654,10 @@ namespace Gcn3ISA ComputeUnit *cu = _gpuDynInst->computeUnit(); int sgprIdx(-1); - if (_opIdx == REG_VCC_LO) { + if (_opIdx == REG_VCC_HI) { + sgprIdx = cu->registerManager + ->mapSgpr(wf, wf->reservedScalarRegs - 1 + dword); + } else if (_opIdx == REG_VCC_LO) { sgprIdx = cu->registerManager ->mapSgpr(wf, wf->reservedScalarRegs - 2 + dword); } else if (_opIdx == REG_FLAT_SCRATCH_HI) { diff --git a/src/arch/amdgpu/gcn3/registers.cc b/src/arch/amdgpu/gcn3/registers.cc index 
3329cf1ff1..7f1d0dba37 100644 --- a/src/arch/amdgpu/gcn3/registers.cc +++ b/src/arch/amdgpu/gcn3/registers.cc @@ -75,7 +75,10 @@ namespace Gcn3ISA reg_sym = "flat_scratch_hi"; break; case REG_VCC_LO: - reg_sym = "vcc"; + reg_sym = "vcc_lo"; + break; + case REG_VCC_HI: + reg_sym = "vcc_hi"; break; case REG_M0: reg_sym = "m0"; diff --git a/src/arch/amdgpu/vega/VegaGPUTLB.py b/src/arch/amdgpu/vega/VegaGPUTLB.py index 4d77f51c95..96b940c544 100644 --- a/src/arch/amdgpu/vega/VegaGPUTLB.py +++ b/src/arch/amdgpu/vega/VegaGPUTLB.py @@ -35,23 +35,26 @@ from m5.objects.ClockedObject import ClockedObject from m5.objects.AMDGPU import AMDGPUDevice from m5.SimObject import SimObject + class VegaPagetableWalker(ClockedObject): - type = 'VegaPagetableWalker' - cxx_class = 'gem5::VegaISA::Walker' - cxx_header = 'arch/amdgpu/vega/pagetable_walker.hh' + type = "VegaPagetableWalker" + cxx_class = "gem5::VegaISA::Walker" + cxx_header = "arch/amdgpu/vega/pagetable_walker.hh" port = RequestPort("Port for the hardware table walker") system = Param.System(Parent.any, "system object") + class VegaGPUTLB(ClockedObject): - type = 'VegaGPUTLB' - cxx_class = 'gem5::VegaISA::GpuTLB' - cxx_header = 'arch/amdgpu/vega/tlb.hh' + type = "VegaGPUTLB" + cxx_class = "gem5::VegaISA::GpuTLB" + cxx_header = "arch/amdgpu/vega/tlb.hh" size = Param.Int(64, "TLB size (number of entries)") assoc = Param.Int(64, "TLB associativity") - walker = Param.VegaPagetableWalker(VegaPagetableWalker(), - "page table walker") - gpu_device = Param.AMDGPUDevice(NULL, 'GPU Device') + walker = Param.VegaPagetableWalker( + VegaPagetableWalker(), "page table walker" + ) + gpu_device = Param.AMDGPUDevice(NULL, "GPU Device") hitLatency = Param.Int(2, "Latency of a TLB hit") missLatency1 = Param.Int(5, "Latency #1 of a TLB miss") @@ -61,14 +64,15 @@ class VegaGPUTLB(ClockedObject): mem_side_ports = VectorRequestPort("Port on side closer to memory") allocationPolicy = Param.Bool(True, "Allocate on an access") + class 
VegaTLBCoalescer(ClockedObject): - type = 'VegaTLBCoalescer' - cxx_class = 'gem5::VegaTLBCoalescer' - cxx_header = 'arch/amdgpu/vega/tlb_coalescer.hh' + type = "VegaTLBCoalescer" + cxx_class = "gem5::VegaTLBCoalescer" + cxx_header = "arch/amdgpu/vega/tlb_coalescer.hh" tlb_level = Param.Int(64, "tlb level") maxDownstream = Param.Int(64, "max downstream @ this level") probesPerCycle = Param.Int(2, "Number of TLB probes per cycle") coalescingWindow = Param.Int(1, "Permit coalescing across that many ticks") cpu_side_ports = VectorResponsePort("Port on side closer to CPU/CU") mem_side_ports = VectorRequestPort("Port on side closer to memory") - disableCoalescing = Param.Bool(False,"Dispable Coalescing") + disableCoalescing = Param.Bool(False, "Dispable Coalescing") diff --git a/src/arch/amdgpu/vega/decoder.cc b/src/arch/amdgpu/vega/decoder.cc index f716636978..18c72a4382 100644 --- a/src/arch/amdgpu/vega/decoder.cc +++ b/src/arch/amdgpu/vega/decoder.cc @@ -877,9 +877,9 @@ namespace VegaISA &Decoder::decode_OPU_VOP3__V_MIN_U16, &Decoder::decode_OPU_VOP3__V_MIN_I16, &Decoder::decode_OPU_VOP3__V_LDEXP_F16, - &Decoder::decode_invalid, - &Decoder::decode_invalid, - &Decoder::decode_invalid, + &Decoder::decode_OPU_VOP3__V_ADD_U32, + &Decoder::decode_OPU_VOP3__V_SUB_U32, + &Decoder::decode_OPU_VOP3__V_SUBREV_U32, &Decoder::decode_invalid, &Decoder::decode_invalid, &Decoder::decode_invalid, @@ -6105,6 +6105,24 @@ namespace VegaISA return new Inst_VOP3__V_LDEXP_F16(&iFmt->iFmt_VOP3A); } // decode_OPU_VOP3__V_LDEXP_F16 + GPUStaticInst* + Decoder::decode_OPU_VOP3__V_ADD_U32(MachInst iFmt) + { + return new Inst_VOP3__V_ADD_U32(&iFmt->iFmt_VOP3A); + } // decode_OPU_VOP3__V_ADD_U32 + + GPUStaticInst* + Decoder::decode_OPU_VOP3__V_SUB_U32(MachInst iFmt) + { + return new Inst_VOP3__V_SUB_U32(&iFmt->iFmt_VOP3A); + } // decode_OPU_VOP3__V_SUB_U32 + + GPUStaticInst* + Decoder::decode_OPU_VOP3__V_SUBREV_U32(MachInst iFmt) + { + return new Inst_VOP3__V_SUBREV_U32(&iFmt->iFmt_VOP3A); + } // 
decode_OPU_VOP3__V_SUBREV_U32 + GPUStaticInst* Decoder::decode_OPU_VOP3__V_NOP(MachInst iFmt) { @@ -6854,8 +6872,7 @@ namespace VegaISA GPUStaticInst* Decoder::decode_OPU_VOP3__V_XAD_U32(MachInst iFmt) { - fatal("Trying to decode instruction without a class\n"); - return nullptr; + return new Inst_VOP3__V_XAD_U32(&iFmt->iFmt_VOP3A); } GPUStaticInst* diff --git a/src/arch/amdgpu/vega/gpu_decoder.hh b/src/arch/amdgpu/vega/gpu_decoder.hh index 1ca292675e..1be43861df 100644 --- a/src/arch/amdgpu/vega/gpu_decoder.hh +++ b/src/arch/amdgpu/vega/gpu_decoder.hh @@ -322,6 +322,9 @@ namespace VegaISA GPUStaticInst* decode_OPU_VOP3__V_MIN_U16(MachInst); GPUStaticInst* decode_OPU_VOP3__V_MIN_I16(MachInst); GPUStaticInst* decode_OPU_VOP3__V_LDEXP_F16(MachInst); + GPUStaticInst* decode_OPU_VOP3__V_ADD_U32(MachInst); + GPUStaticInst* decode_OPU_VOP3__V_SUB_U32(MachInst); + GPUStaticInst* decode_OPU_VOP3__V_SUBREV_U32(MachInst); GPUStaticInst* decode_OPU_VOP3__V_NOP(MachInst); GPUStaticInst* decode_OPU_VOP3__V_MOV_B32(MachInst); GPUStaticInst* decode_OPU_VOP3__V_CVT_I32_F64(MachInst); diff --git a/src/arch/amdgpu/vega/insts/instructions.cc b/src/arch/amdgpu/vega/insts/instructions.cc index edf908d1f2..f5b08b7ce1 100644 --- a/src/arch/amdgpu/vega/insts/instructions.cc +++ b/src/arch/amdgpu/vega/insts/instructions.cc @@ -36,6 +36,7 @@ #include "arch/amdgpu/vega/insts/inst_util.hh" #include "debug/VEGA.hh" #include "debug/GPUSync.hh" +#include "dev/amdgpu/hwreg_defines.hh" #include "gpu-compute/shader.hh" namespace gem5 @@ -1552,7 +1553,7 @@ namespace VegaISA void Inst_SOPK__S_MOVK_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); ScalarOperandI32 sdst(gpuDynInst, instData.SDST); sdst = simm16; @@ -1578,7 +1579,7 @@ namespace VegaISA void Inst_SOPK__S_CMOVK_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = 
(ScalarRegI32)sext<16>(instData.SIMM16); ScalarOperandI32 sdst(gpuDynInst, instData.SDST); ConstScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1606,7 +1607,7 @@ namespace VegaISA void Inst_SOPK__S_CMPK_EQ_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); ConstScalarOperandI32 src(gpuDynInst, instData.SDST); ScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1633,7 +1634,7 @@ namespace VegaISA void Inst_SOPK__S_CMPK_LG_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); ConstScalarOperandI32 src(gpuDynInst, instData.SDST); ScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1660,7 +1661,7 @@ namespace VegaISA void Inst_SOPK__S_CMPK_GT_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); ConstScalarOperandI32 src(gpuDynInst, instData.SDST); ScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1687,7 +1688,7 @@ namespace VegaISA void Inst_SOPK__S_CMPK_GE_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); ConstScalarOperandI32 src(gpuDynInst, instData.SDST); ScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1714,7 +1715,7 @@ namespace VegaISA void Inst_SOPK__S_CMPK_LT_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); ConstScalarOperandI32 src(gpuDynInst, instData.SDST); ScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1741,7 +1742,7 @@ namespace VegaISA void Inst_SOPK__S_CMPK_LE_I32::execute(GPUDynInstPtr gpuDynInst) { - ScalarRegI32 simm16 = (ScalarRegI32)instData.SIMM16; + ScalarRegI32 simm16 = (ScalarRegI32)sext<16>(instData.SIMM16); 
ConstScalarOperandI32 src(gpuDynInst, instData.SDST); ScalarOperandU32 scc(gpuDynInst, REG_SCC); @@ -1938,7 +1939,7 @@ namespace VegaISA src.read(); - sdst = src.rawData() + (ScalarRegI32)simm16; + sdst = src.rawData() + (ScalarRegI32)sext<16>(simm16); scc = (bits(src.rawData(), 31) == bits(simm16, 15) && bits(src.rawData(), 31) != bits(sdst.rawData(), 31)) ? 1 : 0; @@ -1968,7 +1969,7 @@ namespace VegaISA src.read(); - sdst = src.rawData() * (ScalarRegI32)simm16; + sdst = src.rawData() * (ScalarRegI32)sext<16>(simm16); sdst.write(); } // execute @@ -2017,6 +2018,7 @@ namespace VegaISA Inst_SOPK__S_GETREG_B32::Inst_SOPK__S_GETREG_B32(InFmt_SOPK *iFmt) : Inst_SOPK(iFmt, "s_getreg_b32") { + setFlag(ALU); } // Inst_SOPK__S_GETREG_B32 Inst_SOPK__S_GETREG_B32::~Inst_SOPK__S_GETREG_B32() @@ -2031,7 +2033,20 @@ namespace VegaISA void Inst_SOPK__S_GETREG_B32::execute(GPUDynInstPtr gpuDynInst) { - panicUnimplemented(); + ScalarRegI16 simm16 = instData.SIMM16; + ScalarRegU32 hwregId = simm16 & 0x3f; + ScalarRegU32 offset = (simm16 >> 6) & 31; + ScalarRegU32 size = ((simm16 >> 11) & 31) + 1; + + ScalarRegU32 hwreg = + gpuDynInst->computeUnit()->shader->getHwReg(hwregId); + ScalarOperandU32 sdst(gpuDynInst, instData.SDST); + sdst.read(); + + // Store value from hardware to part of the SDST. + ScalarRegU32 mask = (((1U << size) - 1U) << offset); + sdst = (hwreg & mask) >> offset; + sdst.write(); } // execute // --- Inst_SOPK__S_SETREG_B32 class methods --- @@ -2058,16 +2073,15 @@ namespace VegaISA ScalarRegU32 offset = (simm16 >> 6) & 31; ScalarRegU32 size = ((simm16 >> 11) & 31) + 1; - ScalarOperandU32 hwreg(gpuDynInst, hwregId); + ScalarRegU32 hwreg = + gpuDynInst->computeUnit()->shader->getHwReg(hwregId); ScalarOperandU32 sdst(gpuDynInst, instData.SDST); - hwreg.read(); sdst.read(); // Store value from SDST to part of the hardware register. 
ScalarRegU32 mask = (((1U << size) - 1U) << offset); - hwreg = ((hwreg.rawData() & ~mask) - | ((sdst.rawData() << offset) & mask)); - hwreg.write(); + hwreg = ((hwreg & ~mask) | ((sdst.rawData() << offset) & mask)); + gpuDynInst->computeUnit()->shader->setHwReg(hwregId, hwreg); // set MODE register to control the behavior of single precision // floating-point numbers: denormal mode or round mode @@ -2107,19 +2121,18 @@ namespace VegaISA ScalarRegU32 offset = (simm16 >> 6) & 31; ScalarRegU32 size = ((simm16 >> 11) & 31) + 1; - ScalarOperandU32 hwreg(gpuDynInst, hwregId); + ScalarRegU32 hwreg = + gpuDynInst->computeUnit()->shader->getHwReg(hwregId); ScalarRegI32 simm32 = extData.imm_u32; - hwreg.read(); // Store value from SIMM32 to part of the hardware register. ScalarRegU32 mask = (((1U << size) - 1U) << offset); - hwreg = ((hwreg.rawData() & ~mask) - | ((simm32 << offset) & mask)); - hwreg.write(); + hwreg = ((hwreg & ~mask) | ((simm32 << offset) & mask)); + gpuDynInst->computeUnit()->shader->setHwReg(hwregId, hwreg); // set MODE register to control the behavior of single precision // floating-point numbers: denormal mode or round mode - if (hwregId==1 && size==2 + if (hwregId==HW_REG_MODE && size==2 && (offset==4 || offset==0)) { warn_once("Be cautious that s_setreg_imm32_b32 has no real effect " "on FP modes: %s\n", gpuDynInst->disassemble()); @@ -5906,6 +5919,8 @@ namespace VegaISA Inst_SMEM__S_MEMTIME::Inst_SMEM__S_MEMTIME(InFmt_SMEM *iFmt) : Inst_SMEM(iFmt, "s_memtime") { + // s_memtime does not issue a memory request + setFlag(ALU); } // Inst_SMEM__S_MEMTIME Inst_SMEM__S_MEMTIME::~Inst_SMEM__S_MEMTIME() @@ -5917,7 +5932,9 @@ namespace VegaISA void Inst_SMEM__S_MEMTIME::execute(GPUDynInstPtr gpuDynInst) { - panicUnimplemented(); + ScalarOperandU64 sdst(gpuDynInst, instData.SDATA); + sdst = (ScalarRegU64)gpuDynInst->computeUnit()->curCycle(); + sdst.write(); } // execute // --- Inst_SMEM__S_MEMREALTIME class methods --- @@ -27382,6 +27399,135 @@ namespace 
VegaISA { panicUnimplemented(); } // execute + // --- Inst_VOP3__V_ADD_U32 class methods --- + + Inst_VOP3__V_ADD_U32::Inst_VOP3__V_ADD_U32(InFmt_VOP3A *iFmt) + : Inst_VOP3A(iFmt, "v_add_u32", false) + { + setFlag(ALU); + } // Inst_VOP3__V_ADD_U32 + + Inst_VOP3__V_ADD_U32::~Inst_VOP3__V_ADD_U32() + { + } // ~Inst_VOP3__V_ADD_U32 + + // --- description from .arch file --- + // D.u32 = S0.u32 + S1.u32. + void + Inst_VOP3__V_ADD_U32::execute(GPUDynInstPtr gpuDynInst) + { + Wavefront *wf = gpuDynInst->wavefront(); + ConstVecOperandU32 src0(gpuDynInst, extData.SRC0); + ConstVecOperandU32 src1(gpuDynInst, extData.SRC1); + VecOperandU32 vdst(gpuDynInst, instData.VDST); + + src0.readSrc(); + src1.readSrc(); + + /** + * input modifiers are supported by FP operations only + */ + assert(!(instData.ABS & 0x1)); + assert(!(instData.ABS & 0x2)); + assert(!(instData.ABS & 0x4)); + assert(!(extData.NEG & 0x1)); + assert(!(extData.NEG & 0x2)); + assert(!(extData.NEG & 0x4)); + + for (int lane = 0; lane < NumVecElemPerVecReg; ++lane) { + if (wf->execMask(lane)) { + vdst[lane] = src0[lane] + src1[lane]; + } + } + + vdst.write(); + } // execute + // --- Inst_VOP3__V_SUB_U32 class methods --- + + Inst_VOP3__V_SUB_U32::Inst_VOP3__V_SUB_U32(InFmt_VOP3A *iFmt) + : Inst_VOP3A(iFmt, "v_sub_u32", false) + { + setFlag(ALU); + } // Inst_VOP3__V_SUB_U32 + + Inst_VOP3__V_SUB_U32::~Inst_VOP3__V_SUB_U32() + { + } // ~Inst_VOP3__V_SUB_U32 + + // --- description from .arch file --- + // D.u32 = S0.u32 - S1.u32. 
+ void + Inst_VOP3__V_SUB_U32::execute(GPUDynInstPtr gpuDynInst) + { + Wavefront *wf = gpuDynInst->wavefront(); + ConstVecOperandU32 src0(gpuDynInst, extData.SRC0); + ConstVecOperandU32 src1(gpuDynInst, extData.SRC1); + VecOperandU32 vdst(gpuDynInst, instData.VDST); + + src0.readSrc(); + src1.readSrc(); + + /** + * input modifiers are supported by FP operations only + */ + assert(!(instData.ABS & 0x1)); + assert(!(instData.ABS & 0x2)); + assert(!(instData.ABS & 0x4)); + assert(!(extData.NEG & 0x1)); + assert(!(extData.NEG & 0x2)); + assert(!(extData.NEG & 0x4)); + + for (int lane = 0; lane < NumVecElemPerVecReg; ++lane) { + if (wf->execMask(lane)) { + vdst[lane] = src0[lane] - src1[lane]; + } + } + + vdst.write(); + } // execute + // --- Inst_VOP3__V_SUBREV_U32 class methods --- + + Inst_VOP3__V_SUBREV_U32::Inst_VOP3__V_SUBREV_U32(InFmt_VOP3A *iFmt) + : Inst_VOP3A(iFmt, "v_subrev_u32", false) + { + setFlag(ALU); + } // Inst_VOP3__V_SUBREV_U32 + + Inst_VOP3__V_SUBREV_U32::~Inst_VOP3__V_SUBREV_U32() + { + } // ~Inst_VOP3__V_SUBREV_U32 + + // --- description from .arch file --- + // D.u32 = S1.u32 - S0.u32. 
+ void + Inst_VOP3__V_SUBREV_U32::execute(GPUDynInstPtr gpuDynInst) + { + Wavefront *wf = gpuDynInst->wavefront(); + ConstVecOperandU32 src0(gpuDynInst, extData.SRC0); + ConstVecOperandU32 src1(gpuDynInst, extData.SRC1); + VecOperandU32 vdst(gpuDynInst, instData.VDST); + + src0.readSrc(); + src1.readSrc(); + + /** + * input modifiers are supported by FP operations only + */ + assert(!(instData.ABS & 0x1)); + assert(!(instData.ABS & 0x2)); + assert(!(instData.ABS & 0x4)); + assert(!(extData.NEG & 0x1)); + assert(!(extData.NEG & 0x2)); + assert(!(extData.NEG & 0x4)); + + for (int lane = 0; lane < NumVecElemPerVecReg; ++lane) { + if (wf->execMask(lane)) { + vdst[lane] = src1[lane] - src0[lane]; + } + } + + vdst.write(); + } // execute // --- Inst_VOP3__V_NOP class methods --- Inst_VOP3__V_NOP::Inst_VOP3__V_NOP(InFmt_VOP3A *iFmt) @@ -32069,6 +32215,51 @@ namespace VegaISA vcc.write(); vdst.write(); } // execute + // --- Inst_VOP3__V_XAD_U32 class methods --- + + Inst_VOP3__V_XAD_U32::Inst_VOP3__V_XAD_U32(InFmt_VOP3A *iFmt) + : Inst_VOP3A(iFmt, "v_xad_u32", false) + { + setFlag(ALU); + } // Inst_VOP3__V_XAD_U32 + + Inst_VOP3__V_XAD_U32::~Inst_VOP3__V_XAD_U32() + { + } // ~Inst_VOP3__V_XAD_U32 + + // --- description from .arch file --- + // D.u32 = (S0.u32 ^ S1.u32) + S2.u32. 
+ void + Inst_VOP3__V_XAD_U32::execute(GPUDynInstPtr gpuDynInst) + { + Wavefront *wf = gpuDynInst->wavefront(); + ConstVecOperandU32 src0(gpuDynInst, extData.SRC0); + ConstVecOperandU32 src1(gpuDynInst, extData.SRC1); + ConstVecOperandU32 src2(gpuDynInst, extData.SRC2); + VecOperandU32 vdst(gpuDynInst, instData.VDST); + + src0.readSrc(); + src1.readSrc(); + src2.readSrc(); + + /** + * input modifiers are supported by FP operations only + */ + assert(!(instData.ABS & 0x1)); + assert(!(instData.ABS & 0x2)); + assert(!(instData.ABS & 0x4)); + assert(!(extData.NEG & 0x1)); + assert(!(extData.NEG & 0x2)); + assert(!(extData.NEG & 0x4)); + + for (int lane = 0; lane < NumVecElemPerVecReg; ++lane) { + if (wf->execMask(lane)) { + vdst[lane] = (src0[lane] ^ src1[lane]) + src2[lane]; + } + } + + vdst.write(); + } // execute // --- Inst_VOP3__V_LSHL_ADD_U32 class methods --- Inst_VOP3__V_LSHL_ADD_U32::Inst_VOP3__V_LSHL_ADD_U32(InFmt_VOP3A *iFmt) @@ -34035,6 +34226,10 @@ namespace VegaISA Inst_DS__DS_OR_B32::Inst_DS__DS_OR_B32(InFmt_DS *iFmt) : Inst_DS(iFmt, "ds_or_b32") { + setFlag(MemoryRef); + setFlag(GroupSegment); + setFlag(AtomicOr); + setFlag(AtomicNoReturn); } // Inst_DS__DS_OR_B32 Inst_DS__DS_OR_B32::~Inst_DS__DS_OR_B32() @@ -34043,14 +34238,54 @@ namespace VegaISA // --- description from .arch file --- // 32b: - // tmp = MEM[ADDR]; // MEM[ADDR] |= DATA; - // RETURN_DATA = tmp. 
void Inst_DS__DS_OR_B32::execute(GPUDynInstPtr gpuDynInst) { - panicUnimplemented(); + Wavefront *wf = gpuDynInst->wavefront(); + + if (gpuDynInst->exec_mask.none()) { + wf->decLGKMInstsIssued(); + return; + } + + gpuDynInst->execUnitId = wf->execUnitId; + gpuDynInst->latency.init(gpuDynInst->computeUnit()); + gpuDynInst->latency.set( + gpuDynInst->computeUnit()->cyclesToTicks(Cycles(24))); + ConstVecOperandU32 addr(gpuDynInst, extData.ADDR); + ConstVecOperandU32 data(gpuDynInst, extData.DATA0); + + addr.read(); + data.read(); + + calcAddr(gpuDynInst, addr); + + for (int lane = 0; lane < NumVecElemPerVecReg; ++lane) { + if (gpuDynInst->exec_mask[lane]) { + (reinterpret_cast(gpuDynInst->a_data))[lane] + = data[lane]; + } + } + + gpuDynInst->computeUnit()->localMemoryPipe.issueRequest(gpuDynInst); } // execute + + void + Inst_DS__DS_OR_B32::initiateAcc(GPUDynInstPtr gpuDynInst) + { + Addr offset0 = instData.OFFSET0; + Addr offset1 = instData.OFFSET1; + Addr offset = (offset1 << 8) | offset0; + + initAtomicAccess(gpuDynInst, offset); + } // initiateAcc + + void + Inst_DS__DS_OR_B32::completeAcc(GPUDynInstPtr gpuDynInst) + { + } // completeAcc + // --- Inst_DS__DS_XOR_B32 class methods --- Inst_DS__DS_XOR_B32::Inst_DS__DS_XOR_B32(InFmt_DS *iFmt) diff --git a/src/arch/amdgpu/vega/insts/instructions.hh b/src/arch/amdgpu/vega/insts/instructions.hh index e9361c3cc4..0671df8452 100644 --- a/src/arch/amdgpu/vega/insts/instructions.hh +++ b/src/arch/amdgpu/vega/insts/instructions.hh @@ -25712,6 +25712,108 @@ namespace VegaISA void execute(GPUDynInstPtr) override; }; // Inst_VOP3__V_LDEXP_F16 + class Inst_VOP3__V_ADD_U32 : public Inst_VOP3A + { + public: + Inst_VOP3__V_ADD_U32(InFmt_VOP3A*); + ~Inst_VOP3__V_ADD_U32(); + + int + getNumOperands() override + { + return numDstRegOperands() + numSrcRegOperands(); + } // getNumOperands + + int numDstRegOperands() override { return 1; } + int numSrcRegOperands() override { return 2; } + + int + getOperandSize(int opIdx) override + { 
+ switch (opIdx) { + case 0: //src_0 + return 4; + case 1: //src_1 + return 4; + case 2: //vdst + return 4; + default: + fatal("op idx %i out of bounds\n", opIdx); + return -1; + } + } // getOperandSize + + void execute(GPUDynInstPtr) override; + }; // Inst_VOP3__V_ADD_U32 + + class Inst_VOP3__V_SUB_U32 : public Inst_VOP3A + { + public: + Inst_VOP3__V_SUB_U32(InFmt_VOP3A*); + ~Inst_VOP3__V_SUB_U32(); + + int + getNumOperands() override + { + return numDstRegOperands() + numSrcRegOperands(); + } // getNumOperands + + int numDstRegOperands() override { return 1; } + int numSrcRegOperands() override { return 2; } + + int + getOperandSize(int opIdx) override + { + switch (opIdx) { + case 0: //src_0 + return 4; + case 1: //src_1 + return 4; + case 2: //vdst + return 4; + default: + fatal("op idx %i out of bounds\n", opIdx); + return -1; + } + } // getOperandSize + + void execute(GPUDynInstPtr) override; + }; // Inst_VOP3__V_SUB_U32 + + class Inst_VOP3__V_SUBREV_U32 : public Inst_VOP3A + { + public: + Inst_VOP3__V_SUBREV_U32(InFmt_VOP3A*); + ~Inst_VOP3__V_SUBREV_U32(); + + int + getNumOperands() override + { + return numDstRegOperands() + numSrcRegOperands(); + } // getNumOperands + + int numDstRegOperands() override { return 1; } + int numSrcRegOperands() override { return 2; } + + int + getOperandSize(int opIdx) override + { + switch (opIdx) { + case 0: //src_0 + return 4; + case 1: //src_1 + return 4; + case 2: //vdst + return 4; + default: + fatal("op idx %i out of bounds\n", opIdx); + return -1; + } + } // getOperandSize + + void execute(GPUDynInstPtr) override; + }; // Inst_VOP3__V_SUBREV_U32 + class Inst_VOP3__V_NOP : public Inst_VOP3A { public: @@ -29562,6 +29664,42 @@ namespace VegaISA void execute(GPUDynInstPtr) override; }; // Inst_VOP3__V_MAD_I64_I32 + class Inst_VOP3__V_XAD_U32 : public Inst_VOP3A + { + public: + Inst_VOP3__V_XAD_U32(InFmt_VOP3A*); + ~Inst_VOP3__V_XAD_U32(); + + int + getNumOperands() override + { + return numDstRegOperands() + 
numSrcRegOperands(); + } // getNumOperands + + int numDstRegOperands() override { return 1; } + int numSrcRegOperands() override { return 3; } + + int + getOperandSize(int opIdx) override + { + switch (opIdx) { + case 0: //src_0 + return 4; + case 1: //src_1 + return 4; + case 2: //src_2 + return 4; + case 3: //vdst + return 4; + default: + fatal("op idx %i out of bounds\n", opIdx); + return -1; + } + } // getOperandSize + + void execute(GPUDynInstPtr) override; + }; // Inst_VOP3__V_XAD_U32 + class Inst_VOP3__V_LSHL_ADD_U32 : public Inst_VOP3A { public: @@ -31394,6 +31532,8 @@ namespace VegaISA } // getOperandSize void execute(GPUDynInstPtr) override; + void initiateAcc(GPUDynInstPtr gpuDynInst); + void completeAcc(GPUDynInstPtr gpuDynInst); }; // Inst_DS__DS_OR_B32 class Inst_DS__DS_XOR_B32 : public Inst_DS diff --git a/src/arch/amdgpu/vega/insts/op_encodings.cc b/src/arch/amdgpu/vega/insts/op_encodings.cc index 6f78b6962e..cc650fbbd0 100644 --- a/src/arch/amdgpu/vega/insts/op_encodings.cc +++ b/src/arch/amdgpu/vega/insts/op_encodings.cc @@ -248,15 +248,28 @@ namespace VegaISA // Needed because can't take addr of bitfield int reg = instData.SSRC0; + /* + S_GETPC_B64 does not use SSRC0, so don't put anything on srcOps + for it (0x1c is 29 base 10, which is the opcode for S_GETPC_B64). + */ if (instData.OP != 0x1C) { srcOps.emplace_back(reg, getOperandSize(opNum), true, isScalarReg(instData.SSRC0), false, false); opNum++; } - reg = instData.SDST; - dstOps.emplace_back(reg, getOperandSize(opNum), false, + /* + S_SETPC_B64, S_RFE_B64, S_CBRANCH_JOIN, and S_SET_GPR_IDX_IDX do not + use SDST, so don't put anything on dstOps for them. 
+ */ + if ((instData.OP != 0x1D) /* S_SETPC_B64 (29 base 10) */ && + (instData.OP != 0x1F) /* S_RFE_B64 (31 base 10) */ && + (instData.OP != 0x2E) /* S_CBRANCH_JOIN (46 base 10) */ && + (instData.OP != 0x32)) /* S_SET_GPR_IDX_IDX (50 base 10) */ { + reg = instData.SDST; + dstOps.emplace_back(reg, getOperandSize(opNum), false, isScalarReg(instData.SDST), false, false); + } assert(srcOps.size() == numSrcRegOperands()); assert(dstOps.size() == numDstRegOperands()); @@ -759,8 +772,21 @@ namespace VegaISA if (numDstRegOperands()) { reg = instData.VDST; - dstOps.emplace_back(reg, getOperandSize(opNum), false, - false, true, false); + /* + The v_readfirstlane_b32 instruction (op = 2) is a special case + VOP1 instruction which has a scalar register as the destination. + (See section 6.6.2 "Special Cases" in the Vega ISA manual) + + Therefore we change the dest op to be scalar reg = true and + vector reg = false in reserve of all other instructions. + */ + if (instData.OP == 2) { + dstOps.emplace_back(reg, getOperandSize(opNum), false, + true, false, false); + } else { + dstOps.emplace_back(reg, getOperandSize(opNum), false, + false, true, false); + } } assert(srcOps.size() == numSrcRegOperands()); @@ -893,7 +919,14 @@ namespace VegaISA std::stringstream dis_stream; dis_stream << _opcode << " vcc, "; - dis_stream << opSelectorToRegSym(instData.SRC0) << ", "; + if ((instData.SRC0 == REG_SRC_LITERAL) || + (instData.SRC0 == REG_SRC_DPP) || + (instData.SRC0 == REG_SRC_SWDA)) { + dis_stream << "0x" << std::hex << std::setfill('0') << std::setw(8) + << _srcLiteral << ", "; + } else { + dis_stream << opSelectorToRegSym(instData.SRC0) << ", "; + } dis_stream << "v" << instData.VSRC1; disassembly = dis_stream.str(); @@ -1699,7 +1732,10 @@ namespace VegaISA if (isLoad()) dis_stream << "v" << extData.VDST << ", "; - dis_stream << "v[" << extData.ADDR << ":" << extData.ADDR + 1 << "]"; + if (extData.SADDR == 0x7f) + dis_stream << "v[" << extData.ADDR << ":" << extData.ADDR+1 << "]"; + 
else + dis_stream << "v" << extData.ADDR; if (isStore()) dis_stream << ", v" << extData.DATA; @@ -1707,7 +1743,11 @@ namespace VegaISA if (extData.SADDR == 0x7f) dis_stream << ", off"; else - dis_stream << ", " << extData.SADDR; + dis_stream << ", s[" << extData.SADDR << ":" << extData.SADDR+1 + << "]"; + + if (instData.OFFSET) + dis_stream << " offset:" << instData.OFFSET; disassembly = dis_stream.str(); } diff --git a/src/arch/amdgpu/vega/insts/op_encodings.hh b/src/arch/amdgpu/vega/insts/op_encodings.hh index 17d69d5821..508d706cd3 100644 --- a/src/arch/amdgpu/vega/insts/op_encodings.hh +++ b/src/arch/amdgpu/vega/insts/op_encodings.hh @@ -504,6 +504,27 @@ namespace VegaISA } } + template + void + initAtomicAccess(GPUDynInstPtr gpuDynInst, Addr offset) + { + Wavefront *wf = gpuDynInst->wavefront(); + + for (int lane = 0; lane < NumVecElemPerVecReg; ++lane) { + if (gpuDynInst->exec_mask[lane]) { + Addr vaddr = gpuDynInst->addr[lane] + offset; + + AtomicOpFunctorPtr amo_op = + gpuDynInst->makeAtomicOpFunctor( + &(reinterpret_cast(gpuDynInst->a_data))[lane], + &(reinterpret_cast(gpuDynInst->x_data))[lane]); + + (reinterpret_cast(gpuDynInst->d_data))[lane] + = wf->ldsChunk->atomic(vaddr, std::move(amo_op)); + } + } + } + void calcAddr(GPUDynInstPtr gpuDynInst, ConstVecOperandU32 &addr) { diff --git a/src/arch/amdgpu/vega/operand.hh b/src/arch/amdgpu/vega/operand.hh index d6542971ef..1760bd7213 100644 --- a/src/arch/amdgpu/vega/operand.hh +++ b/src/arch/amdgpu/vega/operand.hh @@ -644,7 +644,10 @@ namespace VegaISA ComputeUnit *cu = _gpuDynInst->computeUnit(); int sgprIdx(-1); - if (_opIdx == REG_VCC_LO) { + if (_opIdx == REG_VCC_HI) { + sgprIdx = cu->registerManager + ->mapSgpr(wf, wf->reservedScalarRegs - 1 + dword); + } else if (_opIdx == REG_VCC_LO) { sgprIdx = cu->registerManager ->mapSgpr(wf, wf->reservedScalarRegs - 2 + dword); } else if (_opIdx == REG_FLAT_SCRATCH_HI) { diff --git a/src/arch/amdgpu/vega/pagetable_walker.cc 
b/src/arch/amdgpu/vega/pagetable_walker.cc index bbbaa56700..96ac0fe179 100644 --- a/src/arch/amdgpu/vega/pagetable_walker.cc +++ b/src/arch/amdgpu/vega/pagetable_walker.cc @@ -53,8 +53,7 @@ Walker::startFunctional(Addr base, Addr &addr, unsigned &logBytes, Addr vaddr = addr; Fault fault = startFunctional(base, vaddr, pte, logBytes, mode); isSystem = pte.s; - addr = ((pte.ppn << PageShift) & ~mask(logBytes)) - | (vaddr & mask(logBytes)); + addr = ((pte.ppn << PageShift) + (vaddr & mask(logBytes))); return fault; } @@ -182,8 +181,8 @@ Walker::WalkerState::startWalk() sendPackets(); } else { // Set physical page address in entry - entry.paddr = bits(entry.pte, 47, entry.logBytes); - entry.paddr <<= entry.logBytes; + entry.paddr = entry.pte.ppn << PageShift; + entry.paddr += entry.vaddr & mask(entry.logBytes); // Insert to TLB assert(walker); @@ -246,7 +245,16 @@ Walker::WalkerState::walkStateMachine(PageTableEntry &pte, Addr &nextRead, switch(state) { case PDE2: - fatal_if(pde.p, "Fragment in PDE2 not implemented"); + if (pde.p) { + DPRINTF(GPUPTWalker, "Treating PDE2 as PTE: %#016x frag: %d\n", + (uint64_t)pte, pte.fragment); + entry.pte = pte; + int fragment = pte.fragment; + entry.logBytes = PageShift + std::min(3*9, fragment); + entry.vaddr <<= PageShift; + entry.vaddr = entry.vaddr & ~mask(entry.logBytes); + doEndWalk = true; + } // Read the pde1Addr part1 = ((((uint64_t)pte) >> 6) << 3); @@ -258,7 +266,16 @@ Walker::WalkerState::walkStateMachine(PageTableEntry &pte, Addr &nextRead, nextState = PDE1; break; case PDE1: - fatal_if(pde.p, "Fragment in PDE1 not implemented"); + if (pde.p) { + DPRINTF(GPUPTWalker, "Treating PDE1 as PTE: %#016x frag: %d\n", + (uint64_t)pte, pte.fragment); + entry.pte = pte; + int fragment = pte.fragment; + entry.logBytes = PageShift + std::min(2*9, fragment); + entry.vaddr <<= PageShift; + entry.vaddr = entry.vaddr & ~mask(entry.logBytes); + doEndWalk = true; + } // Read the pde0Addr part1 = ((((uint64_t)pte) >> 6) << 3); @@ -277,7 
+294,6 @@ Walker::WalkerState::walkStateMachine(PageTableEntry &pte, Addr &nextRead, int fragment = pte.fragment; entry.logBytes = PageShift + std::min(9, fragment); entry.vaddr <<= PageShift; - entry.vaddr = entry.vaddr & ~((1 << entry.logBytes) - 1); entry.vaddr = entry.vaddr & ~mask(entry.logBytes); doEndWalk = true; } diff --git a/src/arch/amdgpu/vega/registers.cc b/src/arch/amdgpu/vega/registers.cc index 2145ee38a5..b7404379cc 100644 --- a/src/arch/amdgpu/vega/registers.cc +++ b/src/arch/amdgpu/vega/registers.cc @@ -75,7 +75,10 @@ namespace VegaISA reg_sym = "flat_scratch_hi"; break; case REG_VCC_LO: - reg_sym = "vcc"; + reg_sym = "vcc_lo"; + break; + case REG_VCC_HI: + reg_sym = "vcc_hi"; break; case REG_M0: reg_sym = "m0"; diff --git a/src/arch/amdgpu/vega/tlb.cc b/src/arch/amdgpu/vega/tlb.cc index 5d9a9e5aff..c3dd576f0e 100644 --- a/src/arch/amdgpu/vega/tlb.cc +++ b/src/arch/amdgpu/vega/tlb.cc @@ -158,24 +158,7 @@ GpuTLB::insert(Addr vpn, VegaTlbEntry &entry) { VegaTlbEntry *newEntry = nullptr; - /** - * vpn holds the virtual page address assuming native page size. - * However, we need to check the entry size as Vega supports - * flexible page sizes of arbitrary size. The set will assume - * native page size but the vpn needs to be fixed up to consider - * the flexible page size. - */ - Addr real_vpn = vpn & ~(entry.size() - 1); - - /** - * Also fix up the ppn as this is used in the math later to compute paddr. 
- */ - Addr real_ppn = entry.paddr & ~(entry.size() - 1); - - int set = (real_vpn >> VegaISA::PageShift) & setMask; - - DPRINTF(GPUTLB, "Inserted %#lx -> %#lx of size %#lx into set %d\n", - real_vpn, real_ppn, entry.size(), set); + int set = (entry.vaddr >> VegaISA::PageShift) & setMask; if (!freeList[set].empty()) { newEntry = freeList[set].front(); @@ -186,10 +169,11 @@ GpuTLB::insert(Addr vpn, VegaTlbEntry &entry) } *newEntry = entry; - newEntry->vaddr = real_vpn; - newEntry->paddr = real_ppn; entryList[set].push_front(newEntry); + DPRINTF(GPUTLB, "Inserted %#lx -> %#lx of size %#lx into set %d\n", + newEntry->vaddr, newEntry->paddr, entry.size(), set); + return newEntry; } @@ -447,7 +431,7 @@ GpuTLB::walkerResponse(VegaTlbEntry& entry, PacketPtr pkt) VegaISA::PageBytes); Addr page_addr = entry.pte.ppn << VegaISA::PageShift; - Addr paddr = insertBits(page_addr, entry.logBytes - 1, 0, entry.vaddr); + Addr paddr = page_addr + (entry.vaddr & mask(entry.logBytes)); pkt->req->setPaddr(paddr); pkt->req->setSystemReq(entry.pte.s); @@ -524,7 +508,7 @@ GpuTLB::handleTranslationReturn(Addr virt_page_addr, pagingProtectionChecks(pkt, local_entry, mode); int page_size = local_entry->size(); - Addr paddr = local_entry->paddr | (vaddr & (page_size - 1)); + Addr paddr = local_entry->paddr + (vaddr & (page_size - 1)); DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr); // Since this packet will be sent through the cpu side port, it must be @@ -767,7 +751,7 @@ GpuTLB::handleFuncTranslationReturn(PacketPtr pkt, tlbOutcome tlb_outcome) pagingProtectionChecks(pkt, local_entry, mode); int page_size = local_entry->size(); - Addr paddr = local_entry->paddr | (vaddr & (page_size - 1)); + Addr paddr = local_entry->paddr + (vaddr & (page_size - 1)); DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr); pkt->req->setPaddr(paddr); @@ -842,7 +826,7 @@ GpuTLB::CpuSidePort::recvFunctional(PacketPtr pkt) // page size. 
Fragment is still used via logBytes to select lower // bits from vaddr. Addr page_addr = pte.ppn << PageShift; - Addr paddr = insertBits(page_addr, logBytes - 1, 0, vaddr); + Addr paddr = page_addr + (vaddr & mask(logBytes)); Addr alignedPaddr = tlb->pageAlign(paddr); pkt->req->setPaddr(paddr); pkt->req->setSystemReq(pte.s); diff --git a/src/arch/amdgpu/vega/tlb_coalescer.cc b/src/arch/amdgpu/vega/tlb_coalescer.cc index d02c9bc155..414bb85bbb 100644 --- a/src/arch/amdgpu/vega/tlb_coalescer.cc +++ b/src/arch/amdgpu/vega/tlb_coalescer.cc @@ -162,13 +162,6 @@ VegaTLBCoalescer::updatePhysAddresses(PacketPtr pkt) int page_size = tlb_entry.size(); bool uncacheable = tlb_entry.uncacheable(); int first_hit_level = sender_state->hitLevel; - - // Get the physical page address of the translated request - // Using the page_size specified in the TLBEntry allows us - // to support different page sizes. - Addr phys_page_paddr = pkt->req->getPaddr(); - phys_page_paddr &= ~(page_size - 1); - bool is_system = pkt->req->systemReq(); for (int i = 0; i < issuedTranslationsTable[virt_page_addr].size(); ++i) { @@ -190,8 +183,8 @@ VegaTLBCoalescer::updatePhysAddresses(PacketPtr pkt) * page offsets. 
*/ if (i) { - Addr paddr = phys_page_paddr; - paddr |= (local_pkt->req->getVaddr() & (page_size - 1)); + Addr paddr = first_entry_paddr + + (local_pkt->req->getVaddr() & (page_size - 1)); local_pkt->req->setPaddr(paddr); if (uncacheable) diff --git a/src/arch/amdgpu/vega/tlb_coalescer.hh b/src/arch/amdgpu/vega/tlb_coalescer.hh index da73b2ef19..4ff9324715 100644 --- a/src/arch/amdgpu/vega/tlb_coalescer.hh +++ b/src/arch/amdgpu/vega/tlb_coalescer.hh @@ -38,7 +38,6 @@ #include #include "arch/amdgpu/vega/tlb.hh" -#include "arch/isa.hh" #include "base/statistics.hh" #include "mem/port.hh" #include "mem/request.hh" diff --git a/src/arch/arm/ArmCPU.py b/src/arch/arm/ArmCPU.py index c55d99bf1a..52c3ba8a0a 100644 --- a/src/arch/arm/ArmCPU.py +++ b/src/arch/arm/ArmCPU.py @@ -36,24 +36,30 @@ from m5.objects.ArmMMU import ArmMMU from m5.objects.ArmInterrupts import ArmInterrupts from m5.objects.ArmISA import ArmISA + class ArmCPU: ArchDecoder = ArmDecoder ArchMMU = ArmMMU ArchInterrupts = ArmInterrupts ArchISA = ArmISA + class ArmAtomicSimpleCPU(BaseAtomicSimpleCPU, ArmCPU): mmu = ArmMMU() + class ArmNonCachingSimpleCPU(BaseNonCachingSimpleCPU, ArmCPU): mmu = ArmMMU() + class ArmTimingSimpleCPU(BaseTimingSimpleCPU, ArmCPU): mmu = ArmMMU() + class ArmO3Checker(BaseO3Checker, ArmCPU): mmu = ArmMMU() + class ArmO3CPU(BaseO3CPU, ArmCPU): mmu = ArmMMU() @@ -66,13 +72,16 @@ class ArmO3CPU(BaseO3CPU, ArmCPU): numPhysCCRegs = Self.numPhysIntRegs * 5 def addCheckerCpu(self): - self.checker = ArmO3Checker(workload=self.workload, - exitOnError=False, - updateOnError=True, - warnOnlyOnLoadError=True) + self.checker = ArmO3Checker( + workload=self.workload, + exitOnError=False, + updateOnError=True, + warnOnlyOnLoadError=True, + ) self.checker.mmu.itb.size = self.mmu.itb.size self.checker.mmu.dtb.size = self.mmu.dtb.size self.checker.cpu_id = self.cpu_id + class ArmMinorCPU(BaseMinorCPU, ArmCPU): mmu = ArmMMU() diff --git a/src/arch/arm/ArmDecoder.py b/src/arch/arm/ArmDecoder.py index 
a5c16f56ab..d4b82e3f4f 100644 --- a/src/arch/arm/ArmDecoder.py +++ b/src/arch/arm/ArmDecoder.py @@ -38,10 +38,12 @@ from m5.params import * from m5.objects.InstDecoder import InstDecoder + class ArmDecoder(InstDecoder): - type = 'ArmDecoder' - cxx_class = 'gem5::ArmISA::Decoder' + type = "ArmDecoder" + cxx_class = "gem5::ArmISA::Decoder" cxx_header = "arch/arm/decoder.hh" - dvm_enabled = Param.Bool(False, - "Does the decoder implement DVM operations") + dvm_enabled = Param.Bool( + False, "Does the decoder implement DVM operations" + ) diff --git a/src/arch/arm/ArmFsWorkload.py b/src/arch/arm/ArmFsWorkload.py index 43bbd16a06..d0dcde749a 100644 --- a/src/arch/arm/ArmFsWorkload.py +++ b/src/arch/arm/ArmFsWorkload.py @@ -38,47 +38,57 @@ from m5.options import * from m5.SimObject import * from m5.objects.Workload import KernelWorkload + class ArmMachineType(Enum): - map = { - 'VExpress_EMM' : 2272, - 'VExpress_EMM64' : 2272, - 'DTOnly' : -1, - } + map = {"VExpress_EMM": 2272, "VExpress_EMM64": 2272, "DTOnly": -1} + class ArmFsWorkload(KernelWorkload): - type = 'ArmFsWorkload' + type = "ArmFsWorkload" cxx_header = "arch/arm/fs_workload.hh" - cxx_class = 'gem5::ArmISA::FsWorkload' + cxx_class = "gem5::ArmISA::FsWorkload" - boot_loader = VectorParam.String([], + boot_loader = VectorParam.String( + [], "File that contains the boot loader code. Zero or more files may be " "specified. The first boot loader that matches the kernel's " - "architecture will be used.") + "architecture will be used.", + ) - dtb_filename = Param.String("", - "File that contains the Device Tree Blob. Don't use DTB if empty.") + dtb_filename = Param.String( + "", "File that contains the Device Tree Blob. Don't use DTB if empty." + ) dtb_addr = Param.Addr(0, "DTB or ATAGS address") - initrd_filename = Param.String("", - "File that contains the initial ramdisk. Don't use initrd if empty.") + initrd_filename = Param.String( + "", + "File that contains the initial ramdisk. 
Don't use initrd if empty.", + ) initrd_addr = Param.Addr(0, "initrd/initramfs address") cpu_release_addr = Param.Addr(0, "cpu-release-addr property") - machine_type = Param.ArmMachineType('DTOnly', - "Machine id from http://www.arm.linux.org.uk/developer/machines/") - early_kernel_symbols = Param.Bool(False, - "enable early kernel symbol tables before MMU") - enable_context_switch_stats_dump = Param.Bool(False, - "enable stats/task info dumping at context switch boundaries") + machine_type = Param.ArmMachineType( + "DTOnly", + "Machine id from http://www.arm.linux.org.uk/developer/machines/", + ) + early_kernel_symbols = Param.Bool( + False, "enable early kernel symbol tables before MMU" + ) + enable_context_switch_stats_dump = Param.Bool( + False, "enable stats/task info dumping at context switch boundaries" + ) + + panic_on_panic = Param.Bool( + False, "Trigger a gem5 panic if the " "guest kernel panics" + ) + panic_on_oops = Param.Bool( + False, "Trigger a gem5 panic if the " "guest kernel oopses" + ) - panic_on_panic = Param.Bool(False, "Trigger a gem5 panic if the " \ - "guest kernel panics") - panic_on_oops = Param.Bool(False, "Trigger a gem5 panic if the " \ - "guest kernel oopses") class ArmFsLinux(ArmFsWorkload): - type = 'ArmFsLinux' + type = "ArmFsLinux" cxx_header = "arch/arm/linux/fs_workload.hh" - cxx_class = 'gem5::ArmISA::FsLinux' + cxx_class = "gem5::ArmISA::FsLinux" load_addr_mask = 0 @@ -87,7 +97,8 @@ class ArmFsLinux(ArmFsWorkload): """Dump dmesg from the simulated kernel to standard out""" pass + class ArmFsFreebsd(ArmFsWorkload): - type = 'ArmFsFreebsd' + type = "ArmFsFreebsd" cxx_header = "arch/arm/freebsd/fs_workload.hh" - cxx_class = 'gem5::ArmISA::FsFreebsd' + cxx_class = "gem5::ArmISA::FsFreebsd" diff --git a/src/arch/arm/ArmISA.py b/src/arch/arm/ArmISA.py index 5616ad8ffd..4f336e04cd 100644 --- a/src/arch/arm/ArmISA.py +++ b/src/arch/arm/ArmISA.py @@ -42,34 +42,42 @@ from m5.objects.ArmSystem import SveVectorLength, ArmRelease from 
m5.objects.BaseISA import BaseISA # Enum for DecoderFlavor -class DecoderFlavor(Enum): vals = ['Generic'] +class DecoderFlavor(Enum): + vals = ["Generic"] + class ArmDefaultSERelease(ArmRelease): extensions = [ - 'CRYPTO', + "CRYPTO", # Armv8.1 - 'FEAT_LSE', 'FEAT_RDM', + "FEAT_LSE", + "FEAT_RDM", # Armv8.2 - 'FEAT_SVE', + "FEAT_SVE", # Armv8.3 - 'FEAT_FCMA', 'FEAT_JSCVT', 'FEAT_PAuth', + "FEAT_FCMA", + "FEAT_JSCVT", + "FEAT_PAuth", # Other - 'TME' + "TME", ] + class ArmISA(BaseISA): - type = 'ArmISA' - cxx_class = 'gem5::ArmISA::ISA' + type = "ArmISA" + cxx_class = "gem5::ArmISA::ISA" cxx_header = "arch/arm/isa.hh" system = Param.System(Parent.any, "System this ISA object belongs to") pmu = Param.ArmPMU(NULL, "Performance Monitoring Unit") decoderFlavor = Param.DecoderFlavor( - 'Generic', "Decoder flavor specification") + "Generic", "Decoder flavor specification" + ) - release_se = Param.ArmRelease(ArmDefaultSERelease(), - "Set of features/extensions to use in SE mode") + release_se = Param.ArmRelease( + ArmDefaultSERelease(), "Set of features/extensions to use in SE mode" + ) # If no MIDR value is provided, 0x0 is treated by gem5 as follows: # When 'highest_el_is_64' (AArch64 support) is: @@ -100,51 +108,63 @@ class ArmISA(BaseISA): # !I8MM | !BF16 | SPECRES = 0 | !SB | !FHM | DP | JSCVT id_isar6 = Param.UInt32(0x00000001, "Instruction Set Attribute Register 6") - fpsid = Param.UInt32(0x410430a0, "Floating-point System ID Register") + fpsid = Param.UInt32(0x410430A0, "Floating-point System ID Register") # [31:0] is implementation defined - id_aa64afr0_el1 = Param.UInt64(0x0000000000000000, - "AArch64 Auxiliary Feature Register 0") + id_aa64afr0_el1 = Param.UInt64( + 0x0000000000000000, "AArch64 Auxiliary Feature Register 0" + ) # Reserved for future expansion - id_aa64afr1_el1 = Param.UInt64(0x0000000000000000, - "AArch64 Auxiliary Feature Register 1") + id_aa64afr1_el1 = Param.UInt64( + 0x0000000000000000, "AArch64 Auxiliary Feature Register 1" + ) # 1 CTX CMPs 
| 16 WRPs | 16 BRPs | !PMU | !Trace | Debug v8-A - id_aa64dfr0_el1 = Param.UInt64(0x0000000000F0F006, - "AArch64 Debug Feature Register 0") + id_aa64dfr0_el1 = Param.UInt64( + 0x0000000000F0F006, "AArch64 Debug Feature Register 0" + ) # Reserved for future expansion - id_aa64dfr1_el1 = Param.UInt64(0x0000000000000000, - "AArch64 Debug Feature Register 1") + id_aa64dfr1_el1 = Param.UInt64( + 0x0000000000000000, "AArch64 Debug Feature Register 1" + ) # !FHM | !TME | !Atomic | !CRC32 | !SHA2 | RDM | !SHA1 | !AES - id_aa64isar0_el1 = Param.UInt64(0x0000000010000000, - "AArch64 Instruction Set Attribute Register 0") + id_aa64isar0_el1 = Param.UInt64( + 0x0000000010000000, "AArch64 Instruction Set Attribute Register 0" + ) # !I8MM | !BF16 | SPECRES = 0 | !SB | # GPI = 0x0 | GPA = 0x1 | API=0x0 | FCMA | JSCVT | APA=0x1 - id_aa64isar1_el1 = Param.UInt64(0x0000000001011010, - "AArch64 Instruction Set Attribute Register 1") + id_aa64isar1_el1 = Param.UInt64( + 0x0000000001011010, "AArch64 Instruction Set Attribute Register 1" + ) # 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA - id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002, - "AArch64 Memory Model Feature Register 0") + id_aa64mmfr0_el1 = Param.UInt64( + 0x0000000000F00002, "AArch64 Memory Model Feature Register 0" + ) # PAN | HPDS | !VHE | VMIDBits - id_aa64mmfr1_el1 = Param.UInt64(0x0000000000101020, - "AArch64 Memory Model Feature Register 1") + id_aa64mmfr1_el1 = Param.UInt64( + 0x0000000000101020, "AArch64 Memory Model Feature Register 1" + ) # |VARANGE | UAO - id_aa64mmfr2_el1 = Param.UInt64(0x0000000000010010, - "AArch64 Memory Model Feature Register 2") + id_aa64mmfr2_el1 = Param.UInt64( + 0x0000000000010010, "AArch64 Memory Model Feature Register 2" + ) # Any access (read/write) to an unimplemented # Implementation Defined registers is not causing an Undefined Instruction. # It is rather executed as a NOP. 
- impdef_nop = Param.Bool(False, - "Any access to a MISCREG_IMPDEF_UNIMPL register is executed as NOP") + impdef_nop = Param.Bool( + False, + "Any access to a MISCREG_IMPDEF_UNIMPL register is executed as NOP", + ) # This is required because in SE mode a generic System SimObject is # allocated, instead of an ArmSystem - sve_vl_se = Param.SveVectorLength(1, - "SVE vector length in quadwords (128-bit), SE-mode only") + sve_vl_se = Param.SveVectorLength( + 1, "SVE vector length in quadwords (128-bit), SE-mode only" + ) # Recurse into subnodes to generate DTB entries. This is mainly needed to # generate the PMU entry. diff --git a/src/arch/arm/ArmInterrupts.py b/src/arch/arm/ArmInterrupts.py index c683fe267c..1940db1322 100644 --- a/src/arch/arm/ArmInterrupts.py +++ b/src/arch/arm/ArmInterrupts.py @@ -26,7 +26,8 @@ from m5.objects.BaseInterrupts import BaseInterrupts + class ArmInterrupts(BaseInterrupts): - type = 'ArmInterrupts' - cxx_class = 'gem5::ArmISA::Interrupts' + type = "ArmInterrupts" + cxx_class = "gem5::ArmISA::Interrupts" cxx_header = "arch/arm/interrupts.hh" diff --git a/src/arch/arm/ArmMMU.py b/src/arch/arm/ArmMMU.py index a0bbda8dbb..dba6618567 100644 --- a/src/arch/arm/ArmMMU.py +++ b/src/arch/arm/ArmMMU.py @@ -44,60 +44,68 @@ from m5.proxy import * # Basic stage 1 translation objects class ArmTableWalker(ClockedObject): - type = 'ArmTableWalker' - cxx_class = 'gem5::ArmISA::TableWalker' + type = "ArmTableWalker" + cxx_class = "gem5::ArmISA::TableWalker" cxx_header = "arch/arm/table_walker.hh" - is_stage2 = Param.Bool(False, "Is this object for stage 2 translation?") - num_squash_per_cycle = Param.Unsigned(2, - "Number of outstanding walks that can be squashed per cycle") + is_stage2 = Param.Bool(False, "Is this object for stage 2 translation?") + num_squash_per_cycle = Param.Unsigned( + 2, "Number of outstanding walks that can be squashed per cycle" + ) port = RequestPort("Table Walker port") sys = Param.System(Parent.any, "system object parameter") + 
# Stage 2 translation objects, only used when virtualisation is being used class ArmStage2TableWalker(ArmTableWalker): is_stage2 = True + class ArmMMU(BaseMMU): - type = 'ArmMMU' - cxx_class = 'gem5::ArmISA::MMU' - cxx_header = 'arch/arm/mmu.hh' + type = "ArmMMU" + cxx_class = "gem5::ArmISA::MMU" + cxx_header = "arch/arm/mmu.hh" # L2 TLBs - l2_shared = ArmTLB(entry_type="unified", size=1280, - partial_levels=["L2"]) + l2_shared = ArmTLB(entry_type="unified", size=1280, partial_levels=["L2"]) # L1 TLBs itb = ArmTLB(entry_type="instruction", next_level=Parent.l2_shared) dtb = ArmTLB(entry_type="data", next_level=Parent.l2_shared) stage2_itb = Param.ArmTLB( - ArmStage2TLB(entry_type="instruction"), - "Stage 2 Instruction TLB") + ArmStage2TLB(entry_type="instruction"), "Stage 2 Instruction TLB" + ) stage2_dtb = Param.ArmTLB( - ArmStage2TLB(entry_type="data"), - "Stage 2 Data TLB") + ArmStage2TLB(entry_type="data"), "Stage 2 Data TLB" + ) - itb_walker = Param.ArmTableWalker( - ArmTableWalker(), "HW Table walker") - dtb_walker = Param.ArmTableWalker( - ArmTableWalker(), "HW Table walker") + itb_walker = Param.ArmTableWalker(ArmTableWalker(), "HW Table walker") + dtb_walker = Param.ArmTableWalker(ArmTableWalker(), "HW Table walker") stage2_itb_walker = Param.ArmTableWalker( - ArmStage2TableWalker(), "HW Table walker") + ArmStage2TableWalker(), "HW Table walker" + ) stage2_dtb_walker = Param.ArmTableWalker( - ArmStage2TableWalker(), "HW Table walker") + ArmStage2TableWalker(), "HW Table walker" + ) sys = Param.System(Parent.any, "system object parameter") - release_se = Param.ArmRelease(Parent.isa[0].release_se, - "Set of features/extensions to use in SE mode") + release_se = Param.ArmRelease( + Parent.isa[0].release_se, + "Set of features/extensions to use in SE mode", + ) @classmethod def walkerPorts(cls): - return ["mmu.itb_walker.port", "mmu.dtb_walker.port", - "mmu.stage2_itb_walker.port", "mmu.stage2_dtb_walker.port"] + return [ + "mmu.itb_walker.port", + 
"mmu.dtb_walker.port", + "mmu.stage2_itb_walker.port", + "mmu.stage2_dtb_walker.port", + ] def connectWalkerPorts(self, iport, dport): self.itb_walker.port = iport diff --git a/src/arch/arm/ArmNativeTrace.py b/src/arch/arm/ArmNativeTrace.py index c8bc272c9e..0c795a6426 100644 --- a/src/arch/arm/ArmNativeTrace.py +++ b/src/arch/arm/ArmNativeTrace.py @@ -28,9 +28,11 @@ from m5.SimObject import SimObject from m5.params import * from m5.objects.CPUTracers import NativeTrace + class ArmNativeTrace(NativeTrace): - type = 'ArmNativeTrace' - cxx_class = 'gem5::Trace::ArmNativeTrace' + type = "ArmNativeTrace" + cxx_class = "gem5::trace::ArmNativeTrace" cxx_header = "arch/arm/nativetrace.hh" - stop_on_pc_error = Param.Bool(True, - "Stop M5 if it and statetrace's pcs are different") + stop_on_pc_error = Param.Bool( + True, "Stop M5 if it and statetrace's pcs are different" + ) diff --git a/src/arch/arm/ArmPMU.py b/src/arch/arm/ArmPMU.py index ec5dc2f742..80288ded57 100644 --- a/src/arch/arm/ArmPMU.py +++ b/src/arch/arm/ArmPMU.py @@ -42,6 +42,7 @@ from m5.proxy import * from m5.objects.Gic import ArmInterruptPin, ArmPPI from m5.util.fdthelper import * + class ProbeEvent(object): def __init__(self, pmu, _eventId, obj, *listOfNames): self.obj = obj @@ -52,23 +53,27 @@ class ProbeEvent(object): def register(self): if self.obj: for name in self.names: - self.pmu.getCCObject().addEventProbe(self.eventId, - self.obj.getCCObject(), name) + self.pmu.getCCObject().addEventProbe( + self.eventId, self.obj.getCCObject(), name + ) + class SoftwareIncrement(object): - def __init__(self,pmu, _eventId): + def __init__(self, pmu, _eventId): self.eventId = _eventId self.pmu = pmu def register(self): self.pmu.getCCObject().addSoftwareIncrementEvent(self.eventId) + ARCH_EVENT_CORE_CYCLES = 0x11 + class ArmPMU(SimObject): - type = 'ArmPMU' - cxx_class = 'gem5::ArmISA::PMU' - cxx_header = 'arch/arm/pmu.hh' + type = "ArmPMU" + cxx_class = "gem5::ArmISA::PMU" + cxx_header = "arch/arm/pmu.hh" 
cxx_exports = [ PyBindMethod("addEventProbe"), @@ -78,10 +83,13 @@ class ArmPMU(SimObject): _events = None def addEvent(self, newObject): - if not (isinstance(newObject, ProbeEvent) - or isinstance(newObject, SoftwareIncrement)): - raise TypeError("argument must be of ProbeEvent or " - "SoftwareIncrement type") + if not ( + isinstance(newObject, ProbeEvent) + or isinstance(newObject, SoftwareIncrement) + ): + raise TypeError( + "argument must be of ProbeEvent or " "SoftwareIncrement type" + ) if not self._events: self._events = [] @@ -92,15 +100,19 @@ class ArmPMU(SimObject): # register deferred event handlers. def regProbeListeners(self): for event in self._events: - event.register() + event.register() self.getCCObject().regProbeListeners() - def addArchEvents(self, - cpu=None, - itb=None, dtb=None, - icache=None, dcache=None, - l2cache=None): + def addArchEvents( + self, + cpu=None, + itb=None, + dtb=None, + icache=None, + dcache=None, + l2cache=None, + ): """Add architected events to the PMU. 
This method can be called multiple times with only a subset of @@ -118,20 +130,20 @@ class ArmPMU(SimObject): bpred = None # 0x00: SW_INCR - self.addEvent(SoftwareIncrement(self,0x00)) + self.addEvent(SoftwareIncrement(self, 0x00)) # 0x01: L1I_CACHE_REFILL # 0x02: L1I_TLB_REFILL, - self.addEvent(ProbeEvent(self,0x02, itb, "Refills")) + self.addEvent(ProbeEvent(self, 0x02, itb, "Refills")) # 0x03: L1D_CACHE_REFILL # 0x04: L1D_CACHE # 0x05: L1D_TLB_REFILL - self.addEvent(ProbeEvent(self,0x05, dtb, "Refills")) + self.addEvent(ProbeEvent(self, 0x05, dtb, "Refills")) # 0x06: LD_RETIRED - self.addEvent(ProbeEvent(self,0x06, cpu, "RetiredLoads")) + self.addEvent(ProbeEvent(self, 0x06, cpu, "RetiredLoads")) # 0x07: ST_RETIRED - self.addEvent(ProbeEvent(self,0x07, cpu, "RetiredStores")) + self.addEvent(ProbeEvent(self, 0x07, cpu, "RetiredStores")) # 0x08: INST_RETIRED - self.addEvent(ProbeEvent(self,0x08, cpu, "RetiredInsts")) + self.addEvent(ProbeEvent(self, 0x08, cpu, "RetiredInsts")) # 0x09: EXC_TAKEN # 0x0A: EXC_RETURN # 0x0B: CID_WRITE_RETIRED @@ -140,15 +152,17 @@ class ArmPMU(SimObject): # 0x0E: BR_RETURN_RETIRED # 0x0F: UNALIGEND_LDST_RETIRED # 0x10: BR_MIS_PRED - self.addEvent(ProbeEvent(self,0x10, bpred, "Misses")) + self.addEvent(ProbeEvent(self, 0x10, bpred, "Misses")) # 0x11: CPU_CYCLES - self.addEvent(ProbeEvent(self, ARCH_EVENT_CORE_CYCLES, cpu, - "ActiveCycles")) + self.addEvent( + ProbeEvent(self, ARCH_EVENT_CORE_CYCLES, cpu, "ActiveCycles") + ) # 0x12: BR_PRED - self.addEvent(ProbeEvent(self,0x12, bpred, "Branches")) + self.addEvent(ProbeEvent(self, 0x12, bpred, "Branches")) # 0x13: MEM_ACCESS - self.addEvent(ProbeEvent(self,0x13, cpu, "RetiredLoads", - "RetiredStores")) + self.addEvent( + ProbeEvent(self, 0x13, cpu, "RetiredLoads", "RetiredStores") + ) # 0x14: L1I_CACHE # 0x15: L1D_CACHE_WB # 0x16: L2D_CACHE @@ -163,7 +177,7 @@ class ArmPMU(SimObject): # 0x1F: L1D_CACHE_ALLOCATE # 0x20: L2D_CACHE_ALLOCATE # 0x21: BR_RETIRED - 
self.addEvent(ProbeEvent(self,0x21, cpu, "RetiredBranches")) + self.addEvent(ProbeEvent(self, 0x21, cpu, "RetiredBranches")) # 0x22: BR_MIS_PRED_RETIRED # 0x23: STALL_FRONTEND # 0x24: STALL_BACKEND @@ -190,8 +204,10 @@ class ArmPMU(SimObject): gic = self.platform.unproxy(self).gic node.append( - FdtPropertyWords("interrupts", - self.interrupt.generateFdtProperty(gic))) + FdtPropertyWords( + "interrupts", self.interrupt.generateFdtProperty(gic) + ) + ) yield node diff --git a/src/arch/arm/ArmSeWorkload.py b/src/arch/arm/ArmSeWorkload.py index dfde24d2dd..1bf4e3edf3 100644 --- a/src/arch/arm/ArmSeWorkload.py +++ b/src/arch/arm/ArmSeWorkload.py @@ -27,28 +27,36 @@ from m5.params import * from m5.objects.Workload import SEWorkload + class ArmSEWorkload(SEWorkload): - type = 'ArmSEWorkload' + type = "ArmSEWorkload" cxx_header = "arch/arm/se_workload.hh" - cxx_class = 'gem5::ArmISA::SEWorkload' + cxx_class = "gem5::ArmISA::SEWorkload" abstract = True + class ArmEmuLinux(ArmSEWorkload): - type = 'ArmEmuLinux' + type = "ArmEmuLinux" cxx_header = "arch/arm/linux/se_workload.hh" - cxx_class = 'gem5::ArmISA::EmuLinux' + cxx_class = "gem5::ArmISA::EmuLinux" @classmethod def _is_compatible_with(cls, obj): - return obj.get_arch() in ('arm64', 'arm', 'thumb') and \ - obj.get_op_sys() in ('linux', 'unknown') + return obj.get_arch() in ( + "arm64", + "arm", + "thumb", + ) and obj.get_op_sys() in ("linux", "unknown") + class ArmEmuFreebsd(ArmSEWorkload): - type = 'ArmEmuFreebsd' + type = "ArmEmuFreebsd" cxx_header = "arch/arm/freebsd/se_workload.hh" - cxx_class = 'gem5::ArmISA::EmuFreebsd' + cxx_class = "gem5::ArmISA::EmuFreebsd" @classmethod def _is_compatible_with(cls, obj): - return obj.get_arch() in ('arm64', 'arm', 'thumb') and \ - obj.get_op_sys() == 'freebsd' + return ( + obj.get_arch() in ("arm64", "arm", "thumb") + and obj.get_op_sys() == "freebsd" + ) diff --git a/src/arch/arm/ArmSemihosting.py b/src/arch/arm/ArmSemihosting.py index 8064a68099..54322cdec0 100644 --- 
a/src/arch/arm/ArmSemihosting.py +++ b/src/arch/arm/ArmSemihosting.py @@ -39,25 +39,31 @@ from m5.SimObject import * from m5.objects.Serial import SerialDevice from m5.objects.Terminal import Terminal + class ArmSemihosting(SimObject): - type = 'ArmSemihosting' + type = "ArmSemihosting" cxx_header = "arch/arm/semihosting.hh" - cxx_class = 'gem5::ArmSemihosting' + cxx_class = "gem5::ArmSemihosting" - cmd_line = Param.String("", "Command line to report to guest"); - stdin = Param.String("stdin", - "Standard input (stdin for gem5's terminal)") - stdout = Param.String("stdout", - "Standard output (stdout for gem5's terminal)") - stderr = Param.String("stderr", - "Standard error (stderr for gem5's terminal)") - files_root_dir = Param.String("", - "Host root directory for files handled by Semihosting") + cmd_line = Param.String("", "Command line to report to guest") + stdin = Param.String("stdin", "Standard input (stdin for gem5's terminal)") + stdout = Param.String( + "stdout", "Standard output (stdout for gem5's terminal)" + ) + stderr = Param.String( + "stderr", "Standard error (stderr for gem5's terminal)" + ) + files_root_dir = Param.String( + "", "Host root directory for files handled by Semihosting" + ) - mem_reserve = Param.MemorySize("32MiB", + mem_reserve = Param.MemorySize( + "32MiB", "Amount of memory to reserve at the start of the address map. 
This " - "memory won't be used by the heap reported to an application."); - stack_size = Param.MemorySize("32MiB", "Application stack size"); + "memory won't be used by the heap reported to an application.", + ) + stack_size = Param.MemorySize("32MiB", "Application stack size") - time = Param.Time('01/01/2009', - "System time to use ('Now' for actual time)") + time = Param.Time( + "01/01/2009", "System time to use ('Now' for actual time)" + ) diff --git a/src/arch/arm/ArmSystem.py b/src/arch/arm/ArmSystem.py index f9df791045..936c032780 100644 --- a/src/arch/arm/ArmSystem.py +++ b/src/arch/arm/ArmSystem.py @@ -1,4 +1,4 @@ -# Copyright (c) 2009, 2012-2013, 2015-2021 ARM Limited +# Copyright (c) 2009, 2012-2013, 2015-2022 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall @@ -41,44 +41,47 @@ from m5.util.fdthelper import * from m5.objects.System import System from m5.objects.ArmSemihosting import ArmSemihosting -class SveVectorLength(UInt8): min = 1; max = 16 +from typing import Any + + +class SveVectorLength(UInt8): + min = 1 + max = 16 + class ArmExtension(ScopedEnum): vals = [ # Armv8.1 - 'FEAT_VHE', - 'FEAT_PAN', - 'FEAT_LSE', - 'FEAT_HPDS', - 'FEAT_VMID16', - 'FEAT_RDM', - + "FEAT_VHE", + "FEAT_PAN", + "FEAT_LSE", + "FEAT_HPDS", + "FEAT_VMID16", + "FEAT_RDM", # Armv8.2 - 'FEAT_SVE', - 'FEAT_UAO', - 'FEAT_LVA', # Optional in Armv8.2 - 'FEAT_LPA', # Optional in Armv8.2 - + "FEAT_SVE", + "FEAT_UAO", + "FEAT_LVA", # Optional in Armv8.2 + "FEAT_LPA", # Optional in Armv8.2 # Armv8.3 - 'FEAT_FCMA', - 'FEAT_JSCVT', - 'FEAT_PAuth', - + "FEAT_FCMA", + "FEAT_JSCVT", + "FEAT_PAuth", # Armv8.4 - 'FEAT_SEL2', - + "FEAT_SEL2", # Others - 'SECURITY', - 'LPAE', - 'VIRTUALIZATION', - 'CRYPTO', - 'TME' + "SECURITY", + "LPAE", + "VIRTUALIZATION", + "CRYPTO", + "TME", ] + class ArmRelease(SimObject): - type = 'ArmRelease' + type = "ArmRelease" cxx_header = "arch/arm/system.hh" - cxx_class = 'gem5::ArmRelease' + cxx_class = 
"gem5::ArmRelease" extensions = VectorParam.ArmExtension([], "ISA extensions") @@ -87,82 +90,125 @@ class ArmRelease(SimObject): Add the provided extension (ArmExtension) to the system The method is discarding pre-existing values """ - if (new_ext.value not in - [ ext.value for ext in self.extensions ]): + if new_ext.value not in [ext.value for ext in self.extensions]: self.extensions.append(new_ext) + def remove(self, ext: ArmExtension) -> None: + """ + Remove the provided extension (ArmExtension) from the system + """ + for curr_ext in list(self.extensions): + if curr_ext.value == ext.value: + self.extensions.remove(curr_ext) + def has(self, new_ext: ArmExtension) -> bool: """ Is the system implementing the provided extension (ArmExtension) ? """ - if (new_ext.value not in - [ ext.value for ext in self.extensions ]): + if new_ext.value not in [ext.value for ext in self.extensions]: return False else: return True + @classmethod + def for_kvm(cls) -> Any: + """ + Generates an ArmRelease for KVM. 
It simply extracts EL2/EL3 support + from the current cls object + """ + release = cls() + release.remove(ArmExtension("SECURITY")) + release.remove(ArmExtension("VIRTUALIZATION")) + return release + + class Armv8(ArmRelease): - extensions = [ - 'LPAE', 'VIRTUALIZATION', 'SECURITY' - ] + extensions = ["LPAE", "VIRTUALIZATION", "SECURITY"] + class ArmDefaultRelease(Armv8): extensions = Armv8.extensions + [ # Armv8.1 - 'FEAT_LSE', 'FEAT_PAN', 'FEAT_HPDS', 'FEAT_VMID16', 'FEAT_RDM', + "FEAT_LSE", + "FEAT_PAN", + "FEAT_HPDS", + "FEAT_VMID16", + "FEAT_RDM", # Armv8.2 - 'FEAT_UAO', 'FEAT_LVA', 'FEAT_LPA', 'FEAT_SVE', + "FEAT_UAO", + "FEAT_LVA", + "FEAT_LPA", + "FEAT_SVE", # Armv8.3 - 'FEAT_FCMA', 'FEAT_JSCVT', 'FEAT_PAuth', + "FEAT_FCMA", + "FEAT_JSCVT", + "FEAT_PAuth", # Armv8.4 - 'FEAT_SEL2' + "FEAT_SEL2", ] + class Armv81(Armv8): extensions = Armv8.extensions + [ - 'FEAT_LSE', 'FEAT_VHE', 'FEAT_PAN', - 'FEAT_HPDS', 'FEAT_VMID16', 'FEAT_RDM' + "FEAT_LSE", + "FEAT_VHE", + "FEAT_PAN", + "FEAT_HPDS", + "FEAT_VMID16", + "FEAT_RDM", ] + class Armv82(Armv81): extensions = Armv81.extensions + [ - 'FEAT_UAO', 'FEAT_LVA', 'FEAT_LPA', 'FEAT_SVE' + "FEAT_UAO", + "FEAT_LVA", + "FEAT_LPA", + "FEAT_SVE", ] + class Armv83(Armv82): - extensions = Armv82.extensions + [ - 'FEAT_FCMA', 'FEAT_JSCVT', 'FEAT_PAuth', - ] + extensions = Armv82.extensions + ["FEAT_FCMA", "FEAT_JSCVT", "FEAT_PAuth"] + class Armv84(Armv83): - extensions = Armv83.extensions + [ - 'FEAT_SEL2' - ] + extensions = Armv83.extensions + ["FEAT_SEL2"] + class ArmSystem(System): - type = 'ArmSystem' + type = "ArmSystem" cxx_header = "arch/arm/system.hh" - cxx_class = 'gem5::ArmSystem' + cxx_class = "gem5::ArmSystem" release = Param.ArmRelease(ArmDefaultRelease(), "Arm Release") multi_proc = Param.Bool(True, "Multiprocessor system?") gic_cpu_addr = Param.Addr(0, "Addres of the GIC CPU interface") - reset_addr = Param.Addr(0x0, - "Reset address (ARMv8)") - auto_reset_addr = Param.Bool(True, - "Determine reset address from 
kernel entry point if no boot loader") - highest_el_is_64 = Param.Bool(True, + reset_addr = Param.Addr(0x0, "Reset address (ARMv8)") + auto_reset_addr = Param.Bool( + True, + "Determine reset address from kernel entry point if no boot loader", + ) + highest_el_is_64 = Param.Bool( + True, "True if the register width of the highest implemented exception level " - "is 64 bits (ARMv8)") - phys_addr_range_64 = Param.UInt8(40, - "Supported physical address range in bits when using AArch64 (ARMv8)") - have_large_asid_64 = Param.Bool(False, - "True if ASID is 16 bits in AArch64 (ARMv8)") - sve_vl = Param.SveVectorLength(1, - "SVE vector length in quadwords (128-bit)") - semihosting = Param.ArmSemihosting(NULL, - "Enable support for the Arm semihosting by settings this parameter") + "is 64 bits (ARMv8)", + ) + phys_addr_range_64 = Param.UInt8( + 40, + "Supported physical address range in bits when using AArch64 (ARMv8)", + ) + have_large_asid_64 = Param.Bool( + False, "True if ASID is 16 bits in AArch64 (ARMv8)" + ) + sve_vl = Param.SveVectorLength( + 1, "SVE vector length in quadwords (128-bit)" + ) + semihosting = Param.ArmSemihosting( + NULL, + "Enable support for the Arm semihosting by settings this parameter", + ) # Set to true if simulation provides a PSCI implementation # This flag will be checked when auto-generating @@ -182,7 +228,6 @@ class ArmSystem(System): fdt.add_rootnode(rootNode) fdt.writeDtbFile(filename) - def generateDeviceTree(self, state): # Generate a device tree root node for the system by creating the root # node and adding the generated subnodes of all children. 
@@ -193,12 +238,16 @@ class ArmSystem(System): def generateMemNode(mem_range): node = FdtNode("memory@%x" % int(mem_range.start)) node.append(FdtPropertyStrings("device_type", ["memory"])) - node.append(FdtPropertyWords("reg", - state.addrCells(mem_range.start) + - state.sizeCells(mem_range.size()) )) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(mem_range.start) + + state.sizeCells(mem_range.size()), + ) + ) return node - root = FdtNode('/') + root = FdtNode("/") root.append(state.addrCellsProperty()) root.append(state.sizeCellsProperty()) diff --git a/src/arch/arm/ArmTLB.py b/src/arch/arm/ArmTLB.py index 10ed48b21c..8475a56b5b 100644 --- a/src/arch/arm/ArmTLB.py +++ b/src/arch/arm/ArmTLB.py @@ -40,20 +40,24 @@ from m5.params import * from m5.proxy import * from m5.objects.BaseTLB import BaseTLB + class ArmLookupLevel(Enum): - vals = [ "L0", "L1", "L2", "L3" ] + vals = ["L0", "L1", "L2", "L3"] + class ArmTLB(BaseTLB): - type = 'ArmTLB' - cxx_class = 'gem5::ArmISA::TLB' + type = "ArmTLB" + cxx_class = "gem5::ArmISA::TLB" cxx_header = "arch/arm/tlb.hh" sys = Param.System(Parent.any, "system object parameter") size = Param.Int(64, "TLB size") is_stage2 = Param.Bool(False, "Is this a stage 2 TLB?") - partial_levels = VectorParam.ArmLookupLevel([], + partial_levels = VectorParam.ArmLookupLevel( + [], "List of intermediate lookup levels allowed to be cached in the TLB " - "(=holding intermediate PAs obtained during a table walk") + "(=holding intermediate PAs obtained during a table walk", + ) class ArmStage2TLB(ArmTLB): diff --git a/src/arch/arm/AtomicSimpleCPU.py b/src/arch/arm/AtomicSimpleCPU.py deleted file mode 100644 index c3a25ba52e..0000000000 --- a/src/arch/arm/AtomicSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.ArmCPU import ArmAtomicSimpleCPU - -AtomicSimpleCPU = ArmAtomicSimpleCPU diff --git a/src/arch/arm/MinorCPU.py b/src/arch/arm/MinorCPU.py deleted file mode 100644 index bac019774e..0000000000 --- a/src/arch/arm/MinorCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.ArmCPU import ArmMinorCPU - -MinorCPU = ArmMinorCPU diff --git a/src/arch/arm/NonCachingSimpleCPU.py b/src/arch/arm/NonCachingSimpleCPU.py deleted file mode 100644 index bfad3ba88b..0000000000 --- a/src/arch/arm/NonCachingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.ArmCPU import ArmNonCachingSimpleCPU - -NonCachingSimpleCPU = ArmNonCachingSimpleCPU diff --git a/src/arch/arm/SConscript b/src/arch/arm/SConscript index bc720d16d5..6c359fb255 100644 --- a/src/arch/arm/SConscript +++ b/src/arch/arm/SConscript @@ -40,14 +40,20 @@ Import('*') +if env['USE_ARM_ISA']: + env.TagImplies('arm isa', 'gem5 lib') + # The GTest function does not have a 'tags' parameter. We therefore apply this # guard to ensure this test is only built when ARM is compiled. 
# # Note: This will need reconfigured for multi-isa. E.g., if this is # incorporated: https://gem5-review.googlesource.com/c/public/gem5/+/52491 -if env['TARGET_ISA'] == 'arm': - GTest('aapcs64.test', 'aapcs64.test.cc', '../../base/debug.cc') - +if env['USE_ARM_ISA']: + GTest('aapcs64.test', 'aapcs64.test.cc', + '../../base/debug.cc', + '../../cpu/reg_class.cc', + '../../sim/bufval.cc', '../../sim/cur_tick.cc', + 'regs/int.cc') Source('decoder.cc', tags='arm isa') Source('faults.cc', tags='arm isa') Source('htm.cc', tags='arm isa') @@ -81,6 +87,7 @@ Source('linux/fs_workload.cc', tags='arm isa') Source('freebsd/fs_workload.cc', tags='arm isa') Source('freebsd/se_workload.cc', tags='arm isa') Source('fs_workload.cc', tags='arm isa') +Source('regs/int.cc', tags='arm isa') Source('regs/misc.cc', tags='arm isa') Source('mmu.cc', tags='arm isa') Source('nativetrace.cc', tags='arm isa') @@ -120,12 +127,6 @@ SimObject('ArmTLB.py', sim_objects=['ArmTLB'], enums=['ArmLookupLevel'], SimObject('ArmPMU.py', sim_objects=['ArmPMU'], tags='arm isa') SimObject('ArmCPU.py', sim_objects=[], tags='arm isa') -SimObject('AtomicSimpleCPU.py', sim_objects=[], tags='arm isa') -SimObject('TimingSimpleCPU.py', sim_objects=[], tags='arm isa') -SimObject('NonCachingSimpleCPU.py', sim_objects=[], tags='arm isa') -SimObject('O3CPU.py', sim_objects=[], tags='arm isa') -SimObject('O3Checker.py', sim_objects=[], tags='arm isa') -SimObject('MinorCPU.py', sim_objects=[], tags='arm isa') DebugFlag('Arm', tags='arm isa') DebugFlag('ArmTme', 'Transactional Memory Extension', tags='arm isa') diff --git a/src/arch/arm/SConsopts b/src/arch/arm/SConsopts index c284f2c4b7..f760404957 100644 --- a/src/arch/arm/SConsopts +++ b/src/arch/arm/SConsopts @@ -1,7 +1,4 @@ -# -*- mode:python -*- - -# Copyright (c) 2007-2008 The Florida State University -# All rights reserved. +# Copyright 2021 Google, Inc. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,5 +24,4 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Import('*') - -main.Append(ALL_ISAS=['arm']) +sticky_vars.Add(BoolVariable('USE_ARM_ISA', 'Enable ARM ISA support', False)) diff --git a/src/arch/arm/TimingSimpleCPU.py b/src/arch/arm/TimingSimpleCPU.py deleted file mode 100644 index 8a20a36345..0000000000 --- a/src/arch/arm/TimingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.ArmCPU import ArmTimingSimpleCPU - -TimingSimpleCPU = ArmTimingSimpleCPU diff --git a/src/arch/arm/aapcs32.hh b/src/arch/arm/aapcs32.hh index ff52039046..383b8eb36b 100644 --- a/src/arch/arm/aapcs32.hh +++ b/src/arch/arm/aapcs32.hh @@ -34,6 +34,7 @@ #include #include "arch/arm/regs/int.hh" +#include "arch/arm/regs/vec.hh" #include "arch/arm/utility.hh" #include "base/intmath.hh" #include "cpu/thread_context.hh" @@ -191,7 +192,7 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ncrn++)); + return tc->getReg(ArmISA::intRegClass[state.ncrn++]); } // Max out the ncrn since we effectively exhausted it. 
@@ -216,11 +217,13 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ncrn++)) & mask(32); - high = tc->getReg(RegId(IntRegClass, state.ncrn++)) & mask(32); + low = tc->getReg(ArmISA::intRegClass[state.ncrn++]) & mask(32); + high = tc->getReg(ArmISA::intRegClass[state.ncrn++]) & + mask(32); } else { - high = tc->getReg(RegId(IntRegClass, state.ncrn++)) & mask(32); - low = tc->getReg(RegId(IntRegClass, state.ncrn++)) & mask(32); + high = tc->getReg(ArmISA::intRegClass[state.ncrn++]) & + mask(32); + low = tc->getReg(ArmISA::intRegClass[state.ncrn++]) & mask(32); } return low | (high << 32); } @@ -295,7 +298,7 @@ struct Result sizeof(uint32_t)) - state.retAddr = tc->getReg(RegId(IntRegClass, state.ncrn++)); + state.retAddr = tc->getReg(ArmISA::intRegClass[state.ncrn++]); } }; @@ -316,7 +319,7 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ncrn++)); + tc->getReg(ArmISA::intRegClass[state.ncrn++]); val = htog(val, ArmISA::byteOrder(tc)); return gtoh(*(Composite *)&val, ArmISA::byteOrder(tc)); } @@ -328,7 +331,7 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ncrn++)); + Chunk val = tc->getReg(ArmISA::intRegClass[state.ncrn++]); val = htog(val, ArmISA::byteOrder(tc)); size_t to_copy = std::min(bytes, chunk_size); memcpy(buf + i * chunk_size, &val, to_copy); @@ -342,7 +345,7 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ncrn++)); + Chunk val = tc->getReg(ArmISA::intRegClass[state.ncrn++]); val = htog(val, ArmISA::byteOrder(tc)); size_t to_copy = std::min(bytes, chunk_size); memcpy(buf + offset, &val, to_copy); @@ -351,8 +354,11 @@ struct Argument(&bytes); constexpr int chunks = sizeof(Float) / sizeof(ArmISA::VecElem); for (int chunk = 0; chunk < chunks; chunk++) - tc->setReg(RegId(VecElemClass, chunk), vec_elems[chunk]); + tc->setReg(ArmISA::vecElemClass[chunk], vec_elems[chunk]); }; }; @@ -503,7 +509,7 @@ struct ArgumentgetReg(RegId(VecElemClass, chunk)); + vec_elems[chunk] = tc->getReg(ArmISA::vecElemClass[chunk]); return bitsToFloat(result); } @@ -571,7 +577,7 
@@ struct ArgumentgetReg(id, &val); ha[i] = val.as()[lane]; @@ -619,7 +625,7 @@ struct ResultgetReg(id, &val); val.as()[lane] = ha[i]; diff --git a/src/arch/arm/aapcs64.hh b/src/arch/arm/aapcs64.hh index fe58fef922..2f53822a70 100644 --- a/src/arch/arm/aapcs64.hh +++ b/src/arch/arm/aapcs64.hh @@ -34,6 +34,7 @@ #include #include "arch/arm/regs/int.hh" +#include "arch/arm/regs/vec.hh" #include "arch/arm/utility.hh" #include "base/intmath.hh" #include "cpu/thread_context.hh" @@ -201,7 +202,7 @@ struct ArgumentgetReg(id, &vc); return vc.as()[0]; @@ -218,7 +219,7 @@ struct ResultgetReg(id, ®); reg.as()[0] = f; @@ -241,7 +242,7 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ngrn++)); + return tc->getReg(ArmISA::intRegClass[state.ngrn++]); // Max out ngrn since we've effectively saturated it. state.ngrn = state.MAX_GRN + 1; @@ -262,8 +263,8 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ngrn++)); - Integer high = tc->getReg(RegId(IntRegClass, state.ngrn++)); + Integer low = tc->getReg(ArmISA::intRegClass[state.ngrn++]); + Integer high = tc->getReg(ArmISA::intRegClass[state.ngrn++]); high = high << 64; return high | low; } @@ -382,7 +383,7 @@ struct ArgumentgetReg(RegId(IntRegClass, state.ngrn++)); + Chunk val = tc->getReg(ArmISA::intRegClass[state.ngrn++]); val = htog(val, ArmISA::byteOrder(tc)); size_t to_copy = std::min(bytes, chunk_size); memcpy(buf + i * chunk_size, &val, to_copy); diff --git a/src/arch/arm/decoder.cc b/src/arch/arm/decoder.cc index 43857440c2..c315ecfefb 100644 --- a/src/arch/arm/decoder.cc +++ b/src/arch/arm/decoder.cc @@ -42,6 +42,7 @@ #include "arch/arm/isa.hh" #include "arch/arm/utility.hh" +#include "base/cast.hh" #include "base/trace.hh" #include "debug/Decoder.hh" #include "sim/full_system.hh" @@ -58,13 +59,13 @@ Decoder::Decoder(const ArmDecoderParams ¶ms) : InstDecoder(params, &data), dvmEnabled(params.dvm_enabled), data(0), fpscrLen(0), fpscrStride(0), - decoderFlavor(dynamic_cast(params.isa)->decoderFlavor()) + 
decoderFlavor(safe_cast(params.isa)->decoderFlavor()) { reset(); // Initialize SVE vector length - sveLen = (dynamic_cast(params.isa) - ->getCurSveVecLenInBitsAtReset() >> 7) - 1; + sveLen = (safe_cast(params.isa)-> + getCurSveVecLenInBitsAtReset() >> 7) - 1; if (dvmEnabled) { warn_once( diff --git a/src/arch/arm/decoder.hh b/src/arch/arm/decoder.hh index fdabe6c5d9..8e486a3458 100644 --- a/src/arch/arm/decoder.hh +++ b/src/arch/arm/decoder.hh @@ -56,10 +56,11 @@ namespace gem5 { +class BaseISA; + namespace ArmISA { -class ISA; class Decoder : public InstDecoder { public: // Public decoder parameters diff --git a/src/arch/arm/fastmodel/CortexA76/FastModelCortexA76.py b/src/arch/arm/fastmodel/CortexA76/FastModelCortexA76.py index 3f98162b79..577fd535d8 100644 --- a/src/arch/arm/fastmodel/CortexA76/FastModelCortexA76.py +++ b/src/arch/arm/fastmodel/CortexA76/FastModelCortexA76.py @@ -38,393 +38,626 @@ from m5.objects.Iris import IrisBaseCPU from m5.objects.SystemC import SystemC_ScModule from m5.util.fdthelper import FdtNode, FdtPropertyWords + class FastModelCortexA76(IrisBaseCPU): - type = 'FastModelCortexA76' - cxx_class = 'gem5::fastmodel::CortexA76' - cxx_header = 'arch/arm/fastmodel/CortexA76/cortex_a76.hh' + type = "FastModelCortexA76" + cxx_class = "gem5::fastmodel::CortexA76" + cxx_header = "arch/arm/fastmodel/CortexA76/cortex_a76.hh" cntfrq = Param.UInt64(0x1800000, "Value for the CNTFRQ timer register") evs = Parent.evs - redistributor = Gicv3CommsTargetSocket('GIC communication target') - core_reset = IntSinkPin('Raising this signal will put the core into ' \ - 'reset mode.') - poweron_reset = IntSinkPin('Power on reset. Initializes all the ' \ - 'processor logic, including debug logic.') + redistributor = Gicv3CommsTargetSocket("GIC communication target") + core_reset = IntSinkPin( + "Raising this signal will put the core into " "reset mode." + ) + poweron_reset = IntSinkPin( + "Power on reset. 
Initializes all the " + "processor logic, including debug logic." + ) - CFGEND = Param.Bool(False, "Endianness configuration at reset. "\ - "0, little endian. 1, big endian.") - CFGTE = Param.Bool(False, "Instruction set state when resetting "\ - "into AArch32. 0, A32. 1, T32.") + CFGEND = Param.Bool( + False, + "Endianness configuration at reset. " + "0, little endian. 1, big endian.", + ) + CFGTE = Param.Bool( + False, + "Instruction set state when resetting " + "into AArch32. 0, A32. 1, T32.", + ) CRYPTODISABLE = Param.Bool(False, "Disable cryptographic features.") RVBARADDR = Param.Addr(0x0, "Value of RVBAR_ELx register.") VINITHI = Param.Bool(False, "Reset value of SCTLR.V.") - enable_trace_special_hlt_imm16 = Param.Bool(False, - "Enable usage of parameter trace_special_hlt_imm16") - l2cache_hit_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for hit. Intended to model the tag-lookup time. This "\ - "is only used when l2cache-state_modelled=true.") - l2cache_maintenance_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for cache maintenance operations given in "\ - "total ticks. This is only used when dcache-state_modelled=true.") - l2cache_miss_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for miss. Intended to model the time for failed "\ - "tag-lookup and allocation of intermediate buffers. This is "\ - "only used when l2cache-state_modelled=true.") - l2cache_read_access_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for read accesses given in ticks per "\ - "access. If this parameter is non-zero, per-access latencies "\ - "will be used instead of per-byte even if l2cache-read_latency "\ - "is set. 
This is in addition to the hit or miss latency, and "\ - "intended to correspond to the time taken to transfer across the "\ - "cache upstream bus, this is only used when "\ - "l2cache-state_modelled=true.") - l2cache_read_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for read accesses given in ticks per byte "\ - "accessed.l2cache-read_access_latency must be set to 0 for "\ - "per-byte latencies to be applied. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus. This is only "\ - "used when l2cache-state_modelled=true.") - l2cache_size = Param.MemorySize32('0x80000', "L2 Cache size in bytes.") - l2cache_snoop_data_transfer_latency = Param.UInt64(0, "L2 Cache "\ - "timing annotation latency for received snoop accesses that "\ - "perform a data transfer given in ticks per byte accessed. This "\ - "is only used when dcache-state_modelled=true.") - l2cache_snoop_issue_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for snoop accesses issued by this cache in "\ - "total ticks. This is only used when dcache-state_modelled=true.") - l2cache_write_access_latency = Param.UInt64(0, "L2 Cache timing "\ - "annotation latency for write accesses given in ticks per "\ - "access. If this parameter is non-zero, per-access latencies "\ - "will be used instead of per-byte even if l2cache-write_latency "\ - "is set. This is only used when l2cache-state_modelled=true.") - l2cache_write_latency = Param.UInt64(0, "L2 Cache timing annotation "\ - "latency for write accesses given in ticks per byte accessed. "\ - "l2cache-write_access_latency must be set to 0 for per-byte "\ - "latencies to be applied. This is only used when "\ - "l2cache-state_modelled=true.") - max_code_cache_mb = Param.MemorySize32('0x100', "Maximum size of "\ - "the simulation code cache (MiB). For platforms with more than 2 "\ - "cores this limit will be scaled down. 
(e.g 1/8 for 16 or more "\ - "cores)") - min_sync_level = Param.Unsigned(0, "Force minimum syncLevel "\ - "(0=off=default,1=syncState,2=postInsnIO,3=postInsnAll)") - semihosting_A32_HLT = Param.UInt16(0xf000, - "A32 HLT number for semihosting calls.") - semihosting_A64_HLT = Param.UInt16(0xf000, - "A64 HLT number for semihosting calls.") - semihosting_ARM_SVC = Param.UInt32(0x123456, - "A32 SVC number for semihosting calls.") - semihosting_T32_HLT = Param.Unsigned(60, - "T32 HLT number for semihosting calls.") - semihosting_Thumb_SVC = Param.Unsigned(171, - "T32 SVC number for semihosting calls.") - semihosting_cmd_line = Param.String("", - "Command line available to semihosting calls.") - semihosting_cwd = Param.String("", - "Base directory for semihosting file access.") + enable_trace_special_hlt_imm16 = Param.Bool( + False, "Enable usage of parameter trace_special_hlt_imm16" + ) + l2cache_hit_latency = Param.UInt64( + 0, + "L2 Cache timing annotation " + "latency for hit. Intended to model the tag-lookup time. This " + "is only used when l2cache-state_modelled=true.", + ) + l2cache_maintenance_latency = Param.UInt64( + 0, + "L2 Cache timing " + "annotation latency for cache maintenance operations given in " + "total ticks. This is only used when dcache-state_modelled=true.", + ) + l2cache_miss_latency = Param.UInt64( + 0, + "L2 Cache timing annotation " + "latency for miss. Intended to model the time for failed " + "tag-lookup and allocation of intermediate buffers. This is " + "only used when l2cache-state_modelled=true.", + ) + l2cache_read_access_latency = Param.UInt64( + 0, + "L2 Cache timing " + "annotation latency for read accesses given in ticks per " + "access. If this parameter is non-zero, per-access latencies " + "will be used instead of per-byte even if l2cache-read_latency " + "is set. 
This is in addition to the hit or miss latency, and " + "intended to correspond to the time taken to transfer across the " + "cache upstream bus, this is only used when " + "l2cache-state_modelled=true.", + ) + l2cache_read_latency = Param.UInt64( + 0, + "L2 Cache timing annotation " + "latency for read accesses given in ticks per byte " + "accessed.l2cache-read_access_latency must be set to 0 for " + "per-byte latencies to be applied. This is in addition to the " + "hit or miss latency, and intended to correspond to the time " + "taken to transfer across the cache upstream bus. This is only " + "used when l2cache-state_modelled=true.", + ) + l2cache_size = Param.MemorySize32("0x80000", "L2 Cache size in bytes.") + l2cache_snoop_data_transfer_latency = Param.UInt64( + 0, + "L2 Cache " + "timing annotation latency for received snoop accesses that " + "perform a data transfer given in ticks per byte accessed. This " + "is only used when dcache-state_modelled=true.", + ) + l2cache_snoop_issue_latency = Param.UInt64( + 0, + "L2 Cache timing " + "annotation latency for snoop accesses issued by this cache in " + "total ticks. This is only used when dcache-state_modelled=true.", + ) + l2cache_write_access_latency = Param.UInt64( + 0, + "L2 Cache timing " + "annotation latency for write accesses given in ticks per " + "access. If this parameter is non-zero, per-access latencies " + "will be used instead of per-byte even if l2cache-write_latency " + "is set. This is only used when l2cache-state_modelled=true.", + ) + l2cache_write_latency = Param.UInt64( + 0, + "L2 Cache timing annotation " + "latency for write accesses given in ticks per byte accessed. " + "l2cache-write_access_latency must be set to 0 for per-byte " + "latencies to be applied. This is only used when " + "l2cache-state_modelled=true.", + ) + max_code_cache_mb = Param.MemorySize32( + "0x100", + "Maximum size of " + "the simulation code cache (MiB). 
For platforms with more than 2 " + "cores this limit will be scaled down. (e.g 1/8 for 16 or more " + "cores)", + ) + min_sync_level = Param.Unsigned( + 0, + "Force minimum syncLevel " + "(0=off=default,1=syncState,2=postInsnIO,3=postInsnAll)", + ) + semihosting_A32_HLT = Param.UInt16( + 0xF000, "A32 HLT number for semihosting calls." + ) + semihosting_A64_HLT = Param.UInt16( + 0xF000, "A64 HLT number for semihosting calls." + ) + semihosting_ARM_SVC = Param.UInt32( + 0x123456, "A32 SVC number for semihosting calls." + ) + semihosting_T32_HLT = Param.Unsigned( + 60, "T32 HLT number for semihosting calls." + ) + semihosting_Thumb_SVC = Param.Unsigned( + 171, "T32 SVC number for semihosting calls." + ) + semihosting_cmd_line = Param.String( + "", "Command line available to semihosting calls." + ) + semihosting_cwd = Param.String( + "", "Base directory for semihosting file access." + ) semihosting_enable = Param.Bool(True, "Enable semihosting SVC/HLT traps.") semihosting_heap_base = Param.Addr(0x0, "Virtual address of heap base.") - semihosting_heap_limit = Param.Addr(0xf000000, - "Virtual address of top of heap.") - semihosting_stack_base = Param.Addr(0x10000000, - "Virtual address of base of descending stack.") - semihosting_stack_limit = Param.Addr(0xf000000, - "Virtual address of stack limit.") - trace_special_hlt_imm16 = Param.UInt16(0xf000, "For this HLT "\ - "number, IF enable_trace_special_hlt_imm16=true, skip performing "\ - "usual HLT execution but call MTI trace if registered") - vfp_enable_at_reset = Param.Bool(False, "Enable VFP in CPACR, "\ - "CPPWR, NSACR at reset. Warning: Arm recommends going through "\ - "the implementation's suggested VFP power-up sequence!") + semihosting_heap_limit = Param.Addr( + 0xF000000, "Virtual address of top of heap." + ) + semihosting_stack_base = Param.Addr( + 0x10000000, "Virtual address of base of descending stack." + ) + semihosting_stack_limit = Param.Addr( + 0xF000000, "Virtual address of stack limit." 
+ ) + trace_special_hlt_imm16 = Param.UInt16( + 0xF000, + "For this HLT " + "number, IF enable_trace_special_hlt_imm16=true, skip performing " + "usual HLT execution but call MTI trace if registered", + ) + vfp_enable_at_reset = Param.Bool( + False, + "Enable VFP in CPACR, " + "CPPWR, NSACR at reset. Warning: Arm recommends going through " + "the implementation's suggested VFP power-up sequence!", + ) + class FastModelCortexA76Cluster(SimObject): - type = 'FastModelCortexA76Cluster' - cxx_class = 'gem5::fastmodel::CortexA76Cluster' - cxx_header = 'arch/arm/fastmodel/CortexA76/cortex_a76.hh' + type = "FastModelCortexA76Cluster" + cxx_class = "gem5::fastmodel::CortexA76Cluster" + cxx_header = "arch/arm/fastmodel/CortexA76/cortex_a76.hh" cores = VectorParam.FastModelCortexA76( - 'Core in a given cluster of CortexA76s') + "Core in a given cluster of CortexA76s" + ) evs = Param.SystemC_ScModule( - "Fast mo0del exported virtual subsystem holding cores") + "Fast mo0del exported virtual subsystem holding cores" + ) - cnthpirq = Param.ArmInterruptPin(ArmPPI(num=10), - "EL2 physical timer event") + cnthpirq = Param.ArmInterruptPin( + ArmPPI(num=10), "EL2 physical timer event" + ) cnthvirq = Param.ArmInterruptPin(ArmPPI(num=12), "EL2 virtual timer event") - cntpsirq = Param.ArmInterruptPin(ArmPPI(num=13), - "EL1 Secure physical timer event") + cntpsirq = Param.ArmInterruptPin( + ArmPPI(num=13), "EL1 Secure physical timer event" + ) cntvirq = Param.ArmInterruptPin(ArmPPI(num=11), "Virtual timer event") - commirq = Param.ArmInterruptPin(ArmPPI(num=6), - "Interrupt signal from debug communications channel") - ctidbgirq = Param.ArmInterruptPin(ArmPPI(num=8), - "Cross Trigger Interface (CTI) interrupt trigger output") - pmuirq = Param.ArmInterruptPin(ArmPPI(num=7), - "Interrupt from performance monitoring unit") - vcpumntirq = Param.ArmInterruptPin(ArmPPI(num=9), - "Interrupt signal for virtual CPU maintenance IRQ") - cntpnsirq = Param.ArmInterruptPin(ArmPPI(num=14), - "Non-secure 
physical timer event") + commirq = Param.ArmInterruptPin( + ArmPPI(num=6), "Interrupt signal from debug communications channel" + ) + ctidbgirq = Param.ArmInterruptPin( + ArmPPI(num=8), "Cross Trigger Interface (CTI) interrupt trigger output" + ) + pmuirq = Param.ArmInterruptPin( + ArmPPI(num=7), "Interrupt from performance monitoring unit" + ) + vcpumntirq = Param.ArmInterruptPin( + ArmPPI(num=9), "Interrupt signal for virtual CPU maintenance IRQ" + ) + cntpnsirq = Param.ArmInterruptPin( + ArmPPI(num=14), "Non-secure physical timer event" + ) - amba = AmbaInitiatorSocket(64, 'AMBA initiator socket') - top_reset = IntSinkPin('A single cluster-wide power on reset signal for ' \ - 'all resettable registers in DynamIQ.') - dbg_reset = IntSinkPin('Initialize the shared debug APB, Cross Trigger ' \ - 'Interface (CTI), and Cross Trigger Matrix (CTM) logic.') - model_reset = ResetResponsePort('A reset port to reset the whole cluster.') + amba = AmbaInitiatorSocket(64, "AMBA initiator socket") + top_reset = IntSinkPin( + "A single cluster-wide power on reset signal for " + "all resettable registers in DynamIQ." + ) + dbg_reset = IntSinkPin( + "Initialize the shared debug APB, Cross Trigger " + "Interface (CTI), and Cross Trigger Matrix (CTM) logic." + ) + model_reset = ResetResponsePort("A reset port to reset the whole cluster.") # These parameters are described in "Fast Models Reference Manual" section # 3.4.19, "ARMCortexA7x1CT". - BROADCASTATOMIC = Param.Bool(True, "Enable broadcasting of atomic "\ - "operation. The broadcastatomic signal will override this value "\ - "if used") - BROADCASTCACHEMAINT = Param.Bool(True, "Enable broadcasting of cache "\ - "maintenance operations to downstream caches. The "\ - "broadcastcachemaint signal will override this value if used.") - BROADCASTOUTER = Param.Bool(True, "Enable broadcasting of Outer "\ - "Shareable transactions. 
The broadcastouter signal will override "\ - "this value if used.") - BROADCASTPERSIST = Param.Bool(True, "Enable broadcasting of cache clean "\ - "to the point of persistence operations. The broadcastpersist "\ - "signal will override this value if used") + BROADCASTATOMIC = Param.Bool( + True, + "Enable broadcasting of atomic " + "operation. The broadcastatomic signal will override this value " + "if used", + ) + BROADCASTCACHEMAINT = Param.Bool( + True, + "Enable broadcasting of cache " + "maintenance operations to downstream caches. The " + "broadcastcachemaint signal will override this value if used.", + ) + BROADCASTOUTER = Param.Bool( + True, + "Enable broadcasting of Outer " + "Shareable transactions. The broadcastouter signal will override " + "this value if used.", + ) + BROADCASTPERSIST = Param.Bool( + True, + "Enable broadcasting of cache clean " + "to the point of persistence operations. The broadcastpersist " + "signal will override this value if used", + ) CLUSTER_ID = Param.UInt16(0x0, "Processor cluster ID value") - GICDISABLE = Param.Bool(True, "Disable the new style GICv3 CPU interface "\ - "in each core model. Should be left enabled unless the platform "\ - "contains a GICv3 distributor.") - cpi_div = Param.UInt32(1, - "Divider for calculating CPI (Cycles Per Instruction)") - cpi_mul = Param.UInt32(1, - "Multiplier for calculating CPI (Cycles Per Instruction)") - dcache_hit_latency = Param.UInt64(0, "L1 D-Cache timing annotation "\ - "latency for hit. Intended to model the tag-lookup time. This "\ - "is only used when dcache-state_modelled=true.") - dcache_maintenance_latency = Param.UInt64(0, "L1 D-Cache timing "\ - "annotation latency for cache maintenance operations given in "\ - "total ticks. This is only used when dcache-state_modelled=true.") - dcache_miss_latency = Param.UInt64(0, "L1 D-Cache timing annotation "\ - "latency for miss. Intended to model the time for failed "\ - "tag-lookup and allocation of intermediate buffers. 
This is "\ - "only used when dcache-state_modelled=true.") - dcache_prefetch_enabled = Param.Bool(False, "Enable simulation of data "\ - "cache prefetching. This is only used when "\ - "dcache-state_modelled=true") - dcache_read_access_latency = Param.UInt64(0, "L1 D-Cache timing "\ - "annotation latency for read accesses given in ticks per access "\ - "(of size dcache-read_bus_width_in_bytes). If this parameter is "\ - "non-zero, per-access latencies will be used instead of per-byte "\ - "even if dcache-read_latency is set. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus, this is only "\ - "used when dcache-state_modelled=true.") - dcache_read_latency = Param.UInt64(0, "L1 D-Cache timing annotation "\ - "latency for read accesses given in ticks per byte "\ - "accessed.dcache-read_access_latency must be set to 0 for "\ - "per-byte latencies to be applied. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus. This is only "\ - "used when dcache-state_modelled=true.") - dcache_snoop_data_transfer_latency = Param.UInt64(0, "L1 D-Cache timing "\ - "annotation latency for received snoop accesses that perform a data "\ - "transfer given in ticks per byte accessed. This is only used when "\ - "dcache-state_modelled=true.") - dcache_state_modelled = Param.Bool(False, - "Set whether D-cache has stateful implementation") - dcache_write_access_latency = Param.UInt64(0, "L1 D-Cache timing "\ - "annotation latency for write accesses given in ticks per access "\ - "(of size dcache-write_bus_width_in_bytes). If this parameter is "\ - "non-zero, per-access latencies will be used instead of per-byte "\ - "even if dcache-write_latency is set. 
This is only used when "\ - "dcache-state_modelled=true.") - dcache_write_latency = Param.UInt64(0, "L1 D-Cache timing annotation "\ - "latency for write accesses given in ticks per byte accessed. "\ - "dcache-write_access_latency must be set to 0 for per-byte latencies "\ - "to be applied. This is only used when dcache-state_modelled=true.") - default_opmode = Param.Unsigned(4, "Operating mode of DynamIQ coming out "\ - "of reset. 0: SFONLY ON, 1: 1/4 CACHE ON, 2: 1/2 CACHE ON, 3: "\ - "3/4 CACHE ON, 4: FULL CACHE ON") + GICDISABLE = Param.Bool( + True, + "Disable the new style GICv3 CPU interface " + "in each core model. Should be left enabled unless the platform " + "contains a GICv3 distributor.", + ) + cpi_div = Param.UInt32( + 1, "Divider for calculating CPI (Cycles Per Instruction)" + ) + cpi_mul = Param.UInt32( + 1, "Multiplier for calculating CPI (Cycles Per Instruction)" + ) + dcache_hit_latency = Param.UInt64( + 0, + "L1 D-Cache timing annotation " + "latency for hit. Intended to model the tag-lookup time. This " + "is only used when dcache-state_modelled=true.", + ) + dcache_maintenance_latency = Param.UInt64( + 0, + "L1 D-Cache timing " + "annotation latency for cache maintenance operations given in " + "total ticks. This is only used when dcache-state_modelled=true.", + ) + dcache_miss_latency = Param.UInt64( + 0, + "L1 D-Cache timing annotation " + "latency for miss. Intended to model the time for failed " + "tag-lookup and allocation of intermediate buffers. This is " + "only used when dcache-state_modelled=true.", + ) + dcache_prefetch_enabled = Param.Bool( + False, + "Enable simulation of data " + "cache prefetching. This is only used when " + "dcache-state_modelled=true", + ) + dcache_read_access_latency = Param.UInt64( + 0, + "L1 D-Cache timing " + "annotation latency for read accesses given in ticks per access " + "(of size dcache-read_bus_width_in_bytes). 
If this parameter is " + "non-zero, per-access latencies will be used instead of per-byte " + "even if dcache-read_latency is set. This is in addition to the " + "hit or miss latency, and intended to correspond to the time " + "taken to transfer across the cache upstream bus, this is only " + "used when dcache-state_modelled=true.", + ) + dcache_read_latency = Param.UInt64( + 0, + "L1 D-Cache timing annotation " + "latency for read accesses given in ticks per byte " + "accessed.dcache-read_access_latency must be set to 0 for " + "per-byte latencies to be applied. This is in addition to the " + "hit or miss latency, and intended to correspond to the time " + "taken to transfer across the cache upstream bus. This is only " + "used when dcache-state_modelled=true.", + ) + dcache_snoop_data_transfer_latency = Param.UInt64( + 0, + "L1 D-Cache timing " + "annotation latency for received snoop accesses that perform a data " + "transfer given in ticks per byte accessed. This is only used when " + "dcache-state_modelled=true.", + ) + dcache_state_modelled = Param.Bool( + False, "Set whether D-cache has stateful implementation" + ) + dcache_write_access_latency = Param.UInt64( + 0, + "L1 D-Cache timing " + "annotation latency for write accesses given in ticks per access " + "(of size dcache-write_bus_width_in_bytes). If this parameter is " + "non-zero, per-access latencies will be used instead of per-byte " + "even if dcache-write_latency is set. This is only used when " + "dcache-state_modelled=true.", + ) + dcache_write_latency = Param.UInt64( + 0, + "L1 D-Cache timing annotation " + "latency for write accesses given in ticks per byte accessed. " + "dcache-write_access_latency must be set to 0 for per-byte latencies " + "to be applied. This is only used when dcache-state_modelled=true.", + ) + default_opmode = Param.Unsigned( + 4, + "Operating mode of DynamIQ coming out " + "of reset. 
0: SFONLY ON, 1: 1/4 CACHE ON, 2: 1/2 CACHE ON, 3: " + "3/4 CACHE ON, 4: FULL CACHE ON", + ) diagnostics = Param.Bool(False, "Enable DynamIQ diagnostic messages") - enable_simulation_performance_optimizations = Param.Bool(True, - "With this option enabled, the model will run more quickly, but "\ - "be less accurate to exact CPU behavior. The model will still be "\ - "functionally accurate for software, but may increase "\ - "differences seen between hardware behavior and model behavior "\ - "for certain workloads (it changes the micro-architectural value "\ - "of stage12_tlb_size parameter to 1024).") - ext_abort_device_read_is_sync = Param.Bool(False, - "Synchronous reporting of device-nGnRE read external aborts") - ext_abort_device_write_is_sync = Param.Bool(False, - "Synchronous reporting of device-nGnRE write external aborts") - ext_abort_so_read_is_sync = Param.Bool(False, - "Synchronous reporting of device-nGnRnE read external aborts") - ext_abort_so_write_is_sync = Param.Bool(False, - "Synchronous reporting of device-nGnRnE write external aborts") - gicv3_cpuintf_mmap_access_level = Param.Unsigned(0, "Allowed values are: "\ - "0-mmap access is supported for GICC,GICH,GICV registers. 1-mmap "\ - "access is supported only for GICV registers. 2-mmap access is "\ - "not supported.") - has_peripheral_port = Param.Bool(False, - "If true, additional AXI peripheral port is configured.") - has_statistical_profiling = Param.Bool(True, - "Whether Statistical Based Profiling is implemented") - icache_hit_latency = Param.UInt64(0, "L1 I-Cache timing annotation "\ - "latency for hit. Intended to model the tag-lookup time. This "\ - "is only used when icache-state_modelled=true.") - icache_maintenance_latency = Param.UInt64(0, "L1 I-Cache timing "\ - "annotation latency for cache maintenance operations given in "\ - "total ticks. 
This is only used when icache-state_modelled=true.") - icache_miss_latency = Param.UInt64(0, "L1 I-Cache timing annotation "\ - "latency for miss. Intended to model the time for failed "\ - "tag-lookup and allocation of intermediate buffers. This is "\ - "only used when icache-state_modelled=true.") - icache_prefetch_enabled = Param.Bool(False, "Enable simulation of "\ - "instruction cache prefetching. This is only used when "\ - "icache-state_modelled=true.") - icache_read_access_latency = Param.UInt64(0, "L1 I-Cache timing "\ - "annotation latency for read accesses given in ticks per access "\ - "(of size icache-read_bus_width_in_bytes). If this parameter is "\ - "non-zero, per-access latencies will be used instead of per-byte "\ - "even if icache-read_latency is set. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus, this is only "\ - "used when icache-state_modelled=true.") - icache_read_latency = Param.UInt64(0, "L1 I-Cache timing annotation "\ - "latency for read accesses given in ticks per byte "\ - "accessed.icache-read_access_latency must be set to 0 for "\ - "per-byte latencies to be applied. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus. This is only "\ - "used when icache-state_modelled=true.") - icache_state_modelled = Param.Bool(False, - "Set whether I-cache has stateful implementation") - l3cache_hit_latency = Param.UInt64(0, "L3 Cache timing annotation "\ - "latency for hit. Intended to model the tag-lookup time. This "\ - "is only used when l3cache-state_modelled=true.") - l3cache_maintenance_latency = Param.UInt64(0, "L3 Cache timing "\ - "annotation latency for cache maintenance operations given in "\ - "total ticks. 
This is only used when dcache-state_modelled=true.") - l3cache_miss_latency = Param.UInt64(0, "L3 Cache timing annotation "\ - "latency for miss. Intended to model the time for failed "\ - "tag-lookup and allocation of intermediate buffers. This is "\ - "only used when l3cache-state_modelled=true.") - l3cache_read_access_latency = Param.UInt64(0, "L3 Cache timing "\ - "annotation latency for read accesses given in ticks per access "\ - "(of size l3cache-read_bus_width_in_bytes). If this parameter "\ - "is non-zero, per-access latencies will be used instead of "\ - "per-byte even if l3cache-read_latency is set. This is in "\ - "addition to the hit or miss latency, and intended to correspond "\ - "to the time taken to transfer across the cache upstream bus, "\ - "this is only used when l3cache-state_modelled=true.") - l3cache_read_latency = Param.UInt64(0, "L3 Cache timing annotation "\ - "latency for read accesses given in ticks per byte "\ - "accessed.l3cache-read_access_latency must be set to 0 for "\ - "per-byte latencies to be applied. This is in addition to the "\ - "hit or miss latency, and intended to correspond to the time "\ - "taken to transfer across the cache upstream bus. This is only "\ - "used when l3cache-state_modelled=true.") - l3cache_size = Param.MemorySize('0x100000', "L3 Cache size in bytes.") - l3cache_snoop_data_transfer_latency = Param.UInt64(0, "L3 Cache timing "\ - "annotation latency for received snoop accesses that perform a "\ - "data transfer given in ticks per byte accessed. This is only "\ - "used when dcache-state_modelled=true.") - l3cache_snoop_issue_latency = Param.UInt64(0, "L3 Cache timing "\ - "annotation latency for snoop accesses issued by this cache in "\ - "total ticks. This is only used when dcache-state_modelled=true.") - l3cache_write_access_latency = Param.UInt64(0, "L3 Cache timing "\ - "annotation latency for write accesses given in ticks per access "\ - "(of size l3cache-write_bus_width_in_bytes). 
If this parameter "\ - "is non-zero, per-access latencies will be used instead of "\ - "per-byte even if l3cache-write_latency is set. This is only "\ - "used when l3cache-state_modelled=true.") - l3cache_write_latency = Param.UInt64(0, "L3 Cache timing annotation "\ - "latency for write accesses given in ticks per byte accessed. "\ - "l3cache-write_access_latency must be set to 0 for per-byte "\ - "latencies to be applied. This is only used when "\ - "l3cache-state_modelled=true.") - pchannel_treat_simreset_as_poreset = Param.Bool(False, - "Register core as ON state to cluster with simulation reset.") - periph_address_end = Param.Addr(0x0, "End address for peripheral port "\ - "address range exclusive(corresponds to AENDMP input signal).") - periph_address_start = Param.Addr(0x0, "Start address for peripheral "\ - "port address range inclusive(corresponds to ASTARTMP input "\ - "signal).") - ptw_latency = Param.UInt64(0, "Page table walker latency for TA "\ - "(Timing Annotation), expressed in simulation ticks") - tlb_latency = Param.UInt64(0, "TLB latency for TA (Timing Annotation), "\ - "expressed in simulation ticks") - treat_dcache_cmos_to_pou_as_nop = Param.Bool(False, "Whether dcache "\ - "invalidation to the point of unification is required for "\ - "instruction to data coherence. true - Invalidate operations not "\ - "required") - walk_cache_latency = Param.UInt64(0, "Walk cache latency for TA (Timing "\ - "Annotation), expressed in simulation ticks") + enable_simulation_performance_optimizations = Param.Bool( + True, + "With this option enabled, the model will run more quickly, but " + "be less accurate to exact CPU behavior. 
The model will still be " + "functionally accurate for software, but may increase " + "differences seen between hardware behavior and model behavior " + "for certain workloads (it changes the micro-architectural value " + "of stage12_tlb_size parameter to 1024).", + ) + ext_abort_device_read_is_sync = Param.Bool( + False, "Synchronous reporting of device-nGnRE read external aborts" + ) + ext_abort_device_write_is_sync = Param.Bool( + False, "Synchronous reporting of device-nGnRE write external aborts" + ) + ext_abort_so_read_is_sync = Param.Bool( + False, "Synchronous reporting of device-nGnRnE read external aborts" + ) + ext_abort_so_write_is_sync = Param.Bool( + False, "Synchronous reporting of device-nGnRnE write external aborts" + ) + gicv3_cpuintf_mmap_access_level = Param.Unsigned( + 0, + "Allowed values are: " + "0-mmap access is supported for GICC,GICH,GICV registers. 1-mmap " + "access is supported only for GICV registers. 2-mmap access is " + "not supported.", + ) + has_peripheral_port = Param.Bool( + False, "If true, additional AXI peripheral port is configured." + ) + has_statistical_profiling = Param.Bool( + True, "Whether Statistical Based Profiling is implemented" + ) + icache_hit_latency = Param.UInt64( + 0, + "L1 I-Cache timing annotation " + "latency for hit. Intended to model the tag-lookup time. This " + "is only used when icache-state_modelled=true.", + ) + icache_maintenance_latency = Param.UInt64( + 0, + "L1 I-Cache timing " + "annotation latency for cache maintenance operations given in " + "total ticks. This is only used when icache-state_modelled=true.", + ) + icache_miss_latency = Param.UInt64( + 0, + "L1 I-Cache timing annotation " + "latency for miss. Intended to model the time for failed " + "tag-lookup and allocation of intermediate buffers. This is " + "only used when icache-state_modelled=true.", + ) + icache_prefetch_enabled = Param.Bool( + False, + "Enable simulation of " + "instruction cache prefetching. 
This is only used when " + "icache-state_modelled=true.", + ) + icache_read_access_latency = Param.UInt64( + 0, + "L1 I-Cache timing " + "annotation latency for read accesses given in ticks per access " + "(of size icache-read_bus_width_in_bytes). If this parameter is " + "non-zero, per-access latencies will be used instead of per-byte " + "even if icache-read_latency is set. This is in addition to the " + "hit or miss latency, and intended to correspond to the time " + "taken to transfer across the cache upstream bus, this is only " + "used when icache-state_modelled=true.", + ) + icache_read_latency = Param.UInt64( + 0, + "L1 I-Cache timing annotation " + "latency for read accesses given in ticks per byte " + "accessed.icache-read_access_latency must be set to 0 for " + "per-byte latencies to be applied. This is in addition to the " + "hit or miss latency, and intended to correspond to the time " + "taken to transfer across the cache upstream bus. This is only " + "used when icache-state_modelled=true.", + ) + icache_state_modelled = Param.Bool( + False, "Set whether I-cache has stateful implementation" + ) + l3cache_hit_latency = Param.UInt64( + 0, + "L3 Cache timing annotation " + "latency for hit. Intended to model the tag-lookup time. This " + "is only used when l3cache-state_modelled=true.", + ) + l3cache_maintenance_latency = Param.UInt64( + 0, + "L3 Cache timing " + "annotation latency for cache maintenance operations given in " + "total ticks. This is only used when dcache-state_modelled=true.", + ) + l3cache_miss_latency = Param.UInt64( + 0, + "L3 Cache timing annotation " + "latency for miss. Intended to model the time for failed " + "tag-lookup and allocation of intermediate buffers. This is " + "only used when l3cache-state_modelled=true.", + ) + l3cache_read_access_latency = Param.UInt64( + 0, + "L3 Cache timing " + "annotation latency for read accesses given in ticks per access " + "(of size l3cache-read_bus_width_in_bytes). 
If this parameter " + "is non-zero, per-access latencies will be used instead of " + "per-byte even if l3cache-read_latency is set. This is in " + "addition to the hit or miss latency, and intended to correspond " + "to the time taken to transfer across the cache upstream bus, " + "this is only used when l3cache-state_modelled=true.", + ) + l3cache_read_latency = Param.UInt64( + 0, + "L3 Cache timing annotation " + "latency for read accesses given in ticks per byte " + "accessed.l3cache-read_access_latency must be set to 0 for " + "per-byte latencies to be applied. This is in addition to the " + "hit or miss latency, and intended to correspond to the time " + "taken to transfer across the cache upstream bus. This is only " + "used when l3cache-state_modelled=true.", + ) + l3cache_size = Param.MemorySize("0x100000", "L3 Cache size in bytes.") + l3cache_snoop_data_transfer_latency = Param.UInt64( + 0, + "L3 Cache timing " + "annotation latency for received snoop accesses that perform a " + "data transfer given in ticks per byte accessed. This is only " + "used when dcache-state_modelled=true.", + ) + l3cache_snoop_issue_latency = Param.UInt64( + 0, + "L3 Cache timing " + "annotation latency for snoop accesses issued by this cache in " + "total ticks. This is only used when dcache-state_modelled=true.", + ) + l3cache_write_access_latency = Param.UInt64( + 0, + "L3 Cache timing " + "annotation latency for write accesses given in ticks per access " + "(of size l3cache-write_bus_width_in_bytes). If this parameter " + "is non-zero, per-access latencies will be used instead of " + "per-byte even if l3cache-write_latency is set. This is only " + "used when l3cache-state_modelled=true.", + ) + l3cache_write_latency = Param.UInt64( + 0, + "L3 Cache timing annotation " + "latency for write accesses given in ticks per byte accessed. " + "l3cache-write_access_latency must be set to 0 for per-byte " + "latencies to be applied. 
This is only used when " + "l3cache-state_modelled=true.", + ) + pchannel_treat_simreset_as_poreset = Param.Bool( + False, "Register core as ON state to cluster with simulation reset." + ) + periph_address_end = Param.Addr( + 0x0, + "End address for peripheral port " + "address range exclusive(corresponds to AENDMP input signal).", + ) + periph_address_start = Param.Addr( + 0x0, + "Start address for peripheral " + "port address range inclusive(corresponds to ASTARTMP input " + "signal).", + ) + ptw_latency = Param.UInt64( + 0, + "Page table walker latency for TA " + "(Timing Annotation), expressed in simulation ticks", + ) + tlb_latency = Param.UInt64( + 0, + "TLB latency for TA (Timing Annotation), " + "expressed in simulation ticks", + ) + treat_dcache_cmos_to_pou_as_nop = Param.Bool( + False, + "Whether dcache " + "invalidation to the point of unification is required for " + "instruction to data coherence. true - Invalidate operations not " + "required", + ) + walk_cache_latency = Param.UInt64( + 0, + "Walk cache latency for TA (Timing " + "Annotation), expressed in simulation ticks", + ) def generateDeviceTree(self, state): node = FdtNode("timer") - node.appendCompatible(["arm,cortex-a15-timer", - "arm,armv7-timer", - "arm,armv8-timer"]) - node.append(FdtPropertyWords("interrupts", [ - 1, int(self.cntpsirq.num), 0xf08, - 1, int(self.cntpnsirq.num), 0xf08, - 1, int(self.cntvirq.num), 0xf08, - 1, int(self.cnthpirq.num), 0xf08, - ])) + node.appendCompatible( + ["arm,cortex-a15-timer", "arm,armv7-timer", "arm,armv8-timer"] + ) + node.append( + FdtPropertyWords( + "interrupts", + [ + 1, + int(self.cntpsirq.num), + 0xF08, + 1, + int(self.cntpnsirq.num), + 0xF08, + 1, + int(self.cntvirq.num), + 0xF08, + 1, + int(self.cnthpirq.num), + 0xF08, + ], + ) + ) yield node + class FastModelScxEvsCortexA76x1(SystemC_ScModule): - type = 'FastModelScxEvsCortexA76x1' - cxx_class = 'gem5::fastmodel::ScxEvsCortexA76<' \ - 'gem5::fastmodel::ScxEvsCortexA76x1Types>' - 
cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexA76/evs.hh' + type = "FastModelScxEvsCortexA76x1" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexA76<" + "gem5::fastmodel::ScxEvsCortexA76x1Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexA76/evs.hh" + class FastModelCortexA76x1(FastModelCortexA76Cluster): - cores = [ FastModelCortexA76(thread_paths=[ 'core.cpu0' ]) ] + cores = [FastModelCortexA76(thread_paths=["core.cpu0"])] evs = FastModelScxEvsCortexA76x1() + class FastModelScxEvsCortexA76x2(SystemC_ScModule): - type = 'FastModelScxEvsCortexA76x2' - cxx_class = 'gem5::fastmodel::ScxEvsCortexA76<' \ - 'gem5::fastmodel::ScxEvsCortexA76x2Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexA76/evs.hh' + type = "FastModelScxEvsCortexA76x2" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexA76<" + "gem5::fastmodel::ScxEvsCortexA76x2Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexA76/evs.hh" + class FastModelCortexA76x2(FastModelCortexA76Cluster): - cores = [ FastModelCortexA76(thread_paths=[ 'core.cpu0' ]), - FastModelCortexA76(thread_paths=[ 'core.cpu1' ]) ] + cores = [ + FastModelCortexA76(thread_paths=["core.cpu0"]), + FastModelCortexA76(thread_paths=["core.cpu1"]), + ] evs = FastModelScxEvsCortexA76x2() + class FastModelScxEvsCortexA76x3(SystemC_ScModule): - type = 'FastModelScxEvsCortexA76x3' - cxx_class = 'gem5::fastmodel::ScxEvsCortexA76<' \ - 'gem5::fastmodel::ScxEvsCortexA76x3Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexA76/evs.hh' + type = "FastModelScxEvsCortexA76x3" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexA76<" + "gem5::fastmodel::ScxEvsCortexA76x3Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexA76/evs.hh" + class FastModelCortexA76x3(FastModelCortexA76Cluster): - cores = [ 
FastModelCortexA76(thread_paths=[ 'core.cpu0' ]), - FastModelCortexA76(thread_paths=[ 'core.cpu1' ]), - FastModelCortexA76(thread_paths=[ 'core.cpu2' ]) ] + cores = [ + FastModelCortexA76(thread_paths=["core.cpu0"]), + FastModelCortexA76(thread_paths=["core.cpu1"]), + FastModelCortexA76(thread_paths=["core.cpu2"]), + ] evs = FastModelScxEvsCortexA76x3() + class FastModelScxEvsCortexA76x4(SystemC_ScModule): - type = 'FastModelScxEvsCortexA76x4' - cxx_class = 'gem5::fastmodel::ScxEvsCortexA76<' \ - 'gem5::fastmodel::ScxEvsCortexA76x4Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexA76/evs.hh' + type = "FastModelScxEvsCortexA76x4" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexA76<" + "gem5::fastmodel::ScxEvsCortexA76x4Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexA76/evs.hh" + class FastModelCortexA76x4(FastModelCortexA76Cluster): - cores = [ FastModelCortexA76(thread_paths=[ 'core.cpu0' ]), - FastModelCortexA76(thread_paths=[ 'core.cpu1' ]), - FastModelCortexA76(thread_paths=[ 'core.cpu2' ]), - FastModelCortexA76(thread_paths=[ 'core.cpu3' ]) ] + cores = [ + FastModelCortexA76(thread_paths=["core.cpu0"]), + FastModelCortexA76(thread_paths=["core.cpu1"]), + FastModelCortexA76(thread_paths=["core.cpu2"]), + FastModelCortexA76(thread_paths=["core.cpu3"]), + ] evs = FastModelScxEvsCortexA76x4() diff --git a/src/arch/arm/fastmodel/CortexR52/FastModelCortexR52.py b/src/arch/arm/fastmodel/CortexR52/FastModelCortexR52.py index c9e21e6a3a..4970ae2ed4 100644 --- a/src/arch/arm/fastmodel/CortexR52/FastModelCortexR52.py +++ b/src/arch/arm/fastmodel/CortexR52/FastModelCortexR52.py @@ -35,202 +35,295 @@ from m5.objects.IntPin import IntSinkPin, VectorIntSinkPin from m5.objects.Iris import IrisBaseCPU from m5.objects.SystemC import SystemC_ScModule + class FastModelCortexR52(IrisBaseCPU): - type = 'FastModelCortexR52' - cxx_class = 'gem5::fastmodel::CortexR52' - cxx_header = 
'arch/arm/fastmodel/CortexR52/cortex_r52.hh' + type = "FastModelCortexR52" + cxx_class = "gem5::fastmodel::CortexR52" + cxx_header = "arch/arm/fastmodel/CortexR52/cortex_r52.hh" evs = Parent.evs - ppi = VectorIntSinkPin('PPI inputs (0-8)') + ppi = VectorIntSinkPin("PPI inputs (0-8)") - llpp = AmbaInitiatorSocket(64, 'Low Latency Peripheral Port') - flash = AmbaInitiatorSocket(64, 'Flash') - amba = AmbaInitiatorSocket(64, 'AMBA initiator socket') - core_reset = IntSinkPin('Raising this signal will put the core into ' \ - 'reset mode.') - poweron_reset = IntSinkPin('Power on reset. Initializes all the ' \ - 'processor logic, including debug logic.') - halt = IntSinkPin('Raising this signal will put the core into halt mode.') + llpp = AmbaInitiatorSocket(64, "Low Latency Peripheral Port") + flash = AmbaInitiatorSocket(64, "Flash") + amba = AmbaInitiatorSocket(64, "AMBA initiator socket") + core_reset = IntSinkPin( + "Raising this signal will put the core into " "reset mode." + ) + poweron_reset = IntSinkPin( + "Power on reset. Initializes all the " + "processor logic, including debug logic." + ) + halt = IntSinkPin("Raising this signal will put the core into halt mode.") - CFGEND = Param.Bool(False, "Endianness configuration at reset. 0, " \ - "little endian. 1, big endian.") + CFGEND = Param.Bool( + False, + "Endianness configuration at reset. 0, " + "little endian. 
1, big endian.", + ) CFGTE = Param.Bool(False, "Equivalent to CFGTHUMBEXCEPTIONS") RVBARADDR = Param.UInt32(0, "Equivalent to CFGVECTABLE") - ase_present = Param.Bool(True, "Set whether the model has been built " \ - "with NEON support") + ase_present = Param.Bool( + True, "Set whether the model has been built " "with NEON support" + ) dcache_size = Param.UInt16(0x8000, "L1 D-Cache size in bytes") flash_enable = Param.Bool(False, "Equivalent to CFGFLASHEN") icache_size = Param.UInt16(0x8000, "L1 I-Cache size in bytes") llpp_base = Param.UInt32(0, "Equivalent to CFGLLPPBASEADDR") llpp_size = Param.UInt32(0x1000, "Equivalent to CFGLLPPSIZE") - max_code_cache_mb = Param.UInt64(0x100, "Maximum size of the " \ - "simulation code cache (MiB). For platforms with more than 2 " \ - "cores this limit will be scaled down. (e.g 1/8 for 16 or more " \ - "cores).") - min_sync_level = Param.UInt8(0, "Force minimum syncLevel " \ - "(0=off=default,1=syncState,2=postInsnIO,3=postInsnAll)") - semihosting_A32_HLT = Param.UInt16(0xf000, "A32 HLT number for " \ - "semihosting calls.") - semihosting_ARM_SVC = Param.UInt32(0x123456, "A32 SVC number for " \ - "semihosting calls.") - semihosting_T32_HLT = Param.UInt8(60, "T32 HLT number for semihosting " \ - "calls.") - semihosting_Thumb_SVC = Param.UInt8(171, "T32 SVC number for " \ - "semihosting calls.") - semihosting_cmd_line = Param.String("", "Command line available to " \ - "semihosting calls.") - semihosting_cwd = Param.String("", "Base directory for semihosting " \ - "file access.") + max_code_cache_mb = Param.UInt64( + 0x100, + "Maximum size of the " + "simulation code cache (MiB). For platforms with more than 2 " + "cores this limit will be scaled down. (e.g 1/8 for 16 or more " + "cores).", + ) + min_sync_level = Param.UInt8( + 0, + "Force minimum syncLevel " + "(0=off=default,1=syncState,2=postInsnIO,3=postInsnAll)", + ) + semihosting_A32_HLT = Param.UInt16( + 0xF000, "A32 HLT number for " "semihosting calls." 
+ ) + semihosting_ARM_SVC = Param.UInt32( + 0x123456, "A32 SVC number for " "semihosting calls." + ) + semihosting_T32_HLT = Param.UInt8( + 60, "T32 HLT number for semihosting " "calls." + ) + semihosting_Thumb_SVC = Param.UInt8( + 171, "T32 SVC number for " "semihosting calls." + ) + semihosting_cmd_line = Param.String( + "", "Command line available to " "semihosting calls." + ) + semihosting_cwd = Param.String( + "", "Base directory for semihosting " "file access." + ) semihosting_enable = Param.Bool(True, "Enable semihosting SVC/HLT traps.") semihosting_heap_base = Param.UInt32(0, "Virtual address of heap base.") - semihosting_heap_limit = Param.UInt32(0xf000000, "Virtual address of " \ - "top of heap.") - semihosting_stack_base = Param.UInt32(0x10000000, "Virtual address of " \ - "base of descending stack.") - semihosting_stack_limit = Param.UInt32(0xf000000, "Virtual address of " \ - "stack limit.") + semihosting_heap_limit = Param.UInt32( + 0xF000000, "Virtual address of " "top of heap." + ) + semihosting_stack_base = Param.UInt32( + 0x10000000, "Virtual address of " "base of descending stack." + ) + semihosting_stack_limit = Param.UInt32( + 0xF000000, "Virtual address of " "stack limit." + ) tcm_a_enable = Param.Bool(False, "Equivalent to CFGTCMBOOT") tcm_a_size = Param.UInt32(0x4000, "Sets the size of the ATCM(in bytes)") tcm_b_size = Param.UInt32(0x4000, "Sets the size of the BTCM(in bytes)") tcm_c_size = Param.UInt32(0x2000, "Sets the size of the CTCM(in bytes)") - vfp_dp_present = Param.Bool(True, "Whether double-precision floating " \ - "point feature is implemented") - vfp_enable_at_reset = Param.Bool(False, "Enable VFP in CPACR, CPPWR, " \ - "NSACR at reset. 
Warning: Arm recommends going through the " - "implementation's suggested VFP power-up sequence!") + vfp_dp_present = Param.Bool( + True, + "Whether double-precision floating " "point feature is implemented", + ) + vfp_enable_at_reset = Param.Bool( + False, + "Enable VFP in CPACR, CPPWR, " + "NSACR at reset. Warning: Arm recommends going through the " + "implementation's suggested VFP power-up sequence!", + ) + class FastModelCortexR52Cluster(SimObject): - type = 'FastModelCortexR52Cluster' - cxx_class = 'gem5::fastmodel::CortexR52Cluster' - cxx_header = 'arch/arm/fastmodel/CortexR52/cortex_r52.hh' + type = "FastModelCortexR52Cluster" + cxx_class = "gem5::fastmodel::CortexR52Cluster" + cxx_header = "arch/arm/fastmodel/CortexR52/cortex_r52.hh" cores = VectorParam.FastModelCortexR52( - 'Core in a given cluster of CortexR52s') + "Core in a given cluster of CortexR52s" + ) evs = Param.SystemC_ScModule( - "Fast mo0del exported virtual subsystem holding cores") + "Fast mo0del exported virtual subsystem holding cores" + ) - spi = VectorIntSinkPin('SPI inputs (0-959)') + spi = VectorIntSinkPin("SPI inputs (0-959)") - ext_slave = AmbaTargetSocket(64, 'AMBA target socket') - top_reset = IntSinkPin('This signal resets timer and interrupt controller.') - dbg_reset = IntSinkPin('Initialize the shared debug APB, Cross Trigger ' \ - 'Interface (CTI), and Cross Trigger Matrix (CTM) logic.') - model_reset = ResetResponsePort('A reset port to reset the whole cluster.') + ext_slave = AmbaTargetSocket(64, "AMBA target socket") + top_reset = IntSinkPin( + "This signal resets timer and interrupt controller." + ) + dbg_reset = IntSinkPin( + "Initialize the shared debug APB, Cross Trigger " + "Interface (CTI), and Cross Trigger Matrix (CTM) logic." 
+ ) + model_reset = ResetResponsePort("A reset port to reset the whole cluster.") - CLUSTER_ID = Param.UInt16(0, "CLUSTER_ID[15:8] equivalent to " \ - "CFGMPIDRAFF2, CLUSTER_ID[7:0] equivalent to CFGMPIDRAFF1") + CLUSTER_ID = Param.UInt16( + 0, + "CLUSTER_ID[15:8] equivalent to " + "CFGMPIDRAFF2, CLUSTER_ID[7:0] equivalent to CFGMPIDRAFF1", + ) DBGROMADDR = Param.UInt32(0, "Equivalent to CFGDBGROMADDR") - DBGROMADDRV = Param.Bool(False, "If true, set bits[1:0] of the CP15 " \ - "DBGDRAR to indicate that the address is valid") + DBGROMADDRV = Param.Bool( + False, + "If true, set bits[1:0] of the CP15 " + "DBGDRAR to indicate that the address is valid", + ) PERIPHBASE = Param.UInt32(0x13080000, "Equivalent to CFGPERIPHBASE") cluster_utid = Param.UInt8(0, "Equivalent to CFGCLUSTERUTID") - cpi_div = Param.UInt32(1, "Divider for calculating CPI " \ - "(Cycles Per Instruction)") - cpi_mul = Param.UInt32(1, "Multiplier for calculating CPI " \ - "(Cycles Per Instruction)") - dcache_prefetch_enabled = Param.Bool(False, "Enable simulation of data " \ - "cache prefetching. This is only used when " \ - "dcache-state_modelled=true") - dcache_read_access_latency = Param.UInt64(0, "L1 D-Cache timing " \ - "annotation latency for read accesses given in ticks per access " \ - "(of size dcache-read_bus_width_in_bytes). If this parameter " \ - "is non-zero, per-access latencies will be used instead of " \ - "per-byte even if dcache-read_latency is set. This is in " \ - "addition to the hit or miss latency, and intended to " \ - "correspond to the time taken to transfer across the cache " \ - "upstream bus, this is only used when dcache-state_modelled=true.") - dcache_state_modelled = Param.Bool(False, "Set whether D-cache has " \ - "stateful implementation") - dcache_write_access_latency = Param.UInt64(0, "L1 D-Cache timing " \ - "annotation latency for write accesses given in ticks per " \ - "access (of size dcache-write_bus_width_in_bytes). 
If this " \ - "parameter is non-zero, per-access latencies will be used " \ - "instead of per-byte even if dcache-write_latency is set. This " \ - "is only used when dcache-state_modelled=true.") - flash_protection_enable_at_reset = Param.Bool(False, "Equivalent to " \ - "CFGFLASHPROTEN") + cpi_div = Param.UInt32( + 1, "Divider for calculating CPI " "(Cycles Per Instruction)" + ) + cpi_mul = Param.UInt32( + 1, "Multiplier for calculating CPI " "(Cycles Per Instruction)" + ) + dcache_prefetch_enabled = Param.Bool( + False, + "Enable simulation of data " + "cache prefetching. This is only used when " + "dcache-state_modelled=true", + ) + dcache_read_access_latency = Param.UInt64( + 0, + "L1 D-Cache timing " + "annotation latency for read accesses given in ticks per access " + "(of size dcache-read_bus_width_in_bytes). If this parameter " + "is non-zero, per-access latencies will be used instead of " + "per-byte even if dcache-read_latency is set. This is in " + "addition to the hit or miss latency, and intended to " + "correspond to the time taken to transfer across the cache " + "upstream bus, this is only used when dcache-state_modelled=true.", + ) + dcache_state_modelled = Param.Bool( + False, "Set whether D-cache has " "stateful implementation" + ) + dcache_write_access_latency = Param.UInt64( + 0, + "L1 D-Cache timing " + "annotation latency for write accesses given in ticks per " + "access (of size dcache-write_bus_width_in_bytes). If this " + "parameter is non-zero, per-access latencies will be used " + "instead of per-byte even if dcache-write_latency is set. This " + "is only used when dcache-state_modelled=true.", + ) + flash_protection_enable_at_reset = Param.Bool( + False, "Equivalent to " "CFGFLASHPROTEN" + ) has_flash_protection = Param.Bool(True, "Equivalent to CFGFLASHPROTIMP") - icache_prefetch_enabled = Param.Bool(False, "Enable simulation of " \ - "instruction cache prefetching. 
This is only used when " \ - "icache-state_modelled=true.") - icache_read_access_latency = Param.UInt64(0, "L1 I-Cache timing " \ - "annotation latency for read accesses given in ticks per access " \ - "(of size icache-read_bus_width_in_bytes). If this parameter " \ - "is non-zero, per-access latencies will be used instead of " \ - "per-byte even if icache-read_latency is set. This is in " \ - "addition to the hit or miss latency, and intended to " \ - "correspond to the time taken to transfer across the cache " \ - "upstream bus, this is only used when icache-state_modelled=true.") - icache_state_modelled = Param.Bool(False, "Set whether I-cache has " \ - "stateful implementation") + icache_prefetch_enabled = Param.Bool( + False, + "Enable simulation of " + "instruction cache prefetching. This is only used when " + "icache-state_modelled=true.", + ) + icache_read_access_latency = Param.UInt64( + 0, + "L1 I-Cache timing " + "annotation latency for read accesses given in ticks per access " + "(of size icache-read_bus_width_in_bytes). If this parameter " + "is non-zero, per-access latencies will be used instead of " + "per-byte even if icache-read_latency is set. This is in " + "addition to the hit or miss latency, and intended to " + "correspond to the time taken to transfer across the cache " + "upstream bus, this is only used when icache-state_modelled=true.", + ) + icache_state_modelled = Param.Bool( + False, "Set whether I-cache has " "stateful implementation" + ) memory_ext_slave_base = Param.UInt32(0, "Equivalent to CFGAXISTCMBASEADDR") memory_flash_base = Param.UInt32(0, "Equivalent to CFGFLASHBASEADDR") - memory_flash_size = Param.UInt32(0x4000000, "Equivalent to CFGFLASHIMP. 
" \ - "memory.flash_size = 0 => CFGFLASHIMP = false") - num_protection_regions_s1 = Param.UInt8(16, "Number of v8-R stage1 " \ - "protection regions") - num_protection_regions_s2 = Param.UInt8(16, "Number of v8-R hyp " \ - "protection regions") - num_spi = Param.UInt16(960, "Number of interrupts (SPI) into the " \ - "internal GIC controller") - ram_protection_enable_at_reset = Param.Bool(False, "Equivalent to " \ - "CFGRAMPROTEN") - has_export_m_port = Param.Bool(True, "The interrupt distributor has an " \ - "optional interrupt export port for routing interrupts to an " \ - "external device") + memory_flash_size = Param.UInt32( + 0x4000000, + "Equivalent to CFGFLASHIMP. " + "memory.flash_size = 0 => CFGFLASHIMP = false", + ) + num_protection_regions_s1 = Param.UInt8( + 16, "Number of v8-R stage1 " "protection regions" + ) + num_protection_regions_s2 = Param.UInt8( + 16, "Number of v8-R hyp " "protection regions" + ) + num_spi = Param.UInt16( + 960, "Number of interrupts (SPI) into the " "internal GIC controller" + ) + ram_protection_enable_at_reset = Param.Bool( + False, "Equivalent to " "CFGRAMPROTEN" + ) + has_export_m_port = Param.Bool( + True, + "The interrupt distributor has an " + "optional interrupt export port for routing interrupts to an " + "external device", + ) + class FastModelScxEvsCortexR52x1(SystemC_ScModule): - type = 'FastModelScxEvsCortexR52x1' - cxx_class = 'gem5::fastmodel::ScxEvsCortexR52<' \ - 'gem5::fastmodel::ScxEvsCortexR52x1Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexR52/evs.hh' + type = "FastModelScxEvsCortexR52x1" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexR52<" + "gem5::fastmodel::ScxEvsCortexR52x1Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexR52/evs.hh" + class FastModelCortexR52x1(FastModelCortexR52Cluster): - cores = [ FastModelCortexR52(thread_paths=[ 'core.cpu0' ]) ] + cores = [FastModelCortexR52(thread_paths=["core.cpu0"])] evs = 
FastModelScxEvsCortexR52x1() + class FastModelScxEvsCortexR52x2(SystemC_ScModule): - type = 'FastModelScxEvsCortexR52x2' - cxx_class = 'gem5::fastmodel::ScxEvsCortexR52<' \ - 'gem5::fastmodel::ScxEvsCortexR52x2Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexR52/evs.hh' + type = "FastModelScxEvsCortexR52x2" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexR52<" + "gem5::fastmodel::ScxEvsCortexR52x2Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexR52/evs.hh" + class FastModelCortexR52x2(FastModelCortexR52Cluster): - cores = [ FastModelCortexR52(thread_paths=[ 'core.cpu0' ]), - FastModelCortexR52(thread_paths=[ 'core.cpu1' ]) ] + cores = [ + FastModelCortexR52(thread_paths=["core.cpu0"]), + FastModelCortexR52(thread_paths=["core.cpu1"]), + ] evs = FastModelScxEvsCortexR52x2() + class FastModelScxEvsCortexR52x3(SystemC_ScModule): - type = 'FastModelScxEvsCortexR52x3' - cxx_class = 'gem5::fastmodel::ScxEvsCortexR52<' \ - 'gem5::fastmodel::ScxEvsCortexR52x3Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexR52/evs.hh' + type = "FastModelScxEvsCortexR52x3" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexR52<" + "gem5::fastmodel::ScxEvsCortexR52x3Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexR52/evs.hh" + class FastModelCortexR52x3(FastModelCortexR52Cluster): - cores = [ FastModelCortexR52(thread_paths=[ 'core.cpu0' ]), - FastModelCortexR52(thread_paths=[ 'core.cpu1' ]), - FastModelCortexR52(thread_paths=[ 'core.cpu2' ]) ] + cores = [ + FastModelCortexR52(thread_paths=["core.cpu0"]), + FastModelCortexR52(thread_paths=["core.cpu1"]), + FastModelCortexR52(thread_paths=["core.cpu2"]), + ] evs = FastModelScxEvsCortexR52x3() + class FastModelScxEvsCortexR52x4(SystemC_ScModule): - type = 'FastModelScxEvsCortexR52x4' - cxx_class = 'gem5::fastmodel::ScxEvsCortexR52<' \ - 
'gem5::fastmodel::ScxEvsCortexR52x4Types>' - cxx_template_params = [ 'class Types' ] - cxx_header = 'arch/arm/fastmodel/CortexR52/evs.hh' + type = "FastModelScxEvsCortexR52x4" + cxx_class = ( + "gem5::fastmodel::ScxEvsCortexR52<" + "gem5::fastmodel::ScxEvsCortexR52x4Types>" + ) + cxx_template_params = ["class Types"] + cxx_header = "arch/arm/fastmodel/CortexR52/evs.hh" + class FastModelCortexR52x4(FastModelCortexR52Cluster): - cores = [ FastModelCortexR52(thread_paths=[ 'core.cpu0' ]), - FastModelCortexR52(thread_paths=[ 'core.cpu1' ]), - FastModelCortexR52(thread_paths=[ 'core.cpu2' ]), - FastModelCortexR52(thread_paths=[ 'core.cpu3' ]) ] + cores = [ + FastModelCortexR52(thread_paths=["core.cpu0"]), + FastModelCortexR52(thread_paths=["core.cpu1"]), + FastModelCortexR52(thread_paths=["core.cpu2"]), + FastModelCortexR52(thread_paths=["core.cpu3"]), + ] evs = FastModelScxEvsCortexR52x4() diff --git a/src/arch/arm/fastmodel/FastModel.py b/src/arch/arm/fastmodel/FastModel.py index f3e93a0439..8a28522db4 100644 --- a/src/arch/arm/fastmodel/FastModel.py +++ b/src/arch/arm/fastmodel/FastModel.py @@ -29,17 +29,22 @@ from m5.proxy import * from m5.objects.SystemC import SystemC_ScModule from m5.objects.Tlm import TlmInitiatorSocket, TlmTargetSocket + def AMBA_TARGET_ROLE(width): - return 'AMBA TARGET %d' % width + return "AMBA TARGET %d" % width + def AMBA_INITIATOR_ROLE(width): - return 'AMBA INITIATOR %d' % width + return "AMBA INITIATOR %d" % width + def SC_REQUEST_PORT_ROLE(port_type): - return 'SC REQUEST PORT for %s' % port_type + return "SC REQUEST PORT for %s" % port_type + def SC_RESPONSE_PORT_ROLE(port_type): - return 'SC RESPONSE PORT for %s' % port_type + return "SC RESPONSE PORT for %s" % port_type + class AmbaTargetSocket(Port): def __init__(self, width, desc): @@ -49,6 +54,7 @@ class AmbaTargetSocket(Port): super().__init__(my_role, desc) + class VectorAmbaTargetSocket(VectorPort): def __init__(self, width, desc): my_role = AMBA_INITIATOR_ROLE(width) @@ -57,6 
+63,7 @@ class VectorAmbaTargetSocket(VectorPort): super().__init__(my_role, desc) + class AmbaInitiatorSocket(Port): def __init__(self, width, desc): my_role = AMBA_TARGET_ROLE(width) @@ -65,6 +72,7 @@ class AmbaInitiatorSocket(Port): super().__init__(my_role, desc, is_source=True) + class VectorAmbaInitiatorSocket(VectorPort): def __init__(self, width, desc): my_role = AMBA_TARGET_ROLE(width) @@ -73,6 +81,7 @@ class VectorAmbaInitiatorSocket(VectorPort): super().__init__(my_role, desc, is_source=True) + class ScRequestPort(Port): def __init__(self, desc, port_type): my_role = SC_REQUEST_PORT_ROLE(port_type) @@ -81,6 +90,7 @@ class ScRequestPort(Port): super().__init__(my_role, desc) + class ScResponsePort(Port): def __init__(self, desc, port_type): my_role = SC_RESPONSE_PORT_ROLE(port_type) @@ -89,18 +99,20 @@ class ScResponsePort(Port): super().__init__(my_role, desc) -class AmbaToTlmBridge64(SystemC_ScModule): - type = 'AmbaToTlmBridge64' - cxx_class = 'gem5::fastmodel::AmbaToTlmBridge64' - cxx_header = 'arch/arm/fastmodel/amba_to_tlm_bridge.hh' - amba = AmbaTargetSocket(64, 'AMBA PV target socket') - tlm = TlmInitiatorSocket(64, 'TLM initiator socket') +class AmbaToTlmBridge64(SystemC_ScModule): + type = "AmbaToTlmBridge64" + cxx_class = "gem5::fastmodel::AmbaToTlmBridge64" + cxx_header = "arch/arm/fastmodel/amba_to_tlm_bridge.hh" + + amba = AmbaTargetSocket(64, "AMBA PV target socket") + tlm = TlmInitiatorSocket(64, "TLM initiator socket") + class AmbaFromTlmBridge64(SystemC_ScModule): - type = 'AmbaFromTlmBridge64' - cxx_class = 'gem5::fastmodel::AmbaFromTlmBridge64' - cxx_header = 'arch/arm/fastmodel/amba_from_tlm_bridge.hh' + type = "AmbaFromTlmBridge64" + cxx_class = "gem5::fastmodel::AmbaFromTlmBridge64" + cxx_header = "arch/arm/fastmodel/amba_from_tlm_bridge.hh" - tlm = TlmTargetSocket(64, 'TLM target socket') - amba = AmbaInitiatorSocket(64, 'AMBA PV initiator socket') + tlm = TlmTargetSocket(64, "TLM target socket") + amba = AmbaInitiatorSocket(64, 
"AMBA PV initiator socket") diff --git a/src/arch/arm/fastmodel/GIC/FastModelGIC.py b/src/arch/arm/fastmodel/GIC/FastModelGIC.py index 8a94d59e2e..1ad5a979cd 100644 --- a/src/arch/arm/fastmodel/GIC/FastModelGIC.py +++ b/src/arch/arm/fastmodel/GIC/FastModelGIC.py @@ -44,437 +44,811 @@ from m5.objects.Gic import BaseGic from m5.objects.IntPin import VectorIntSourcePin from m5.objects.SystemC import SystemC_ScModule -GICV3_COMMS_TARGET_ROLE = 'GICV3 COMMS TARGET' -GICV3_COMMS_INITIATOR_ROLE = 'GICV3 COMMS INITIATOR' +GICV3_COMMS_TARGET_ROLE = "GICV3 COMMS TARGET" +GICV3_COMMS_INITIATOR_ROLE = "GICV3 COMMS INITIATOR" Port.compat(GICV3_COMMS_TARGET_ROLE, GICV3_COMMS_INITIATOR_ROLE) + class Gicv3CommsTargetSocket(Port): def __init__(self, desc): super().__init__(GICV3_COMMS_INITIATOR_ROLE, desc) + class Gicv3CommsInitiatorSocket(Port): def __init__(self, desc): super().__init__(GICV3_COMMS_TARGET_ROLE, desc, is_source=True) + class VectorGicv3CommsInitiatorSocket(VectorPort): def __init__(self, desc): super().__init__(GICV3_COMMS_TARGET_ROLE, desc, is_source=True) class SCFastModelGIC(SystemC_ScModule): - type = 'SCFastModelGIC' - cxx_class = 'gem5::fastmodel::SCGIC' - cxx_header = 'arch/arm/fastmodel/GIC/gic.hh' + type = "SCFastModelGIC" + cxx_class = "gem5::fastmodel::SCGIC" + cxx_header = "arch/arm/fastmodel/GIC/gic.hh" - enabled = Param.Bool(True, "Enable GICv3 functionality; when false the " - "component is inactive. has_gicv3 will replace this when GIC_IRI " - "replaces GICv3IRI.") - has_gicv3 = Param.Bool(False, "Enable GICv3 functionality; when false " - "the component is inactive. This will replace \"enabled\" " - "parameter.") - has_gicv4_1 = Param.Bool(False, "Enable GICv4.1 functionality; when " - "false the component is inactive.") + enabled = Param.Bool( + True, + "Enable GICv3 functionality; when false the " + "component is inactive. 
has_gicv3 will replace this when GIC_IRI " + "replaces GICv3IRI.", + ) + has_gicv3 = Param.Bool( + False, + "Enable GICv3 functionality; when false " + 'the component is inactive. This will replace "enabled" ' + "parameter.", + ) + has_gicv4_1 = Param.Bool( + False, + "Enable GICv4.1 functionality; when " + "false the component is inactive.", + ) vPEID_bits = Param.Unsigned(16, "Number of bits of vPEID with GICv4.1.") print_mmap = Param.Bool(False, "Print memory map to stdout") - monolithic = Param.Bool(False, "Indicate that the implementation is not " - "distributed") - direct_lpi_support = Param.Bool(False, "Enable support for LPI " - "operations through GICR registers") - cpu_affinities = Param.String("", "A comma separated list of dotted quads " - "containing the affinities of all PEs connected to this IRI.") - non_ARE_core_count = Param.Unsigned(8, "Maximum number of non-ARE cores; " - "normally used to pass the cluster-level NUM_CORES parameter to " - "the top-level redistributor.") - reg_base = Param.Addr(0x2c010000, "Base for decoding GICv3 registers.") - reg_base_per_redistributor = Param.String("", "Base address for each " - "redistributor in the form " - "'0.0.0.0=0x2c010000, 0.0.0.1=0x2c020000'. All redistributors " - "must be specified and this overrides the reg-base parameter " - "(except that reg-base will still be used for the top-level " - "redistributor).") - gicd_alias = Param.Addr(0x0, "In GICv2 mode: the base address for a 4k " - "page alias of the first 4k of the Distributor page, in GICv3 " - "mode. 
the base address of a 64KB page containing message based " - "SPI signalling register aliases(0:Disabled)") - has_two_security_states = Param.Bool(True, "If true, has two security " - "states") - DS_fixed_to_zero = Param.Bool(False, "Enable/disable support of single " - "security state") + monolithic = Param.Bool( + False, "Indicate that the implementation is not " "distributed" + ) + direct_lpi_support = Param.Bool( + False, "Enable support for LPI " "operations through GICR registers" + ) + cpu_affinities = Param.String( + "", + "A comma separated list of dotted quads " + "containing the affinities of all PEs connected to this IRI.", + ) + non_ARE_core_count = Param.Unsigned( + 8, + "Maximum number of non-ARE cores; " + "normally used to pass the cluster-level NUM_CORES parameter to " + "the top-level redistributor.", + ) + reg_base = Param.Addr(0x2C010000, "Base for decoding GICv3 registers.") + reg_base_per_redistributor = Param.String( + "", + "Base address for each " + "redistributor in the form " + "'0.0.0.0=0x2c010000, 0.0.0.1=0x2c020000'. All redistributors " + "must be specified and this overrides the reg-base parameter " + "(except that reg-base will still be used for the top-level " + "redistributor).", + ) + gicd_alias = Param.Addr( + 0x0, + "In GICv2 mode: the base address for a 4k " + "page alias of the first 4k of the Distributor page, in GICv3 " + "mode. 
the base address of a 64KB page containing message based " + "SPI signalling register aliases(0:Disabled)", + ) + has_two_security_states = Param.Bool( + True, "If true, has two security " "states" + ) + DS_fixed_to_zero = Param.Bool( + False, "Enable/disable support of single " "security state" + ) IIDR = Param.UInt32(0x0, "GICD_IIDR and GICR_IIDR value") - gicv2_only = Param.Bool(False, "If true, when using the GICv3 model, " - "pretend to be a GICv2 system") - STATUSR_implemented = Param.Bool(True, "Determines whether the " - "GICR_STATUSR register is implemented.") - priority_bits_implemented = Param.Unsigned(5, "Number of implemented " - "priority bits") - itargets_razwi = Param.Bool(False, "If true, the GICD_ITARGETS registers " - "are RAZ/WI") - icfgr_sgi_mask = Param.UInt32(0x0, "Mask for writes to ICFGR registers " - "that configure SGIs") - icfgr_ppi_mask = Param.UInt32(0xaaaaaaaa, "Mask for writes to ICFGR " - "registers that configure PPIs") - icfgr_spi_mask = Param.UInt32(0xaaaaaaaa, "Mask for writes to ICFGR " - "registers that configure SPIs") - icfgr_sgi_reset = Param.UInt32(0xaaaaaaaa, "Reset value for ICFGR " - "registers that configure SGIs") - icfgr_ppi_reset = Param.UInt32(0x0, "Reset value for ICFGR regesters " - "that configure PPIs") - icfgr_spi_reset = Param.UInt32(0x0, "Reset value for ICFGR regesters " - "that configure SPIs") - icfgr_ppi_rsvd_bit = Param.Bool(False, "If ARE=0, the value of reserved " - "bits i.e. bit 0,2,4..30 of ICFGRn for n>0") - igroup_sgi_mask = Param.UInt16(0xffff, "Mask for writes to SGI bits in " - "IGROUP registers") - igroup_ppi_mask = Param.UInt16(0xffff, "Mask for writes to PPI bits in " - "IGROUP registers") - igroup_sgi_reset = Param.UInt16(0x0, "Reset value for SGI bits in IGROUP " - "registers") - igroup_ppi_reset = Param.UInt16(0x0, "Reset value for SGI bits in IGROUP " - "registers") - ppi_implemented_mask = Param.UInt16(0xffff, "Mask of PPIs that are " - "implemented. 
One bit per PPI bit 0 == PPI 16 (first PPI). This " - "will affect other masks.") + gicv2_only = Param.Bool( + False, + "If true, when using the GICv3 model, " "pretend to be a GICv2 system", + ) + STATUSR_implemented = Param.Bool( + True, "Determines whether the " "GICR_STATUSR register is implemented." + ) + priority_bits_implemented = Param.Unsigned( + 5, "Number of implemented " "priority bits" + ) + itargets_razwi = Param.Bool( + False, "If true, the GICD_ITARGETS registers " "are RAZ/WI" + ) + icfgr_sgi_mask = Param.UInt32( + 0x0, "Mask for writes to ICFGR registers " "that configure SGIs" + ) + icfgr_ppi_mask = Param.UInt32( + 0xAAAAAAAA, "Mask for writes to ICFGR " "registers that configure PPIs" + ) + icfgr_spi_mask = Param.UInt32( + 0xAAAAAAAA, "Mask for writes to ICFGR " "registers that configure SPIs" + ) + icfgr_sgi_reset = Param.UInt32( + 0xAAAAAAAA, "Reset value for ICFGR " "registers that configure SGIs" + ) + icfgr_ppi_reset = Param.UInt32( + 0x0, "Reset value for ICFGR regesters " "that configure PPIs" + ) + icfgr_spi_reset = Param.UInt32( + 0x0, "Reset value for ICFGR regesters " "that configure SPIs" + ) + icfgr_ppi_rsvd_bit = Param.Bool( + False, + "If ARE=0, the value of reserved " + "bits i.e. bit 0,2,4..30 of ICFGRn for n>0", + ) + igroup_sgi_mask = Param.UInt16( + 0xFFFF, "Mask for writes to SGI bits in " "IGROUP registers" + ) + igroup_ppi_mask = Param.UInt16( + 0xFFFF, "Mask for writes to PPI bits in " "IGROUP registers" + ) + igroup_sgi_reset = Param.UInt16( + 0x0, "Reset value for SGI bits in IGROUP " "registers" + ) + igroup_ppi_reset = Param.UInt16( + 0x0, "Reset value for SGI bits in IGROUP " "registers" + ) + ppi_implemented_mask = Param.UInt16( + 0xFFFF, + "Mask of PPIs that are " + "implemented. One bit per PPI bit 0 == PPI 16 (first PPI). 
This " + "will affect other masks.", + ) spi_count = Param.UInt16(224, "Number of SPIs that are implemented.") - lockable_spi_count = Param.Unsigned(0, "Number of SPIs that are locked " - "down when CFGSDISABLE signal is asserted. Only applies for " - "GICv2.") - iri_id_bits = Param.Unsigned(16, "Number of bits used to represent " - "interrupts IDs in the Distributor and Redistributors, forced to " - "10 if LPIs are not supported") - delay_redistributor_accesses = Param.Bool(True, "Delay memory accesses " - "from the redistributor until GICR_SYNCR is read.") - gicd_pidr = Param.UInt64(0x0, "The value for the GICD_PIDR registers, if " - "non-zero. Note: fixed fields (device type etc.) will be " - "overriden in this value.") - gicr_pidr = Param.UInt64(0x0, "The value for the GICR_PIDR registers, if " - "non-zero. Note: fixed fields (device type etc.) will be " - "overriden in this value.") - its_count = Param.Unsigned(0, "Number of Interrupt Translation Services " - "to be instantiated (0=none)") - its0_base = Param.Addr(0, "Register base address for ITS0 " - "(automatic if 0).") - its1_base = Param.Addr(0, "Register base address for ITS1 " - "(automatic if 0).") - its2_base = Param.Addr(0, "Register base address for ITS2 " - "(automatic if 0).") - its3_base = Param.Addr(0, "Register base address for ITS3 " - "(automatic if 0).") - gits_pidr = Param.UInt64(0x0, "The value for the GITS_PIDR registers, if " - "non-zero. Note: fixed fields (device type etc.) will be " - "overriden in this value.") - gits_baser0_type = Param.Unsigned(0, "Type field for GITS_BASER0 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser1_type = Param.Unsigned(0, "Type field for GITS_BASER1 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser2_type = Param.Unsigned(0, "Type field for GITS_BASER2 " - "register. 
0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser3_type = Param.Unsigned(0, "Type field for GITS_BASER3 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser4_type = Param.Unsigned(0, "Type field for GITS_BASER4 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser5_type = Param.Unsigned(0, "Type field for GITS_BASER5 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser6_type = Param.Unsigned(0, "Type field for GITS_BASER6 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser7_type = Param.Unsigned(0, "Type field for GITS_BASER7 " - "register. 0 = Unimplemented; 1 = Devices; " - "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections") - gits_baser0_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER0 register.") - gits_baser1_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER1 register.") - gits_baser2_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER2 register.") - gits_baser3_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER3 register.") - gits_baser4_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER4 register.") - gits_baser5_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER5 register.") - gits_baser6_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER6 register.") - gits_baser7_entry_bytes = Param.Unsigned(8, "Number of bytes required per " - "entry for GITS_BASER7 register.") - gits_baser0_indirect_raz = 
Param.Bool(False, "Indirect field for " - "GITS_BASER0 register is RAZ/WI.") - gits_baser1_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER1 register is RAZ/WI.") - gits_baser2_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER2 register is RAZ/WI.") - gits_baser3_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER3 register is RAZ/WI.") - gits_baser4_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER4 register is RAZ/WI.") - gits_baser5_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER5 register is RAZ/WI.") - gits_baser6_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER6 register is RAZ/WI.") - gits_baser7_indirect_raz = Param.Bool(False, "Indirect field for " - "GITS_BASER7 register is RAZ/WI.") - its_baser_force_page_alignement = Param.Bool(True, "Force alignement of " - "address writen to a GITS_BASER register to the page size " - "configured") - processor_numbers = Param.String("", "Specify processor numbers (as " - "appears in GICR_TYPER) in the form 0.0.0.0=0,0.0.0.1=1 etc.) If " - "not specified, will number processors starting at 0.") - supports_shareability = Param.Bool(True, "Device supports shareability " - "attributes on outgoing memory bus (i.e. 
is modelling an ACElite " - "port rather than an AXI4 port).") - a3_affinity_supported = Param.Bool(False, "Device supports affinity " - "level 3 values that are non-zero.") - SGI_RSS_support = Param.Bool(False, "Device has support for the Range " - "Selector feature for SGI") - gicr_propbaser_read_only = Param.Bool(False, "GICR_PROPBASER register is " - "read-only.") - gicr_propbaser_reset = Param.UInt64(0x0, "Value of GICR_PROPBASER on " - "reset.") - its_device_bits = Param.Unsigned(16, "Number of bits supported for ITS " - "device IDs.") - its_entry_size = Param.Unsigned(8, "Number of bytes required to store " - "each entry in the ITT tables.") - its_id_bits = Param.Unsigned(16, "Number of interrupt bits supported by " - "ITS.") - its_collection_id_bits = Param.Unsigned(0, "Number of collection bits " - "supported by ITS (optional parameter, 0 => 16bits support and " - "GITS_TYPER.CIL=0") - its_cumulative_collection_tables = Param.Bool(True, "When true, the " - "supported amount of collections is the sum of GITS_TYPER.HCC and " - "the number of collections supported in memory, otherwise, simply " - "the number supported in memory only. Irrelevant when HCC=0") - delay_ITS_accesses = Param.Bool(True, "Delay accesses from the ITS until " - "GICR_SYNCR is read.") + lockable_spi_count = Param.Unsigned( + 0, + "Number of SPIs that are locked " + "down when CFGSDISABLE signal is asserted. Only applies for " + "GICv2.", + ) + iri_id_bits = Param.Unsigned( + 16, + "Number of bits used to represent " + "interrupts IDs in the Distributor and Redistributors, forced to " + "10 if LPIs are not supported", + ) + delay_redistributor_accesses = Param.Bool( + True, + "Delay memory accesses " + "from the redistributor until GICR_SYNCR is read.", + ) + gicd_pidr = Param.UInt64( + 0x0, + "The value for the GICD_PIDR registers, if " + "non-zero. Note: fixed fields (device type etc.) 
will be " + "overriden in this value.", + ) + gicr_pidr = Param.UInt64( + 0x0, + "The value for the GICR_PIDR registers, if " + "non-zero. Note: fixed fields (device type etc.) will be " + "overriden in this value.", + ) + its_count = Param.Unsigned( + 0, + "Number of Interrupt Translation Services " + "to be instantiated (0=none)", + ) + its0_base = Param.Addr( + 0, "Register base address for ITS0 " "(automatic if 0)." + ) + its1_base = Param.Addr( + 0, "Register base address for ITS1 " "(automatic if 0)." + ) + its2_base = Param.Addr( + 0, "Register base address for ITS2 " "(automatic if 0)." + ) + its3_base = Param.Addr( + 0, "Register base address for ITS3 " "(automatic if 0)." + ) + gits_pidr = Param.UInt64( + 0x0, + "The value for the GITS_PIDR registers, if " + "non-zero. Note: fixed fields (device type etc.) will be " + "overriden in this value.", + ) + gits_baser0_type = Param.Unsigned( + 0, + "Type field for GITS_BASER0 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser1_type = Param.Unsigned( + 0, + "Type field for GITS_BASER1 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser2_type = Param.Unsigned( + 0, + "Type field for GITS_BASER2 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser3_type = Param.Unsigned( + 0, + "Type field for GITS_BASER3 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser4_type = Param.Unsigned( + 0, + "Type field for GITS_BASER4 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser5_type = Param.Unsigned( + 0, + "Type field for GITS_BASER5 " + "register. 
0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser6_type = Param.Unsigned( + 0, + "Type field for GITS_BASER6 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser7_type = Param.Unsigned( + 0, + "Type field for GITS_BASER7 " + "register. 0 = Unimplemented; 1 = Devices; " + "2 = Virtual Processors; 3 = Physical Processors; 4 = Collections", + ) + gits_baser0_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER0 register." + ) + gits_baser1_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER1 register." + ) + gits_baser2_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER2 register." + ) + gits_baser3_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER3 register." + ) + gits_baser4_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER4 register." + ) + gits_baser5_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER5 register." + ) + gits_baser6_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER6 register." + ) + gits_baser7_entry_bytes = Param.Unsigned( + 8, "Number of bytes required per " "entry for GITS_BASER7 register." + ) + gits_baser0_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER0 register is RAZ/WI." + ) + gits_baser1_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER1 register is RAZ/WI." + ) + gits_baser2_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER2 register is RAZ/WI." + ) + gits_baser3_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER3 register is RAZ/WI." + ) + gits_baser4_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER4 register is RAZ/WI." 
+ ) + gits_baser5_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER5 register is RAZ/WI." + ) + gits_baser6_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER6 register is RAZ/WI." + ) + gits_baser7_indirect_raz = Param.Bool( + False, "Indirect field for " "GITS_BASER7 register is RAZ/WI." + ) + its_baser_force_page_alignement = Param.Bool( + True, + "Force alignement of " + "address writen to a GITS_BASER register to the page size " + "configured", + ) + processor_numbers = Param.String( + "", + "Specify processor numbers (as " + "appears in GICR_TYPER) in the form 0.0.0.0=0,0.0.0.1=1 etc.) If " + "not specified, will number processors starting at 0.", + ) + supports_shareability = Param.Bool( + True, + "Device supports shareability " + "attributes on outgoing memory bus (i.e. is modelling an ACElite " + "port rather than an AXI4 port).", + ) + a3_affinity_supported = Param.Bool( + False, "Device supports affinity " "level 3 values that are non-zero." + ) + SGI_RSS_support = Param.Bool( + False, "Device has support for the Range " "Selector feature for SGI" + ) + gicr_propbaser_read_only = Param.Bool( + False, "GICR_PROPBASER register is " "read-only." + ) + gicr_propbaser_reset = Param.UInt64( + 0x0, "Value of GICR_PROPBASER on " "reset." + ) + its_device_bits = Param.Unsigned( + 16, "Number of bits supported for ITS " "device IDs." + ) + its_entry_size = Param.Unsigned( + 8, "Number of bytes required to store " "each entry in the ITT tables." + ) + its_id_bits = Param.Unsigned( + 16, "Number of interrupt bits supported by " "ITS." 
+ ) + its_collection_id_bits = Param.Unsigned( + 0, + "Number of collection bits " + "supported by ITS (optional parameter, 0 => 16bits support and " + "GITS_TYPER.CIL=0", + ) + its_cumulative_collection_tables = Param.Bool( + True, + "When true, the " + "supported amount of collections is the sum of GITS_TYPER.HCC and " + "the number of collections supported in memory, otherwise, simply " + "the number supported in memory only. Irrelevant when HCC=0", + ) + delay_ITS_accesses = Param.Bool( + True, "Delay accesses from the ITS until " "GICR_SYNCR is read." + ) local_SEIs = Param.Bool(False, "Generate SEI to signal internal issues") local_VSEIs = Param.Bool(False, "Generate VSEI to signal internal issues") - ITS_use_physical_target_addresses = Param.Bool(True, "Use physical " - "hardware adresses for targets in ITS commands -- must be true " - "for distributed implementations") - ITS_hardware_collection_count = Param.Unsigned(0, "Number of hardware " - "collections held exclusively in the ITS") - ITS_MOVALL_update_collections = Param.Bool(False, "Whether MOVALL command " - "updates the collection entires") - ITS_TRANSLATE64R = Param.Bool(False, "Add an implementation specific " - "register at 0x10008 supporting 64 bit TRANSLATER (dev[63:32], " - "interupt[31:0])") - enable_protocol_checking = Param.Bool(False, "Enable/disable protocol " - "checking at cpu interface") - fixed_routed_spis = Param.String("", "Value of IROUTER[n] register in the " - "form 'n=a.b.c.d, n=*'. The RM bit of IROUTER is 0 when n=a.b.c.d " - "is used else 1 when n=* is used. 
n can be >= 32 and <= 1019") - irouter_default_mask = Param.String("", "Default Mask value for " - "IROUTER[32..1019] register in the form 'a.b.c.d'") - irouter_default_reset = Param.String("", "Default Reset Value of " - "IROUTER[32..1019] register in the form 'a.b.c.d' or *") - irouter_reset_values = Param.String("", "Reset Value of IROUTER[n] " - "register in the form 'n=a.b.c.d or n=*'.n can be >= 32 and " - "<= 1019") - irouter_mask_values = Param.String("", "Mask Value of IROUTER[n] register " - "in the form 'n=a.b.c.d'.n can be >= 32 and <= 1019") - ITS_threaded_command_queue = Param.Bool(True, "Enable execution of ITS " - "commands in a separate thread which is sometimes required for " - "cosimulation") - ITS_legacy_iidr_typer_offset = Param.Bool(False, "Put the GITS_IIDR and " - "GITS_TYPER registers at their older offset of 0x8 and 0x4 " - "respectively") - redistributor_threaded_command_queue = Param.Bool(True, "Enable execution " - "of redistributor delayed transactions in a separate thread which " - "is sometimes required for cosimulation") - ignore_generate_sgi_when_no_are = Param.Bool(False, "Ignore GenerateSGI " - "packets coming form the CPU interface if both ARE_S and ARE_NS " - "are 0") - trace_speculative_lpi_property_updates = Param.Bool(False, "Trace LPI " - "propery updates performed on speculative accesses (useful for " - "debuging LPI)") - virtual_lpi_support = Param.Bool(False, "GICv4 Virtual LPIs and Direct " - "injection of Virtual LPIs supported") - virtual_priority_bits = Param.Unsigned(5, "Number of implemented virtual " - "priority bits") - LPI_cache_type = Param.Unsigned(1, "Cache type for LPIs, 0:No caching, " - "1:Full caching") - LPI_cache_check_data = Param.Bool(False, "Enable Cached LPI data against " - "memory checking when available for cache type") - DPG_bits_implemented = Param.Bool(False, "Enable implementation of " - "interrupt group participation bits or DPG bits in GICR_CTLR") - DPG_ARE_only = Param.Bool(False, "Limit 
application of DPG bits to " - "interrupt groups for which ARE=1") - ARE_fixed_to_one = Param.Bool(False, "GICv2 compatibility is not " - "supported and GICD_CTLR.ARE_* is always one") - legacy_sgi_enable_rao = Param.Bool(False, "Enables for SGI associated " - "with an ARE=0 regime are RAO/WI") + ITS_use_physical_target_addresses = Param.Bool( + True, + "Use physical " + "hardware adresses for targets in ITS commands -- must be true " + "for distributed implementations", + ) + ITS_hardware_collection_count = Param.Unsigned( + 0, "Number of hardware " "collections held exclusively in the ITS" + ) + ITS_MOVALL_update_collections = Param.Bool( + False, "Whether MOVALL command " "updates the collection entires" + ) + ITS_TRANSLATE64R = Param.Bool( + False, + "Add an implementation specific " + "register at 0x10008 supporting 64 bit TRANSLATER (dev[63:32], " + "interupt[31:0])", + ) + enable_protocol_checking = Param.Bool( + False, "Enable/disable protocol " "checking at cpu interface" + ) + fixed_routed_spis = Param.String( + "", + "Value of IROUTER[n] register in the " + "form 'n=a.b.c.d, n=*'. The RM bit of IROUTER is 0 when n=a.b.c.d " + "is used else 1 when n=* is used. 
n can be >= 32 and <= 1019", + ) + irouter_default_mask = Param.String( + "", + "Default Mask value for " + "IROUTER[32..1019] register in the form 'a.b.c.d'", + ) + irouter_default_reset = Param.String( + "", + "Default Reset Value of " + "IROUTER[32..1019] register in the form 'a.b.c.d' or *", + ) + irouter_reset_values = Param.String( + "", + "Reset Value of IROUTER[n] " + "register in the form 'n=a.b.c.d or n=*'.n can be >= 32 and " + "<= 1019", + ) + irouter_mask_values = Param.String( + "", + "Mask Value of IROUTER[n] register " + "in the form 'n=a.b.c.d'.n can be >= 32 and <= 1019", + ) + ITS_threaded_command_queue = Param.Bool( + True, + "Enable execution of ITS " + "commands in a separate thread which is sometimes required for " + "cosimulation", + ) + ITS_legacy_iidr_typer_offset = Param.Bool( + False, + "Put the GITS_IIDR and " + "GITS_TYPER registers at their older offset of 0x8 and 0x4 " + "respectively", + ) + redistributor_threaded_command_queue = Param.Bool( + True, + "Enable execution " + "of redistributor delayed transactions in a separate thread which " + "is sometimes required for cosimulation", + ) + ignore_generate_sgi_when_no_are = Param.Bool( + False, + "Ignore GenerateSGI " + "packets coming form the CPU interface if both ARE_S and ARE_NS " + "are 0", + ) + trace_speculative_lpi_property_updates = Param.Bool( + False, + "Trace LPI " + "propery updates performed on speculative accesses (useful for " + "debuging LPI)", + ) + virtual_lpi_support = Param.Bool( + False, + "GICv4 Virtual LPIs and Direct " "injection of Virtual LPIs supported", + ) + virtual_priority_bits = Param.Unsigned( + 5, "Number of implemented virtual " "priority bits" + ) + LPI_cache_type = Param.Unsigned( + 1, "Cache type for LPIs, 0:No caching, " "1:Full caching" + ) + LPI_cache_check_data = Param.Bool( + False, + "Enable Cached LPI data against " + "memory checking when available for cache type", + ) + DPG_bits_implemented = Param.Bool( + False, + "Enable implementation 
of " + "interrupt group participation bits or DPG bits in GICR_CTLR", + ) + DPG_ARE_only = Param.Bool( + False, + "Limit application of DPG bits to " "interrupt groups for which ARE=1", + ) + ARE_fixed_to_one = Param.Bool( + False, + "GICv2 compatibility is not " + "supported and GICD_CTLR.ARE_* is always one", + ) + legacy_sgi_enable_rao = Param.Bool( + False, "Enables for SGI associated " "with an ARE=0 regime are RAO/WI" + ) pa_size = Param.Unsigned(48, "Number of valid bits in physical address") MSI_IIDR = Param.UInt32(0x0, "Value returned in MSI_IIDR registers.") - MSI_NS_frame0_base = Param.Addr(0x0, "If non-zero, sets the base " - "address used for non-secure MSI frame 0 registers.") - MSI_NS_frame0_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 0. Set to 0 to disable frame.") - MSI_NS_frame0_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 0. Set to 0 to disable frame.") - MSI_NS_frame1_base = Param.Addr(0x0, "If non-zero, sets the base " - "address used for non-secure MSI frame 1 registers.") - MSI_NS_frame1_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 1. Set to 0 to disable frame.") - MSI_NS_frame1_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 1. Set to 0 to disable frame.") - MSI_NS_frame2_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for non-secure MSI frame 2 registers.") - MSI_NS_frame2_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 2. Set to 0 to disable frame.") - MSI_NS_frame2_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 2. Set to 0 to disable frame.") - MSI_NS_frame3_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for non-secure MSI frame 3 registers.") - MSI_NS_frame3_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 3. 
Set to 0 to disable frame.") - MSI_NS_frame3_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 3. Set to 0 to disable frame.") - MSI_NS_frame4_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for non-secure MSI frame 4 registers.") - MSI_NS_frame4_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 4. Set to 0 to disable frame.") - MSI_NS_frame4_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 4. Set to 0 to disable frame.") - MSI_NS_frame5_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for non-secure MSI frame 5 registers.") - MSI_NS_frame5_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 5. Set to 0 to disable frame.") - MSI_NS_frame5_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 5. Set to 0 to disable frame.") - MSI_NS_frame6_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for non-secure MSI frame 6 registers.") - MSI_NS_frame6_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 6. Set to 0 to disable frame.") - MSI_NS_frame6_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 6. Set to 0 to disable frame.") - MSI_NS_frame7_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for non-secure MSI frame 7 registers.") - MSI_NS_frame7_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "non-secure MSI frame 7. Set to 0 to disable frame.") - MSI_NS_frame7_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "non-secure MSI frame 7. Set to 0 to disable frame.") - MSI_PIDR = Param.UInt64(0x0, "The value for the MSI_PIDR registers, if " - "non-zero and distributor supports GICv2m. Note: fixed fields " - "(device type etc.) 
will be overriden in this value.") - MSI_S_frame0_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 0 registers.") - MSI_S_frame0_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 0. Set to 0 to disable frame.") - MSI_S_frame0_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 0. Set to 0 to disable frame.") - MSI_S_frame1_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 1 registers.") - MSI_S_frame1_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 1. Set to 0 to disable frame.") - MSI_S_frame1_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 1. Set to 0 to disable frame.") - MSI_S_frame2_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 2 registers.") - MSI_S_frame2_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 2. Set to 0 to disable frame.") - MSI_S_frame2_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 2. Set to 0 to disable frame.") - MSI_S_frame3_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 3 registers.") - MSI_S_frame3_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 3. Set to 0 to disable frame.") - MSI_S_frame3_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 3. Set to 0 to disable frame.") - MSI_S_frame4_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 4 registers.") - MSI_S_frame4_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 4. Set to 0 to disable frame.") - MSI_S_frame4_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 4. 
Set to 0 to disable frame.") - MSI_S_frame5_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 5 registers.") - MSI_S_frame5_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 5. Set to 0 to disable frame.") - MSI_S_frame5_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 5. Set to 0 to disable frame.") - MSI_S_frame6_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 6 registers.") - MSI_S_frame6_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 6. Set to 0 to disable frame.") - MSI_S_frame6_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 6. Set to 0 to disable frame.") - MSI_S_frame7_base = Param.Addr(0x0, "If non-zero, sets the base address " - "used for secure MSI frame 7 registers.") - MSI_S_frame7_max_SPI = Param.UInt16(0, "Maximum SPI ID supported by " - "secure MSI frame 7. Set to 0 to disable frame.") - MSI_S_frame7_min_SPI = Param.UInt16(0, "Minimum SPI ID supported by " - "secure MSI frame 7. Set to 0 to disable frame.") - outer_cacheability_support = Param.Bool(False, "Allow configuration of " - "outer cachability attributes in ITS and Redistributor") - wakeup_on_reset = Param.Bool(False, "Go against specification and start " - "redistributors in woken-up state at reset. This allows software " - "that was written for previous versions of the GICv3 " - "specification to work correctly. 
This should not be used for " - "production code or when the distributor is used separately from " - "the core fast model.") - SPI_MBIS = Param.Bool(True, "Distributor supports meassage based " - "signaling of SPI") - SPI_unimplemented = Param.String("", "A comma spearated list of " - "unimplemented SPIs ranges for sparse SPI defintion(for ex: " - "'35, 39-42, 73)'") - irm_razwi = Param.Bool(False, "GICD_IROUTERn.InterruptRoutingMode is " - "RAZ/WI") - common_LPI_configuration = Param.Unsigned(0, "Describes which " - "re-distributors share (and must be configured with the same) " - "LPI configuration table as described in GICR_TYPER( 0:All, " - "1:A.x.x.x, 2:A.B.x.x, 3:A.B.C.x") - single_set_support = Param.Bool(False, "When true, forces redistributors " - "to recall interrupts with a clear rather than issue a second Set " - "command") - has_mpam = Param.Unsigned(0, "Implement ARMv8.4 MPAM Registers and " - "associated functionality.\n\nPossible values of this parameter " - "are:\n - 0, feature is not enabled.\n - 1, feature is " - "implemented if ARMv8.4 is enabled.\n - 2, feature is " - "implemented.") - mpam_max_partid = Param.UInt16(0xffff, "MPAM Maximum PARTID Supported") + MSI_NS_frame0_base = Param.Addr( + 0x0, + "If non-zero, sets the base " + "address used for non-secure MSI frame 0 registers.", + ) + MSI_NS_frame0_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 0. Set to 0 to disable frame.", + ) + MSI_NS_frame0_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 0. Set to 0 to disable frame.", + ) + MSI_NS_frame1_base = Param.Addr( + 0x0, + "If non-zero, sets the base " + "address used for non-secure MSI frame 1 registers.", + ) + MSI_NS_frame1_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 1. Set to 0 to disable frame.", + ) + MSI_NS_frame1_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 1. 
Set to 0 to disable frame.", + ) + MSI_NS_frame2_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for non-secure MSI frame 2 registers.", + ) + MSI_NS_frame2_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 2. Set to 0 to disable frame.", + ) + MSI_NS_frame2_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 2. Set to 0 to disable frame.", + ) + MSI_NS_frame3_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for non-secure MSI frame 3 registers.", + ) + MSI_NS_frame3_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 3. Set to 0 to disable frame.", + ) + MSI_NS_frame3_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 3. Set to 0 to disable frame.", + ) + MSI_NS_frame4_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for non-secure MSI frame 4 registers.", + ) + MSI_NS_frame4_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 4. Set to 0 to disable frame.", + ) + MSI_NS_frame4_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 4. Set to 0 to disable frame.", + ) + MSI_NS_frame5_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for non-secure MSI frame 5 registers.", + ) + MSI_NS_frame5_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 5. Set to 0 to disable frame.", + ) + MSI_NS_frame5_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 5. Set to 0 to disable frame.", + ) + MSI_NS_frame6_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for non-secure MSI frame 6 registers.", + ) + MSI_NS_frame6_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 6. 
Set to 0 to disable frame.", + ) + MSI_NS_frame6_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 6. Set to 0 to disable frame.", + ) + MSI_NS_frame7_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for non-secure MSI frame 7 registers.", + ) + MSI_NS_frame7_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "non-secure MSI frame 7. Set to 0 to disable frame.", + ) + MSI_NS_frame7_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "non-secure MSI frame 7. Set to 0 to disable frame.", + ) + MSI_PIDR = Param.UInt64( + 0x0, + "The value for the MSI_PIDR registers, if " + "non-zero and distributor supports GICv2m. Note: fixed fields " + "(device type etc.) will be overriden in this value.", + ) + MSI_S_frame0_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 0 registers.", + ) + MSI_S_frame0_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 0. Set to 0 to disable frame.", + ) + MSI_S_frame0_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 0. Set to 0 to disable frame.", + ) + MSI_S_frame1_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 1 registers.", + ) + MSI_S_frame1_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 1. Set to 0 to disable frame.", + ) + MSI_S_frame1_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 1. Set to 0 to disable frame.", + ) + MSI_S_frame2_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 2 registers.", + ) + MSI_S_frame2_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 2. Set to 0 to disable frame.", + ) + MSI_S_frame2_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 2. 
Set to 0 to disable frame.", + ) + MSI_S_frame3_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 3 registers.", + ) + MSI_S_frame3_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 3. Set to 0 to disable frame.", + ) + MSI_S_frame3_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 3. Set to 0 to disable frame.", + ) + MSI_S_frame4_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 4 registers.", + ) + MSI_S_frame4_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 4. Set to 0 to disable frame.", + ) + MSI_S_frame4_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 4. Set to 0 to disable frame.", + ) + MSI_S_frame5_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 5 registers.", + ) + MSI_S_frame5_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 5. Set to 0 to disable frame.", + ) + MSI_S_frame5_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 5. Set to 0 to disable frame.", + ) + MSI_S_frame6_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 6 registers.", + ) + MSI_S_frame6_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 6. Set to 0 to disable frame.", + ) + MSI_S_frame6_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 6. Set to 0 to disable frame.", + ) + MSI_S_frame7_base = Param.Addr( + 0x0, + "If non-zero, sets the base address " + "used for secure MSI frame 7 registers.", + ) + MSI_S_frame7_max_SPI = Param.UInt16( + 0, + "Maximum SPI ID supported by " + "secure MSI frame 7. Set to 0 to disable frame.", + ) + MSI_S_frame7_min_SPI = Param.UInt16( + 0, + "Minimum SPI ID supported by " + "secure MSI frame 7. 
Set to 0 to disable frame.", + ) + outer_cacheability_support = Param.Bool( + False, + "Allow configuration of " + "outer cachability attributes in ITS and Redistributor", + ) + wakeup_on_reset = Param.Bool( + False, + "Go against specification and start " + "redistributors in woken-up state at reset. This allows software " + "that was written for previous versions of the GICv3 " + "specification to work correctly. This should not be used for " + "production code or when the distributor is used separately from " + "the core fast model.", + ) + SPI_MBIS = Param.Bool( + True, "Distributor supports meassage based " "signaling of SPI" + ) + SPI_unimplemented = Param.String( + "", + "A comma spearated list of " + "unimplemented SPIs ranges for sparse SPI defintion(for ex: " + "'35, 39-42, 73)'", + ) + irm_razwi = Param.Bool( + False, "GICD_IROUTERn.InterruptRoutingMode is " "RAZ/WI" + ) + common_LPI_configuration = Param.Unsigned( + 0, + "Describes which " + "re-distributors share (and must be configured with the same) " + "LPI configuration table as described in GICR_TYPER( 0:All, " + "1:A.x.x.x, 2:A.B.x.x, 3:A.B.C.x", + ) + single_set_support = Param.Bool( + False, + "When true, forces redistributors " + "to recall interrupts with a clear rather than issue a second Set " + "command", + ) + has_mpam = Param.Unsigned( + 0, + "Implement ARMv8.4 MPAM Registers and " + "associated functionality.\n\nPossible values of this parameter " + "are:\n - 0, feature is not enabled.\n - 1, feature is " + "implemented if ARMv8.4 is enabled.\n - 2, feature is " + "implemented.", + ) + mpam_max_partid = Param.UInt16(0xFFFF, "MPAM Maximum PARTID Supported") mpam_max_pmg = Param.Unsigned(255, "MPAM Maximum PMG Supported") - output_attributes = Param.String("ExtendedID[62:55]=MPAM_PMG, " - "ExtendedID[54:39]=MPAM_PARTID, ExtendedID[38]=MPAM_NS", - "User-defined transform to be applied to bus attributes like " - "RequestorID, ExtendedID or UserFlags. 
Currently, only works for " - "MPAM Attributes encoding into bus attributes.") - has_DirtyVLPIOnLoad = Param.Bool(False, "GICR_VPENDBASER.Dirty reflects " - "transient loading state when valid=1") - allow_LPIEN_clear = Param.Bool(False, "Allow RW behaviour on " - "GICR_CTLR.LPIEN isntead of set once") - GICD_legacy_reg_reserved = Param.Bool(False, "When ARE is RAO/WI, makes " - "superfluous registers in GICD reserved (including for the " - "purpose of STATUSR updates)") + output_attributes = Param.String( + "ExtendedID[62:55]=MPAM_PMG, " + "ExtendedID[54:39]=MPAM_PARTID, ExtendedID[38]=MPAM_NS", + "User-defined transform to be applied to bus attributes like " + "RequestorID, ExtendedID or UserFlags. Currently, only works for " + "MPAM Attributes encoding into bus attributes.", + ) + has_DirtyVLPIOnLoad = Param.Bool( + False, + "GICR_VPENDBASER.Dirty reflects " + "transient loading state when valid=1", + ) + allow_LPIEN_clear = Param.Bool( + False, "Allow RW behaviour on " "GICR_CTLR.LPIEN isntead of set once" + ) + GICD_legacy_reg_reserved = Param.Bool( + False, + "When ARE is RAO/WI, makes " + "superfluous registers in GICD reserved (including for the " + "purpose of STATUSR updates)", + ) extended_spi_count = Param.Unsigned(0, "Number of extended SPI supported") extended_ppi_count = Param.Unsigned(0, "Number of extended PPI supported") - consolidators = Param.String("", "Specify consolidators' base addresses, " - "interrupt line counts and base interrupt IDs, in the form " - "'baseAddr0:itlineCount0:baseINTID0, " - "baseAddr1:itlineCount1:baseINTID1, [etc]' " - "(eg '0x3f100000:64:4096, 0x3f200000:64:4224'). The " - "consolidators' count is inferred from the list (maximum of 4). 
" - "If not specified, the component contains no consolidators.") + consolidators = Param.String( + "", + "Specify consolidators' base addresses, " + "interrupt line counts and base interrupt IDs, in the form " + "'baseAddr0:itlineCount0:baseINTID0, " + "baseAddr1:itlineCount1:baseINTID1, [etc]' " + "(eg '0x3f100000:64:4096, 0x3f200000:64:4224'). The " + "consolidators' count is inferred from the list (maximum of 4). " + "If not specified, the component contains no consolidators.", + ) + class FastModelGIC(BaseGic): - type = 'FastModelGIC' - cxx_class = 'gem5::fastmodel::GIC' - cxx_header = 'arch/arm/fastmodel/GIC/gic.hh' + type = "FastModelGIC" + cxx_class = "gem5::fastmodel::GIC" + cxx_header = "arch/arm/fastmodel/GIC/gic.hh" - sc_gic = Param.SCFastModelGIC(SCFastModelGIC(), - 'SystemC version of the GIC') + sc_gic = Param.SCFastModelGIC( + SCFastModelGIC(), "SystemC version of the GIC" + ) - amba_m = AmbaInitiatorSocket(64, 'Memory initiator socket') - amba_s = AmbaTargetSocket(64, 'Memory target socket') + amba_m = AmbaInitiatorSocket(64, "Memory initiator socket") + amba_s = AmbaTargetSocket(64, "Memory target socket") redistributor = VectorGicv3CommsInitiatorSocket( - 'GIC communication initiator') + "GIC communication initiator" + ) - wake_request = VectorIntSourcePin('GIC wake request initiator') + wake_request = VectorIntSourcePin("GIC wake request initiator") # Used for DTB autogeneration _state = FdtState(addr_cells=2, size_cells=2, interrupt_cells=3) @@ -488,10 +862,10 @@ class FastModelGIC(BaseGic): redists = self.sc_gic.reg_base_per_redistributor.split(",") # make sure we have at least one redistributor assert len(redists) > 0 and "=" in redists[0] - return [ int(r.split('=')[1], 16) for r in redists ] + return [int(r.split("=")[1], 16) for r in redists] def get_addr_ranges(self): - """ Return address ranges that should be served by this GIC """ + """Return address ranges that should be served by this GIC""" sc_gic = self.sc_gic gic_frame_size = 
0x10000 # Add range of distributor @@ -504,8 +878,10 @@ class FastModelGIC(BaseGic): ] # Add ranges of ITSs its_bases = [ - sc_gic.its0_base, sc_gic.its1_base, sc_gic.its2_base, - sc_gic.its3_base + sc_gic.its0_base, + sc_gic.its1_base, + sc_gic.its2_base, + sc_gic.its3_base, ] ranges += [ AddrRange(its_bases[i], size=2 * gic_frame_size) @@ -540,32 +916,39 @@ class FastModelGIC(BaseGic): node.append(FdtProperty("interrupt-controller")) redist_stride = 0x40000 if sc_gic.has_gicv4_1 else 0x20000 - node.append(FdtPropertyWords("redistributor-stride", - state.sizeCells(redist_stride))) + node.append( + FdtPropertyWords( + "redistributor-stride", state.sizeCells(redist_stride) + ) + ) regs = ( - state.addrCells(sc_gic.reg_base) + - state.sizeCells(0x10000) + - state.addrCells(self.get_redist_bases()[0]) + - state.sizeCells(0x2000000) ) + state.addrCells(sc_gic.reg_base) + + state.sizeCells(0x10000) + + state.addrCells(self.get_redist_bases()[0]) + + state.sizeCells(0x2000000) + ) node.append(FdtPropertyWords("reg", regs)) # Maintenance interrupt (PPI 25). 
- node.append(FdtPropertyWords("interrupts", - self.interruptCells(1, 9, 0x4))) + node.append( + FdtPropertyWords("interrupts", self.interruptCells(1, 9, 0x4)) + ) node.appendPhandle(self) # Generate the ITS device tree its_frame_size = 0x10000 its_bases = [ - sc_gic.its0_base, sc_gic.its1_base, sc_gic.its2_base, - sc_gic.its3_base + sc_gic.its0_base, + sc_gic.its1_base, + sc_gic.its2_base, + sc_gic.its3_base, ] for its_base in its_bases: - its_node = self.generateBasicPioDeviceNode(state, "gic-its", - its_base, - 2 * its_frame_size) + its_node = self.generateBasicPioDeviceNode( + state, "gic-its", its_base, 2 * its_frame_size + ) its_node.appendCompatible(["arm,gic-v3-its"]) its_node.append(FdtProperty("msi-controller")) its_node.append(FdtPropertyWords("#msi-cells", [1])) diff --git a/src/arch/arm/fastmodel/PL330_DMAC/FastModelPL330.py b/src/arch/arm/fastmodel/PL330_DMAC/FastModelPL330.py index ab61b74267..d6c515cf44 100644 --- a/src/arch/arm/fastmodel/PL330_DMAC/FastModelPL330.py +++ b/src/arch/arm/fastmodel/PL330_DMAC/FastModelPL330.py @@ -28,10 +28,11 @@ from m5.objects.FastModel import AmbaInitiatorSocket, AmbaTargetSocket from m5.objects.IntPin import IntSourcePin from m5.objects.SystemC import SystemC_ScModule + class FastModelPL330(SystemC_ScModule): - type = 'FastModelPL330' - cxx_class = 'gem5::fastmodel::PL330' - cxx_header = 'arch/arm/fastmodel/PL330_DMAC/pl330.hh' + type = "FastModelPL330" + cxx_class = "gem5::fastmodel::PL330" + cxx_header = "arch/arm/fastmodel/PL330_DMAC/pl330.hh" clock = Param.Frequency("Clock frequency") @@ -85,77 +86,110 @@ class FastModelPL330(SystemC_ScModule): cache_line_words = Param.UInt32(1, "number of words in a cache line") cache_lines = Param.UInt32(1, "number of cache lines") max_channels = Param.UInt32(8, "virtual channels") - controller_nsecure = Param.Bool(False, "Controller non-secure at reset " - "(boot_manager_ns)") + controller_nsecure = Param.Bool( + False, "Controller non-secure at reset " "(boot_manager_ns)" + 
) irq_nsecure = Param.UInt32(0, "Interrupts non-secure at reset") periph_nsecure = Param.Bool(False, "Peripherals non-secure at reset") controller_boots = Param.Bool(True, "DMA boots from reset") reset_pc = Param.UInt32(0x60000000, "DMA PC at reset") max_periph = Param.UInt32(32, "number of peripheral interfaces") - perip_request_acceptance_0 = \ - Param.UInt32(2, "Peripheral 0 request acceptance" ) - perip_request_acceptance_1 = \ - Param.UInt32(2, "Peripheral 1 request acceptance" ) - perip_request_acceptance_2 = \ - Param.UInt32(2, "Peripheral 2 request acceptance" ) - perip_request_acceptance_3 = \ - Param.UInt32(2, "Peripheral 3 request acceptance" ) - perip_request_acceptance_4 = \ - Param.UInt32(2, "Peripheral 4 request acceptance" ) - perip_request_acceptance_5 = \ - Param.UInt32(2, "Peripheral 5 request acceptance" ) - perip_request_acceptance_6 = \ - Param.UInt32(2, "Peripheral 6 request acceptance" ) - perip_request_acceptance_7 = \ - Param.UInt32(2, "Peripheral 7 request acceptance" ) - perip_request_acceptance_8 = \ - Param.UInt32(2, "Peripheral 8 request acceptance" ) - perip_request_acceptance_9 = \ - Param.UInt32(2, "Peripheral 9 request acceptance" ) - perip_request_acceptance_10 = \ - Param.UInt32(2, "Peripheral 10 request acceptance") - perip_request_acceptance_11 = \ - Param.UInt32(2, "Peripheral 11 request acceptance") - perip_request_acceptance_12 = \ - Param.UInt32(2, "Peripheral 12 request acceptance") - perip_request_acceptance_13 = \ - Param.UInt32(2, "Peripheral 13 request acceptance") - perip_request_acceptance_14 = \ - Param.UInt32(2, "Peripheral 14 request acceptance") - perip_request_acceptance_15 = \ - Param.UInt32(2, "Peripheral 15 request acceptance") - perip_request_acceptance_16 = \ - Param.UInt32(2, "Peripheral 16 request acceptance") - perip_request_acceptance_17 = \ - Param.UInt32(2, "Peripheral 17 request acceptance") - perip_request_acceptance_18 = \ - Param.UInt32(2, "Peripheral 18 request acceptance") - 
perip_request_acceptance_19 = \ - Param.UInt32(2, "Peripheral 19 request acceptance") - perip_request_acceptance_20 = \ - Param.UInt32(2, "Peripheral 20 request acceptance") - perip_request_acceptance_21 = \ - Param.UInt32(2, "Peripheral 21 request acceptance") - perip_request_acceptance_22 = \ - Param.UInt32(2, "Peripheral 22 request acceptance") - perip_request_acceptance_23 = \ - Param.UInt32(2, "Peripheral 23 request acceptance") - perip_request_acceptance_24 = \ - Param.UInt32(2, "Peripheral 24 request acceptance") - perip_request_acceptance_25 = \ - Param.UInt32(2, "Peripheral 25 request acceptance") - perip_request_acceptance_26 = \ - Param.UInt32(2, "Peripheral 26 request acceptance") - perip_request_acceptance_27 = \ - Param.UInt32(2, "Peripheral 27 request acceptance") - perip_request_acceptance_28 = \ - Param.UInt32(2, "Peripheral 28 request acceptance") - perip_request_acceptance_29 = \ - Param.UInt32(2, "Peripheral 29 request acceptance") - perip_request_acceptance_30 = \ - Param.UInt32(2, "Peripheral 30 request acceptance") - perip_request_acceptance_31 = \ - Param.UInt32(2, "Peripheral 31 request acceptance") + perip_request_acceptance_0 = Param.UInt32( + 2, "Peripheral 0 request acceptance" + ) + perip_request_acceptance_1 = Param.UInt32( + 2, "Peripheral 1 request acceptance" + ) + perip_request_acceptance_2 = Param.UInt32( + 2, "Peripheral 2 request acceptance" + ) + perip_request_acceptance_3 = Param.UInt32( + 2, "Peripheral 3 request acceptance" + ) + perip_request_acceptance_4 = Param.UInt32( + 2, "Peripheral 4 request acceptance" + ) + perip_request_acceptance_5 = Param.UInt32( + 2, "Peripheral 5 request acceptance" + ) + perip_request_acceptance_6 = Param.UInt32( + 2, "Peripheral 6 request acceptance" + ) + perip_request_acceptance_7 = Param.UInt32( + 2, "Peripheral 7 request acceptance" + ) + perip_request_acceptance_8 = Param.UInt32( + 2, "Peripheral 8 request acceptance" + ) + perip_request_acceptance_9 = Param.UInt32( + 2, "Peripheral 9 
request acceptance" + ) + perip_request_acceptance_10 = Param.UInt32( + 2, "Peripheral 10 request acceptance" + ) + perip_request_acceptance_11 = Param.UInt32( + 2, "Peripheral 11 request acceptance" + ) + perip_request_acceptance_12 = Param.UInt32( + 2, "Peripheral 12 request acceptance" + ) + perip_request_acceptance_13 = Param.UInt32( + 2, "Peripheral 13 request acceptance" + ) + perip_request_acceptance_14 = Param.UInt32( + 2, "Peripheral 14 request acceptance" + ) + perip_request_acceptance_15 = Param.UInt32( + 2, "Peripheral 15 request acceptance" + ) + perip_request_acceptance_16 = Param.UInt32( + 2, "Peripheral 16 request acceptance" + ) + perip_request_acceptance_17 = Param.UInt32( + 2, "Peripheral 17 request acceptance" + ) + perip_request_acceptance_18 = Param.UInt32( + 2, "Peripheral 18 request acceptance" + ) + perip_request_acceptance_19 = Param.UInt32( + 2, "Peripheral 19 request acceptance" + ) + perip_request_acceptance_20 = Param.UInt32( + 2, "Peripheral 20 request acceptance" + ) + perip_request_acceptance_21 = Param.UInt32( + 2, "Peripheral 21 request acceptance" + ) + perip_request_acceptance_22 = Param.UInt32( + 2, "Peripheral 22 request acceptance" + ) + perip_request_acceptance_23 = Param.UInt32( + 2, "Peripheral 23 request acceptance" + ) + perip_request_acceptance_24 = Param.UInt32( + 2, "Peripheral 24 request acceptance" + ) + perip_request_acceptance_25 = Param.UInt32( + 2, "Peripheral 25 request acceptance" + ) + perip_request_acceptance_26 = Param.UInt32( + 2, "Peripheral 26 request acceptance" + ) + perip_request_acceptance_27 = Param.UInt32( + 2, "Peripheral 27 request acceptance" + ) + perip_request_acceptance_28 = Param.UInt32( + 2, "Peripheral 28 request acceptance" + ) + perip_request_acceptance_29 = Param.UInt32( + 2, "Peripheral 29 request acceptance" + ) + perip_request_acceptance_30 = Param.UInt32( + 2, "Peripheral 30 request acceptance" + ) + perip_request_acceptance_31 = Param.UInt32( + 2, "Peripheral 31 request acceptance" 
+ ) # Singleton IRQ abort signal port # 32 bit wide IRQ master DMASEV port diff --git a/src/arch/arm/fastmodel/SConscript b/src/arch/arm/fastmodel/SConscript index 35c0da1361..9d9d183516 100644 --- a/src/arch/arm/fastmodel/SConscript +++ b/src/arch/arm/fastmodel/SConscript @@ -437,3 +437,4 @@ SimObject('FastModel.py', sim_objects=[ 'AmbaToTlmBridge64', 'AmbaFromTlmBridge64'], tags='arm fastmodel') Source('amba_to_tlm_bridge.cc', tags='arm fastmodel') Source('amba_from_tlm_bridge.cc', tags='arm fastmodel') +Source('remote_gdb.cc', tags='arm fastmodel') diff --git a/src/arch/arm/fastmodel/amba_from_tlm_bridge.cc b/src/arch/arm/fastmodel/amba_from_tlm_bridge.cc index 400535590c..f84e58121e 100644 --- a/src/arch/arm/fastmodel/amba_from_tlm_bridge.cc +++ b/src/arch/arm/fastmodel/amba_from_tlm_bridge.cc @@ -28,6 +28,8 @@ #include "arch/arm/fastmodel/amba_from_tlm_bridge.hh" #include "params/AmbaFromTlmBridge64.hh" +#include "pv_userpayload_extension.h" +#include "systemc/tlm_bridge/sc_ext.hh" namespace gem5 { @@ -36,11 +38,21 @@ GEM5_DEPRECATED_NAMESPACE(FastModel, fastmodel); namespace fastmodel { -AmbaFromTlmBridge64::AmbaFromTlmBridge64(const char *name) : +AmbaFromTlmBridge64::AmbaFromTlmBridge64(const sc_core::sc_module_name& name) : amba_pv::amba_pv_from_tlm_bridge<64>(name), - ambaWrapper(amba_pv_m, std::string(name) + ".amba", -1), - tlmWrapper(tlm_s, std::string(name) + ".tlm", -1) -{} + targetProxy("target_proxy"), + initiatorProxy("initiator_proxy"), + tlmWrapper(targetProxy, std::string(name) + ".tlm", -1), + ambaWrapper(amba_pv_m, std::string(name) + ".amba", -1) +{ + targetProxy.register_b_transport(this, &AmbaFromTlmBridge64::bTransport); + targetProxy.register_get_direct_mem_ptr( + this, &AmbaFromTlmBridge64::getDirectMemPtr); + targetProxy.register_transport_dbg(this, &AmbaFromTlmBridge64::transportDbg); + initiatorProxy.register_invalidate_direct_mem_ptr( + this, &AmbaFromTlmBridge64::invalidateDirectMemPtr); + initiatorProxy(tlm_s); +} Port & 
AmbaFromTlmBridge64::gem5_getPort(const std::string &if_name, int idx) @@ -55,6 +67,55 @@ AmbaFromTlmBridge64::gem5_getPort(const std::string &if_name, int idx) } } +void +AmbaFromTlmBridge64::bTransport(amba_pv::amba_pv_transaction &trans, + sc_core::sc_time &t) +{ + syncControlExtension(trans); + return initiatorProxy->b_transport(trans, t); +} + +bool +AmbaFromTlmBridge64::getDirectMemPtr(amba_pv::amba_pv_transaction &trans, + tlm::tlm_dmi &dmi_data) +{ + return initiatorProxy->get_direct_mem_ptr(trans, dmi_data); +} + +unsigned int +AmbaFromTlmBridge64::transportDbg(amba_pv::amba_pv_transaction &trans) +{ + syncControlExtension(trans); + return initiatorProxy->transport_dbg(trans); +} + +void +AmbaFromTlmBridge64::invalidateDirectMemPtr(sc_dt::uint64 start_range, + sc_dt::uint64 end_range) +{ + targetProxy->invalidate_direct_mem_ptr(start_range, end_range); +} + +void +AmbaFromTlmBridge64::syncControlExtension(amba_pv::amba_pv_transaction &trans) +{ + Gem5SystemC::ControlExtension *control_ex = nullptr; + trans.get_extension(control_ex); + if (!control_ex) { + return; + } + + amba_pv::amba_pv_extension *amba_ex = nullptr; + trans.get_extension(amba_ex); + if (!amba_ex) { + return; + } + + amba_ex->set_privileged(control_ex->isPrivileged()); + amba_ex->set_non_secure(!control_ex->isSecure()); + amba_ex->set_instruction(control_ex->isInstruction()); +} + } // namespace fastmodel fastmodel::AmbaFromTlmBridge64 * diff --git a/src/arch/arm/fastmodel/amba_from_tlm_bridge.hh b/src/arch/arm/fastmodel/amba_from_tlm_bridge.hh index a54617ddde..4484ea92c6 100644 --- a/src/arch/arm/fastmodel/amba_from_tlm_bridge.hh +++ b/src/arch/arm/fastmodel/amba_from_tlm_bridge.hh @@ -47,13 +47,25 @@ namespace fastmodel class AmbaFromTlmBridge64 : public amba_pv::amba_pv_from_tlm_bridge<64> { public: - AmbaFromTlmBridge64(const char *name); + AmbaFromTlmBridge64(const sc_core::sc_module_name &name); gem5::Port &gem5_getPort(const std::string &if_name, int idx=-1) override; private: - 
AmbaInitiator ambaWrapper; + void bTransport(amba_pv::amba_pv_transaction &trans, sc_core::sc_time &t); + bool getDirectMemPtr(amba_pv::amba_pv_transaction &trans, + tlm::tlm_dmi &dmi_data); + unsigned int transportDbg(amba_pv::amba_pv_transaction &trans); + void invalidateDirectMemPtr(sc_dt::uint64 start_range, + sc_dt::uint64 end_range); + void syncControlExtension(amba_pv::amba_pv_transaction &trans); + + tlm_utils::simple_target_socket< + AmbaFromTlmBridge64, 64, tlm::tlm_base_protocol_types> targetProxy; + tlm_utils::simple_initiator_socket< + AmbaFromTlmBridge64, 64, tlm::tlm_base_protocol_types> initiatorProxy; sc_gem5::TlmTargetWrapper<64> tlmWrapper; + AmbaInitiator ambaWrapper; }; } // namespace fastmodel diff --git a/src/arch/arm/fastmodel/amba_to_tlm_bridge.cc b/src/arch/arm/fastmodel/amba_to_tlm_bridge.cc index ad875099c6..e8807c474f 100644 --- a/src/arch/arm/fastmodel/amba_to_tlm_bridge.cc +++ b/src/arch/arm/fastmodel/amba_to_tlm_bridge.cc @@ -104,6 +104,7 @@ AmbaToTlmBridge64::bTransport(amba_pv::amba_pv_transaction &trans, sc_core::sc_time &t) { maybeSetupAtomicExtension(trans); + setupControlExtension(trans); return initiatorProxy->b_transport(trans, t); } @@ -160,7 +161,7 @@ AmbaToTlmBridge64::maybeSetupAtomicExtension( trans.set_data_ptr(dummy_buffer); // The return value would store in the extension. We don't need to specify - // need_return here. + // returnRequired here. 
atomic_ex = new Gem5SystemC::AtomicExtension( std::make_shared(fa), false); if (trans.has_mm()) @@ -169,6 +170,34 @@ AmbaToTlmBridge64::maybeSetupAtomicExtension( trans.set_extension(atomic_ex); } +void +AmbaToTlmBridge64::setupControlExtension(amba_pv::amba_pv_transaction &trans) +{ + Gem5SystemC::ControlExtension *control_ex = nullptr; + trans.get_extension(control_ex); + if (control_ex) { + return; + } + + amba_pv::amba_pv_extension *amba_ex = nullptr; + trans.get_extension(amba_ex); + if (!amba_ex) { + return; + } + + control_ex = new Gem5SystemC::ControlExtension(); + + control_ex->setPrivileged(amba_ex->is_privileged()); + control_ex->setSecure(!amba_ex->is_non_secure()); + control_ex->setInstruction(amba_ex->is_instruction()); + + if (trans.has_mm()) { + trans.set_auto_extension(control_ex); + } else { + trans.set_extension(control_ex); + } +} + } // namespace fastmodel fastmodel::AmbaToTlmBridge64 * diff --git a/src/arch/arm/fastmodel/amba_to_tlm_bridge.hh b/src/arch/arm/fastmodel/amba_to_tlm_bridge.hh index 6594fe460d..6874052a56 100644 --- a/src/arch/arm/fastmodel/amba_to_tlm_bridge.hh +++ b/src/arch/arm/fastmodel/amba_to_tlm_bridge.hh @@ -59,6 +59,7 @@ class AmbaToTlmBridge64 : public amba_pv::amba_pv_to_tlm_bridge<64> void invalidateDirectMemPtr(sc_dt::uint64 start_range, sc_dt::uint64 end_range); void maybeSetupAtomicExtension(amba_pv::amba_pv_transaction &trans); + void setupControlExtension(amba_pv::amba_pv_transaction &trans); tlm_utils::simple_target_socket< AmbaToTlmBridge64, 64, tlm::tlm_base_protocol_types> targetProxy; diff --git a/src/arch/arm/fastmodel/arm_fast_model.py b/src/arch/arm/fastmodel/arm_fast_model.py index f11443de2b..11004177c6 100644 --- a/src/arch/arm/fastmodel/arm_fast_model.py +++ b/src/arch/arm/fastmodel/arm_fast_model.py @@ -28,93 +28,118 @@ import os from m5.defines import buildEnv import _m5.arm_fast_model + def set_armlmd_license_file(force=False): - '''Set the ARMLMD_LICENSE_FILE environment variable. 
If "force" is - False, then it will only be set if it wasn't already set in the - environment. The value it's set to is the one gem5 was built with. - ''' - key = 'ARMLMD_LICENSE_FILE' + """Set the ARMLMD_LICENSE_FILE environment variable. If "force" is + False, then it will only be set if it wasn't already set in the + environment. The value it's set to is the one gem5 was built with. + """ + key = "ARMLMD_LICENSE_FILE" license_file = buildEnv[key] if force or key not in os.environ: os.environ[key] = license_file + # These methods wrap much of the SystemC Export API described in section # 7.6 of the Fast Models User Guide. + def scx_initialize(id): # Actually run scx_initialize. _m5.arm_fast_model.scx_initialize(id) + def scx_load_application(instance, application): _m5.arm_fast_model.scx_load_application(instance, application) + def scx_load_application_all(application): _m5.arm_fast_model.scx_load_application_all(application) + def scx_load_data(instance, data, address): _m5.arm_fast_model.scx_load_data(instance, data, address) + def scx_load_data_all(data, address): _m5.arm_fast_model.scx_load_data_all(data, address) + def scx_set_parameter(name, value): _m5.arm_fast_model.scx_set_parameter(name, value) + def scx_get_parameter(name): value = "" _m5.arm_fast_model.scx_get_parameter(name, value) return value + def scx_get_parameter_list(): return _m5.arm_fast_model.scx_get_parameter_list() + def scx_set_cpi_file(cpi_file_path): _m5.arm_fast_model.scx_set_cpi_file(cpi_file_path) + def scx_cpulimit(t): _m5.arm_fast_model.scx_cpulimit(t) + def scx_timelimit(t): _m5.arm_fast_model.scx_timelimit(t) + def scx_simlimit(t): _m5.arm_fast_model.scx_simlimit(t) -def scx_parse_and_configure( - self, argc, argv, trailer=None, sig_handler=True): + +def scx_parse_and_configure(self, argc, argv, trailer=None, sig_handler=True): _m5.arm_fast_model.scx_parse_and_configure( - argc, argv, trailer, sig_handler) + argc, argv, trailer, sig_handler + ) + def 
scx_start_cadi_server(start=True, run=True, debug=False): _m5.arm_fast_model.scx_start_cadi_server(start, run, debug) + def scx_enable_cadi_log(log=True): _m5.arm_fast_model.scx_enable_cadi_log(log) + def scx_prefix_appli_output(prefix=True): _m5.arm_fast_model.scx_prefix_appli_output(prefix) + def scx_print_port_number(print_=True): _m5.arm_fast_model.scx_print_port_number(print_) + def scx_print_statistics(print_=True): _m5.arm_fast_model.scx_print_statistics(print_) + def scx_load_plugin(file_): _m5.arm_fast_model.scx_load_plugin(file_) + def scx_sync(sync_time): _m5.arm_fast_model.scx_sync(sync_time) + def scx_set_min_sync_latency(latency): _m5.arm_fast_model.scx_set_min_sync_latency(latency) + def scx_get_min_sync_latency(arg=None): if arg: return _m5.arm_fast_model.scx_get_min_sync_latency(arg) else: return _m5.arm_fast_model.scx_get_min_sync_latency() + # This should be called once per simulation def setup_simulation(sim_name, min_sync_latency=100.0 / 100000000): set_armlmd_license_file() diff --git a/src/arch/arm/fastmodel/iris/Iris.py b/src/arch/arm/fastmodel/iris/Iris.py index 51eb3943dc..c38db908cc 100644 --- a/src/arch/arm/fastmodel/iris/Iris.py +++ b/src/arch/arm/fastmodel/iris/Iris.py @@ -44,42 +44,48 @@ from m5.objects.BaseISA import BaseISA from m5.objects.BaseTLB import BaseTLB from m5.objects.BaseMMU import BaseMMU + class IrisTLB(BaseTLB): - type = 'IrisTLB' - cxx_class = 'gem5::Iris::TLB' - cxx_header = 'arch/arm/fastmodel/iris/tlb.hh' + type = "IrisTLB" + cxx_class = "gem5::Iris::TLB" + cxx_header = "arch/arm/fastmodel/iris/tlb.hh" + class IrisMMU(BaseMMU): - type = 'IrisMMU' - cxx_class = 'gem5::Iris::MMU' - cxx_header = 'arch/arm/fastmodel/iris/mmu.hh' + type = "IrisMMU" + cxx_class = "gem5::Iris::MMU" + cxx_header = "arch/arm/fastmodel/iris/mmu.hh" itb = IrisTLB(entry_type="instruction") dtb = IrisTLB(entry_type="data") + class IrisInterrupts(BaseInterrupts): - type = 'IrisInterrupts' - cxx_class = 'gem5::Iris::Interrupts' - cxx_header = 
'arch/arm/fastmodel/iris/interrupts.hh' + type = "IrisInterrupts" + cxx_class = "gem5::Iris::Interrupts" + cxx_header = "arch/arm/fastmodel/iris/interrupts.hh" + class IrisISA(BaseISA): - type = 'IrisISA' - cxx_class = 'gem5::Iris::ISA' - cxx_header = 'arch/arm/fastmodel/iris/isa.hh' + type = "IrisISA" + cxx_class = "gem5::Iris::ISA" + cxx_header = "arch/arm/fastmodel/iris/isa.hh" -class IrisCPU(): + +class IrisCPU: ArchMMU = IrisMMU ArchInterrupts = IrisInterrupts ArchISA = IrisISA + class IrisBaseCPU(BaseCPU, IrisCPU): - type = 'IrisBaseCPU' + type = "IrisBaseCPU" abstract = True - cxx_class = 'gem5::Iris::BaseCPU' - cxx_header = 'arch/arm/fastmodel/iris/cpu.hh' + cxx_class = "gem5::Iris::BaseCPU" + cxx_header = "arch/arm/fastmodel/iris/cpu.hh" @classmethod def memory_mode(cls): - return 'atomic_noncaching' + return "atomic_noncaching" @classmethod def require_caches(cls): @@ -87,18 +93,20 @@ class IrisBaseCPU(BaseCPU, IrisCPU): @classmethod def support_take_over(cls): - #TODO Make this work. + # TODO Make this work. 
return False evs = Param.SystemC_ScModule( - "Fast model exported virtual subsystem holding cores") + "Fast model exported virtual subsystem holding cores" + ) thread_paths = VectorParam.String( - "Sub-paths to elements in the EVS which support a thread context") + "Sub-paths to elements in the EVS which support a thread context" + ) mmu = IrisMMU() def createThreads(self): if len(self.isa) == 0: - self.isa = [ IrisISA() for i in range(self.numThreads) ] + self.isa = [IrisISA() for i in range(self.numThreads)] else: - assert(len(self.isa) == int(self.numThreads)) + assert len(self.isa) == int(self.numThreads) diff --git a/src/arch/arm/fastmodel/iris/isa.hh b/src/arch/arm/fastmodel/iris/isa.hh index f4f3b977bc..208d2601b2 100644 --- a/src/arch/arm/fastmodel/iris/isa.hh +++ b/src/arch/arm/fastmodel/iris/isa.hh @@ -58,6 +58,30 @@ class ISA : public BaseISA { return new ArmISA::PCState(new_inst_addr); } + + RegVal + readMiscRegNoEffect(RegIndex idx) const override + { + panic("readMiscRegNoEffect not implemented."); + } + + RegVal + readMiscReg(RegIndex idx) override + { + panic("readMiscReg not implemented."); + } + + void + setMiscRegNoEffect(RegIndex idx, RegVal val) override + { + panic("setMiscRegNoEffect not implemented."); + } + + void + setMiscReg(RegIndex idx, RegVal val) override + { + panic("setMiscReg not implemented."); + } }; } // namespace Iris diff --git a/src/arch/arm/fastmodel/iris/thread_context.cc b/src/arch/arm/fastmodel/iris/thread_context.cc index eb7ba68e41..b12536dbdd 100644 --- a/src/arch/arm/fastmodel/iris/thread_context.cc +++ b/src/arch/arm/fastmodel/iris/thread_context.cc @@ -46,6 +46,7 @@ #include "arch/arm/fastmodel/iris/cpu.hh" #include "arch/arm/fastmodel/iris/memory_spaces.hh" +#include "arch/arm/regs/vec.hh" #include "arch/arm/system.hh" #include "arch/arm/utility.hh" #include "base/logging.hh" @@ -532,6 +533,19 @@ ThreadContext::sendFunctional(PacketPtr pkt) writeMem(id, addr, data, size); } +void 
+ThreadContext::readMemWithCurrentMsn(Addr vaddr, size_t size, char *data) +{ + readMem(getMemorySpaceId(Iris::CurrentMsn), vaddr, data, size); +} + +void +ThreadContext::writeMemWithCurrentMsn(Addr vaddr, size_t size, + const char *data) +{ + writeMem(getMemorySpaceId(Iris::CurrentMsn), vaddr, data, size); +} + ThreadContext::Status ThreadContext::status() const { @@ -590,11 +604,24 @@ ThreadContext::pcState(const PCStateBase &val) call().resource_write(_instId, result, pcRscId, pc); } +iris::ResourceId +ThreadContext::getMiscRegRscId(RegIndex misc_reg) const +{ + iris::ResourceId rsc_id = iris::IRIS_UINT64_MAX; + if (misc_reg < miscRegIds.size()) + rsc_id = miscRegIds.at(misc_reg); + + panic_if(rsc_id == iris::IRIS_UINT64_MAX, + "Misc reg %s is not supported by fast model.", + ArmISA::miscRegClass[misc_reg]); + return rsc_id; +} + RegVal ThreadContext::readMiscRegNoEffect(RegIndex misc_reg) const { iris::ResourceReadResult result; - call().resource_read(_instId, result, miscRegIds.at(misc_reg)); + call().resource_read(_instId, result, getMiscRegRscId(misc_reg)); return result.data.at(0); } @@ -602,7 +629,7 @@ void ThreadContext::setMiscRegNoEffect(RegIndex misc_reg, const RegVal val) { iris::ResourceWriteResult result; - call().resource_write(_instId, result, miscRegIds.at(misc_reg), val); + call().resource_write(_instId, result, getMiscRegRscId(misc_reg), val); } RegVal @@ -623,30 +650,52 @@ void ThreadContext::getReg(const RegId ®, void *val) const { const RegIndex idx = reg.index(); + const bool flat = reg.regClass().isFlat(); const RegClassType type = reg.classValue(); - switch (type) { - case IntRegClass: - *(RegVal *)val = readIntReg(idx); - break; - case FloatRegClass: - *(RegVal *)val = readFloatReg(idx); - break; - case VecRegClass: - *(ArmISA::VecRegContainer *)val = readVecReg(reg); - break; - case VecElemClass: - *(RegVal *)val = readVecElem(reg); - break; - case VecPredRegClass: - *(ArmISA::VecPredRegContainer *)val = readVecPredReg(reg); - break; - 
case CCRegClass: - *(RegVal *)val = readCCReg(idx); - break; - case MiscRegClass: - panic("MiscRegs should not be read with getReg."); - default: - panic("Unrecognized register class type %d.", type); + if (flat) { + switch (type) { + case IntRegClass: + *(RegVal *)val = readIntRegFlat(idx); + break; + case VecRegClass: + *(ArmISA::VecRegContainer *)val = readVecRegFlat(idx); + break; + case VecElemClass: + *(RegVal *)val = readVecElemFlat(idx); + break; + case VecPredRegClass: + *(ArmISA::VecPredRegContainer *)val = readVecPredRegFlat(idx); + break; + case CCRegClass: + *(RegVal *)val = readCCRegFlat(idx); + break; + case MiscRegClass: + panic("MiscRegs should not be read with getReg."); + default: + panic("Unrecognized register class type %d.", type); + } + } else { + switch (type) { + case IntRegClass: + *(RegVal *)val = readIntReg(idx); + break; + case VecRegClass: + *(ArmISA::VecRegContainer *)val = readVecReg(reg); + break; + case VecElemClass: + *(RegVal *)val = readVecElem(reg); + break; + case VecPredRegClass: + *(ArmISA::VecPredRegContainer *)val = readVecPredReg(reg); + break; + case CCRegClass: + *(RegVal *)val = readCCReg(idx); + break; + case MiscRegClass: + panic("MiscRegs should not be read with getReg."); + default: + panic("Unrecognized register class type %d.", type); + } } } @@ -654,155 +703,119 @@ void ThreadContext::setReg(const RegId ®, const void *val) { const RegIndex idx = reg.index(); + const bool flat = reg.regClass().isFlat(); const RegClassType type = reg.classValue(); - switch (type) { - case IntRegClass: - setIntReg(idx, *(RegVal *)val); - break; - case FloatRegClass: - setFloatReg(idx, *(RegVal *)val); - break; - case VecRegClass: - setVecReg(reg, *(ArmISA::VecRegContainer *)val); - break; - case VecElemClass: - setVecElem(reg, *(RegVal *)val); - break; - case VecPredRegClass: - setVecPredReg(reg, *(ArmISA::VecPredRegContainer *)val); - break; - case CCRegClass: - setCCReg(idx, *(RegVal *)val); - break; - case MiscRegClass: - 
panic("MiscRegs should not be read with getReg."); - default: - panic("Unrecognized register class type %d.", type); + if (flat) { + switch (type) { + case IntRegClass: + setIntRegFlat(idx, *(RegVal *)val); + break; + case VecRegClass: + setVecRegFlat(idx, *(ArmISA::VecRegContainer *)val); + break; + case VecElemClass: + setVecElemFlat(idx, *(RegVal *)val); + break; + case VecPredRegClass: + setVecPredRegFlat(idx, *(ArmISA::VecPredRegContainer *)val); + break; + case CCRegClass: + setCCRegFlat(idx, *(RegVal *)val); + break; + case MiscRegClass: + panic("MiscRegs should not be read with getReg."); + default: + panic("Unrecognized register class type %d.", type); + } + } else { + switch (type) { + case IntRegClass: + setIntReg(idx, *(RegVal *)val); + break; + case VecRegClass: + setVecReg(reg, *(ArmISA::VecRegContainer *)val); + break; + case VecElemClass: + setVecElem(reg, *(RegVal *)val); + break; + case VecPredRegClass: + setVecPredReg(reg, *(ArmISA::VecPredRegContainer *)val); + break; + case CCRegClass: + setCCReg(idx, *(RegVal *)val); + break; + case MiscRegClass: + panic("MiscRegs should not be read with getReg."); + default: + panic("Unrecognized register class type %d.", type); + } } } void * ThreadContext::getWritableReg(const RegId ®) -{ - const RegClassType type = reg.classValue(); - switch (type) { - case VecRegClass: - return &getWritableVecReg(reg); - case VecPredRegClass: - return &getWritableVecPredReg(reg); - default: - panic("Unrecognized register class type %d.", type); - } -} - -RegVal -ThreadContext::getRegFlat(const RegId ®) const -{ - RegVal val; - getRegFlat(reg, &val); - return val; -} - -void -ThreadContext::setRegFlat(const RegId ®, RegVal val) -{ - setRegFlat(reg, &val); -} - -void -ThreadContext::getRegFlat(const RegId ®, void *val) const { const RegIndex idx = reg.index(); + const bool flat = reg.regClass().isFlat(); const RegClassType type = reg.classValue(); - switch (type) { - case IntRegClass: - *(RegVal *)val = readIntRegFlat(idx); 
- break; - case VecRegClass: - *(ArmISA::VecRegContainer *)val = readVecRegFlat(idx); - break; - case VecElemClass: - *(RegVal *)val = readVecElemFlat(idx); - break; - case VecPredRegClass: - *(ArmISA::VecPredRegContainer *)val = readVecPredRegFlat(idx); - break; - case CCRegClass: - *(RegVal *)val = readCCRegFlat(idx); - break; - case MiscRegClass: - panic("MiscRegs should not be read with getReg."); - default: - panic("Unrecognized register class type %d.", type); + if (flat) { + switch (type) { + case VecRegClass: + return &getWritableVecRegFlat(idx); + case VecPredRegClass: + return &getWritableVecPredRegFlat(idx); + default: + panic("Unrecognized register class type %d.", type); + } + } else { + switch (type) { + case VecRegClass: + return &getWritableVecReg(reg); + case VecPredRegClass: + return &getWritableVecPredReg(reg); + default: + panic("Unrecognized register class type %d.", type); + } } } -void -ThreadContext::setRegFlat(const RegId ®, const void *val) +iris::ResourceId +ThreadContext::getIntRegRscId(RegIndex int_reg) const { - const RegIndex idx = reg.index(); - const RegClassType type = reg.classValue(); - switch (type) { - case IntRegClass: - setIntRegFlat(idx, *(RegVal *)val); - break; - case VecRegClass: - setVecRegFlat(idx, *(ArmISA::VecRegContainer *)val); - break; - case VecElemClass: - setVecElemFlat(idx, *(RegVal *)val); - break; - case VecPredRegClass: - setVecPredRegFlat(idx, *(ArmISA::VecPredRegContainer *)val); - break; - case CCRegClass: - setCCRegFlat(idx, *(RegVal *)val); - break; - case MiscRegClass: - panic("MiscRegs should not be read with getReg."); - default: - panic("Unrecognized register class type %d.", type); - } -} + ArmISA::CPSR cpsr = readMiscRegNoEffect(ArmISA::MISCREG_CPSR); + auto ®Ids = cpsr.width ? 
intReg32Ids : intReg64Ids; + iris::ResourceId rsc_id = iris::IRIS_UINT64_MAX; + if (int_reg < regIds.size()) + rsc_id = regIds.at(int_reg); -void * -ThreadContext::getWritableRegFlat(const RegId ®) -{ - const RegIndex idx = reg.index(); - const RegClassType type = reg.classValue(); - switch (type) { - case VecRegClass: - return &getWritableVecRegFlat(idx); - case VecPredRegClass: - return &getWritableVecPredRegFlat(idx); - default: - panic("Unrecognized register class type %d.", type); - } + panic_if(rsc_id == iris::IRIS_UINT64_MAX, + "Int reg %s is not supported by fast model.", + ArmISA::intRegClass[int_reg]); + return rsc_id; } RegVal ThreadContext::readIntReg(RegIndex reg_idx) const { - ArmISA::CPSR cpsr = readMiscRegNoEffect(ArmISA::MISCREG_CPSR); - iris::ResourceReadResult result; - if (cpsr.width) - call().resource_read(_instId, result, intReg32Ids.at(reg_idx)); - else - call().resource_read(_instId, result, intReg64Ids.at(reg_idx)); + call().resource_read(_instId, result, getIntRegRscId(reg_idx)); return result.data.at(0); } void ThreadContext::setIntReg(RegIndex reg_idx, RegVal val) { - ArmISA::CPSR cpsr = readMiscRegNoEffect(ArmISA::MISCREG_CPSR); - iris::ResourceWriteResult result; - if (cpsr.width) - call().resource_write(_instId, result, intReg32Ids.at(reg_idx), val); - else - call().resource_write(_instId, result, intReg64Ids.at(reg_idx), val); + call().resource_write(_instId, result, getIntRegRscId(reg_idx), val); +} + +iris::ResourceId +ThreadContext::getIntRegFlatRscId(RegIndex int_reg) const +{ + iris::ResourceId rsc_id = iris::IRIS_UINT64_MAX; + if (int_reg < flattenedIntIds.size()) + rsc_id = flattenedIntIds.at(int_reg); + return rsc_id; } /* @@ -812,45 +825,63 @@ ThreadContext::setIntReg(RegIndex reg_idx, RegVal val) RegVal ThreadContext::readIntRegFlat(RegIndex idx) const { - if (idx >= flattenedIntIds.size()) - return 0; - iris::ResourceId res_id = flattenedIntIds.at(idx); - if (res_id == iris::IRIS_UINT64_MAX) + auto rsc_id = 
getIntRegFlatRscId(idx); + if (rsc_id == iris::IRIS_UINT64_MAX) return 0; iris::ResourceReadResult result; - call().resource_read(_instId, result, res_id); + call().resource_read(_instId, result, rsc_id); return result.data.at(0); } void ThreadContext::setIntRegFlat(RegIndex idx, uint64_t val) { - iris::ResourceId res_id = - (idx >= flattenedIntIds.size()) ? iris::IRIS_UINT64_MAX : - flattenedIntIds.at(idx); - panic_if(res_id == iris::IRIS_UINT64_MAX, - "Int reg %d is not supported by fast model.", idx); + auto rsc_id = getIntRegFlatRscId(idx); + panic_if(rsc_id == iris::IRIS_UINT64_MAX, + "Int reg %s is not supported by fast model.", + ArmISA::intRegClass[idx]); iris::ResourceWriteResult result; - call().resource_write(_instId, result, flattenedIntIds.at(idx), val); + call().resource_write(_instId, result, rsc_id, val); +} + +iris::ResourceId +ThreadContext::getCCRegFlatRscId(RegIndex cc_reg) const +{ + iris::ResourceId rsc_id = iris::IRIS_UINT64_MAX; + if (cc_reg < ccRegIds.size()) + rsc_id = ccRegIds.at(cc_reg); + return rsc_id; } RegVal ThreadContext::readCCRegFlat(RegIndex idx) const { - if (idx >= ccRegIds.size()) + auto rsc_id = getCCRegFlatRscId(idx); + if (rsc_id == iris::IRIS_UINT64_MAX) return 0; iris::ResourceReadResult result; - call().resource_read(_instId, result, ccRegIds.at(idx)); + call().resource_read(_instId, result, rsc_id); return result.data.at(0); } void ThreadContext::setCCRegFlat(RegIndex idx, RegVal val) { - panic_if(idx >= ccRegIds.size(), - "CC reg %d is not supported by fast model.", idx); + auto rsc_id = getCCRegFlatRscId(idx); + panic_if(rsc_id == iris::IRIS_UINT64_MAX, + "CC reg %s is not supported by fast model.", + ArmISA::ccRegClass[idx]); iris::ResourceWriteResult result; - call().resource_write(_instId, result, ccRegIds.at(idx), val); + call().resource_write(_instId, result, rsc_id, val); +} + +iris::ResourceId +ThreadContext::getVecRegRscId(RegIndex vec_reg) const +{ + iris::ResourceId rsc_id = iris::IRIS_UINT64_MAX; + if 
(vec_reg < vecRegIds.size()) + rsc_id = vecRegIds.at(vec_reg); + return rsc_id; } const ArmISA::VecRegContainer & @@ -863,11 +894,12 @@ ThreadContext::readVecReg(const RegId ®_id) const // Ignore accesses to registers which aren't architected. gem5 defines a // few extra registers which it uses internally in the implementation of // some instructions. - if (idx >= vecRegIds.size()) + auto rsc_id = getVecRegRscId(reg_id); + if (rsc_id == iris::IRIS_UINT64_MAX) return reg; iris::ResourceReadResult result; - call().resource_read(_instId, result, vecRegIds.at(idx)); + call().resource_read(_instId, result, rsc_id); size_t data_size = result.data.size() * (sizeof(*result.data.data())); size_t size = std::min(data_size, reg.size()); memcpy(reg.as(), (void *)result.data.data(), size); @@ -878,7 +910,16 @@ ThreadContext::readVecReg(const RegId ®_id) const const ArmISA::VecRegContainer & ThreadContext::readVecRegFlat(RegIndex idx) const { - return readVecReg(RegId(VecRegClass, idx)); + return readVecReg(ArmISA::vecRegClass[idx]); +} + +iris::ResourceId +ThreadContext::getVecPredRegRscId(RegIndex vec_reg) const +{ + iris::ResourceId rsc_id = iris::IRIS_UINT64_MAX; + if (vec_reg < vecPredRegIds.size()) + rsc_id = vecPredRegIds.at(vec_reg); + return rsc_id; } const ArmISA::VecPredRegContainer & @@ -889,11 +930,12 @@ ThreadContext::readVecPredReg(const RegId ®_id) const ArmISA::VecPredRegContainer ® = vecPredRegs.at(idx); reg.reset(); - if (idx >= vecPredRegIds.size()) + auto rsc_id = getVecPredRegRscId(reg_id); + if (rsc_id == iris::IRIS_UINT64_MAX) return reg; iris::ResourceReadResult result; - call().resource_read(_instId, result, vecPredRegIds.at(idx)); + call().resource_read(_instId, result, rsc_id); size_t offset = 0; size_t num_bits = reg.NUM_BITS; @@ -913,7 +955,7 @@ ThreadContext::readVecPredReg(const RegId ®_id) const ArmISA::VecPredRegContainer ThreadContext::readVecPredRegFlat(RegIndex idx) const { - return readVecPredReg(RegId(VecPredRegClass, idx)); + return 
readVecPredReg(ArmISA::vecPredRegClass[idx]); } } // namespace Iris diff --git a/src/arch/arm/fastmodel/iris/thread_context.hh b/src/arch/arm/fastmodel/iris/thread_context.hh index 2feb25e405..05209e685e 100644 --- a/src/arch/arm/fastmodel/iris/thread_context.hh +++ b/src/arch/arm/fastmodel/iris/thread_context.hh @@ -286,8 +286,10 @@ class ThreadContext : public gem5::ThreadContext void setReg(const RegId ®, RegVal val) override; void setReg(const RegId ®, const void *val) override; + iris::ResourceId getIntRegRscId(RegIndex int_reg) const; virtual RegVal readIntReg(RegIndex reg_idx) const; + iris::ResourceId getVecRegRscId(RegIndex vec_reg) const; virtual const ArmISA::VecRegContainer &readVecReg(const RegId ®) const; virtual ArmISA::VecRegContainer & getWritableVecReg(const RegId ®) @@ -301,6 +303,7 @@ class ThreadContext : public gem5::ThreadContext panic("%s not implemented.", __FUNCTION__); } + iris::ResourceId getVecPredRegRscId(RegIndex vec_reg) const; virtual const ArmISA::VecPredRegContainer & readVecPredReg(const RegId ®) const; virtual ArmISA::VecPredRegContainer & @@ -347,6 +350,7 @@ class ThreadContext : public gem5::ThreadContext const PCStateBase &pcState() const override; void pcState(const PCStateBase &val) override; + iris::ResourceId getMiscRegRscId(RegIndex misc_reg) const; RegVal readMiscRegNoEffect(RegIndex misc_reg) const override; RegVal readMiscReg(RegIndex misc_reg) override @@ -361,12 +365,6 @@ class ThreadContext : public gem5::ThreadContext setMiscRegNoEffect(misc_reg, val); } - RegId - flattenRegId(const RegId& regId) const override - { - panic("%s not implemented.", __FUNCTION__); - } - // Also not necessarily the best location for these two. Hopefully will go // away once we decide upon where st cond failures goes. unsigned @@ -393,13 +391,7 @@ class ThreadContext : public gem5::ThreadContext * serialization code to access all registers. 
*/ - RegVal getRegFlat(const RegId ®) const override; - void getRegFlat(const RegId ®, void *val) const override; - void *getWritableRegFlat(const RegId ®) override; - - void setRegFlat(const RegId ®, RegVal val) override; - void setRegFlat(const RegId ®, const void *val) override; - + iris::ResourceId getIntRegFlatRscId(RegIndex int_reg) const; virtual RegVal readIntRegFlat(RegIndex idx) const; virtual void setIntRegFlat(RegIndex idx, uint64_t val); @@ -439,6 +431,7 @@ class ThreadContext : public gem5::ThreadContext panic("%s not implemented.", __FUNCTION__); } + iris::ResourceId getCCRegFlatRscId(RegIndex cc_reg) const; virtual RegVal readCCRegFlat(RegIndex idx) const; virtual void setCCRegFlat(RegIndex idx, RegVal val); /** @} */ @@ -461,6 +454,8 @@ class ThreadContext : public gem5::ThreadContext { panic("%s not implemented.", __FUNCTION__); } + void readMemWithCurrentMsn(Addr vaddr, size_t size, char *data); + void writeMemWithCurrentMsn(Addr vaddr, size_t size, const char *data); }; } // namespace Iris diff --git a/src/arch/arm/fastmodel/remote_gdb.cc b/src/arch/arm/fastmodel/remote_gdb.cc new file mode 100644 index 0000000000..e13fee8d70 --- /dev/null +++ b/src/arch/arm/fastmodel/remote_gdb.cc @@ -0,0 +1,61 @@ +/* * Copyright 2022 Google, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "arch/arm/fastmodel/remote_gdb.hh" + +#include "arch/arm/fastmodel/iris/thread_context.hh" + +namespace gem5 { + +namespace fastmodel { + +FastmodelRemoteGDB::FastmodelRemoteGDB(System *_system, int port) + : gem5::ArmISA::RemoteGDB(_system, port) +{ +} + +bool +FastmodelRemoteGDB::readBlob(Addr vaddr, size_t size, char *data) +{ + auto tc = dynamic_cast(context()); + panic_if(!tc, + "FastmodelRemoteGdb can only work on Iris::ThreadContext"); + tc->readMemWithCurrentMsn(vaddr, size, data); + return true; +} + +bool +FastmodelRemoteGDB::writeBlob(Addr vaddr, size_t size, const char *data) +{ + auto tc = dynamic_cast(context()); + panic_if(!tc, + "FastmodelRemoteGdb can only work on Iris::ThreadContext"); + tc->writeMemWithCurrentMsn(vaddr, size, data); + return true; +} + +} // namespace fastmodel +} // namespace gem5 diff --git a/src/arch/arm/vecregs.hh b/src/arch/arm/fastmodel/remote_gdb.hh similarity index 64% rename from src/arch/arm/vecregs.hh rename to src/arch/arm/fastmodel/remote_gdb.hh index 9f5621b5fc..93cf882e21 100644 --- a/src/arch/arm/vecregs.hh +++ b/src/arch/arm/fastmodel/remote_gdb.hh @@ -1,18 +1,5 @@ /* - * Copyright (c) 2010-2011, 2014, 
2016-2019 ARM Limited - * All rights reserved - * - * The license below extends only to copyright in the software and shall - * not be construed as granting a license to any other intellectual - * property including but not limited to intellectual property relating - * to a hardware implementation of the functionality of the software - * licensed hereunder. You may use the software subject to the license - * terms below provided that you ensure that this notice is replicated - * unmodified and in its entirety in all distributions of the software, - * modified or unmodified, in source code or in binary form. - * - * Copyright (c) 2007-2008 The Florida State University - * All rights reserved. + * Copyright 2022 Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are @@ -38,9 +25,28 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __ARCH_ARM_VECREGS_HH__ -#define __ARCH_ARM_VECREGS_HH__ +#ifndef __ARCH_ARM_FASTMODEL_REMOTE_GDB_HH__ +#define __ARCH_ARM_FASTMODEL_REMOTE_GDB_HH__ -#include "arch/arm/regs/vec.hh" +#include "arch/arm/remote_gdb.hh" -#endif +namespace gem5 +{ + +namespace fastmodel +{ + +class FastmodelRemoteGDB : public gem5::ArmISA::RemoteGDB +{ + public: + FastmodelRemoteGDB(System *_system, int port); + + private: + bool readBlob(Addr vaddr, size_t size, char *data) override; + bool writeBlob(Addr vaddr, size_t size, const char *data) override; +}; + +} // namespace fastmodel +} // namespace gem5 + +#endif // __ARCH_ARM_FASTMODEL_FASTMODEL_REMOTE_GDB_HH__ diff --git a/src/arch/arm/fastmodel/reset_controller/FastModelResetControllerExample.py b/src/arch/arm/fastmodel/reset_controller/FastModelResetControllerExample.py index b9327f4582..225c5d917b 100644 --- a/src/arch/arm/fastmodel/reset_controller/FastModelResetControllerExample.py +++ b/src/arch/arm/fastmodel/reset_controller/FastModelResetControllerExample.py @@ 
-30,11 +30,12 @@ from m5.objects.Device import BasicPioDevice from m5.objects.IntPin import IntSourcePin from m5.objects.Iris import IrisBaseCPU -class FastModelResetControllerExample(BasicPioDevice): - type = 'FastModelResetControllerExample' - cxx_class = 'gem5::fastmodel::ResetControllerExample' - cxx_header = 'arch/arm/fastmodel/reset_controller/example.hh' - cpu = Param.IrisBaseCPU('target cpu') - reset = IntSourcePin('reset pin') - halt = IntSourcePin('halt pin') +class FastModelResetControllerExample(BasicPioDevice): + type = "FastModelResetControllerExample" + cxx_class = "gem5::fastmodel::ResetControllerExample" + cxx_header = "arch/arm/fastmodel/reset_controller/example.hh" + + cpu = Param.IrisBaseCPU("target cpu") + reset = IntSourcePin("reset pin") + halt = IntSourcePin("halt pin") diff --git a/src/arch/arm/faults.cc b/src/arch/arm/faults.cc index 10097382b7..b4ef199201 100644 --- a/src/arch/arm/faults.cc +++ b/src/arch/arm/faults.cc @@ -205,106 +205,106 @@ template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values (the reset vector has an IMPLEMENTATION DEFINED // location in AArch64) "Reset", 0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC, - 0, 0, 0, 0, false, true, true, EC_UNKNOWN + 0, 0, 0, 0, false, true, true, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Undefined Instruction", 0x004, 0x000, 0x200, 0x400, 0x600, MODE_UNDEFINED, - 4, 2, 0, 0, true, false, false, EC_UNKNOWN + 4, 2, 0, 0, true, false, false, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Supervisor Call", 0x008, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 4, 2, 4, 2, true, false, false, EC_SVC_TO_HYP + 4, 2, 4, 2, true, false, false, ExceptionClass::SVC_TO_HYP ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Secure Monitor Call", 0x008, 0x000, 0x200, 0x400, 0x600, MODE_MON, - 4, 4, 4, 4, false, true, true, EC_SMC_TO_HYP + 4, 4, 4, 4, false, true, true, ExceptionClass::SMC_TO_HYP ); template<> 
ArmFault::FaultVals ArmFaultVals::vals( "Hypervisor Call", 0x008, 0x000, 0x200, 0x400, 0x600, MODE_HYP, - 4, 4, 4, 4, true, false, false, EC_HVC + 4, 4, 4, 4, true, false, false, ExceptionClass::HVC ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Prefetch Abort", 0x00C, 0x000, 0x200, 0x400, 0x600, MODE_ABORT, - 4, 4, 0, 0, true, true, false, EC_PREFETCH_ABORT_TO_HYP + 4, 4, 0, 0, true, true, false, ExceptionClass::PREFETCH_ABORT_TO_HYP ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Data Abort", 0x010, 0x000, 0x200, 0x400, 0x600, MODE_ABORT, - 8, 8, 0, 0, true, true, false, EC_DATA_ABORT_TO_HYP + 8, 8, 0, 0, true, true, false, ExceptionClass::DATA_ABORT_TO_HYP ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Virtual Data Abort", 0x010, 0x000, 0x200, 0x400, 0x600, MODE_ABORT, - 8, 8, 0, 0, true, true, false, EC_INVALID + 8, 8, 0, 0, true, true, false, ExceptionClass::INVALID ); template<> ArmFault::FaultVals ArmFaultVals::vals( // @todo: double check these values "Hypervisor Trap", 0x014, 0x000, 0x200, 0x400, 0x600, MODE_HYP, - 0, 0, 0, 0, false, false, false, EC_UNKNOWN + 0, 0, 0, 0, false, false, false, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Secure Monitor Trap", 0x004, 0x000, 0x200, 0x400, 0x600, MODE_MON, - 4, 2, 0, 0, false, false, false, EC_UNKNOWN + 4, 2, 0, 0, false, false, false, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( "IRQ", 0x018, 0x080, 0x280, 0x480, 0x680, MODE_IRQ, - 4, 4, 0, 0, false, true, false, EC_UNKNOWN + 4, 4, 0, 0, false, true, false, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Virtual IRQ", 0x018, 0x080, 0x280, 0x480, 0x680, MODE_IRQ, - 4, 4, 0, 0, false, true, false, EC_INVALID + 4, 4, 0, 0, false, true, false, ExceptionClass::INVALID ); template<> ArmFault::FaultVals ArmFaultVals::vals( "FIQ", 0x01C, 0x100, 0x300, 0x500, 0x700, MODE_FIQ, - 4, 4, 0, 0, false, true, true, EC_UNKNOWN + 4, 4, 0, 0, false, true, 
true, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Virtual FIQ", 0x01C, 0x100, 0x300, 0x500, 0x700, MODE_FIQ, - 4, 4, 0, 0, false, true, true, EC_INVALID + 4, 4, 0, 0, false, true, true, ExceptionClass::INVALID ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Illegal Inst Set State Fault", 0x004, 0x000, 0x200, 0x400, 0x600, MODE_UNDEFINED, - 4, 2, 0, 0, true, false, false, EC_ILLEGAL_INST + 4, 2, 0, 0, true, false, false, ExceptionClass::ILLEGAL_INST ); template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values (SupervisorTrap is AArch64-only) "Supervisor Trap", 0x014, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, false, false, false, EC_UNKNOWN + 0, 0, 0, 0, false, false, false, ExceptionClass::UNKNOWN ); template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values (PCAlignmentFault is AArch64-only) "PC Alignment Fault", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, true, false, false, EC_PC_ALIGNMENT + 0, 0, 0, 0, true, false, false, ExceptionClass::PC_ALIGNMENT ); template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values (SPAlignmentFault is AArch64-only) "SP Alignment Fault", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, true, false, false, EC_STACK_PTR_ALIGNMENT + 0, 0, 0, 0, true, false, false, ExceptionClass::STACK_PTR_ALIGNMENT ); template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values (SError is AArch64-only) "SError", 0x000, 0x180, 0x380, 0x580, 0x780, MODE_SVC, - 0, 0, 0, 0, false, true, true, EC_SERROR + 0, 0, 0, 0, false, true, true, ExceptionClass::SERROR ); template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values (SoftwareBreakpoint is AArch64-only) "Software Breakpoint", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, true, false, false, EC_SOFTWARE_BREAKPOINT + 0, 0, 0, 0, true, false, false, ExceptionClass::SOFTWARE_BREAKPOINT ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Hardware 
Breakpoint", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, true, false, false, EC_HW_BREAKPOINT + 0, 0, 0, 0, true, false, false, ExceptionClass::HW_BREAKPOINT ); template<> ArmFault::FaultVals ArmFaultVals::vals( "Watchpoint", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, true, false, false, EC_WATCHPOINT + 0, 0, 0, 0, true, false, false, ExceptionClass::WATCHPOINT ); template<> ArmFault::FaultVals ArmFaultVals::vals( "SoftwareStep", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC, - 0, 0, 0, 0, true, false, false, EC_SOFTWARE_STEP + 0, 0, 0, 0, true, false, false, ExceptionClass::SOFTWARE_STEP ); template<> ArmFault::FaultVals ArmFaultVals::vals( // Some dummy values "ArmSev Flush", 0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC, - 0, 0, 0, 0, false, true, true, EC_UNKNOWN + 0, 0, 0, 0, false, true, true, ExceptionClass::UNKNOWN ); Addr @@ -514,7 +514,7 @@ ArmFault::invoke32(ThreadContext *tc, const StaticInstPtr &inst) return; SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR); - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); CPSR saved_cpsr = tc->readMiscReg(MISCREG_CPSR); saved_cpsr.nz = tc->getReg(cc_reg::Nz); saved_cpsr.c = tc->getReg(cc_reg::C); @@ -533,10 +533,10 @@ ArmFault::invoke32(ThreadContext *tc, const StaticInstPtr &inst) // Ensure Secure state if initially in Monitor mode if (have_security && saved_cpsr.mode == MODE_MON) { - SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); if (scr.ns) { scr.ns = 0; - tc->setMiscRegNoEffect(MISCREG_SCR, scr); + tc->setMiscRegNoEffect(MISCREG_SCR_EL3, scr); } } @@ -605,7 +605,7 @@ ArmFault::invoke32(ThreadContext *tc, const StaticInstPtr &inst) break; case MODE_UNDEFINED: tc->setMiscReg(MISCREG_SPSR_UND, saved_cpsr); - if (ec(tc) != EC_UNKNOWN) + if (ec(tc) != ExceptionClass::UNKNOWN) setSyndrome(tc, MISCREG_HSR); break; case MODE_HYP: @@ -734,7 +734,7 @@ ArmFault::vectorCatch(ThreadContext *tc, const 
StaticInstPtr &inst) { SelfDebug *sd = ArmISA::ISA::getSelfDebug(tc); VectorCatch* vc = sd->getVectorCatch(tc); - if (!vc->isVCMatch()) { + if (vc && !vc->isVCMatch()) { Fault fault = sd->testVectorCatch(tc, 0x0, this); if (fault != NoFault) fault->invoke(tc, inst); @@ -827,7 +827,7 @@ UndefinedInstruction::invoke(ThreadContext *tc, const StaticInstPtr &inst) bool UndefinedInstruction::routeToHyp(ThreadContext *tc) const { - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); return fromEL == EL2 || (EL2Enabled(tc) && (fromEL == EL0) && hcr.tge); } @@ -841,7 +841,7 @@ UndefinedInstruction::iss() const return 0; } - if (overrideEc == EC_INVALID) + if (overrideEc == ExceptionClass::INVALID) return issRaw; uint32_t new_iss = 0; @@ -883,7 +883,7 @@ SupervisorCall::invoke(ThreadContext *tc, const StaticInstPtr &inst) bool SupervisorCall::routeToHyp(ThreadContext *tc) const { - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); return fromEL == EL2 || (EL2Enabled(tc) && fromEL == EL0 && hcr.tge); } @@ -891,8 +891,8 @@ SupervisorCall::routeToHyp(ThreadContext *tc) const ExceptionClass SupervisorCall::ec(ThreadContext *tc) const { - return (overrideEc != EC_INVALID) ? overrideEc : - (from64 ? EC_SVC_64 : vals.ec); + return (overrideEc != ExceptionClass::INVALID) ? overrideEc : + (from64 ? ExceptionClass::SVC_64 : vals.ec); } uint32_t @@ -917,9 +917,9 @@ UndefinedInstruction::ec(ThreadContext *tc) const // If UndefinedInstruction is routed to hypervisor, // HSR.EC field is 0. if (hypRouted) - return EC_UNKNOWN; + return ExceptionClass::UNKNOWN; else - return (overrideEc != EC_INVALID) ? overrideEc : vals.ec; + return (overrideEc != ExceptionClass::INVALID) ? overrideEc : vals.ec; } @@ -944,13 +944,13 @@ HypervisorCall::routeToHyp(ThreadContext *tc) const ExceptionClass HypervisorCall::ec(ThreadContext *tc) const { - return from64 ? EC_HVC_64 : vals.ec; + return from64 ? 
ExceptionClass::HVC_64 : vals.ec; } ExceptionClass HypervisorTrap::ec(ThreadContext *tc) const { - return (overrideEc != EC_INVALID) ? overrideEc : vals.ec; + return (overrideEc != ExceptionClass::INVALID) ? overrideEc : vals.ec; } template @@ -1027,7 +1027,7 @@ SecureMonitorCall::invoke(ThreadContext *tc, const StaticInstPtr &inst) ExceptionClass SecureMonitorCall::ec(ThreadContext *tc) const { - return (from64 ? EC_SMC_64 : vals.ec); + return (from64 ? ExceptionClass::SMC_64 : vals.ec); } bool @@ -1051,16 +1051,16 @@ ExceptionClass SupervisorTrap::ec(ThreadContext *tc) const { if (hypRouted) - return EC_UNKNOWN; + return ExceptionClass::UNKNOWN; else - return (overrideEc != EC_INVALID) ? overrideEc : vals.ec; + return (overrideEc != ExceptionClass::INVALID) ? overrideEc : vals.ec; } ExceptionClass SecureMonitorTrap::ec(ThreadContext *tc) const { - return (overrideEc != EC_INVALID) ? overrideEc : - (from64 ? EC_SMC_64 : vals.ec); + return (overrideEc != ExceptionClass::INVALID) ? overrideEc : + (from64 ? 
ExceptionClass::SMC_64 : vals.ec); } template @@ -1212,7 +1212,7 @@ bool AbortFault::abortDisable(ThreadContext *tc) { if (ArmSystem::haveEL(tc, EL3)) { - SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return (!scr.ns || scr.aw); } return true; @@ -1268,9 +1268,9 @@ PrefetchAbort::ec(ThreadContext *tc) const if (to64) { // AArch64 if (toEL == fromEL) - return EC_PREFETCH_ABORT_CURR_EL; + return ExceptionClass::PREFETCH_ABORT_CURR_EL; else - return EC_PREFETCH_ABORT_LOWER_EL; + return ExceptionClass::PREFETCH_ABORT_LOWER_EL; } else { // AArch32 // Abort faults have different EC codes depending on whether @@ -1302,12 +1302,7 @@ PrefetchAbort::iss() const bool PrefetchAbort::routeToMonitor(ThreadContext *tc) const { - SCR scr = 0; - if (from64) - scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); - else - scr = tc->readMiscRegNoEffect(MISCREG_SCR); - + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return scr.ea && !isMMUFault(); } @@ -1316,7 +1311,7 @@ PrefetchAbort::routeToHyp(ThreadContext *tc) const { bool toHyp; - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR); toHyp = fromEL == EL2; @@ -1336,9 +1331,9 @@ DataAbort::ec(ThreadContext *tc) const "SystemErrors (SErrors)!"); } if (toEL == fromEL) - return EC_DATA_ABORT_CURR_EL; + return ExceptionClass::DATA_ABORT_CURR_EL; else - return EC_DATA_ABORT_LOWER_EL; + return ExceptionClass::DATA_ABORT_LOWER_EL; } else { // AArch32 // Abort faults have different EC codes depending on whether @@ -1364,12 +1359,7 @@ DataAbort::il(ThreadContext *tc) const bool DataAbort::routeToMonitor(ThreadContext *tc) const { - SCR scr = 0; - if (from64) - scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); - else - scr = tc->readMiscRegNoEffect(MISCREG_SCR); - + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return scr.ea && !isMMUFault(); } @@ -1378,7 +1368,7 @@ 
DataAbort::routeToHyp(ThreadContext *tc) const { bool toHyp; - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR); bool amo = hcr.amo; @@ -1467,27 +1457,23 @@ void VirtualDataAbort::invoke(ThreadContext *tc, const StaticInstPtr &inst) { AbortFault::invoke(tc, inst); - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); hcr.va = 0; - tc->setMiscRegNoEffect(MISCREG_HCR, hcr); + tc->setMiscRegNoEffect(MISCREG_HCR_EL2, hcr); } bool Interrupt::routeToMonitor(ThreadContext *tc) const { assert(ArmSystem::haveEL(tc, EL3)); - SCR scr = 0; - if (from64) - scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); - else - scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return scr.irq; } bool Interrupt::routeToHyp(ThreadContext *tc) const { - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); return fromEL == EL2 || (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || hcr.imo)); } @@ -1496,7 +1482,7 @@ bool Interrupt::abortDisable(ThreadContext *tc) { if (ArmSystem::haveEL(tc, EL3)) { - SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return (!scr.ns || scr.aw); } return true; @@ -1509,18 +1495,14 @@ bool FastInterrupt::routeToMonitor(ThreadContext *tc) const { assert(ArmSystem::haveEL(tc, EL3)); - SCR scr = 0; - if (from64) - scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); - else - scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return scr.fiq; } bool FastInterrupt::routeToHyp(ThreadContext *tc) const { - HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); return fromEL == EL2 || (EL2Enabled(tc) && fromEL <= EL1 && (hcr.tge || hcr.fmo)); } @@ -1529,7 +1511,7 @@ bool 
FastInterrupt::abortDisable(ThreadContext *tc) { if (ArmSystem::haveEL(tc, EL3)) { - SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return (!scr.ns || scr.aw); } return true; @@ -1541,7 +1523,7 @@ FastInterrupt::fiqDisable(ThreadContext *tc) if (ArmSystem::haveEL(tc, EL2)) { return true; } else if (ArmSystem::haveEL(tc, EL3)) { - SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR); + SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); return (!scr.ns || scr.fw); } return true; @@ -1625,7 +1607,7 @@ SoftwareBreakpoint::routeToHyp(ThreadContext *tc) const ExceptionClass SoftwareBreakpoint::ec(ThreadContext *tc) const { - return from64 ? EC_SOFTWARE_BREAKPOINT_64 : vals.ec; + return from64 ? ExceptionClass::SOFTWARE_BREAKPOINT_64 : vals.ec; } HardwareBreakpoint::HardwareBreakpoint(Addr vaddr, uint32_t _iss) @@ -1646,9 +1628,9 @@ HardwareBreakpoint::ec(ThreadContext *tc) const { // AArch64 if (toEL == fromEL) - return EC_HW_BREAKPOINT_CURR_EL; + return ExceptionClass::HW_BREAKPOINT_CURR_EL; else - return EC_HW_BREAKPOINT_LOWER_EL; + return ExceptionClass::HW_BREAKPOINT_LOWER_EL; } void @@ -1734,9 +1716,9 @@ Watchpoint::ec(ThreadContext *tc) const { // AArch64 if (toEL == fromEL) - return EC_WATCHPOINT_CURR_EL; + return ExceptionClass::WATCHPOINT_CURR_EL; else - return EC_WATCHPOINT_LOWER_EL; + return ExceptionClass::WATCHPOINT_LOWER_EL; } SoftwareStepFault::SoftwareStepFault(ExtMachInst mach_inst, bool is_ldx, @@ -1762,9 +1744,9 @@ SoftwareStepFault::ec(ThreadContext *tc) const { // AArch64 if (toEL == fromEL) - return EC_SOFTWARE_STEP_CURR_EL; + return ExceptionClass::SOFTWARE_STEP_CURR_EL; else - return EC_SOFTWARE_STEP_LOWER_EL; + return ExceptionClass::SOFTWARE_STEP_LOWER_EL; } uint32_t diff --git a/src/arch/arm/faults.hh b/src/arch/arm/faults.hh index 6c0cda6b44..ec60d629f5 100644 --- a/src/arch/arm/faults.hh +++ b/src/arch/arm/faults.hh @@ -306,7 +306,8 @@ class ArmFaultVals : public ArmFault il(ThreadContext *tc) 
const override { // ESR.IL = 1 if exception cause is unknown (EC = 0) - return ec(tc) == EC_UNKNOWN || !machInst.thumb || machInst.bigThumb; + return ec(tc) == ExceptionClass::UNKNOWN || + !machInst.thumb || machInst.bigThumb; } uint32_t iss() const override { return issRaw; } }; @@ -336,7 +337,7 @@ class UndefinedInstruction : public ArmFaultVals bool _disabled = false) : ArmFaultVals(mach_inst), unknown(_unknown), disabled(_disabled), - overrideEc(EC_INVALID), mnemonic(_mnemonic) + overrideEc(ExceptionClass::INVALID), mnemonic(_mnemonic) {} UndefinedInstruction(ExtMachInst mach_inst, uint32_t _iss, ExceptionClass _overrideEc, const char *_mnemonic = NULL) : @@ -361,7 +362,7 @@ class SupervisorCall : public ArmFaultVals ExceptionClass overrideEc; public: SupervisorCall(ExtMachInst mach_inst, uint32_t _iss, - ExceptionClass _overrideEc = EC_INVALID) : + ExceptionClass _overrideEc = ExceptionClass::INVALID) : ArmFaultVals(mach_inst, _iss), overrideEc(_overrideEc) { @@ -404,7 +405,7 @@ class SupervisorTrap : public ArmFaultVals public: SupervisorTrap(ExtMachInst mach_inst, uint32_t _iss, - ExceptionClass _overrideEc = EC_INVALID) : + ExceptionClass _overrideEc = ExceptionClass::INVALID) : ArmFaultVals(mach_inst, _iss), overrideEc(_overrideEc) {} @@ -424,7 +425,7 @@ class SecureMonitorTrap : public ArmFaultVals public: SecureMonitorTrap(ExtMachInst mach_inst, uint32_t _iss, - ExceptionClass _overrideEc = EC_INVALID) : + ExceptionClass _overrideEc = ExceptionClass::INVALID) : ArmFaultVals(mach_inst, _iss), overrideEc(_overrideEc) {} @@ -454,7 +455,7 @@ class HypervisorTrap : public ArmFaultVals public: HypervisorTrap(ExtMachInst mach_inst, uint32_t _iss, - ExceptionClass _overrideEc = EC_INVALID) : + ExceptionClass _overrideEc = ExceptionClass::INVALID) : ArmFaultVals(mach_inst, _iss), overrideEc(_overrideEc) {} diff --git a/src/arch/arm/fs_workload.hh b/src/arch/arm/fs_workload.hh index 547bbf1e70..0811f3d6c7 100644 --- a/src/arch/arm/fs_workload.hh +++ 
b/src/arch/arm/fs_workload.hh @@ -153,7 +153,8 @@ class FsWorkload : public KernelWorkload setSystem(System *sys) override { KernelWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } Addr diff --git a/src/arch/arm/htm.cc b/src/arch/arm/htm.cc index 2549fe36c9..9c249c56a7 100644 --- a/src/arch/arm/htm.cc +++ b/src/arch/arm/htm.cc @@ -77,17 +77,13 @@ ArmISA::HTMCheckpoint::save(ThreadContext *tc) nzcv = tc->readMiscReg(MISCREG_NZCV); daif = tc->readMiscReg(MISCREG_DAIF); for (auto n = 0; n < int_reg::NumArchRegs; n++) { - x[n] = tc->getReg(RegId(IntRegClass, n)); + x[n] = tc->getReg(intRegClass[n]); } // TODO first detect if FP is enabled at this EL - for (auto n = 0; n < NumVecRegs; n++) { - RegId idx = RegId(VecRegClass, n); - tc->getReg(idx, &z[n]); - } - for (auto n = 0; n < NumVecPredRegs; n++) { - RegId idx = RegId(VecPredRegClass, n); - tc->getReg(idx, &p[n]); - } + for (auto n = 0; n < NumVecRegs; n++) + tc->getReg(vecRegClass[n], &z[n]); + for (auto n = 0; n < NumVecPredRegs; n++) + tc->getReg(vecPredRegClass[n], &p[n]); fpcr = tc->readMiscReg(MISCREG_FPCR); fpsr = tc->readMiscReg(MISCREG_FPSR); pcstateckpt = tc->pcState().as(); @@ -103,18 +99,13 @@ ArmISA::HTMCheckpoint::restore(ThreadContext *tc, HtmFailureFaultCause cause) //tc->setMiscReg(MISCREG_ICC_PMR_EL1, tme_checkpoint->iccPmrEl1); tc->setMiscReg(MISCREG_NZCV, nzcv); tc->setMiscReg(MISCREG_DAIF, daif); - for (auto n = 0; n < int_reg::NumArchRegs; n++) { - tc->setReg(RegId(IntRegClass, n), x[n]); - } + for (auto n = 0; n < int_reg::NumArchRegs; n++) + tc->setReg(intRegClass[n], x[n]); // TODO first detect if FP is enabled at this EL - for (auto n = 0; n < NumVecRegs; n++) { - RegId idx = RegId(VecRegClass, n); - tc->setReg(idx, &z[n]); - } - for (auto n = 0; n < NumVecPredRegs; n++) { - RegId idx = RegId(VecPredRegClass, n); - tc->setReg(idx, &p[n]); - } + for (auto n = 0; n < NumVecRegs; n++) + 
tc->setReg(vecRegClass[n], &z[n]); + for (auto n = 0; n < NumVecPredRegs; n++) + tc->setReg(vecPredRegClass[n], &p[n]); tc->setMiscReg(MISCREG_FPCR, fpcr); tc->setMiscReg(MISCREG_FPSR, fpsr); @@ -158,7 +149,7 @@ ArmISA::HTMCheckpoint::restore(ThreadContext *tc, HtmFailureFaultCause cause) replaceBits(error_code, 15, 1); if (interrupt) replaceBits(error_code, 23, 1); - tc->setReg(RegId(IntRegClass, rt), error_code); + tc->setReg(intRegClass[rt], error_code); // set next PC pcstateckpt.uReset(); diff --git a/src/arch/arm/insts/mem64.cc b/src/arch/arm/insts/mem64.cc index f21c43064e..7576a5c2af 100644 --- a/src/arch/arm/insts/mem64.cc +++ b/src/arch/arm/insts/mem64.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2013,2018, 2021 ARM Limited + * Copyright (c) 2011-2013,2018, 2021-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -58,6 +58,12 @@ SysDC64::generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const } +uint32_t +SysDC64::iss() const +{ + const MiscRegNum64 &misc_reg = encodeAArch64SysReg(dest); + return _iss(misc_reg, base); +} void Memory64::startDisassembly(std::ostream &os) const diff --git a/src/arch/arm/insts/mem64.hh b/src/arch/arm/insts/mem64.hh index da9188781f..43aabed43f 100644 --- a/src/arch/arm/insts/mem64.hh +++ b/src/arch/arm/insts/mem64.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2013,2017-2019, 2021 ARM Limited + * Copyright (c) 2011-2013,2017-2019, 2021-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -54,19 +54,20 @@ class SysDC64 : public MiscRegOp64 protected: RegIndex base; MiscRegIndex dest; - uint64_t imm; // This is used for fault handling only mutable Addr faultAddr; SysDC64(const char *mnem, ExtMachInst _machInst, OpClass __opClass, - RegIndex _base, MiscRegIndex _dest, uint64_t _imm) + RegIndex _base, MiscRegIndex _dest) : MiscRegOp64(mnem, _machInst, __opClass, false), - base(_base), 
dest(_dest), imm(_imm), faultAddr(0) + base(_base), dest(_dest), faultAddr(0) {} std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; + + uint32_t iss() const override; }; class MightBeMicro64 : public ArmStaticInst diff --git a/src/arch/arm/insts/misc.cc b/src/arch/arm/insts/misc.cc index 06a712ec28..546d2caebb 100644 --- a/src/arch/arm/insts/misc.cc +++ b/src/arch/arm/insts/misc.cc @@ -361,7 +361,7 @@ McrMrcMiscInst::McrMrcMiscInst(const char *_mnemonic, ExtMachInst _machInst, } Fault -McrMrcMiscInst::execute(ExecContext *xc, Trace::InstRecord *traceData) const +McrMrcMiscInst::execute(ExecContext *xc, trace::InstRecord *traceData) const { return mcrMrc15Trap(miscReg, machInst, xc->tcBase(), iss); } @@ -380,7 +380,7 @@ McrMrcImplDefined::McrMrcImplDefined(const char *_mnemonic, {} Fault -McrMrcImplDefined::execute(ExecContext *xc, Trace::InstRecord *traceData) const +McrMrcImplDefined::execute(ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = mcrMrc15Trap(miscReg, machInst, xc->tcBase(), iss); if (fault != NoFault) { @@ -408,7 +408,7 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const switch (dest_idx) { case MISCREG_TLBIALL: // TLBI all entries, EL0&1, { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIALL tlbiOp(EL1, secure); @@ -418,7 +418,7 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // TLB Invalidate All, Inner Shareable case MISCREG_TLBIALLIS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIALL tlbiOp(EL1, secure); @@ -428,7 +428,7 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // Instruction TLB Invalidate All case MISCREG_ITLBIALL: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR 
scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; ITLBIALL tlbiOp(EL1, secure); @@ -438,7 +438,7 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // Data TLB Invalidate All case MISCREG_DTLBIALL: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; DTLBIALL tlbiOp(EL1, secure); @@ -446,34 +446,61 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const return; } // TLB Invalidate by VA - // mcr tlbimval(is) is invalidating all matching entries - // regardless of the level of lookup, since in gem5 we cache - // in the tlb the last level of lookup only. case MISCREG_TLBIMVA: - case MISCREG_TLBIMVAL: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVA tlbiOp(EL1, secure, mbits(value, 31, 12), - bits(value, 7,0)); + bits(value, 7, 0), + false); + + tlbiOp(tc); + return; + } + // TLB Invalidate by VA, Last Level + case MISCREG_TLBIMVAL: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVA tlbiOp(EL1, + secure, + mbits(value, 31, 12), + bits(value, 7, 0), + true); tlbiOp(tc); return; } // TLB Invalidate by VA, Inner Shareable case MISCREG_TLBIMVAIS: - case MISCREG_TLBIMVALIS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVA tlbiOp(EL1, secure, mbits(value, 31, 12), - bits(value, 7,0)); + bits(value, 7, 0), + false); + + tlbiOp.broadcast(tc); + return; + } + // TLB Invalidate by VA, Last Level, Inner Shareable + case MISCREG_TLBIMVALIS: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVA tlbiOp(EL1, 
+ secure, + mbits(value, 31, 12), + bits(value, 7, 0), + true); tlbiOp.broadcast(tc); return; @@ -481,12 +508,12 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // TLB Invalidate by ASID match case MISCREG_TLBIASID: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIASID tlbiOp(EL1, secure, - bits(value, 7,0)); + bits(value, 7, 0)); tlbiOp(tc); return; @@ -494,87 +521,137 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // TLB Invalidate by ASID match, Inner Shareable case MISCREG_TLBIASIDIS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIASID tlbiOp(EL1, secure, - bits(value, 7,0)); + bits(value, 7, 0)); tlbiOp.broadcast(tc); return; } - // mcr tlbimvaal(is) is invalidating all matching entries - // regardless of the level of lookup, since in gem5 we cache - // in the tlb the last level of lookup only. 
// TLB Invalidate by VA, All ASID case MISCREG_TLBIMVAA: - case MISCREG_TLBIMVAAL: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVAA tlbiOp(EL1, secure, - mbits(value, 31,12)); + mbits(value, 31, 12), false); + + tlbiOp(tc); + return; + } + // TLB Invalidate by VA, Last Level, All ASID + case MISCREG_TLBIMVAAL: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVAA tlbiOp(EL1, secure, + mbits(value, 31, 12), true); tlbiOp(tc); return; } // TLB Invalidate by VA, All ASID, Inner Shareable case MISCREG_TLBIMVAAIS: - case MISCREG_TLBIMVAALIS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVAA tlbiOp(EL1, secure, - mbits(value, 31,12)); + mbits(value, 31, 12), false); + + tlbiOp.broadcast(tc); + return; + } + // TLB Invalidate by VA, All ASID, Last Level, Inner Shareable + case MISCREG_TLBIMVAALIS: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVAA tlbiOp(EL1, secure, + mbits(value, 31, 12), true); tlbiOp.broadcast(tc); return; } - // mcr tlbimvalh(is) is invalidating all matching entries - // regardless of the level of lookup, since in gem5 we cache - // in the tlb the last level of lookup only. 
// TLB Invalidate by VA, Hyp mode case MISCREG_TLBIMVAH: - case MISCREG_TLBIMVALH: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVAA tlbiOp(EL2, secure, - mbits(value, 31,12)); + mbits(value, 31, 12), false); + + tlbiOp(tc); + return; + } + // TLB Invalidate by VA, Last Level, Hyp mode + case MISCREG_TLBIMVALH: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVAA tlbiOp(EL2, secure, + mbits(value, 31, 12), true); tlbiOp(tc); return; } // TLB Invalidate by VA, Hyp mode, Inner Shareable case MISCREG_TLBIMVAHIS: - case MISCREG_TLBIMVALHIS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVAA tlbiOp(EL2, secure, - mbits(value, 31,12)); + mbits(value, 31, 12), false); + + tlbiOp.broadcast(tc); + return; + } + // TLB Invalidate by VA, Hyp mode, Last Level, Inner Shareable + case MISCREG_TLBIMVALHIS: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVAA tlbiOp(EL2, secure, + mbits(value, 31, 12), true); tlbiOp.broadcast(tc); return; } - // mcr tlbiipas2l(is) is invalidating all matching entries - // regardless of the level of lookup, since in gem5 we cache - // in the tlb the last level of lookup only. 
// TLB Invalidate by Intermediate Physical Address, Stage 2 case MISCREG_TLBIIPAS2: - case MISCREG_TLBIIPAS2L: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIIPA tlbiOp(EL1, secure, - static_cast(bits(value, 35, 0)) << 12); + static_cast(bits(value, 35, 0)) << 12, + false); + + tlbiOp(tc); + return; + } + // TLB Invalidate by Intermediate Physical Address, Stage 2, + // Last Level + case MISCREG_TLBIIPAS2L: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIIPA tlbiOp(EL1, + secure, + static_cast(bits(value, 35, 0)) << 12, + true); tlbiOp(tc); return; @@ -582,14 +659,29 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // TLB Invalidate by Intermediate Physical Address, Stage 2, // Inner Shareable case MISCREG_TLBIIPAS2IS: - case MISCREG_TLBIIPAS2LIS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIIPA tlbiOp(EL1, secure, - static_cast(bits(value, 35, 0)) << 12); + static_cast(bits(value, 35, 0)) << 12, + false); + + tlbiOp.broadcast(tc); + return; + } + // TLB Invalidate by Intermediate Physical Address, Stage 2, + // Last Level, Inner Shareable + case MISCREG_TLBIIPAS2LIS: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIIPA tlbiOp(EL1, + secure, + static_cast(bits(value, 35, 0)) << 12, + true); tlbiOp.broadcast(tc); return; @@ -597,13 +689,13 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // Instruction TLB Invalidate by VA case MISCREG_ITLBIMVA: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; ITLBIMVA tlbiOp(EL1, secure, 
mbits(value, 31, 12), - bits(value, 7,0)); + bits(value, 7, 0)); tlbiOp(tc); return; @@ -611,13 +703,13 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // Data TLB Invalidate by VA case MISCREG_DTLBIMVA: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; DTLBIMVA tlbiOp(EL1, secure, mbits(value, 31, 12), - bits(value, 7,0)); + bits(value, 7, 0)); tlbiOp(tc); return; @@ -625,12 +717,12 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // Instruction TLB Invalidate by ASID match case MISCREG_ITLBIASID: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; ITLBIASID tlbiOp(EL1, secure, - bits(value, 7,0)); + bits(value, 7, 0)); tlbiOp(tc); return; @@ -638,12 +730,12 @@ TlbiOp::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) const // Data TLB Invalidate by ASID match case MISCREG_DTLBIASID: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; DTLBIASID tlbiOp(EL1, secure, - bits(value, 7,0)); + bits(value, 7, 0)); tlbiOp(tc); return; diff --git a/src/arch/arm/insts/misc.hh b/src/arch/arm/insts/misc.hh index d9f24b9565..6c6e4e109b 100644 --- a/src/arch/arm/insts/misc.hh +++ b/src/arch/arm/insts/misc.hh @@ -414,7 +414,7 @@ class McrMrcMiscInst : public ArmISA::ArmStaticInst uint64_t _iss, ArmISA::MiscRegIndex _miscReg); Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -432,7 +432,7 @@ class McrMrcImplDefined : public McrMrcMiscInst uint64_t _iss, ArmISA::MiscRegIndex _miscReg); Fault execute(ExecContext *xc, - 
Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; diff --git a/src/arch/arm/insts/misc64.cc b/src/arch/arm/insts/misc64.cc index 5bbb739db5..40a6ca4ce5 100644 --- a/src/arch/arm/insts/misc64.cc +++ b/src/arch/arm/insts/misc64.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2013,2017-2021 Arm Limited + * Copyright (c) 2011-2013,2017-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -89,714 +89,38 @@ UnknownOp64::generateDisassembly( return csprintf("%-10s (inst %#08x)", "unknown", encoding()); } +uint32_t +MiscRegOp64::_iss(const MiscRegNum64 &misc_reg, RegIndex int_index) const +{ + return _miscRead | + (misc_reg.crm << 1) | + (int_index << 5) | + (misc_reg.crn << 10) | + (misc_reg.op1 << 14) | + (misc_reg.op2 << 17) | + (misc_reg.op0 << 20); +} + Fault -MiscRegOp64::trap(ThreadContext *tc, MiscRegIndex misc_reg, - ExceptionLevel el, uint32_t immediate) const +MiscRegOp64::generateTrap(ExceptionLevel el) const { - ExceptionClass ec = EC_TRAPPED_MSR_MRS_64; - - // Check for traps to supervisor (FP/SIMD regs) - if (el <= EL1 && checkEL1Trap(tc, misc_reg, el, ec, immediate)) { - return std::make_shared(machInst, immediate, ec); - } - - // Check for traps to hypervisor - if ((ArmSystem::haveEL(tc, EL2) && el <= EL2) && - checkEL2Trap(tc, misc_reg, el, ec, immediate)) { - return std::make_shared(machInst, immediate, ec); - } - - // Check for traps to secure monitor - if ((ArmSystem::haveEL(tc, EL3) && el <= EL3) && - checkEL3Trap(tc, misc_reg, el, ec, immediate)) { - return std::make_shared(machInst, immediate, ec); - } - - return NoFault; + return generateTrap(el, ExceptionClass::TRAPPED_MSR_MRS_64, iss()); } -bool -MiscRegOp64::checkEL1Trap(ThreadContext *tc, const MiscRegIndex misc_reg, - ExceptionLevel el, ExceptionClass &ec, - uint32_t &immediate) const +Fault 
+MiscRegOp64::generateTrap(ExceptionLevel el, ExceptionClass ec, + uint32_t iss) const { - const CPACR cpacr = tc->readMiscReg(MISCREG_CPACR_EL1); - const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); - const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); - const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); - - bool trap_to_sup = false; - switch (misc_reg) { - case MISCREG_DAIF: - trap_to_sup = !scr.ns && !scr.eel2 && !sctlr.uma && el == EL0; - trap_to_sup = trap_to_sup || - (el == EL0 && (scr.ns || scr.eel2) && !hcr.tge && !sctlr.uma); - break; - case MISCREG_DC_ZVA_Xt: - // In syscall-emulation mode, this test is skipped and DCZVA is always - // allowed at EL0 - trap_to_sup = el == EL0 && !sctlr.dze && FullSystem; - break; - case MISCREG_DC_CIVAC_Xt: - case MISCREG_DC_CVAC_Xt: - trap_to_sup = el == EL0 && !sctlr.uci; - break; - case MISCREG_FPCR: - case MISCREG_FPSR: - case MISCREG_FPEXC32_EL2: - if ((el == EL0 && cpacr.fpen != 0x3) || - (el == EL1 && !(cpacr.fpen & 0x1))) { - trap_to_sup = true; - ec = EC_TRAPPED_SIMD_FP; - immediate = 0x1E00000; - } - break; - case MISCREG_DC_CVAU_Xt: - trap_to_sup = !sctlr.uci && (!hcr.tge || (!scr.ns && !scr.eel2)) && - el == EL0; - break; - case MISCREG_CTR_EL0: - trap_to_sup = el == EL0 && !sctlr.uct && - (!hcr.tge || (!scr.ns && !scr.eel2)); - break; - case MISCREG_MDCCSR_EL0: - { - DBGDS32 mdscr = tc->readMiscReg(MISCREG_MDSCR_EL1); - trap_to_sup = el == EL0 && mdscr.tdcc && - (hcr.tge == 0x0 || ( scr.ns == 0x0)); - } - break; - case MISCREG_ZCR_EL1: - trap_to_sup = el == EL1 && ((cpacr.zen & 0x1) == 0x0); - break; - // Generic Timer - case MISCREG_CNTFRQ_EL0 ... 
MISCREG_CNTVOFF_EL2: - trap_to_sup = el == EL0 && - isGenericTimerSystemAccessTrapEL1(misc_reg, tc); - break; + switch (el) { + case EL1: + return std::make_shared(getEMI(), iss, ec); + case EL2: + return std::make_shared(getEMI(), iss, ec); + case EL3: + return std::make_shared(getEMI(), iss, ec); default: - break; + panic("Invalid EL: %d\n", el); } - return trap_to_sup; -} - -bool -MiscRegOp64::checkEL2Trap(ThreadContext *tc, const MiscRegIndex misc_reg, - ExceptionLevel el, ExceptionClass &ec, - uint32_t &immediate) const -{ - const CPTR cptr = tc->readMiscReg(MISCREG_CPTR_EL2); - const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); - const SCTLR sctlr2 = tc->readMiscReg(MISCREG_SCTLR_EL2); - const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); - const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); - const HDCR mdcr = tc->readMiscReg(MISCREG_MDCR_EL3); - - bool trap_to_hyp = false; - - switch (misc_reg) { - case MISCREG_IMPDEF_UNIMPL: - trap_to_hyp = EL2Enabled(tc) && hcr.tidcp && el == EL1; - break; - // GICv3 regs - case MISCREG_ICC_SGI0R_EL1: - { - auto *isa = static_cast(tc->getIsaPtr()); - if (isa->haveGICv3CpuIfc()) - trap_to_hyp = EL2Enabled(tc) && hcr.fmo && el == EL1; - } - break; - case MISCREG_ICC_SGI1R_EL1: - case MISCREG_ICC_ASGI1R_EL1: - { - auto *isa = static_cast(tc->getIsaPtr()); - if (isa->haveGICv3CpuIfc()) - trap_to_hyp = EL2Enabled(tc) && hcr.imo && el == EL1; - } - break; - case MISCREG_FPCR: - case MISCREG_FPSR: - case MISCREG_FPEXC32_EL2: - { - bool from_el2 = (el == EL2) && (scr.ns || scr.eel2) && - ELIs64(tc,EL2) && - ((!hcr.e2h && cptr.tfp) || - (hcr.e2h && (cptr.fpen == 0x0 || - cptr.fpen == 0xa))); - bool from_el1 = (el == EL1) && hcr.nv && - (!hcr.e2h || (hcr.e2h && !hcr.tge)); - trap_to_hyp = from_el2 || from_el1; - ec = EC_TRAPPED_SIMD_FP; - immediate = 0x1E00000; - } - break; - case MISCREG_CPACR_EL1: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && cptr.tcpac; - break; - case MISCREG_SCTLR_EL1: - case MISCREG_TTBR0_EL1: - case 
MISCREG_TTBR1_EL1: - case MISCREG_TCR_EL1: - case MISCREG_ESR_EL1: - case MISCREG_FAR_EL1: - case MISCREG_AFSR0_EL1: - case MISCREG_AFSR1_EL1: - case MISCREG_MAIR_EL1: - case MISCREG_AMAIR_EL1: - case MISCREG_CONTEXTIDR_EL1: - { - bool tvm = miscRead? hcr.trvm: hcr.tvm; - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && tvm; - } - break; - case MISCREG_CPACR_EL12: - case MISCREG_SCTLR_EL12: - case MISCREG_TTBR0_EL12: - case MISCREG_TTBR1_EL12: - case MISCREG_TCR_EL12: - case MISCREG_ESR_EL12: - case MISCREG_FAR_EL12: - case MISCREG_AFSR0_EL12: - case MISCREG_AFSR1_EL12: - case MISCREG_MAIR_EL12: - case MISCREG_AMAIR_EL12: - case MISCREG_CONTEXTIDR_EL12: - case MISCREG_SPSR_EL12: - case MISCREG_ELR_EL12: - case MISCREG_VBAR_EL12: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && - (hcr.nv && (hcr.nv1 || !hcr.nv2)); - break; - case MISCREG_TLBI_VMALLE1: - case MISCREG_TLBI_VAE1_Xt: - case MISCREG_TLBI_ASIDE1_Xt: - case MISCREG_TLBI_VAAE1_Xt: - case MISCREG_TLBI_VALE1_Xt: - case MISCREG_TLBI_VAALE1_Xt: -// case MISCREG_TLBI_RVAE1: -// case MISCREG_TLBI_RVAAE1: -// case MISCREG_TLBI_RVALE1: -// case MISCREG_TLBI_RVAALE1: - case MISCREG_TLBI_VMALLE1IS: - case MISCREG_TLBI_VAE1IS_Xt: - case MISCREG_TLBI_ASIDE1IS_Xt: - case MISCREG_TLBI_VAAE1IS_Xt: - case MISCREG_TLBI_VALE1IS_Xt: - case MISCREG_TLBI_VAALE1IS_Xt: -// case MISCREG_TLBI_RVAE1IS: -// case MISCREG_TLBI_RVAAE1IS: -// case MISCREG_TLBI_RVALE1IS: -// case MISCREG_TLBI_RVAALE1IS: -// case MISCREG_TLBI_VMALLE1OS: -// case MISCREG_TLBI_VAE1OS: -// case MISCREG_TLBI_ASIDE1OS: -// case MISCREG_TLBI_VAAE1OS: -// case MISCREG_TLBI_VALE1OS: -// case MISCREG_TLBI_VAALE1OS: -// case MISCREG_TLBI_RVAE1OS: -// case MISCREG_TLBI_RVAAE1OS: -// case MISCREG_TLBI_RVALE1OS: -// case MISCREG_TLBI_RVAALE1OS: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.ttlb; - break; - case MISCREG_IC_IVAU_Xt: - case MISCREG_ICIALLU: - case MISCREG_ICIALLUIS: - trap_to_hyp = (el == EL1) && EL2Enabled(tc) && hcr.tpu; - break; - case 
MISCREG_DC_CVAU_Xt: - { - const bool el2_en = EL2Enabled(tc); - if (el == EL0 && el2_en) { - const bool in_host = hcr.e2h && hcr.tge; - const bool general_trap = el2_en && !in_host && hcr.tge && - !sctlr.uci; - const bool tpu_trap = el2_en && !in_host && hcr.tpu; - const bool host_trap = el2_en && in_host && !sctlr2.uci; - trap_to_hyp = general_trap || tpu_trap || host_trap; - } - else if (el == EL1 && el2_en) { - trap_to_hyp = hcr.tpu; - } - } - break; - case MISCREG_DC_IVAC_Xt: - trap_to_hyp = EL2Enabled(tc) && el == EL1 && hcr.tpc; - break; - case MISCREG_DC_CVAC_Xt: -// case MISCREG_DC_CVAP_Xt: - case MISCREG_DC_CIVAC_Xt: - { - const bool el2_en = EL2Enabled(tc); - if (el == EL0 && el2_en) { - - const bool in_host = hcr.e2h && hcr.tge; - const bool general_trap = el2_en && !in_host && hcr.tge && - !sctlr.uci; - const bool tpc_trap = el2_en && !in_host && hcr.tpc; - const bool host_trap = el2_en && in_host && !sctlr2.uci; - trap_to_hyp = general_trap || tpc_trap || host_trap; - } else if (el == EL1 && el2_en) { - trap_to_hyp = hcr.tpc; - } - } - break; - case MISCREG_DC_ISW_Xt: - case MISCREG_DC_CSW_Xt: - case MISCREG_DC_CISW_Xt: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tsw; - break; - case MISCREG_ACTLR_EL1: - trap_to_hyp = EL2Enabled (tc) && (el == EL1) && hcr.tacr; - break; - case MISCREG_APDAKeyHi_EL1: - case MISCREG_APDAKeyLo_EL1: - case MISCREG_APDBKeyHi_EL1: - case MISCREG_APDBKeyLo_EL1: - case MISCREG_APGAKeyHi_EL1: - case MISCREG_APGAKeyLo_EL1: - case MISCREG_APIAKeyHi_EL1: - case MISCREG_APIAKeyLo_EL1: - case MISCREG_APIBKeyHi_EL1: - case MISCREG_APIBKeyLo_EL1: - trap_to_hyp = EL2Enabled(tc) && el == EL1 && !hcr.apk; - break; - case MISCREG_ID_PFR0_EL1: - case MISCREG_ID_PFR1_EL1: - //case MISCREG_ID_PFR2_EL1: - case MISCREG_ID_DFR0_EL1: - case MISCREG_ID_AFR0_EL1: - case MISCREG_ID_MMFR0_EL1: - case MISCREG_ID_MMFR1_EL1: - case MISCREG_ID_MMFR2_EL1: - case MISCREG_ID_MMFR3_EL1: - case MISCREG_ID_MMFR4_EL1: - case MISCREG_ID_ISAR0_EL1: - 
case MISCREG_ID_ISAR1_EL1: - case MISCREG_ID_ISAR2_EL1: - case MISCREG_ID_ISAR3_EL1: - case MISCREG_ID_ISAR4_EL1: - case MISCREG_ID_ISAR5_EL1: - case MISCREG_ID_ISAR6_EL1: - case MISCREG_MVFR0_EL1: - case MISCREG_MVFR1_EL1: - case MISCREG_MVFR2_EL1: - case MISCREG_ID_AA64PFR0_EL1: - case MISCREG_ID_AA64PFR1_EL1: - case MISCREG_ID_AA64DFR0_EL1: - case MISCREG_ID_AA64DFR1_EL1: - case MISCREG_ID_AA64ISAR0_EL1: - case MISCREG_ID_AA64ISAR1_EL1: - case MISCREG_ID_AA64MMFR0_EL1: - case MISCREG_ID_AA64MMFR1_EL1: - case MISCREG_ID_AA64MMFR2_EL1: - case MISCREG_ID_AA64AFR0_EL1: - case MISCREG_ID_AA64AFR1_EL1: - trap_to_hyp = EL2Enabled(tc) && el == EL1 && hcr.tid3; - break; - case MISCREG_CTR_EL0: - { - const bool el2_en = EL2Enabled(tc); - if (el == EL0 && el2_en) { - const bool in_host = hcr.e2h && hcr.tge; - const bool general_trap = el2_en && !in_host && hcr.tge && - !sctlr.uct; - const bool tid_trap = el2_en && !in_host && hcr.tid2; - const bool host_trap = el2_en && in_host && !sctlr2.uct; - trap_to_hyp = general_trap || tid_trap || host_trap; - } else if (el == EL1 && el2_en) { - trap_to_hyp = hcr.tid2; - } - } - break; - case MISCREG_CCSIDR_EL1: -// case MISCREG_CCSIDR2_EL1: - case MISCREG_CLIDR_EL1: - case MISCREG_CSSELR_EL1: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tid2; - break; - case MISCREG_AIDR_EL1: - case MISCREG_REVIDR_EL1: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.tid1; - break; - // Generic Timer - case MISCREG_CNTFRQ_EL0 ... 
MISCREG_CNTVOFF_EL2: - trap_to_hyp = EL2Enabled(tc) && el <= EL1 && - isGenericTimerSystemAccessTrapEL2(misc_reg, tc); - break; - case MISCREG_DAIF: - trap_to_hyp = EL2Enabled(tc) && el == EL0 && - (hcr.tge && (hcr.e2h || !sctlr.uma)); - break; - case MISCREG_SPSR_EL1: - case MISCREG_ELR_EL1: - case MISCREG_VBAR_EL1: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv1 && !hcr.nv2; - break; - case MISCREG_HCR_EL2: - case MISCREG_HSTR_EL2: - case MISCREG_SP_EL1: - case MISCREG_TPIDR_EL2: - case MISCREG_VTCR_EL2: - case MISCREG_VTTBR_EL2: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv && !hcr.nv2; - break; -// case MISCREG_AT_S1E1WP_Xt: -// case MISCREG_AT_S1E1RP_Xt: - case MISCREG_AT_S1E1R_Xt: - case MISCREG_AT_S1E1W_Xt: - case MISCREG_AT_S1E0W_Xt: - case MISCREG_AT_S1E0R_Xt: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.at; - break; - case MISCREG_ACTLR_EL2: - case MISCREG_AFSR0_EL2: - case MISCREG_AFSR1_EL2: - case MISCREG_AMAIR_EL2: - case MISCREG_CONTEXTIDR_EL2: - case MISCREG_CPTR_EL2: - case MISCREG_DACR32_EL2: - case MISCREG_ESR_EL2: - case MISCREG_FAR_EL2: - case MISCREG_HACR_EL2: - case MISCREG_HPFAR_EL2: - case MISCREG_MAIR_EL2: -// case MISCREG_RMR_EL2: - case MISCREG_SCTLR_EL2: - case MISCREG_TCR_EL2: - case MISCREG_TTBR0_EL2: - case MISCREG_TTBR1_EL2: - case MISCREG_VBAR_EL2: - case MISCREG_VMPIDR_EL2: - case MISCREG_VPIDR_EL2: - case MISCREG_TLBI_ALLE1: - case MISCREG_TLBI_ALLE1IS: -// case MISCREG_TLBI_ALLE1OS: - case MISCREG_TLBI_ALLE2: - case MISCREG_TLBI_ALLE2IS: -// case MISCREG_TLBI_ALLE2OS: - case MISCREG_TLBI_IPAS2E1_Xt: - case MISCREG_TLBI_IPAS2E1IS_Xt: -// case MISCREG_TLBI_IPAS2E1OS: - case MISCREG_TLBI_IPAS2LE1_Xt: - case MISCREG_TLBI_IPAS2LE1IS_Xt: -// case MISCREG_TLBI_IPAS2LE1OS: -// case MISCREG_TLBI_RIPAS2E1: -// case MISCREG_TLBI_RIPAS2E1IS: -// case MISCREG_TLBI_RIPAS2E1OS: -// case MISCREG_TLBI_RIPAS2LE1: -// case MISCREG_TLBI_RIPAS2LE1IS: -// case MISCREG_TLBI_RIPAS2LE1OS: -// case MISCREG_TLBI_RVAE2: -// case 
MISCREG_TLBI_RVAE2IS: -// case MISCREG_TLBI_RVAE2OS: -// case MISCREG_TLBI_RVALE2: -// case MISCREG_TLBI_RVALE2IS: -// case MISCREG_TLBI_RVALE2OS: - case MISCREG_TLBI_VAE2_Xt: - case MISCREG_TLBI_VAE2IS_Xt: -// case MISCREG_TLBI_VAE2OS: - case MISCREG_TLBI_VALE2_Xt: - case MISCREG_TLBI_VALE2IS_Xt: -// case MISCREG_TLBI_VALE2OS: - case MISCREG_TLBI_VMALLS12E1: - case MISCREG_TLBI_VMALLS12E1IS: -// case MISCREG_TLBI_VMALLS12E1OS: - case MISCREG_AT_S1E2W_Xt: - case MISCREG_AT_S1E2R_Xt: - case MISCREG_AT_S12E1R_Xt: - case MISCREG_AT_S12E1W_Xt: - case MISCREG_AT_S12E0W_Xt: - case MISCREG_AT_S12E0R_Xt: - case MISCREG_SPSR_UND: - case MISCREG_SPSR_IRQ: - case MISCREG_SPSR_FIQ: - case MISCREG_SPSR_ABT: - case MISCREG_SPSR_EL2: - case MISCREG_ELR_EL2: - case MISCREG_IFSR32_EL2: - case MISCREG_DBGVCR32_EL2: - case MISCREG_MDCR_EL2: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && hcr.nv; - break; -// case MISCREG_VSTTBR_EL2: -// case MISCREG_VSTCR_EL2: -// trap_to_hyp = (el == EL1) && !scr.ns && scr.eel2 && ELIs64(tc,EL2) -// && !hcr.nv2 && hcr.nv && (!hcr.e2h|| (hcr.e2h && !hcr.tge)); -// break; - - //case MISCREG_LORC_EL1: - //case MISCREG_LOREA_EL1: - //case MISCREG_LORID_EL1: - //case MISCREG_LORN_EL1: - //case MISCREG_LORSA_EL1: - // trap_to_hyp = (el == EL1) && (scr.ns || scr.eel2) && ELIs64(tc,EL2) - // && hcr.tlor && (!hcr.e2h || (hcr.e2h && !hcr.tge)); - // break; - - case MISCREG_DC_ZVA_Xt: - { - const bool el2_en = EL2Enabled(tc); - if (el == EL0 && el2_en) { - const bool in_host = hcr.e2h && hcr.tge; - const bool general_trap = el2_en && !in_host && hcr.tge && - !sctlr.dze; - const bool tdz_trap = el2_en && !in_host && hcr.tdz; - const bool host_trap = el2_en && in_host && !sctlr2.dze; - trap_to_hyp = general_trap || tdz_trap || host_trap; - } else if (el == EL1 && el2_en) { - trap_to_hyp = hcr.tdz; - } - } - break; - case MISCREG_DBGBVR0_EL1: - case MISCREG_DBGBVR1_EL1: - case MISCREG_DBGBVR2_EL1: - case MISCREG_DBGBVR3_EL1: - case MISCREG_DBGBVR4_EL1: - case 
MISCREG_DBGBVR5_EL1: - case MISCREG_DBGBVR6_EL1: - case MISCREG_DBGBVR7_EL1: - case MISCREG_DBGBVR8_EL1: - case MISCREG_DBGBVR9_EL1: - case MISCREG_DBGBVR10_EL1: - case MISCREG_DBGBVR11_EL1: - case MISCREG_DBGBVR12_EL1: - case MISCREG_DBGBVR13_EL1: - case MISCREG_DBGBVR14_EL1: - case MISCREG_DBGBVR15_EL1: - case MISCREG_DBGBCR0_EL1: - case MISCREG_DBGBCR1_EL1: - case MISCREG_DBGBCR2_EL1: - case MISCREG_DBGBCR3_EL1: - case MISCREG_DBGBCR4_EL1: - case MISCREG_DBGBCR5_EL1: - case MISCREG_DBGBCR6_EL1: - case MISCREG_DBGBCR7_EL1: - case MISCREG_DBGBCR8_EL1: - case MISCREG_DBGBCR9_EL1: - case MISCREG_DBGBCR10_EL1: - case MISCREG_DBGBCR11_EL1: - case MISCREG_DBGBCR12_EL1: - case MISCREG_DBGBCR13_EL1: - case MISCREG_DBGBCR14_EL1: - case MISCREG_DBGBCR15_EL1: - case MISCREG_DBGWVR0_EL1: - case MISCREG_DBGWVR1_EL1: - case MISCREG_DBGWVR2_EL1: - case MISCREG_DBGWVR3_EL1: - case MISCREG_DBGWVR4_EL1: - case MISCREG_DBGWVR5_EL1: - case MISCREG_DBGWVR6_EL1: - case MISCREG_DBGWVR7_EL1: - case MISCREG_DBGWVR8_EL1: - case MISCREG_DBGWVR9_EL1: - case MISCREG_DBGWVR10_EL1: - case MISCREG_DBGWVR11_EL1: - case MISCREG_DBGWVR12_EL1: - case MISCREG_DBGWVR13_EL1: - case MISCREG_DBGWVR14_EL1: - case MISCREG_DBGWVR15_EL1: - case MISCREG_DBGWCR0_EL1: - case MISCREG_DBGWCR1_EL1: - case MISCREG_DBGWCR2_EL1: - case MISCREG_DBGWCR3_EL1: - case MISCREG_DBGWCR4_EL1: - case MISCREG_DBGWCR5_EL1: - case MISCREG_DBGWCR6_EL1: - case MISCREG_DBGWCR7_EL1: - case MISCREG_DBGWCR8_EL1: - case MISCREG_DBGWCR9_EL1: - case MISCREG_DBGWCR10_EL1: - case MISCREG_DBGWCR11_EL1: - case MISCREG_DBGWCR12_EL1: - case MISCREG_DBGWCR13_EL1: - case MISCREG_DBGWCR14_EL1: - case MISCREG_DBGWCR15_EL1: - case MISCREG_MDCCINT_EL1: - trap_to_hyp = EL2Enabled(tc) && (el == EL1) && mdcr.tda; - break; - case MISCREG_ZCR_EL1: - { - bool from_el1 = (el == EL1) && EL2Enabled(tc) && - ELIs64(tc, EL2) && ((!hcr.e2h && cptr.tz) || - (hcr.e2h && ((cptr.zen & 0x1) == 0x0))); - bool from_el2 = (el == EL2) && ((!hcr.e2h && cptr.tz) || - 
(hcr.e2h && ((cptr.zen & 0x1) == 0x0))); - trap_to_hyp = from_el1 || from_el2; - } - ec = EC_TRAPPED_SVE; - immediate = 0; - break; - case MISCREG_ZCR_EL2: - { - bool from_el1 = (el == EL1) && EL2Enabled(tc) && hcr.nv; - bool from_el2 = (el == EL2) && ((!hcr.e2h && cptr.tz) || - (hcr.e2h && ((cptr.zen & 0x1) == 0x0))); - trap_to_hyp = from_el1 || from_el2; - ec = from_el1 ? EC_TRAPPED_MSR_MRS_64: EC_TRAPPED_SVE; - } - immediate = 0; - break; - default: - break; - } - return trap_to_hyp; -} - -bool -MiscRegOp64::checkEL3Trap(ThreadContext *tc, const MiscRegIndex misc_reg, - ExceptionLevel el, ExceptionClass &ec, - uint32_t &immediate) const -{ - const CPTR cptr = tc->readMiscReg(MISCREG_CPTR_EL3); - const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); - const HDCR mdcr = tc->readMiscReg(MISCREG_MDCR_EL3); - const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); - bool trap_to_mon = false; - - switch (misc_reg) { - // FP/SIMD regs - case MISCREG_FPCR: - case MISCREG_FPSR: - case MISCREG_FPEXC32_EL2: - trap_to_mon = cptr.tfp && ELIs64(tc, EL3); - ec = EC_TRAPPED_SIMD_FP; - immediate = 0x1E00000; - break; - // CPACR, CPTR - case MISCREG_CPACR_EL12: - trap_to_mon = ((el == EL2 && cptr.tcpac && ELIs64(tc, EL3)) || - (el == EL1 && cptr.tcpac && ELIs64(tc, EL3) && - (!hcr.nv2 || hcr.nv1 || !hcr.nv))) ; - break; - case MISCREG_CPACR_EL1: - trap_to_mon = el <= EL2 && cptr.tcpac && ELIs64(tc, EL3); - break; - case MISCREG_CPTR_EL2: - if (el == EL2) { - trap_to_mon = cptr.tcpac; - } - break; -// case MISCREG_LORC_EL1: -// case MISCREG_LOREA_EL1: -// case MISCREG_LORID_EL1: -// case MISCREG_LORN_EL1: -// case MISCREG_LORSA_EL1: -// trap_to_mon = (el <= EL2) && scr.ns && ELIs64(tc,EL3) -// && hcr.tlor && (!hcr.e2h || (hcr.e2h && !hcr.tge)); -// break; - case MISCREG_MDCCSR_EL0: - trap_to_mon = (el <= EL2) && ELIs64(tc, EL3) && mdcr.tda == 0x1; - break; - case MISCREG_APDAKeyHi_EL1: - case MISCREG_APDAKeyLo_EL1: - case MISCREG_APDBKeyHi_EL1: - case MISCREG_APDBKeyLo_EL1: - case 
MISCREG_APGAKeyHi_EL1: - case MISCREG_APGAKeyLo_EL1: - case MISCREG_APIAKeyHi_EL1: - case MISCREG_APIAKeyLo_EL1: - case MISCREG_APIBKeyHi_EL1: - case MISCREG_APIBKeyLo_EL1: - trap_to_mon = (el == EL1 || el == EL2) && scr.apk == 0 && - ELIs64(tc, EL3); - break; - // Generic Timer - case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2: - trap_to_mon = el == EL1 && - isGenericTimerSystemAccessTrapEL3(misc_reg, tc); - break; - case MISCREG_DBGBVR0_EL1: - case MISCREG_DBGBVR1_EL1: - case MISCREG_DBGBVR2_EL1: - case MISCREG_DBGBVR3_EL1: - case MISCREG_DBGBVR4_EL1: - case MISCREG_DBGBVR5_EL1: - case MISCREG_DBGBVR6_EL1: - case MISCREG_DBGBVR7_EL1: - case MISCREG_DBGBVR8_EL1: - case MISCREG_DBGBVR9_EL1: - case MISCREG_DBGBVR10_EL1: - case MISCREG_DBGBVR11_EL1: - case MISCREG_DBGBVR12_EL1: - case MISCREG_DBGBVR13_EL1: - case MISCREG_DBGBVR14_EL1: - case MISCREG_DBGBVR15_EL1: - case MISCREG_DBGBCR0_EL1: - case MISCREG_DBGBCR1_EL1: - case MISCREG_DBGBCR2_EL1: - case MISCREG_DBGBCR3_EL1: - case MISCREG_DBGBCR4_EL1: - case MISCREG_DBGBCR5_EL1: - case MISCREG_DBGBCR6_EL1: - case MISCREG_DBGBCR7_EL1: - case MISCREG_DBGBCR8_EL1: - case MISCREG_DBGBCR9_EL1: - case MISCREG_DBGBCR10_EL1: - case MISCREG_DBGBCR11_EL1: - case MISCREG_DBGBCR12_EL1: - case MISCREG_DBGBCR13_EL1: - case MISCREG_DBGBCR14_EL1: - case MISCREG_DBGBCR15_EL1: - case MISCREG_DBGVCR32_EL2: - case MISCREG_DBGWVR0_EL1: - case MISCREG_DBGWVR1_EL1: - case MISCREG_DBGWVR2_EL1: - case MISCREG_DBGWVR3_EL1: - case MISCREG_DBGWVR4_EL1: - case MISCREG_DBGWVR5_EL1: - case MISCREG_DBGWVR6_EL1: - case MISCREG_DBGWVR7_EL1: - case MISCREG_DBGWVR8_EL1: - case MISCREG_DBGWVR9_EL1: - case MISCREG_DBGWVR10_EL1: - case MISCREG_DBGWVR11_EL1: - case MISCREG_DBGWVR12_EL1: - case MISCREG_DBGWVR13_EL1: - case MISCREG_DBGWVR14_EL1: - case MISCREG_DBGWVR15_EL1: - case MISCREG_DBGWCR0_EL1: - case MISCREG_DBGWCR1_EL1: - case MISCREG_DBGWCR2_EL1: - case MISCREG_DBGWCR3_EL1: - case MISCREG_DBGWCR4_EL1: - case MISCREG_DBGWCR5_EL1: - case 
MISCREG_DBGWCR6_EL1: - case MISCREG_DBGWCR7_EL1: - case MISCREG_DBGWCR8_EL1: - case MISCREG_DBGWCR9_EL1: - case MISCREG_DBGWCR10_EL1: - case MISCREG_DBGWCR11_EL1: - case MISCREG_DBGWCR12_EL1: - case MISCREG_DBGWCR13_EL1: - case MISCREG_DBGWCR14_EL1: - case MISCREG_DBGWCR15_EL1: - case MISCREG_MDCCINT_EL1: - case MISCREG_MDCR_EL2: - trap_to_mon = ELIs64(tc, EL3) && mdcr.tda && (el == EL2); - break; - case MISCREG_ZCR_EL1: - trap_to_mon = !cptr.ez && ((el == EL3) || - ((el <= EL2) && ArmSystem::haveEL(tc,EL3) && ELIs64(tc, EL3))); - ec = EC_TRAPPED_SVE; - immediate = 0; - break; - case MISCREG_ZCR_EL2: - trap_to_mon = !cptr.ez && ((el == EL3) || - ((el == EL2) && ArmSystem::haveEL(tc,EL3) && ELIs64(tc, EL3))); - ec = EC_TRAPPED_SVE; - immediate = 0; - break; - case MISCREG_ZCR_EL3: - trap_to_mon = !cptr.ez && (el == EL3); - ec = EC_TRAPPED_SVE; - immediate = 0; - break; - default: - break; - } - return trap_to_mon; } RegVal @@ -838,6 +162,13 @@ MiscRegRegImmOp64::generateDisassembly( return ss.str(); } +uint32_t +MiscRegRegImmOp64::iss() const +{ + const MiscRegNum64 &misc_reg = encodeAArch64SysReg(dest); + return _iss(misc_reg, op1); +} + std::string RegMiscRegImmOp64::generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const @@ -850,27 +181,22 @@ RegMiscRegImmOp64::generateDisassembly( return ss.str(); } +uint32_t +RegMiscRegImmOp64::iss() const +{ + const MiscRegNum64 &misc_reg = encodeAArch64SysReg(op1); + return _iss(misc_reg, dest); +} + Fault MiscRegImplDefined64::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { auto tc = xc->tcBase(); const CPSR cpsr = tc->readMiscReg(MISCREG_CPSR); - const ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el; - Fault fault = trap(tc, miscReg, el, imm); - - if (fault != NoFault) { - return fault; - - } else if (warning) { - warn_once("\tinstruction '%s' unimplemented\n", fullMnemonic.c_str()); - return NoFault; - - } else { - return 
std::make_shared(machInst, false, - mnemonic); - } + return checkFaultAccessAArch64SysReg( + MISCREG_IMPDEF_UNIMPL, cpsr, tc, *this); } std::string @@ -880,6 +206,12 @@ MiscRegImplDefined64::generateDisassembly( return csprintf("%-10s (implementation defined)", fullMnemonic.c_str()); } +uint32_t +MiscRegImplDefined64::iss() const +{ + return _iss(miscReg, intReg); +} + std::string RegNone::generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const @@ -917,7 +249,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate All, EL2 case MISCREG_TLBI_ALLE2: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIALLEL tlbiOp(EL2, secure); @@ -927,7 +259,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate All, EL2, Inner Shareable case MISCREG_TLBI_ALLE2IS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIALLEL tlbiOp(EL2, secure); @@ -937,7 +269,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate All, EL1 case MISCREG_TLBI_ALLE1: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIALLEL tlbiOp(EL1, secure); @@ -947,7 +279,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate All, EL1, Inner Shareable case MISCREG_TLBI_ALLE1IS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIALLEL tlbiOp(EL1, secure); @@ -956,7 +288,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons } case 
MISCREG_TLBI_VMALLS12E1: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIVMALL tlbiOp(EL1, secure, true); @@ -965,7 +297,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons } case MISCREG_TLBI_VMALLE1: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); ExceptionLevel target_el = EL1; if (EL2Enabled(tc)) { @@ -982,7 +314,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons } case MISCREG_TLBI_VMALLS12E1IS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIVMALL tlbiOp(EL1, secure, true); @@ -991,7 +323,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons } case MISCREG_TLBI_VMALLE1IS: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); ExceptionLevel target_el = EL1; if (EL2Enabled(tc)) { @@ -1006,34 +338,50 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons tlbiOp.broadcast(tc); return; } - // VAEx(IS) and VALEx(IS) are the same because TLBs - // only store entries - // from the last level of translation table walks // AArch64 TLB Invalidate by VA, EL3 case MISCREG_TLBI_VAE3_Xt: + { + + TLBIMVAA tlbiOp(EL3, true, + static_cast(bits(value, 43, 0)) << 12, + false); + tlbiOp(tc); + return; + } + // AArch64 TLB Invalidate by VA, Last Level, EL3 case MISCREG_TLBI_VALE3_Xt: { TLBIMVAA tlbiOp(EL3, true, - static_cast(bits(value, 43, 0)) << 12); + static_cast(bits(value, 43, 0)) << 12, + true); tlbiOp(tc); return; } // AArch64 TLB Invalidate by VA, EL3, Inner Shareable case MISCREG_TLBI_VAE3IS_Xt: + { + TLBIMVAA tlbiOp(EL3, true, + static_cast(bits(value, 43, 0)) << 12, + false); + + tlbiOp.broadcast(tc); + return; + } + // AArch64 TLB Invalidate by VA, 
Last Level, EL3, Inner Shareable case MISCREG_TLBI_VALE3IS_Xt: { TLBIMVAA tlbiOp(EL3, true, - static_cast(bits(value, 43, 0)) << 12); + static_cast(bits(value, 43, 0)) << 12, + true); tlbiOp.broadcast(tc); return; } // AArch64 TLB Invalidate by VA, EL2 case MISCREG_TLBI_VAE2_Xt: - case MISCREG_TLBI_VALE2_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; @@ -1045,20 +393,45 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons TLBIMVA tlbiOp(EL2, secure, static_cast(bits(value, 43, 0)) << 12, - asid); + asid, false); tlbiOp(tc); } else { TLBIMVAA tlbiOp(EL2, secure, - static_cast(bits(value, 43, 0)) << 12); + static_cast(bits(value, 43, 0)) << 12, + false); + tlbiOp(tc); + } + return; + } + // AArch64 TLB Invalidate by VA, Last Level, EL2 + case MISCREG_TLBI_VALE2_Xt: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + + if (hcr.e2h) { + // The asid will only be used when e2h == 1 + auto asid = asid_16bits ? 
bits(value, 63, 48) : + bits(value, 55, 48); + + TLBIMVA tlbiOp(EL2, secure, + static_cast(bits(value, 43, 0)) << 12, + asid, true); + tlbiOp(tc); + } else { + TLBIMVAA tlbiOp(EL2, secure, + static_cast(bits(value, 43, 0)) << 12, + true); tlbiOp(tc); } return; } // AArch64 TLB Invalidate by VA, EL2, Inner Shareable case MISCREG_TLBI_VAE2IS_Xt: - case MISCREG_TLBI_VALE2IS_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; @@ -1070,20 +443,45 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons TLBIMVA tlbiOp(EL2, secure, static_cast(bits(value, 43, 0)) << 12, - asid); + asid, false); tlbiOp.broadcast(tc); } else { TLBIMVAA tlbiOp(EL2, secure, - static_cast(bits(value, 43, 0)) << 12); + static_cast(bits(value, 43, 0)) << 12, + false); + tlbiOp.broadcast(tc); + } + return; + } + // AArch64 TLB Invalidate by VA, Last Level, EL2, Inner Shareable + case MISCREG_TLBI_VALE2IS_Xt: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + + if (hcr.e2h) { + // The asid will only be used when e2h == 1 + auto asid = asid_16bits ? bits(value, 63, 48) : + bits(value, 55, 48); + + TLBIMVA tlbiOp(EL2, secure, + static_cast(bits(value, 43, 0)) << 12, + asid, true); + tlbiOp.broadcast(tc); + } else { + TLBIMVAA tlbiOp(EL2, secure, + static_cast(bits(value, 43, 0)) << 12, + true); tlbiOp.broadcast(tc); } return; } // AArch64 TLB Invalidate by VA, EL1 case MISCREG_TLBI_VAE1_Xt: - case MISCREG_TLBI_VALE1_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); auto asid = asid_16bits ? 
bits(value, 63, 48) : bits(value, 55, 48); @@ -1098,16 +496,38 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVA tlbiOp(target_el, secure, static_cast(bits(value, 43, 0)) << 12, - asid); + asid, false); + + tlbiOp(tc); + return; + } + // AArch64 TLB Invalidate by VA, Last Level, EL1 + case MISCREG_TLBI_VALE1_Xt: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + auto asid = asid_16bits ? bits(value, 63, 48) : + bits(value, 55, 48); + + ExceptionLevel target_el = EL1; + if (EL2Enabled(tc)) { + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (hcr.tge && hcr.e2h) { + target_el = EL2; + } + } + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVA tlbiOp(target_el, secure, + static_cast(bits(value, 43, 0)) << 12, + asid, true); tlbiOp(tc); return; } // AArch64 TLB Invalidate by VA, EL1, Inner Shareable case MISCREG_TLBI_VAE1IS_Xt: - case MISCREG_TLBI_VALE1IS_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); auto asid = asid_16bits ? bits(value, 63, 48) : bits(value, 55, 48); @@ -1122,7 +542,29 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVA tlbiOp(target_el, secure, static_cast(bits(value, 43, 0)) << 12, - asid); + asid, false); + + tlbiOp.broadcast(tc); + return; + } + case MISCREG_TLBI_VALE1IS_Xt: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + auto asid = asid_16bits ? 
bits(value, 63, 48) : + bits(value, 55, 48); + + ExceptionLevel target_el = EL1; + if (EL2Enabled(tc)) { + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (hcr.tge && hcr.e2h) { + target_el = EL2; + } + } + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVA tlbiOp(target_el, secure, + static_cast(bits(value, 43, 0)) << 12, + asid, true); tlbiOp.broadcast(tc); return; @@ -1130,7 +572,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate by ASID, EL1 case MISCREG_TLBI_ASIDE1_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); auto asid = asid_16bits ? bits(value, 63, 48) : bits(value, 55, 48); @@ -1150,7 +592,7 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate by ASID, EL1, Inner Shareable case MISCREG_TLBI_ASIDE1IS_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); auto asid = asid_16bits ? 
bits(value, 63, 48) : bits(value, 55, 48); @@ -1167,13 +609,10 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons tlbiOp.broadcast(tc); return; } - // VAAE1(IS) and VAALE1(IS) are the same because TLBs only store - // entries from the last level of translation table walks // AArch64 TLB Invalidate by VA, All ASID, EL1 case MISCREG_TLBI_VAAE1_Xt: - case MISCREG_TLBI_VAALE1_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); ExceptionLevel target_el = EL1; if (EL2Enabled(tc)) { @@ -1185,16 +624,37 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVAA tlbiOp(target_el, secure, - static_cast(bits(value, 43, 0)) << 12); + static_cast(bits(value, 43, 0)) << 12, + false); + + tlbiOp(tc); + return; + } + // AArch64 TLB Invalidate by VA, Last Level, All ASID, EL1 + case MISCREG_TLBI_VAALE1_Xt: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + ExceptionLevel target_el = EL1; + if (EL2Enabled(tc)) { + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (hcr.tge && hcr.e2h) { + target_el = EL2; + } + } + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVAA tlbiOp(target_el, secure, + static_cast(bits(value, 43, 0)) << 12, + true); tlbiOp(tc); return; } // AArch64 TLB Invalidate by VA, All ASID, EL1, Inner Shareable case MISCREG_TLBI_VAAE1IS_Xt: - case MISCREG_TLBI_VAALE1IS_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); ExceptionLevel target_el = EL1; if (EL2Enabled(tc)) { @@ -1206,7 +666,30 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; TLBIMVAA tlbiOp(target_el, secure, - static_cast(bits(value, 43, 0)) << 12); + static_cast(bits(value, 43, 0)) << 12, + false); + + tlbiOp.broadcast(tc); + return; + } + // AArch64 TLB Invalidate by VA, 
All ASID, + // Last Level, EL1, Inner Shareable + case MISCREG_TLBI_VAALE1IS_Xt: + { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + ExceptionLevel target_el = EL1; + if (EL2Enabled(tc)) { + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (hcr.tge && hcr.e2h) { + target_el = EL2; + } + } + + bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; + TLBIMVAA tlbiOp(target_el, secure, + static_cast(bits(value, 43, 0)) << 12, + true); tlbiOp.broadcast(tc); return; @@ -1214,29 +697,77 @@ TlbiOp64::performTlbi(ExecContext *xc, MiscRegIndex dest_idx, RegVal value) cons // AArch64 TLB Invalidate by Intermediate Physical Address, // Stage 2, EL1 case MISCREG_TLBI_IPAS2E1_Xt: + { + if (EL2Enabled(tc)) { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && + !scr.ns && !bits(value, 63); + + const int top_bit = ArmSystem::physAddrRange(tc) == 52 ? + 39 : 35; + TLBIIPA tlbiOp(EL1, secure, + static_cast(bits(value, top_bit, 0)) << 12, + false); + + tlbiOp(tc); + } + return; + } + // AArch64 TLB Invalidate by Intermediate Physical Address, + // Stage 2, Last Level EL1 case MISCREG_TLBI_IPAS2LE1_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + if (EL2Enabled(tc)) { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); - bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; - TLBIIPA tlbiOp(EL1, secure, - static_cast(bits(value, 35, 0)) << 12); + bool secure = release->has(ArmExtension::SECURITY) && + !scr.ns && !bits(value, 63); - tlbiOp(tc); + TLBIIPA tlbiOp(EL1, secure, + static_cast(bits(value, 35, 0)) << 12, + true); + + tlbiOp(tc); + } return; } // AArch64 TLB Invalidate by Intermediate Physical Address, // Stage 2, EL1, Inner Shareable case MISCREG_TLBI_IPAS2E1IS_Xt: + { + if (EL2Enabled(tc)) { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + + bool secure = release->has(ArmExtension::SECURITY) && + !scr.ns && !bits(value, 63); + + const int top_bit = ArmSystem::physAddrRange(tc) == 52 ? 
+ 39 : 35; + TLBIIPA tlbiOp(EL1, secure, + static_cast(bits(value, top_bit, 0)) << 12, + false); + + tlbiOp.broadcast(tc); + } + return; + } + // AArch64 TLB Invalidate by Intermediate Physical Address, + // Stage 2, Last Level, EL1, Inner Shareable case MISCREG_TLBI_IPAS2LE1IS_Xt: { - SCR scr = tc->readMiscReg(MISCREG_SCR); + if (EL2Enabled(tc)) { + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); - bool secure = release->has(ArmExtension::SECURITY) && !scr.ns; - TLBIIPA tlbiOp(EL1, secure, - static_cast(bits(value, 35, 0)) << 12); + bool secure = release->has(ArmExtension::SECURITY) && + !scr.ns && !bits(value, 63); - tlbiOp.broadcast(tc); + TLBIIPA tlbiOp(EL1, secure, + static_cast(bits(value, 35, 0)) << 12, + true); + + tlbiOp.broadcast(tc); + } return; } default: diff --git a/src/arch/arm/insts/misc64.hh b/src/arch/arm/insts/misc64.hh index 5e166f562a..b7b66c2674 100644 --- a/src/arch/arm/insts/misc64.hh +++ b/src/arch/arm/insts/misc64.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2013,2017-2019, 2021 Arm Limited + * Copyright (c) 2011-2013,2017-2019, 2021-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -124,29 +124,25 @@ class UnknownOp64 : public ArmISA::ArmStaticInst class MiscRegOp64 : public ArmISA::ArmStaticInst { protected: - bool miscRead; + bool _miscRead; MiscRegOp64(const char *mnem, ArmISA::ExtMachInst _machInst, OpClass __opClass, bool misc_read) : ArmISA::ArmStaticInst(mnem, _machInst, __opClass), - miscRead(misc_read) + _miscRead(misc_read) {} - Fault trap(ThreadContext *tc, ArmISA::MiscRegIndex misc_reg, - ArmISA::ExceptionLevel el, uint32_t immediate) const; - private: - bool checkEL1Trap(ThreadContext *tc, const ArmISA::MiscRegIndex misc_reg, - ArmISA::ExceptionLevel el, ArmISA::ExceptionClass &ec, - uint32_t &immediate) const; + uint32_t _iss(const ArmISA::MiscRegNum64 &misc_reg, + RegIndex int_index) const; - bool checkEL2Trap(ThreadContext *tc, const ArmISA::MiscRegIndex 
misc_reg, - ArmISA::ExceptionLevel el, ArmISA::ExceptionClass &ec, - uint32_t &immediate) const; + public: + virtual uint32_t iss() const { return 0; } - bool checkEL3Trap(ThreadContext *tc, const ArmISA::MiscRegIndex misc_reg, - ArmISA::ExceptionLevel el, ArmISA::ExceptionClass &ec, - uint32_t &immediate) const; + bool miscRead() const { return _miscRead; } + Fault generateTrap(ArmISA::ExceptionLevel el) const; + Fault generateTrap(ArmISA::ExceptionLevel el, + ArmISA::ExceptionClass ec, uint32_t iss) const; }; class MiscRegImmOp64 : public MiscRegOp64 @@ -178,17 +174,18 @@ class MiscRegRegImmOp64 : public MiscRegOp64 protected: ArmISA::MiscRegIndex dest; RegIndex op1; - uint32_t imm; MiscRegRegImmOp64(const char *mnem, ArmISA::ExtMachInst _machInst, OpClass __opClass, ArmISA::MiscRegIndex _dest, - RegIndex _op1, uint32_t _imm) : + RegIndex _op1) : MiscRegOp64(mnem, _machInst, __opClass, false), - dest(_dest), op1(_op1), imm(_imm) + dest(_dest), op1(_op1) {} std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; + + uint32_t iss() const override; }; class RegMiscRegImmOp64 : public MiscRegOp64 @@ -196,45 +193,45 @@ class RegMiscRegImmOp64 : public MiscRegOp64 protected: RegIndex dest; ArmISA::MiscRegIndex op1; - uint32_t imm; RegMiscRegImmOp64(const char *mnem, ArmISA::ExtMachInst _machInst, OpClass __opClass, RegIndex _dest, - ArmISA::MiscRegIndex _op1, uint32_t _imm) : + ArmISA::MiscRegIndex _op1) : MiscRegOp64(mnem, _machInst, __opClass, true), - dest(_dest), op1(_op1), imm(_imm) + dest(_dest), op1(_op1) {} std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; + + uint32_t iss() const override; }; class MiscRegImplDefined64 : public MiscRegOp64 { protected: const std::string fullMnemonic; - const ArmISA::MiscRegIndex miscReg; - const uint32_t imm; - const bool warning; + const ArmISA::MiscRegNum64 miscReg; + const RegIndex intReg; public: MiscRegImplDefined64(const char *mnem, 
ArmISA::ExtMachInst _machInst, - ArmISA::MiscRegIndex misc_reg, bool misc_read, - uint32_t _imm, const std::string full_mnem, - bool _warning) : + ArmISA::MiscRegNum64 &&misc_reg, RegIndex int_reg, + bool misc_read, const std::string full_mnem) : MiscRegOp64(mnem, _machInst, No_OpClass, misc_read), - fullMnemonic(full_mnem), miscReg(misc_reg), imm(_imm), - warning(_warning) + fullMnemonic(full_mnem), miscReg(misc_reg), intReg(int_reg) { - assert(miscReg == ArmISA::MISCREG_IMPDEF_UNIMPL); + assert(decodeAArch64SysReg(miscReg) == ArmISA::MISCREG_IMPDEF_UNIMPL); } protected: Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; + + uint32_t iss() const override; }; class RegNone : public ArmISA::ArmStaticInst @@ -257,8 +254,8 @@ class TlbiOp64 : public MiscRegRegImmOp64 protected: TlbiOp64(const char *mnem, ArmISA::ExtMachInst _machInst, OpClass __opClass, ArmISA::MiscRegIndex _dest, - RegIndex _op1, uint32_t _imm) : - MiscRegRegImmOp64(mnem, _machInst, __opClass, _dest, _op1, _imm) + RegIndex _op1) : + MiscRegRegImmOp64(mnem, _machInst, __opClass, _dest, _op1) {} void performTlbi(ExecContext *xc, diff --git a/src/arch/arm/insts/pred_inst.hh b/src/arch/arm/insts/pred_inst.hh index 29581a918b..da3db6c6a5 100644 --- a/src/arch/arm/insts/pred_inst.hh +++ b/src/arch/arm/insts/pred_inst.hh @@ -371,7 +371,7 @@ class PredMacroOp : public PredOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute method called when it shouldn't!"); } diff --git a/src/arch/arm/insts/pseudo.cc b/src/arch/arm/insts/pseudo.cc index ca5e594130..3d017c1857 100644 --- a/src/arch/arm/insts/pseudo.cc +++ b/src/arch/arm/insts/pseudo.cc @@ -58,7 +58,7 @@ DecoderFaultInst::DecoderFaultInst(ExtMachInst _machInst) } Fault 
-DecoderFaultInst::execute(ExecContext *xc, Trace::InstRecord *traceData) const +DecoderFaultInst::execute(ExecContext *xc, trace::InstRecord *traceData) const { const Addr pc = xc->pcState().instAddr(); @@ -130,7 +130,7 @@ FailUnimplemented::FailUnimplemented(const char *_mnemonic, } Fault -FailUnimplemented::execute(ExecContext *xc, Trace::InstRecord *traceData) const +FailUnimplemented::execute(ExecContext *xc, trace::InstRecord *traceData) const { return std::make_shared(machInst, false, mnemonic); } @@ -166,7 +166,7 @@ WarnUnimplemented::WarnUnimplemented(const char *_mnemonic, } Fault -WarnUnimplemented::execute(ExecContext *xc, Trace::InstRecord *traceData) const +WarnUnimplemented::execute(ExecContext *xc, trace::InstRecord *traceData) const { if (!warned) { warn("\tinstruction '%s' unimplemented\n", @@ -190,7 +190,7 @@ IllegalExecInst::IllegalExecInst(ExtMachInst _machInst) {} Fault -IllegalExecInst::execute(ExecContext *xc, Trace::InstRecord *traceData) const +IllegalExecInst::execute(ExecContext *xc, trace::InstRecord *traceData) const { return std::make_shared(); } @@ -200,7 +200,7 @@ DebugStep::DebugStep(ExtMachInst _machInst) { } Fault -DebugStep::execute(ExecContext *xc, Trace::InstRecord *traceData) const +DebugStep::execute(ExecContext *xc, trace::InstRecord *traceData) const { PCState pc_state = xc->pcState().as(); pc_state.debugStep(false); diff --git a/src/arch/arm/insts/pseudo.hh b/src/arch/arm/insts/pseudo.hh index 215f965dc1..981fa385b3 100644 --- a/src/arch/arm/insts/pseudo.hh +++ b/src/arch/arm/insts/pseudo.hh @@ -57,7 +57,7 @@ class DecoderFaultInst : public ArmISA::ArmStaticInst DecoderFaultInst(ArmISA::ExtMachInst _machInst); Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -83,7 +83,7 @@ class FailUnimplemented : public ArmISA::ArmStaticInst const std::string& 
_fullMnemonic); Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -113,7 +113,7 @@ class WarnUnimplemented : public ArmISA::ArmStaticInst const std::string& _fullMnemonic); Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -132,7 +132,7 @@ class IllegalExecInst : public ArmISA::ArmStaticInst IllegalExecInst(ArmISA::ExtMachInst _machInst); Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; }; class DebugStep : public ArmISA::ArmStaticInst @@ -141,7 +141,7 @@ class DebugStep : public ArmISA::ArmStaticInst DebugStep(ArmISA::ExtMachInst _machInst); Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const override; + trace::InstRecord *traceData) const override; }; } // namespace gem5 diff --git a/src/arch/arm/insts/static_inst.cc b/src/arch/arm/insts/static_inst.cc index 43679200ed..c07fb3922a 100644 --- a/src/arch/arm/insts/static_inst.cc +++ b/src/arch/arm/insts/static_inst.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2014, 2016-2020 ARM Limited + * Copyright (c) 2010-2014, 2016-2020,2022 Arm Limited * Copyright (c) 2013 Advanced Micro Devices, Inc. 
* All rights reserved * @@ -656,14 +656,14 @@ ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const { switch (el) { case EL1: - return std::make_shared(machInst, 0x1E00000, - EC_TRAPPED_SIMD_FP); + return std::make_shared( + machInst, 0x1E00000, ExceptionClass::TRAPPED_SIMD_FP); case EL2: - return std::make_shared(machInst, 0x1E00000, - EC_TRAPPED_SIMD_FP); + return std::make_shared( + machInst, 0x1E00000, ExceptionClass::TRAPPED_SIMD_FP); case EL3: - return std::make_shared(machInst, 0x1E00000, - EC_TRAPPED_SIMD_FP); + return std::make_shared( + machInst, 0x1E00000, ExceptionClass::TRAPPED_SIMD_FP); default: panic("Illegal EL in advSIMDFPAccessTrap64\n"); @@ -715,8 +715,9 @@ ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const { const ExceptionLevel el = currEL(tc); - if ((el == EL0 && cpacr.fpen != 0x3) || - (el == EL1 && !(cpacr.fpen & 0x1))) + if (((el == EL0 && cpacr.fpen != 0x3) || + (el == EL1 && !(cpacr.fpen & 0x1))) && + !ELIsInHost(tc, el)) return advSIMDFPAccessTrap64(EL1); return checkFPAdvSIMDTrap64(tc, cpsr); @@ -780,11 +781,11 @@ ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc, if (cur_el == EL2) { return std::make_shared( machInst, iss, - EC_TRAPPED_HCPTR, mnemonic); + ExceptionClass::TRAPPED_HCPTR, mnemonic); } else { return std::make_shared( machInst, iss, - EC_TRAPPED_HCPTR); + ExceptionClass::TRAPPED_HCPTR); } } @@ -851,13 +852,15 @@ ArmStaticInst::checkForWFxTrap32(ThreadContext *tc, case EL1: return std::make_shared( machInst, iss, - EC_TRAPPED_WFI_WFE, mnemonic); + ExceptionClass::TRAPPED_WFI_WFE, mnemonic); case EL2: - return std::make_shared(machInst, iss, - EC_TRAPPED_WFI_WFE); + return std::make_shared( + machInst, iss, + ExceptionClass::TRAPPED_WFI_WFE); case EL3: - return std::make_shared(machInst, iss, - EC_TRAPPED_WFI_WFE); + return std::make_shared( + machInst, iss, + ExceptionClass::TRAPPED_WFI_WFE); default: panic("Unrecognized Exception Level: %d\n", targetEL); } @@ -882,14 
+885,17 @@ ArmStaticInst::checkForWFxTrap64(ThreadContext *tc, 0x1E00000; /* WFI Instruction syndrome */ switch (targetEL) { case EL1: - return std::make_shared(machInst, iss, - EC_TRAPPED_WFI_WFE); + return std::make_shared( + machInst, iss, + ExceptionClass::TRAPPED_WFI_WFE); case EL2: - return std::make_shared(machInst, iss, - EC_TRAPPED_WFI_WFE); + return std::make_shared( + machInst, iss, + ExceptionClass::TRAPPED_WFI_WFE); case EL3: - return std::make_shared(machInst, iss, - EC_TRAPPED_WFI_WFE); + return std::make_shared( + machInst, iss, + ExceptionClass::TRAPPED_WFI_WFE); default: panic("Unrecognized Exception Level: %d\n", targetEL); } @@ -971,7 +977,7 @@ ArmStaticInst::undefinedFault32(ThreadContext *tc, // ArmFault class. return std::make_shared( machInst, 0, - EC_UNKNOWN, mnemonic); + ExceptionClass::UNKNOWN, mnemonic); } } @@ -982,11 +988,14 @@ ArmStaticInst::undefinedFault64(ThreadContext *tc, switch (pstateEL) { case EL0: case EL1: - return std::make_shared(machInst, 0, EC_UNKNOWN); + return std::make_shared( + machInst, 0, ExceptionClass::UNKNOWN); case EL2: - return std::make_shared(machInst, 0, EC_UNKNOWN); + return std::make_shared( + machInst, 0, ExceptionClass::UNKNOWN); case EL3: - return std::make_shared(machInst, 0, EC_UNKNOWN); + return std::make_shared( + machInst, 0, ExceptionClass::UNKNOWN); default: panic("Unrecognized Exception Level: %d\n", pstateEL); break; @@ -1000,12 +1009,14 @@ ArmStaticInst::sveAccessTrap(ExceptionLevel el) const { switch (el) { case EL1: - return std::make_shared(machInst, 0, EC_TRAPPED_SVE); + return std::make_shared( + machInst, 0, ExceptionClass::TRAPPED_SVE); case EL2: - return std::make_shared(machInst, 0, EC_TRAPPED_SVE); + return std::make_shared( + machInst, 0, ExceptionClass::TRAPPED_SVE); case EL3: - return std::make_shared(machInst, 0, - EC_TRAPPED_SVE); + return std::make_shared( + machInst, 0, ExceptionClass::TRAPPED_SVE); default: panic("Illegal EL in sveAccessTrap\n"); diff --git 
a/src/arch/arm/insts/static_inst.hh b/src/arch/arm/insts/static_inst.hh index e30ba28f7e..fa58f98de9 100644 --- a/src/arch/arm/insts/static_inst.hh +++ b/src/arch/arm/insts/static_inst.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2013,2016-2018 ARM Limited + * Copyright (c) 2010-2013,2016-2018, 2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -398,12 +398,7 @@ class ArmStaticInst : public StaticInst xc->pcState(pc); } - inline Fault - disabledFault() const - { - return std::make_shared(machInst, false, - mnemonic, true); - } + inline Fault disabledFault() const { return undefined(true); } // Utility function used by checkForWFxTrap32 and checkForWFxTrap64 // Returns true if processor has to trap a WFI/WFE instruction. @@ -587,6 +582,13 @@ class ArmStaticInst : public StaticInst { return getCurSveVecLenInBits(tc) / (8 * sizeof(T)); } + + inline Fault + undefined(bool disabled=false) const + { + return std::make_shared( + machInst, false, mnemonic, disabled); + } }; } // namespace ArmISA diff --git a/src/arch/arm/insts/sve_macromem.hh b/src/arch/arm/insts/sve_macromem.hh index 8e94fa42fa..e7d5608cca 100644 --- a/src/arch/arm/insts/sve_macromem.hh +++ b/src/arch/arm/insts/sve_macromem.hh @@ -89,7 +89,7 @@ class SveLdStructSS : public PredMacroOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute method called when it shouldn't!"); return NoFault; @@ -162,7 +162,7 @@ class SveStStructSS : public PredMacroOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute method called when it shouldn't!"); return NoFault; @@ -235,7 +235,7 @@ class SveLdStructSI : public PredMacroOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute 
method called when it shouldn't!"); return NoFault; @@ -309,7 +309,7 @@ class SveStStructSI : public PredMacroOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute method called when it shouldn't!"); return NoFault; @@ -409,7 +409,7 @@ class SveIndexedMemVI : public PredMacroOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute method called when it shouldn't!"); return NoFault; @@ -514,7 +514,7 @@ class SveIndexedMemSV : public PredMacroOp } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Execute method called when it shouldn't!"); return NoFault; diff --git a/src/arch/arm/insts/tme64.cc b/src/arch/arm/insts/tme64.cc index 2b822831a4..adcc8dd77f 100644 --- a/src/arch/arm/insts/tme64.cc +++ b/src/arch/arm/insts/tme64.cc @@ -89,14 +89,14 @@ MicroTfence64::MicroTfence64(ExtMachInst machInst) Fault MicroTfence64::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } Fault MicroTfence64::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("tfence should not have memory semantics"); @@ -105,7 +105,7 @@ MicroTfence64::initiateAcc(ExecContext *xc, Fault MicroTfence64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("tfence should not have memory semantics"); @@ -123,7 +123,7 @@ Tstart64::Tstart64(ExtMachInst machInst, RegIndex _dest) _numSrcRegs = 0; _numDestRegs = 0; - setDestRegIdx(_numDestRegs++, RegId(IntRegClass, dest)); + setDestRegIdx(_numDestRegs++, intRegClass[dest]); _numTypedDestRegs[IntRegClass]++; flags[IsHtmStart] = true; flags[IsInteger] = true; @@ -134,7 
+134,7 @@ Tstart64::Tstart64(ExtMachInst machInst, RegIndex _dest) Fault Tstart64::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { panic("TME is not supported with atomic memory"); @@ -152,7 +152,7 @@ Ttest64::Ttest64(ExtMachInst machInst, RegIndex _dest) _numSrcRegs = 0; _numDestRegs = 0; - setDestRegIdx(_numDestRegs++, RegId(IntRegClass, dest)); + setDestRegIdx(_numDestRegs++, intRegClass[dest]); _numTypedDestRegs[IntRegClass]++; flags[IsInteger] = true; flags[IsMicroop] = true; @@ -171,7 +171,7 @@ Tcancel64::Tcancel64(ExtMachInst machInst, uint64_t _imm) Fault Tcancel64::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { panic("TME is not supported with atomic memory"); @@ -201,7 +201,7 @@ MicroTcommit64::MicroTcommit64(ExtMachInst machInst) } Fault -MicroTcommit64::execute(ExecContext *xc, Trace::InstRecord *traceData) const +MicroTcommit64::execute(ExecContext *xc, trace::InstRecord *traceData) const { panic("TME is not supported with atomic memory"); diff --git a/src/arch/arm/insts/tme64.hh b/src/arch/arm/insts/tme64.hh index c8994f34d4..8588eaf925 100644 --- a/src/arch/arm/insts/tme64.hh +++ b/src/arch/arm/insts/tme64.hh @@ -106,9 +106,9 @@ class Tstart64 : public TmeRegNone64 public: Tstart64(ArmISA::ExtMachInst, RegIndex); - Fault execute(ExecContext *, Trace::InstRecord *) const; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const; - Fault completeAcc(PacketPtr, ExecContext *, Trace::InstRecord *) const; + Fault execute(ExecContext *, trace::InstRecord *) const; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const; + Fault completeAcc(PacketPtr, ExecContext *, trace::InstRecord *) const; }; class Ttest64 : public TmeRegNone64 @@ -119,7 +119,7 @@ class Ttest64 : public TmeRegNone64 public: Ttest64(ArmISA::ExtMachInst, RegIndex); - Fault execute(ExecContext *, Trace::InstRecord *) const; + Fault 
execute(ExecContext *, trace::InstRecord *) const; }; class Tcancel64 : public TmeImmOp64 @@ -127,9 +127,9 @@ class Tcancel64 : public TmeImmOp64 public: Tcancel64(ArmISA::ExtMachInst, uint64_t); - Fault execute(ExecContext *, Trace::InstRecord *) const; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const; - Fault completeAcc(PacketPtr, ExecContext *, Trace::InstRecord *) const; + Fault execute(ExecContext *, trace::InstRecord *) const; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const; + Fault completeAcc(PacketPtr, ExecContext *, trace::InstRecord *) const; }; class MicroTfence64 : public MicroTmeBasic64 @@ -137,9 +137,9 @@ class MicroTfence64 : public MicroTmeBasic64 public: MicroTfence64(ArmISA::ExtMachInst); - Fault execute(ExecContext *, Trace::InstRecord *) const; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const; - Fault completeAcc(PacketPtr, ExecContext *, Trace::InstRecord *) const; + Fault execute(ExecContext *, trace::InstRecord *) const; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const; + Fault completeAcc(PacketPtr, ExecContext *, trace::InstRecord *) const; }; class MicroTcommit64 : public MicroTmeBasic64 @@ -147,9 +147,9 @@ class MicroTcommit64 : public MicroTmeBasic64 public: MicroTcommit64(ArmISA::ExtMachInst); - Fault execute(ExecContext *, Trace::InstRecord *) const; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const; - Fault completeAcc(PacketPtr, ExecContext *, Trace::InstRecord *) const; + Fault execute(ExecContext *, trace::InstRecord *) const; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const; + Fault completeAcc(PacketPtr, ExecContext *, trace::InstRecord *) const; }; diff --git a/src/arch/arm/insts/tme64classic.cc b/src/arch/arm/insts/tme64classic.cc index c6c1e5493d..0c944f43c9 100644 --- a/src/arch/arm/insts/tme64classic.cc +++ b/src/arch/arm/insts/tme64classic.cc @@ -47,7 +47,7 @@ namespace ArmISAInst { Fault Tstart64::initiateAcc(ExecContext *xc, - 
Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return std::make_shared(machInst, false, @@ -56,7 +56,7 @@ Tstart64::initiateAcc(ExecContext *xc, Fault Tstart64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return std::make_shared(machInst, false, @@ -65,7 +65,7 @@ Tstart64::completeAcc(PacketPtr pkt, ExecContext *xc, Fault Ttest64::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { return std::make_shared(machInst, false, @@ -74,7 +74,7 @@ Ttest64::execute( Fault Tcancel64::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return std::make_shared(machInst, false, @@ -83,7 +83,7 @@ Tcancel64::initiateAcc(ExecContext *xc, Fault Tcancel64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return std::make_shared(machInst, false, @@ -92,7 +92,7 @@ Tcancel64::completeAcc(PacketPtr pkt, ExecContext *xc, Fault MicroTcommit64::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return std::make_shared(machInst, false, @@ -101,7 +101,7 @@ MicroTcommit64::initiateAcc(ExecContext *xc, Fault MicroTcommit64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return std::make_shared(machInst, false, diff --git a/src/arch/arm/insts/tme64ruby.cc b/src/arch/arm/insts/tme64ruby.cc index 6999924d24..1bd5d9adbc 100644 --- a/src/arch/arm/insts/tme64ruby.cc +++ b/src/arch/arm/insts/tme64ruby.cc @@ -52,7 +52,7 @@ namespace ArmISAInst { Fault Tstart64::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; const uint64_t htm_depth = xc->getHtmTransactionalDepth(); @@ -85,7 +85,7 @@ 
Tstart64::initiateAcc(ExecContext *xc, Fault Tstart64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t Mem; @@ -126,14 +126,14 @@ Tstart64::completeAcc(PacketPtr pkt, ExecContext *xc, uint64_t final_val = Dest64; - if (traceData) { traceData->setData(final_val); } + if (traceData) { traceData->setData(intRegClass, final_val); } } return fault; } Fault -Ttest64::execute(ExecContext *xc, Trace::InstRecord *traceData) const +Ttest64::execute(ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t Dest64 = 0; @@ -156,14 +156,14 @@ Ttest64::execute(ExecContext *xc, Trace::InstRecord *traceData) const if (fault == NoFault) { uint64_t final_val = Dest64; xc->setRegOperand(this, 0, Dest64 & mask(intWidth)); - if (traceData) { traceData->setData(final_val); } + if (traceData) { traceData->setData(intRegClass, final_val); } } return fault; } Fault -Tcancel64::initiateAcc(ExecContext *xc, Trace::InstRecord *traceData) const +Tcancel64::initiateAcc(ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -182,7 +182,7 @@ Tcancel64::initiateAcc(ExecContext *xc, Trace::InstRecord *traceData) const Fault Tcancel64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t Mem; @@ -209,7 +209,7 @@ Tcancel64::completeAcc(PacketPtr pkt, ExecContext *xc, Fault MicroTcommit64::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; const uint64_t htm_depth = xc->getHtmTransactionalDepth(); @@ -238,7 +238,7 @@ MicroTcommit64::initiateAcc(ExecContext *xc, Fault MicroTcommit64::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t Mem; diff --git 
a/src/arch/arm/interrupts.cc b/src/arch/arm/interrupts.cc index b0f18dfe3f..57b1334b77 100644 --- a/src/arch/arm/interrupts.cc +++ b/src/arch/arm/interrupts.cc @@ -50,17 +50,10 @@ ArmISA::Interrupts::takeInt(InterruptTypes int_type) const bool highest_el_is_64 = ArmSystem::highestELIs64(tc); CPSR cpsr = tc->readMiscReg(MISCREG_CPSR); - SCR scr; - HCR hcr; - hcr = tc->readMiscReg(MISCREG_HCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3);; + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); ExceptionLevel el = currEL(tc); bool cpsr_mask_bit, scr_routing_bit, scr_fwaw_bit, hcr_mask_override_bit; - - if (!highest_el_is_64) - scr = tc->readMiscReg(MISCREG_SCR); - else - scr = tc->readMiscReg(MISCREG_SCR_EL3); - bool is_secure = isSecure(tc); switch(int_type) { diff --git a/src/arch/arm/interrupts.hh b/src/arch/arm/interrupts.hh index a510d29fb9..178ee6cea5 100644 --- a/src/arch/arm/interrupts.hh +++ b/src/arch/arm/interrupts.hh @@ -133,7 +133,7 @@ class Interrupts : public BaseInterrupts bool checkInterrupts() const override { - HCR hcr = tc->readMiscReg(MISCREG_HCR); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); if (!(intStatus || hcr.va || hcr.vi || hcr.vf)) return false; @@ -236,7 +236,7 @@ class Interrupts : public BaseInterrupts { assert(checkInterrupts()); - HCR hcr = tc->readMiscReg(MISCREG_HCR); + HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); CPSR cpsr = tc->readMiscReg(MISCREG_CPSR); bool no_vhe = !HaveExt(tc, ArmExtension::FEAT_VHE); diff --git a/src/arch/arm/isa.cc b/src/arch/arm/isa.cc index fda47b73ce..543e0eba7b 100644 --- a/src/arch/arm/isa.cc +++ b/src/arch/arm/isa.cc @@ -53,11 +53,7 @@ #include "cpu/checker/cpu.hh" #include "cpu/reg_class.hh" #include "debug/Arm.hh" -#include "debug/CCRegs.hh" -#include "debug/FloatRegs.hh" -#include "debug/IntRegs.hh" #include "debug/LLSC.hh" -#include "debug/MiscRegs.hh" #include "debug/VecPredRegs.hh" #include "debug/VecRegs.hh" #include "dev/arm/generic_timer.hh" @@ -74,34 +70,24 @@ namespace gem5 namespace ArmISA { -class 
MiscRegClassOps : public RegClassOps +namespace { - public: - std::string - regName(const RegId &id) const override - { - return miscRegName[id.index()]; - } -} miscRegClassOps; -VecElemRegClassOps vecRegElemClassOps(NumVecElemPerVecReg); -TypedRegClassOps vecRegClassOps; -TypedRegClassOps vecPredRegClassOps; +/* Not applicable to ARM */ +RegClass floatRegClass(FloatRegClass, FloatRegClassName, 0, debug::FloatRegs); + +} // anonymous namespace ISA::ISA(const Params &p) : BaseISA(p), system(NULL), - _decoderFlavor(p.decoderFlavor), pmu(p.pmu), impdefAsNop(p.impdef_nop), - afterStartup(false) + _decoderFlavor(p.decoderFlavor), pmu(p.pmu), impdefAsNop(p.impdef_nop) { - _regClasses.emplace_back(int_reg::NumRegs, debug::IntRegs); - _regClasses.emplace_back(0, debug::FloatRegs); - _regClasses.emplace_back(NumVecRegs, vecRegClassOps, debug::VecRegs, - sizeof(VecRegContainer)); - _regClasses.emplace_back(NumVecRegs * NumVecElemPerVecReg, - vecRegElemClassOps, debug::VecRegs); - _regClasses.emplace_back(NumVecPredRegs, vecPredRegClassOps, - debug::VecPredRegs, sizeof(VecPredRegContainer)); - _regClasses.emplace_back(cc_reg::NumRegs, debug::CCRegs); - _regClasses.emplace_back(NUM_MISCREGS, miscRegClassOps, debug::MiscRegs); + _regClasses.push_back(&flatIntRegClass); + _regClasses.push_back(&floatRegClass); + _regClasses.push_back(&vecRegClass); + _regClasses.push_back(&vecElemClass); + _regClasses.push_back(&vecPredRegClass); + _regClasses.push_back(&ccRegClass); + _regClasses.push_back(&miscRegClass); miscRegs[MISCREG_SCTLR_RST] = 0; @@ -140,8 +126,6 @@ ISA::ISA(const Params &p) : BaseISA(p), system(NULL), clear(); } -std::vector ISA::lookUpMiscReg(NUM_MISCREGS); - void ISA::clear() { @@ -528,8 +512,6 @@ ISA::startup() tc->setHtmCheckpointPtr(std::move(cpt)); } } - - afterStartup = true; } void @@ -542,15 +524,10 @@ ISA::setupThreadContext() selfDebug->init(tc); - Gicv3 *gicv3 = dynamic_cast(system->getGIC()); - if (!gicv3) - return; - - if (!gicv3CpuInterface) - 
gicv3CpuInterface.reset(gicv3->getCPUInterface(tc->contextId())); - - gicv3CpuInterface->setISA(this); - gicv3CpuInterface->setThreadContext(tc); + if (auto gicv3_ifc = getGICv3CPUInterface(tc); gicv3_ifc) { + gicv3_ifc->setISA(this); + gicv3_ifc->setThreadContext(tc); + } } void @@ -563,30 +540,23 @@ ISA::takeOverFrom(ThreadContext *new_tc, ThreadContext *old_tc) void ISA::copyRegsFrom(ThreadContext *src) { - for (int i = 0; i < int_reg::NumRegs; i++) { - RegId reg(IntRegClass, i); - tc->setRegFlat(reg, src->getRegFlat(reg)); - } + for (auto &id: flatIntRegClass) + tc->setReg(id, src->getReg(id)); - for (int i = 0; i < cc_reg::NumRegs; i++) { - RegId reg(CCRegClass, i); - tc->setReg(reg, src->getReg(reg)); - } + for (auto &id: ccRegClass) + tc->setReg(id, src->getReg(id)); for (int i = 0; i < NUM_MISCREGS; i++) tc->setMiscRegNoEffect(i, src->readMiscRegNoEffect(i)); ArmISA::VecRegContainer vc; - for (int i = 0; i < NumVecRegs; i++) { - RegId reg(VecRegClass, i); - src->getRegFlat(reg, &vc); - tc->setRegFlat(reg, &vc); + for (auto &id: vecRegClass) { + src->getReg(id, &vc); + tc->setReg(id, &vc); } - for (int i = 0; i < NumVecRegs * NumVecElemPerVecReg; i++) { - RegId reg(VecElemClass, i); - tc->setRegFlat(reg, src->getRegFlat(reg)); - } + for (auto &id: vecElemClass) + tc->setReg(id, src->getReg(id)); // setMiscReg "with effect" will set the misc register mapping correctly. // e.g. 
updateRegMap(val) @@ -750,12 +720,12 @@ ISA::redirectRegVHE(int misc_reg) } RegVal -ISA::readMiscRegNoEffect(int misc_reg) const +ISA::readMiscRegNoEffect(RegIndex idx) const { - assert(misc_reg < NUM_MISCREGS); + assert(idx < NUM_MISCREGS); - const auto ® = lookUpMiscReg[misc_reg]; // bit masks - const auto &map = getMiscIndices(misc_reg); + const auto ® = lookUpMiscReg[idx]; // bit masks + const auto &map = getMiscIndices(idx); int lower = map.first, upper = map.second; // NB!: apply architectural masks according to desired register, // despite possibly getting value from different (mapped) register. @@ -763,24 +733,24 @@ ISA::readMiscRegNoEffect(int misc_reg) const |(miscRegs[upper] << 32)); if (val & reg.res0()) { DPRINTF(MiscRegs, "Reading MiscReg %s with set res0 bits: %#x\n", - miscRegName[misc_reg], val & reg.res0()); + miscRegName[idx], val & reg.res0()); } if ((val & reg.res1()) != reg.res1()) { DPRINTF(MiscRegs, "Reading MiscReg %s with clear res1 bits: %#x\n", - miscRegName[misc_reg], (val & reg.res1()) ^ reg.res1()); + miscRegName[idx], (val & reg.res1()) ^ reg.res1()); } return (val & ~reg.raz()) | reg.rao(); // enforce raz/rao } RegVal -ISA::readMiscReg(int misc_reg) +ISA::readMiscReg(RegIndex idx) { CPSR cpsr = 0; SCR scr = 0; - if (misc_reg == MISCREG_CPSR) { - cpsr = miscRegs[misc_reg]; + if (idx == MISCREG_CPSR) { + cpsr = miscRegs[idx]; auto pc = tc->pcState().as(); cpsr.j = pc.jazelle() ? 1 : 0; cpsr.t = pc.thumb() ? 
1 : 0; @@ -788,18 +758,19 @@ ISA::readMiscReg(int misc_reg) } #ifndef NDEBUG - if (!miscRegInfo[misc_reg][MISCREG_IMPLEMENTED]) { - if (miscRegInfo[misc_reg][MISCREG_WARN_NOT_FAIL]) + auto& miscreg_info = lookUpMiscReg[idx].info; + if (!miscreg_info[MISCREG_IMPLEMENTED]) { + if (miscreg_info[MISCREG_WARN_NOT_FAIL]) warn("Unimplemented system register %s read.\n", - miscRegName[misc_reg]); + miscRegName[idx]); else panic("Unimplemented system register %s read.\n", - miscRegName[misc_reg]); + miscRegName[idx]); } #endif - misc_reg = redirectRegVHE(misc_reg); + idx = redirectRegVHE(idx); - switch (unflattenMiscReg(misc_reg)) { + switch (unflattenMiscReg(idx)) { case MISCREG_HCR: case MISCREG_HCR2: if (!release->has(ArmExtension::VIRTUALIZATION)) @@ -817,7 +788,7 @@ ISA::readMiscReg(int misc_reg) // Security Extensions may limit the readability of CPACR if (release->has(ArmExtension::SECURITY)) { - scr = readMiscRegNoEffect(MISCREG_SCR); + scr = readMiscRegNoEffect(MISCREG_SCR_EL3); cpsr = readMiscRegNoEffect(MISCREG_CPSR); if (scr.ns && (cpsr.mode != MODE_MON) && ELIs32(tc, EL3)) { NSACR nsacr = readMiscRegNoEffect(MISCREG_NSACR); @@ -829,7 +800,7 @@ ISA::readMiscReg(int misc_reg) RegVal val = readMiscRegNoEffect(MISCREG_CPACR); val &= cpacrMask; DPRINTF(MiscRegs, "Reading misc reg %s: %#x\n", - miscRegName[misc_reg], val); + miscRegName[idx], val); return val; } case MISCREG_MPIDR: @@ -838,14 +809,14 @@ ISA::readMiscReg(int misc_reg) case MISCREG_VMPIDR: case MISCREG_VMPIDR_EL2: // top bit defined as RES1 - return readMiscRegNoEffect(misc_reg) | 0x80000000; + return readMiscRegNoEffect(idx) | 0x80000000; case MISCREG_ID_AFR0: // not implemented, so alias MIDR case MISCREG_REVIDR: // not implemented, so alias MIDR case MISCREG_MIDR: cpsr = readMiscRegNoEffect(MISCREG_CPSR); - scr = readMiscRegNoEffect(MISCREG_SCR); + scr = readMiscRegNoEffect(MISCREG_SCR_EL3); if ((cpsr.mode == MODE_HYP) || isSecure(tc)) { - return readMiscRegNoEffect(misc_reg); + return 
readMiscRegNoEffect(idx); } else { return readMiscRegNoEffect(MISCREG_VPIDR); } @@ -903,7 +874,7 @@ ISA::readMiscReg(int misc_reg) case MISCREG_PMINTENSET_EL1 ... MISCREG_PMOVSSET_EL0: case MISCREG_PMEVCNTR0_EL0 ... MISCREG_PMEVTYPER5_EL0: case MISCREG_PMCR ... MISCREG_PMOVSSET: - return pmu->readMiscReg(misc_reg); + return pmu->readMiscReg(idx); case MISCREG_CPSR_Q: panic("shouldn't be reading this register seperately\n"); @@ -999,14 +970,6 @@ ISA::readMiscReg(int misc_reg) case MISCREG_DBGDSCRint: return readMiscRegNoEffect(MISCREG_DBGDSCRint); case MISCREG_ISR: - { - auto ic = dynamic_cast( - tc->getCpuPtr()->getInterruptController(tc->threadId())); - return ic->getISR( - readMiscRegNoEffect(MISCREG_HCR), - readMiscRegNoEffect(MISCREG_CPSR), - readMiscRegNoEffect(MISCREG_SCR)); - } case MISCREG_ISR_EL1: { auto ic = dynamic_cast( @@ -1020,7 +983,7 @@ ISA::readMiscReg(int misc_reg) return 0x04; // DC ZVA clear 64-byte chunks case MISCREG_HCPTR: { - RegVal val = readMiscRegNoEffect(misc_reg); + RegVal val = readMiscRegNoEffect(idx); // The trap bit associated with CP14 is defined as RAZ val &= ~(1 << 14); // If a CP bit in NSACR is 0 then the corresponding bit in @@ -1071,27 +1034,27 @@ ISA::readMiscReg(int misc_reg) // Generic Timer registers case MISCREG_CNTFRQ ... MISCREG_CNTVOFF: case MISCREG_CNTFRQ_EL0 ... MISCREG_CNTVOFF_EL2: - return getGenericTimer().readMiscReg(misc_reg); + return getGenericTimer().readMiscReg(idx); case MISCREG_ICC_AP0R0 ... MISCREG_ICH_LRC15: case MISCREG_ICC_PMR_EL1 ... MISCREG_ICC_IGRPEN1_EL3: case MISCREG_ICH_AP0R0_EL2 ... 
MISCREG_ICH_LR15_EL2: - return getGICv3CPUInterface().readMiscReg(misc_reg); + return getGICv3CPUInterface().readMiscReg(idx); default: break; } - return readMiscRegNoEffect(misc_reg); + return readMiscRegNoEffect(idx); } void -ISA::setMiscRegNoEffect(int misc_reg, RegVal val) +ISA::setMiscRegNoEffect(RegIndex idx, RegVal val) { - assert(misc_reg < NUM_MISCREGS); + assert(idx < NUM_MISCREGS); - const auto ® = lookUpMiscReg[misc_reg]; // bit masks - const auto &map = getMiscIndices(misc_reg); + const auto ® = lookUpMiscReg[idx]; // bit masks + const auto &map = getMiscIndices(idx); int lower = map.first, upper = map.second; auto v = (val & ~reg.wi()) | reg.rao(); @@ -1099,23 +1062,23 @@ ISA::setMiscRegNoEffect(int misc_reg, RegVal val) miscRegs[lower] = bits(v, 31, 0); miscRegs[upper] = bits(v, 63, 32); DPRINTF(MiscRegs, "Writing MiscReg %s (%d %d:%d) : %#x\n", - miscRegName[misc_reg], misc_reg, lower, upper, v); + miscRegName[idx], idx, lower, upper, v); } else { miscRegs[lower] = v; DPRINTF(MiscRegs, "Writing MiscReg %s (%d %d) : %#x\n", - miscRegName[misc_reg], misc_reg, lower, v); + miscRegName[idx], idx, lower, v); } } void -ISA::setMiscReg(int misc_reg, RegVal val) +ISA::setMiscReg(RegIndex idx, RegVal val) { RegVal newVal = val; bool secure_lookup; SCR scr; - if (misc_reg == MISCREG_CPSR) { + if (idx == MISCREG_CPSR) { updateRegMap(val); @@ -1127,7 +1090,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) } DPRINTF(Arm, "Updating CPSR from %#x to %#x f:%d i:%d a:%d mode:%#x\n", - miscRegs[misc_reg], cpsr, cpsr.f, cpsr.i, cpsr.a, cpsr.mode); + miscRegs[idx], cpsr, cpsr.f, cpsr.i, cpsr.a, cpsr.mode); PCState pc = tc->pcState().as(); pc.nextThumb(cpsr.t); pc.nextJazelle(cpsr.j); @@ -1146,7 +1109,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) tc->pcState(pc); } - setMiscRegNoEffect(misc_reg, newVal); + setMiscRegNoEffect(idx, newVal); if (old_mode != cpsr.mode) { getMMUPtr(tc)->invalidateMiscReg(); @@ -1165,18 +1128,19 @@ ISA::setMiscReg(int misc_reg, RegVal val) } } 
else { #ifndef NDEBUG - if (!miscRegInfo[misc_reg][MISCREG_IMPLEMENTED]) { - if (miscRegInfo[misc_reg][MISCREG_WARN_NOT_FAIL]) + auto& miscreg_info = lookUpMiscReg[idx].info; + if (!miscreg_info[MISCREG_IMPLEMENTED]) { + if (miscreg_info[MISCREG_WARN_NOT_FAIL]) warn("Unimplemented system register %s write with %#x.\n", - miscRegName[misc_reg], val); + miscRegName[idx], val); else panic("Unimplemented system register %s write with %#x.\n", - miscRegName[misc_reg], val); + miscRegName[idx], val); } #endif - misc_reg = redirectRegVHE(misc_reg); + idx = redirectRegVHE(idx); - switch (unflattenMiscReg(misc_reg)) { + switch (unflattenMiscReg(idx)) { case MISCREG_CPACR: { @@ -1190,7 +1154,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) // Security Extensions may limit the writability of CPACR if (release->has(ArmExtension::SECURITY)) { - scr = readMiscRegNoEffect(MISCREG_SCR); + scr = readMiscRegNoEffect(MISCREG_SCR_EL3); CPSR cpsr = readMiscRegNoEffect(MISCREG_CPSR); if (scr.ns && (cpsr.mode != MODE_MON) && ELIs32(tc, EL3)) { NSACR nsacr = readMiscRegNoEffect(MISCREG_NSACR); @@ -1204,7 +1168,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) newVal &= cpacrMask; newVal |= old_val & ~cpacrMask; DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n", - miscRegName[misc_reg], newVal); + miscRegName[idx], newVal); } break; case MISCREG_CPACR_EL1: @@ -1218,7 +1182,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) } newVal &= cpacrMask; DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n", - miscRegName[misc_reg], newVal); + miscRegName[idx], newVal); } break; case MISCREG_CPTR_EL2: @@ -1244,7 +1208,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) cptrMask.res1_9_el2 = ones; newVal |= cptrMask; DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n", - miscRegName[misc_reg], newVal); + miscRegName[idx], newVal); } break; case MISCREG_CPTR_EL3: @@ -1259,7 +1223,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) } newVal &= cptrMask; DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n", - miscRegName[misc_reg], newVal); 
+ miscRegName[idx], newVal); } break; case MISCREG_CSSELR: @@ -1322,7 +1286,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) newVal = (newVal & (uint32_t)fpscrMask) | (readMiscRegNoEffect(MISCREG_FPSCR) & ~(uint32_t)fpscrMask); - misc_reg = MISCREG_FPSCR; + idx = MISCREG_FPSCR; } break; case MISCREG_FPCR: @@ -1339,28 +1303,28 @@ ISA::setMiscReg(int misc_reg, RegVal val) newVal = (newVal & (uint32_t)fpscrMask) | (readMiscRegNoEffect(MISCREG_FPSCR) & ~(uint32_t)fpscrMask); - misc_reg = MISCREG_FPSCR; + idx = MISCREG_FPSCR; } break; case MISCREG_CPSR_Q: { assert(!(newVal & ~CpsrMaskQ)); newVal = readMiscRegNoEffect(MISCREG_CPSR) | newVal; - misc_reg = MISCREG_CPSR; + idx = MISCREG_CPSR; } break; case MISCREG_FPSCR_QC: { newVal = readMiscRegNoEffect(MISCREG_FPSCR) | (newVal & FpscrQcMask); - misc_reg = MISCREG_FPSCR; + idx = MISCREG_FPSCR; } break; case MISCREG_FPSCR_EXC: { newVal = readMiscRegNoEffect(MISCREG_FPSCR) | (newVal & FpscrExcMask); - misc_reg = MISCREG_FPSCR; + idx = MISCREG_FPSCR; } break; case MISCREG_FPEXC: @@ -1665,7 +1629,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) case MISCREG_SCTLR: { DPRINTF(MiscRegs, "Writing SCTLR: %#x\n", newVal); - scr = readMiscRegNoEffect(MISCREG_SCR); + scr = readMiscRegNoEffect(MISCREG_SCR_EL3); MiscRegIndex sctlr_idx; if (release->has(ArmExtension::SECURITY) && @@ -1727,7 +1691,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) case MISCREG_PMINTENSET_EL1 ... MISCREG_PMOVSSET_EL0: case MISCREG_PMEVCNTR0_EL0 ... MISCREG_PMEVTYPER5_EL0: case MISCREG_PMCR ... 
MISCREG_PMOVSSET: - pmu->setMiscReg(misc_reg, newVal); + pmu->setMiscReg(idx, newVal); break; @@ -1753,10 +1717,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) break; } case MISCREG_HDFAR: // alias for secure DFAR - misc_reg = MISCREG_DFAR_S; + idx = MISCREG_DFAR_S; break; case MISCREG_HIFAR: // alias for secure IFAR - misc_reg = MISCREG_IFAR_S; + idx = MISCREG_IFAR_S; break; case MISCREG_ATS1CPR: addressTranslation(MMU::S1CTran, BaseMMU::Read, 0, val); @@ -1895,7 +1859,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) CPSR cpsr = miscRegs[MISCREG_CPSR]; cpsr.daif = (uint8_t) ((CPSR) newVal).daif; newVal = cpsr; - misc_reg = MISCREG_CPSR; + idx = MISCREG_CPSR; } break; case MISCREG_SP_EL0: @@ -1912,7 +1876,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) CPSR cpsr = miscRegs[MISCREG_CPSR]; cpsr.sp = (uint8_t) ((CPSR) newVal).sp; newVal = cpsr; - misc_reg = MISCREG_CPSR; + idx = MISCREG_CPSR; } break; case MISCREG_CURRENTEL: @@ -1920,7 +1884,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) CPSR cpsr = miscRegs[MISCREG_CPSR]; cpsr.el = (uint8_t) ((CPSR) newVal).el; newVal = cpsr; - misc_reg = MISCREG_CPSR; + idx = MISCREG_CPSR; } break; case MISCREG_PAN: @@ -1931,7 +1895,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) CPSR cpsr = miscRegs[MISCREG_CPSR]; cpsr.pan = (uint8_t) ((CPSR) newVal).pan; newVal = cpsr; - misc_reg = MISCREG_CPSR; + idx = MISCREG_CPSR; } break; case MISCREG_UAO: @@ -1942,7 +1906,7 @@ ISA::setMiscReg(int misc_reg, RegVal val) CPSR cpsr = miscRegs[MISCREG_CPSR]; cpsr.uao = (uint8_t) ((CPSR) newVal).uao; newVal = cpsr; - misc_reg = MISCREG_CPSR; + idx = MISCREG_CPSR; } break; case MISCREG_AT_S1E1R_Xt: @@ -1987,27 +1951,31 @@ ISA::setMiscReg(int misc_reg, RegVal val) return; case MISCREG_L2CTLR: warn("miscreg L2CTLR (%s) written with %#x. ignored...\n", - miscRegName[misc_reg], uint32_t(val)); + miscRegName[idx], uint32_t(val)); break; // Generic Timer registers case MISCREG_CNTFRQ ... MISCREG_CNTVOFF: case MISCREG_CNTFRQ_EL0 ... 
MISCREG_CNTVOFF_EL2: - getGenericTimer().setMiscReg(misc_reg, newVal); + getGenericTimer().setMiscReg(idx, newVal); break; case MISCREG_ICC_AP0R0 ... MISCREG_ICH_LRC15: case MISCREG_ICC_PMR_EL1 ... MISCREG_ICC_IGRPEN1_EL3: case MISCREG_ICH_AP0R0_EL2 ... MISCREG_ICH_LR15_EL2: - getGICv3CPUInterface().setMiscReg(misc_reg, newVal); + getGICv3CPUInterface().setMiscReg(idx, newVal); return; case MISCREG_ZCR_EL3: case MISCREG_ZCR_EL2: case MISCREG_ZCR_EL1: + // Set the value here as we need to update the regs before + // reading them back in getCurSveVecLenInBits to avoid + // setting stale vector lengths in the decoder. + setMiscRegNoEffect(idx, newVal); tc->getDecoderPtr()->as().setSveLen( (getCurSveVecLenInBits() >> 7) - 1); - break; + return; } - setMiscRegNoEffect(misc_reg, newVal); + setMiscRegNoEffect(idx, newVal); } } @@ -2035,10 +2003,28 @@ ISA::getGenericTimer() BaseISADevice & ISA::getGICv3CPUInterface() { - panic_if(!gicv3CpuInterface, "GICV3 cpu interface is not registered!"); + if (gicv3CpuInterface) + return *gicv3CpuInterface.get(); + + auto gicv3_ifc = getGICv3CPUInterface(tc); + panic_if(!gicv3_ifc, "The system does not have a GICv3 irq controller\n"); + gicv3CpuInterface.reset(gicv3_ifc); + return *gicv3CpuInterface.get(); } +BaseISADevice* +ISA::getGICv3CPUInterface(ThreadContext *tc) +{ + assert(system); + Gicv3 *gicv3 = dynamic_cast(system->getGIC()); + if (gicv3) { + return gicv3->getCPUInterface(tc->contextId()); + } else { + return nullptr; + } +} + bool ISA::inSecureState() const { @@ -2205,7 +2191,7 @@ ISA::addressTranslation(MMU::ArmTranslationType tran_type, if (fault == NoFault) { Addr paddr = req->getPaddr(); TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR); - HCR hcr = readMiscRegNoEffect(MISCREG_HCR); + HCR hcr = readMiscRegNoEffect(MISCREG_HCR_EL2); uint8_t max_paddr_bit = 0; if (release->has(ArmExtension::LPAE) && @@ -2250,18 +2236,6 @@ ISA::addressTranslation(MMU::ArmTranslationType tran_type, return; } 
-ISA::MiscRegLUTEntryInitializer::chain -ISA::MiscRegLUTEntryInitializer::highest(ArmSystem *const sys) const -{ - switch (FullSystem ? sys->highestEL() : EL1) { - case EL0: - case EL1: priv(); break; - case EL2: hyp(); break; - case EL3: mon(); break; - } - return *this; -} - template static inline void lockedSnoopHandler(ThreadContext *tc, XC *xc, PacketPtr pkt, diff --git a/src/arch/arm/isa.hh b/src/arch/arm/isa.hh index 599411fc39..9e1afa714b 100644 --- a/src/arch/arm/isa.hh +++ b/src/arch/arm/isa.hh @@ -46,6 +46,7 @@ #include "arch/arm/pcstate.hh" #include "arch/arm/regs/int.hh" #include "arch/arm/regs/misc.hh" +#include "arch/arm/regs/vec.hh" #include "arch/arm/self_debug.hh" #include "arch/arm/system.hh" #include "arch/arm/types.hh" @@ -103,469 +104,20 @@ namespace ArmISA */ bool impdefAsNop; - bool afterStartup; - SelfDebug * selfDebug; - /** MiscReg metadata **/ - struct MiscRegLUTEntry - { - uint32_t lower; // Lower half mapped to this register - uint32_t upper; // Upper half mapped to this register - uint64_t _reset; // value taken on reset (i.e. 
initialization) - uint64_t _res0; // reserved - uint64_t _res1; // reserved - uint64_t _raz; // read as zero (fixed at 0) - uint64_t _rao; // read as one (fixed at 1) - public: - MiscRegLUTEntry() : - lower(0), upper(0), - _reset(0), _res0(0), _res1(0), _raz(0), _rao(0) {} - uint64_t reset() const { return _reset; } - uint64_t res0() const { return _res0; } - uint64_t res1() const { return _res1; } - uint64_t raz() const { return _raz; } - uint64_t rao() const { return _rao; } - // raz/rao implies writes ignored - uint64_t wi() const { return _raz | _rao; } - }; - - /** Metadata table accessible via the value of the register */ - static std::vector lookUpMiscReg; - - class MiscRegLUTEntryInitializer - { - struct MiscRegLUTEntry &entry; - std::bitset &info; - typedef const MiscRegLUTEntryInitializer& chain; - public: - chain - mapsTo(uint32_t l, uint32_t u = 0) const - { - entry.lower = l; - entry.upper = u; - return *this; - } - chain - res0(uint64_t mask) const - { - entry._res0 = mask; - return *this; - } - chain - res1(uint64_t mask) const - { - entry._res1 = mask; - return *this; - } - chain - raz(uint64_t mask) const - { - entry._raz = mask; - return *this; - } - chain - rao(uint64_t mask) const - { - entry._rao = mask; - return *this; - } - chain - implemented(bool v = true) const - { - info[MISCREG_IMPLEMENTED] = v; - return *this; - } - chain - unimplemented() const - { - return implemented(false); - } - chain - unverifiable(bool v = true) const - { - info[MISCREG_UNVERIFIABLE] = v; - return *this; - } - chain - warnNotFail(bool v = true) const - { - info[MISCREG_WARN_NOT_FAIL] = v; - return *this; - } - chain - mutex(bool v = true) const - { - info[MISCREG_MUTEX] = v; - return *this; - } - chain - banked(bool v = true) const - { - info[MISCREG_BANKED] = v; - return *this; - } - chain - banked64(bool v = true) const - { - info[MISCREG_BANKED64] = v; - return *this; - } - chain - bankedChild(bool v = true) const - { - info[MISCREG_BANKED_CHILD] = v; - return 
*this; - } - chain - userNonSecureRead(bool v = true) const - { - info[MISCREG_USR_NS_RD] = v; - return *this; - } - chain - userNonSecureWrite(bool v = true) const - { - info[MISCREG_USR_NS_WR] = v; - return *this; - } - chain - userSecureRead(bool v = true) const - { - info[MISCREG_USR_S_RD] = v; - return *this; - } - chain - userSecureWrite(bool v = true) const - { - info[MISCREG_USR_S_WR] = v; - return *this; - } - chain - user(bool v = true) const - { - userNonSecureRead(v); - userNonSecureWrite(v); - userSecureRead(v); - userSecureWrite(v); - return *this; - } - chain - privNonSecureRead(bool v = true) const - { - info[MISCREG_PRI_NS_RD] = v; - return *this; - } - chain - privNonSecureWrite(bool v = true) const - { - info[MISCREG_PRI_NS_WR] = v; - return *this; - } - chain - privNonSecure(bool v = true) const - { - privNonSecureRead(v); - privNonSecureWrite(v); - return *this; - } - chain - privSecureRead(bool v = true) const - { - info[MISCREG_PRI_S_RD] = v; - return *this; - } - chain - privSecureWrite(bool v = true) const - { - info[MISCREG_PRI_S_WR] = v; - return *this; - } - chain - privSecure(bool v = true) const - { - privSecureRead(v); - privSecureWrite(v); - return *this; - } - chain - priv(bool v = true) const - { - privSecure(v); - privNonSecure(v); - return *this; - } - chain - privRead(bool v = true) const - { - privSecureRead(v); - privNonSecureRead(v); - return *this; - } - chain - hypE2HSecureRead(bool v = true) const - { - info[MISCREG_HYP_E2H_S_RD] = v; - return *this; - } - chain - hypE2HNonSecureRead(bool v = true) const - { - info[MISCREG_HYP_E2H_NS_RD] = v; - return *this; - } - chain - hypE2HRead(bool v = true) const - { - hypE2HSecureRead(v); - hypE2HNonSecureRead(v); - return *this; - } - chain - hypE2HSecureWrite(bool v = true) const - { - info[MISCREG_HYP_E2H_S_WR] = v; - return *this; - } - chain - hypE2HNonSecureWrite(bool v = true) const - { - info[MISCREG_HYP_E2H_NS_WR] = v; - return *this; - } - chain - hypE2HWrite(bool v = 
true) const - { - hypE2HSecureWrite(v); - hypE2HNonSecureWrite(v); - return *this; - } - chain - hypE2H(bool v = true) const - { - hypE2HRead(v); - hypE2HWrite(v); - return *this; - } - chain - hypSecureRead(bool v = true) const - { - info[MISCREG_HYP_S_RD] = v; - return *this; - } - chain - hypNonSecureRead(bool v = true) const - { - info[MISCREG_HYP_NS_RD] = v; - return *this; - } - chain - hypRead(bool v = true) const - { - hypE2HRead(v); - hypSecureRead(v); - hypNonSecureRead(v); - return *this; - } - chain - hypSecureWrite(bool v = true) const - { - info[MISCREG_HYP_S_WR] = v; - return *this; - } - chain - hypNonSecureWrite(bool v = true) const - { - info[MISCREG_HYP_NS_WR] = v; - return *this; - } - chain - hypWrite(bool v = true) const - { - hypE2HWrite(v); - hypSecureWrite(v); - hypNonSecureWrite(v); - return *this; - } - chain - hypSecure(bool v = true) const - { - hypE2HSecureRead(v); - hypE2HSecureWrite(v); - hypSecureRead(v); - hypSecureWrite(v); - return *this; - } - chain - hyp(bool v = true) const - { - hypRead(v); - hypWrite(v); - return *this; - } - chain - monE2HRead(bool v = true) const - { - info[MISCREG_MON_E2H_RD] = v; - return *this; - } - chain - monE2HWrite(bool v = true) const - { - info[MISCREG_MON_E2H_WR] = v; - return *this; - } - chain - monE2H(bool v = true) const - { - monE2HRead(v); - monE2HWrite(v); - return *this; - } - chain - monSecureRead(bool v = true) const - { - monE2HRead(v); - info[MISCREG_MON_NS0_RD] = v; - return *this; - } - chain - monSecureWrite(bool v = true) const - { - monE2HWrite(v); - info[MISCREG_MON_NS0_WR] = v; - return *this; - } - chain - monNonSecureRead(bool v = true) const - { - monE2HRead(v); - info[MISCREG_MON_NS1_RD] = v; - return *this; - } - chain - monNonSecureWrite(bool v = true) const - { - monE2HWrite(v); - info[MISCREG_MON_NS1_WR] = v; - return *this; - } - chain - mon(bool v = true) const - { - monSecureRead(v); - monSecureWrite(v); - monNonSecureRead(v); - monNonSecureWrite(v); - return *this; 
- } - chain - monSecure(bool v = true) const - { - monSecureRead(v); - monSecureWrite(v); - return *this; - } - chain - monNonSecure(bool v = true) const - { - monNonSecureRead(v); - monNonSecureWrite(v); - return *this; - } - chain - allPrivileges(bool v = true) const - { - userNonSecureRead(v); - userNonSecureWrite(v); - userSecureRead(v); - userSecureWrite(v); - privNonSecureRead(v); - privNonSecureWrite(v); - privSecureRead(v); - privSecureWrite(v); - hypRead(v); - hypWrite(v); - monSecureRead(v); - monSecureWrite(v); - monNonSecureRead(v); - monNonSecureWrite(v); - return *this; - } - chain - nonSecure(bool v = true) const - { - userNonSecureRead(v); - userNonSecureWrite(v); - privNonSecureRead(v); - privNonSecureWrite(v); - hypRead(v); - hypWrite(v); - monNonSecureRead(v); - monNonSecureWrite(v); - return *this; - } - chain - secure(bool v = true) const - { - userSecureRead(v); - userSecureWrite(v); - privSecureRead(v); - privSecureWrite(v); - monSecureRead(v); - monSecureWrite(v); - return *this; - } - chain - reads(bool v) const - { - userNonSecureRead(v); - userSecureRead(v); - privNonSecureRead(v); - privSecureRead(v); - hypRead(v); - monSecureRead(v); - monNonSecureRead(v); - return *this; - } - chain - writes(bool v) const - { - userNonSecureWrite(v); - userSecureWrite(v); - privNonSecureWrite(v); - privSecureWrite(v); - hypWrite(v); - monSecureWrite(v); - monNonSecureWrite(v); - return *this; - } - chain - exceptUserMode() const - { - user(0); - return *this; - } - chain highest(ArmSystem *const sys) const; - MiscRegLUTEntryInitializer(struct MiscRegLUTEntry &e, - std::bitset &i) - : entry(e), - info(i) - { - // force unimplemented registers to be thusly declared - implemented(1); - } - }; - const MiscRegLUTEntryInitializer InitReg(uint32_t reg) { - return MiscRegLUTEntryInitializer(lookUpMiscReg[reg], - miscRegInfo[reg]); + return MiscRegLUTEntryInitializer(lookUpMiscReg[reg]); } void initializeMiscRegMetadata(); + BaseISADevice &getGenericTimer(); + 
BaseISADevice &getGICv3CPUInterface(); + BaseISADevice *getGICv3CPUInterface(ThreadContext *tc); + RegVal miscRegs[NUM_MISCREGS]; const RegId *intRegMap; @@ -607,11 +159,11 @@ namespace ArmISA } } - BaseISADevice &getGenericTimer(); - BaseISADevice &getGICv3CPUInterface(); + public: + const RegId &mapIntRegId(RegIndex idx) const { return intRegMap[idx]; } public: - void clear(); + void clear() override; protected: void clear32(const ArmISAParams &p, const SCTLR &sctlr_rst); @@ -640,102 +192,10 @@ namespace ArmISA const ArmRelease* getRelease() const { return release; } - RegVal readMiscRegNoEffect(int misc_reg) const; - RegVal readMiscReg(int misc_reg); - void setMiscRegNoEffect(int misc_reg, RegVal val); - void setMiscReg(int misc_reg, RegVal val); - - RegId - flattenRegId(const RegId& regId) const - { - switch (regId.classValue()) { - case IntRegClass: - return RegId(IntRegClass, flattenIntIndex(regId.index())); - case FloatRegClass: - return RegId(FloatRegClass, flattenFloatIndex(regId.index())); - case VecRegClass: - return RegId(VecRegClass, flattenVecIndex(regId.index())); - case VecElemClass: - return RegId(VecElemClass, flattenVecElemIndex(regId.index())); - case VecPredRegClass: - return RegId(VecPredRegClass, - flattenVecPredIndex(regId.index())); - case CCRegClass: - return RegId(CCRegClass, flattenCCIndex(regId.index())); - case MiscRegClass: - return RegId(MiscRegClass, flattenMiscIndex(regId.index())); - case InvalidRegClass: - return RegId(); - } - panic("Unrecognized register class %d.", regId.classValue()); - } - - int - flattenIntIndex(int reg) const - { - assert(reg >= 0); - if (reg < int_reg::NumArchRegs) { - return intRegMap[reg]; - } else if (reg < int_reg::NumRegs) { - return reg; - } else if (reg == int_reg::Spx) { - CPSR cpsr = miscRegs[MISCREG_CPSR]; - ExceptionLevel el = opModeToEL( - (OperatingMode) (uint8_t) cpsr.mode); - if (!cpsr.sp && el != EL0) - return int_reg::Sp0; - switch (el) { - case EL3: - return int_reg::Sp3; - case EL2: - 
return int_reg::Sp2; - case EL1: - return int_reg::Sp1; - case EL0: - return int_reg::Sp0; - default: - panic("Invalid exception level"); - return 0; // Never happens. - } - } else { - return flattenIntRegModeIndex(reg); - } - } - - int - flattenFloatIndex(int reg) const - { - assert(reg >= 0); - return reg; - } - - int - flattenVecIndex(int reg) const - { - assert(reg >= 0); - return reg; - } - - int - flattenVecElemIndex(int reg) const - { - assert(reg >= 0); - return reg; - } - - int - flattenVecPredIndex(int reg) const - { - assert(reg >= 0); - return reg; - } - - int - flattenCCIndex(int reg) const - { - assert(reg >= 0); - return reg; - } + RegVal readMiscRegNoEffect(RegIndex idx) const override; + RegVal readMiscReg(RegIndex idx) override; + void setMiscRegNoEffect(RegIndex idx, RegVal val) override; + void setMiscReg(RegIndex, RegVal val) override; int flattenMiscIndex(int reg) const @@ -793,7 +253,7 @@ namespace ArmISA flat_idx = MISCREG_SPSR; break; } - } else if (miscRegInfo[reg][MISCREG_MUTEX]) { + } else if (lookUpMiscReg[reg].info[MISCREG_MUTEX]) { // Mutually exclusive CP15 register switch (reg) { case MISCREG_PRRR_MAIR0: @@ -842,7 +302,7 @@ namespace ArmISA break; } } else { - if (miscRegInfo[reg][MISCREG_BANKED]) { + if (lookUpMiscReg[reg].info[MISCREG_BANKED]) { bool secure_reg = !highestELIs64 && inSecureState(); flat_idx += secure_reg ? 2 : 1; } else { @@ -863,7 +323,7 @@ namespace ArmISA snsBankedIndex64(MiscRegIndex reg, bool ns) const { int reg_as_int = static_cast(reg); - if (miscRegInfo[reg][MISCREG_BANKED64]) { + if (lookUpMiscReg[reg].info[MISCREG_BANKED64]) { reg_as_int += (release->has(ArmExtension::SECURITY) && !ns) ? 
2 : 1; } @@ -886,8 +346,8 @@ namespace ArmISA int lower = lookUpMiscReg[flat_idx].lower; int upper = lookUpMiscReg[flat_idx].upper; // upper == 0, which is CPSR, is not MISCREG_BANKED_CHILD (no-op) - lower += S && miscRegInfo[lower][MISCREG_BANKED_CHILD]; - upper += S && miscRegInfo[upper][MISCREG_BANKED_CHILD]; + lower += S && lookUpMiscReg[lower].info[MISCREG_BANKED_CHILD]; + upper += S && lookUpMiscReg[upper].info[MISCREG_BANKED_CHILD]; return std::make_pair(lower, upper); } @@ -933,17 +393,6 @@ namespace ArmISA enums::DecoderFlavor decoderFlavor() const { return _decoderFlavor; } - /** Returns true if the ISA has a GICv3 cpu interface */ - bool - haveGICv3CpuIfc() const - { - // gicv3CpuInterface is initialized at startup time, hence - // trying to read its value before the startup stage will lead - // to an error - assert(afterStartup); - return gicv3CpuInterface != nullptr; - } - PARAMS(ArmISA); ISA(const Params &p); diff --git a/src/arch/arm/isa/copyright.txt b/src/arch/arm/isa/copyright.txt index 899a8df8b3..75647747fd 100644 --- a/src/arch/arm/isa/copyright.txt +++ b/src/arch/arm/isa/copyright.txt @@ -35,4 +35,3 @@ // ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN // IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH // DAMAGES. 
- diff --git a/src/arch/arm/isa/decoder/aarch64.isa b/src/arch/arm/isa/decoder/aarch64.isa index f2f0964829..34a17f2371 100644 --- a/src/arch/arm/isa/decoder/aarch64.isa +++ b/src/arch/arm/isa/decoder/aarch64.isa @@ -43,4 +43,3 @@ Aarch64::aarch64(); - diff --git a/src/arch/arm/isa/decoder/arm.isa b/src/arch/arm/isa/decoder/arm.isa index 0349b398b4..70802a072d 100644 --- a/src/arch/arm/isa/decoder/arm.isa +++ b/src/arch/arm/isa/decoder/arm.isa @@ -132,4 +132,3 @@ format DataOp { } } } - diff --git a/src/arch/arm/isa/formats/aarch64.isa b/src/arch/arm/isa/formats/aarch64.isa index a55d0dd859..37eb995bfd 100644 --- a/src/arch/arm/isa/formats/aarch64.isa +++ b/src/arch/arm/isa/formats/aarch64.isa @@ -459,14 +459,7 @@ namespace Aarch64 bool read = l; MiscRegIndex miscReg = decodeAArch64SysReg(op0, op1, crn, crm, op2); - if (read) { - if ((miscReg == MISCREG_DC_CIVAC_Xt) || - (miscReg == MISCREG_DC_CVAC_Xt) || - (miscReg == MISCREG_DC_IVAC_Xt) || - (miscReg == MISCREG_DC_ZVA_Xt)) { - return new Unknown64(machInst); - } - } + // Check for invalid registers if (miscReg == MISCREG_UNKNOWN) { auto full_mnemonic = @@ -483,41 +476,42 @@ namespace Aarch64 read ? "mrs" : "msr", op0, op1, crn, crm, op2); - uint32_t iss = msrMrs64IssBuild( - read, op0, op1, crn, crm, op2, rt); - return new MiscRegImplDefined64( read ? 
"mrs" : "msr", - machInst, miscReg, read, iss, full_mnemonic, - miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]); + machInst, MiscRegNum64(op0, op1, crn, crm, op2), + rt, read, full_mnemonic); - } else if (miscRegInfo[miscReg][MISCREG_IMPLEMENTED]) { - if (miscReg == MISCREG_NZCV) { - if (read) - return new MrsNZCV64(machInst, rt, miscReg); - else - return new MsrNZCV64(machInst, miscReg, rt); - } - uint32_t iss = msrMrs64IssBuild(read, op0, op1, crn, - crm, op2, rt); + } else { if (read) { - StaticInstPtr si = new Mrs64(machInst, rt, miscReg, - iss); - if (miscRegInfo[miscReg][MISCREG_UNVERIFIABLE]) - si->setFlag(StaticInst::IsUnverifiable); - return si; + switch (miscReg) { + case MISCREG_NZCV: + return new MrsNZCV64(machInst, rt, miscReg); + case MISCREG_DC_CIVAC_Xt: + case MISCREG_DC_CVAC_Xt: + case MISCREG_DC_IVAC_Xt: + case MISCREG_DC_ZVA_Xt: + return new Unknown64(machInst); + default: { + StaticInstPtr si = new Mrs64(machInst, rt, miscReg); + if (lookUpMiscReg[miscReg].info[MISCREG_UNVERIFIABLE]) + si->setFlag(StaticInst::IsUnverifiable); + return si; + } + } } else { switch (miscReg) { + case MISCREG_NZCV: + return new MsrNZCV64(machInst, miscReg, rt); case MISCREG_DC_ZVA_Xt: - return new Dczva(machInst, rt, miscReg, iss); + return new Dczva(machInst, rt, miscReg); case MISCREG_DC_CVAU_Xt: - return new Dccvau(machInst, rt, miscReg, iss); + return new Dccvau(machInst, rt, miscReg); case MISCREG_DC_CVAC_Xt: - return new Dccvac(machInst, rt, miscReg, iss); + return new Dccvac(machInst, rt, miscReg); case MISCREG_DC_CIVAC_Xt: - return new Dccivac(machInst, rt, miscReg, iss); + return new Dccivac(machInst, rt, miscReg); case MISCREG_DC_IVAC_Xt: - return new Dcivac(machInst, rt, miscReg, iss); + return new Dcivac(machInst, rt, miscReg); // 64-bit TLBIs split into "Local" // and "Shareable" case MISCREG_TLBI_ALLE3: @@ -537,7 +531,7 @@ namespace Aarch64 case MISCREG_TLBI_IPAS2E1_Xt: case MISCREG_TLBI_IPAS2LE1_Xt: return new Tlbi64LocalHub( - machInst, miscReg, rt, 
iss); + machInst, miscReg, rt); case MISCREG_TLBI_ALLE3IS: case MISCREG_TLBI_ALLE2IS: case MISCREG_TLBI_ALLE1IS: @@ -555,22 +549,11 @@ namespace Aarch64 case MISCREG_TLBI_IPAS2E1IS_Xt: case MISCREG_TLBI_IPAS2LE1IS_Xt: return new Tlbi64ShareableHub( - machInst, miscReg, rt, iss, dec.dvmEnabled); + machInst, miscReg, rt, dec.dvmEnabled); default: - return new Msr64(machInst, miscReg, rt, iss); + return new Msr64(machInst, miscReg, rt); } } - } else if (miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]) { - std::string full_mnem = csprintf("%s %s", - read ? "mrs" : "msr", miscRegName[miscReg]); - return new WarnUnimplemented(read ? "mrs" : "msr", - machInst, full_mnem); - } else { - return new FailUnimplemented(read ? "mrs" : "msr", - machInst, - csprintf("%s %s", - read ? "mrs" : "msr", - miscRegName[miscReg])); } } break; diff --git a/src/arch/arm/isa/formats/breakpoint.isa b/src/arch/arm/isa/formats/breakpoint.isa index d94c943bce..d8e43f3650 100644 --- a/src/arch/arm/isa/formats/breakpoint.isa +++ b/src/arch/arm/isa/formats/breakpoint.isa @@ -56,4 +56,3 @@ def format ArmBkptHlt() {{ } ''' }}; - diff --git a/src/arch/arm/isa/formats/mem.isa b/src/arch/arm/isa/formats/mem.isa index aecd378613..235bcfae25 100644 --- a/src/arch/arm/isa/formats/mem.isa +++ b/src/arch/arm/isa/formats/mem.isa @@ -1154,4 +1154,3 @@ def format Thumb16MemLit() {{ } ''' % loadImmClassName(False, True, False) }}; - diff --git a/src/arch/arm/isa/formats/misc.isa b/src/arch/arm/isa/formats/misc.isa index ad3c6e9aed..4f9646174c 100644 --- a/src/arch/arm/isa/formats/misc.isa +++ b/src/arch/arm/isa/formats/misc.isa @@ -202,34 +202,34 @@ let {{ const uint32_t crn = bits(machInst, 19, 16); const uint32_t opc2 = bits(machInst, 7, 5); const uint32_t crm = bits(machInst, 3, 0); - const MiscRegIndex miscReg = decodeCP15Reg(crn, opc1, crm, opc2); + const MiscRegIndex misc_reg = decodeCP15Reg(crn, opc1, crm, opc2); const RegIndex rt = (RegIndex)(uint32_t)bits(machInst, 15, 12); - const bool isRead = 
bits(machInst, 20); - uint32_t iss = mcrMrcIssBuild(isRead, crm, rt, crn, opc1, opc2); + const bool is_read = bits(machInst, 20); + uint32_t iss = mcrMrcIssBuild(is_read, crm, rt, crn, opc1, opc2); - switch (miscReg) { + switch (misc_reg) { case MISCREG_NOP: - return new McrMrcMiscInst(isRead ? "mrc nop" : "mcr nop", + return new McrMrcMiscInst(is_read ? "mrc nop" : "mcr nop", machInst, iss, MISCREG_NOP); case MISCREG_UNKNOWN: - return new FailUnimplemented(isRead ? "mrc unkown" : "mcr unkown", + return new FailUnimplemented(is_read ? "mrc unkown" : "mcr unkown", machInst, csprintf("miscreg crn:%d opc1:%d crm:%d opc2:%d %s unknown", - crn, opc1, crm, opc2, isRead ? "read" : "write")); + crn, opc1, crm, opc2, is_read ? "read" : "write")); case MISCREG_IMPDEF_UNIMPL: - if (miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]) { + if (lookUpMiscReg[misc_reg].info[MISCREG_WARN_NOT_FAIL]) { auto mnemonic = csprintf("miscreg crn:%d opc1:%d crm:%d opc2:%d %s", - crn, opc1, crm, opc2, isRead ? "read" : "write"); + crn, opc1, crm, opc2, is_read ? "read" : "write"); return new WarnUnimplemented( - isRead ? "mrc implementation defined" : - "mcr implementation defined", + is_read ? "mrc implementation defined" : + "mcr implementation defined", machInst, mnemonic + " treated as NOP"); } else { return new McrMrcImplDefined( - isRead ? "mrc implementation defined" : + is_read ? 
"mrc implementation defined" : "mcr implementation defined", machInst, iss, MISCREG_IMPDEF_UNIMPL); } @@ -240,13 +240,13 @@ let {{ case MISCREG_CP15DMB: return new Dmb(machInst, iss); case MISCREG_DCIMVAC: - return new McrDcimvac(machInst, miscReg, rt, iss); + return new McrDcimvac(machInst, misc_reg, rt, iss); case MISCREG_DCCMVAC: - return new McrDccmvac(machInst, miscReg, rt, iss); + return new McrDccmvac(machInst, misc_reg, rt, iss); case MISCREG_DCCMVAU: - return new McrDccmvau(machInst, miscReg, rt, iss); + return new McrDccmvau(machInst, misc_reg, rt, iss); case MISCREG_DCCIMVAC: - return new McrDccimvac(machInst, miscReg, rt, iss); + return new McrDccimvac(machInst, misc_reg, rt, iss); case MISCREG_TLBIALL: case MISCREG_TLBIALLIS: case MISCREG_ITLBIALL: @@ -277,11 +277,12 @@ let {{ case MISCREG_TLBIALLNSNHIS: case MISCREG_TLBIALLH: case MISCREG_TLBIALLHIS: - return new Tlbi(machInst, miscReg, rt, iss); + return new Tlbi(machInst, misc_reg, rt, iss); default: - if (miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]) { + auto& miscreg_info = lookUpMiscReg[misc_reg].info; + if (miscreg_info[MISCREG_WARN_NOT_FAIL]) { std::string full_mnem = csprintf("%s %s", - isRead ? "mrc" : "mcr", miscRegName[miscReg]); + is_read ? "mrc" : "mcr", miscRegName[misc_reg]); warn("\\tinstruction '%s' unimplemented\\n", full_mnem); // Remove the warn flag and set the implemented flag. This @@ -290,18 +291,18 @@ let {{ // creating the instruction to access an register that isn't // implemented sounds a bit silly, but its required to get // the correct behaviour for hyp traps and undef exceptions. 
- miscRegInfo[miscReg][MISCREG_IMPLEMENTED] = true; - miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL] = false; + miscreg_info[MISCREG_IMPLEMENTED] = true; + miscreg_info[MISCREG_WARN_NOT_FAIL] = false; } - if (miscRegInfo[miscReg][MISCREG_IMPLEMENTED]) { - if (isRead) - return new Mrc15(machInst, rt, miscReg, iss); - return new Mcr15(machInst, miscReg, rt, iss); + if (miscreg_info[MISCREG_IMPLEMENTED]) { + if (is_read) + return new Mrc15(machInst, rt, misc_reg, iss); + return new Mcr15(machInst, misc_reg, rt, iss); } else { - return new FailUnimplemented(isRead ? "mrc" : "mcr", machInst, - csprintf("%s %s", isRead ? "mrc" : "mcr", - miscRegName[miscReg])); + return new FailUnimplemented(is_read ? "mrc" : "mcr", machInst, + csprintf("%s %s", is_read ? "mrc" : "mcr", + miscRegName[misc_reg])); } } } @@ -325,21 +326,22 @@ let {{ { const uint32_t crm = bits(machInst, 3, 0); const uint32_t opc1 = bits(machInst, 7, 4); - const MiscRegIndex miscReg = decodeCP15Reg64(crm, opc1); + const MiscRegIndex misc_reg = decodeCP15Reg64(crm, opc1); const RegIndex rt = (RegIndex) (uint32_t) bits(machInst, 15, 12); const RegIndex rt2 = (RegIndex) (uint32_t) bits(machInst, 19, 16); - const bool isRead = bits(machInst, 20); + const bool is_read = bits(machInst, 20); - switch (miscReg) { + switch (misc_reg) { case MISCREG_UNKNOWN: - return new FailUnimplemented(isRead ? "mrc" : "mcr", machInst, + return new FailUnimplemented(is_read ? "mrc" : "mcr", machInst, csprintf("miscreg crm:%d opc1:%d 64-bit %s unknown", - crm, opc1, isRead ? "read" : "write")); + crm, opc1, is_read ? "read" : "write")); default: - if (miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]) { + auto& miscreg_info = lookUpMiscReg[misc_reg].info; + if (miscreg_info[MISCREG_WARN_NOT_FAIL]) { std::string full_mnem = csprintf("%s %s", - isRead ? "mrrc" : "mcrr", miscRegName[miscReg]); + is_read ? 
"mrrc" : "mcrr", miscRegName[misc_reg]); warn("\\tinstruction '%s' unimplemented\\n", full_mnem); // Remove the warn flag and set the implemented flag. This @@ -348,24 +350,24 @@ let {{ // creating the instruction to access an register that isn't // implemented sounds a bit silly, but its required to get // the correct behaviour for hyp traps and undef exceptions. - miscRegInfo[miscReg][MISCREG_IMPLEMENTED] = true; - miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL] = false; + miscreg_info[MISCREG_IMPLEMENTED] = true; + miscreg_info[MISCREG_WARN_NOT_FAIL] = false; } - if (miscRegInfo[miscReg][MISCREG_IMPLEMENTED]) { - uint32_t iss = mcrrMrrcIssBuild(isRead, crm, rt, rt2, opc1); + if (miscreg_info[MISCREG_IMPLEMENTED]) { + uint32_t iss = mcrrMrrcIssBuild(is_read, crm, rt, rt2, opc1); - if (isRead) { - StaticInstPtr si = new Mrrc15(machInst, miscReg, rt2, rt, iss); - if (miscRegInfo[miscReg][MISCREG_UNVERIFIABLE]) + if (is_read) { + StaticInstPtr si = new Mrrc15(machInst, misc_reg, rt2, rt, iss); + if (miscreg_info[MISCREG_UNVERIFIABLE]) si->setFlag(StaticInst::IsUnverifiable); return si; } - return new Mcrr15(machInst, rt2, rt, miscReg, iss); + return new Mcrr15(machInst, rt2, rt, misc_reg, iss); } else { - return new FailUnimplemented(isRead ? "mrrc" : "mcrr", machInst, + return new FailUnimplemented(is_read ? "mrrc" : "mcrr", machInst, csprintf("%s %s", - isRead ? "mrrc" : "mcrr", miscRegName[miscReg])); + is_read ? 
"mrrc" : "mcrr", miscRegName[misc_reg])); } } } diff --git a/src/arch/arm/isa/formats/pred.isa b/src/arch/arm/isa/formats/pred.isa index 689a71d340..e971ccf603 100644 --- a/src/arch/arm/isa/formats/pred.isa +++ b/src/arch/arm/isa/formats/pred.isa @@ -202,4 +202,3 @@ def format PredImmOp(code, *opt_flags) {{ decode_block = BasicDecode.subst(iop) exec_output = PredOpExecute.subst(iop) }}; - diff --git a/src/arch/arm/isa/includes.isa b/src/arch/arm/isa/includes.isa index c01eb7292b..386af4e05d 100644 --- a/src/arch/arm/isa/includes.isa +++ b/src/arch/arm/isa/includes.isa @@ -70,6 +70,11 @@ output header {{ #include "mem/packet.hh" #include "sim/faults.hh" +namespace gem5::ArmISA +{ +class Decoder; +} // namespace gem5::ArmISA + namespace gem5::ArmISAInst { using namespace ArmISA; diff --git a/src/arch/arm/isa/insts/branch.isa b/src/arch/arm/isa/insts/branch.isa index c14e1f8d5f..23ec1e1e13 100644 --- a/src/arch/arm/isa/insts/branch.isa +++ b/src/arch/arm/isa/insts/branch.isa @@ -153,7 +153,8 @@ let {{ if (ArmSystem::haveEL(xc->tcBase(), EL2) && hstr.tjdbx && !isSecure(xc->tcBase()) && (cpsr.mode != MODE_HYP)) { - fault = std::make_shared(machInst, op1, EC_TRAPPED_BXJ); + fault = std::make_shared( + machInst, op1, ExceptionClass::TRAPPED_BXJ); } IWNPC = Op1; ''' diff --git a/src/arch/arm/isa/insts/crypto64.isa b/src/arch/arm/isa/insts/crypto64.isa index 35ea4fef6e..1ae580fa97 100644 --- a/src/arch/arm/isa/insts/crypto64.isa +++ b/src/arch/arm/isa/insts/crypto64.isa @@ -167,4 +167,3 @@ let {{ cryptoRegRegRegInst("sha256su1", "SHA256SU164", "SimdShaSigma3Op", sha2_enabled, sha256_su1Code) }}; - diff --git a/src/arch/arm/isa/insts/data.isa b/src/arch/arm/isa/insts/data.isa index 9a14d97ce7..31fc172883 100644 --- a/src/arch/arm/isa/insts/data.isa +++ b/src/arch/arm/isa/insts/data.isa @@ -342,8 +342,9 @@ let {{ ''', flagType="none", buildCc=False) buildRegDataInst("qdadd", ''' int32_t midRes; - resTemp = saturateOp<32>(midRes, Op2_sw, Op2_sw) | - saturateOp<32>(midRes, 
Op1_sw, midRes); + const bool res0 = saturateOp<32>(midRes, Op2_sw, Op2_sw); + const bool res1 = saturateOp<32>(midRes, Op1_sw, midRes); + resTemp = (res0 || res1) ? 1 : 0; Dest = midRes; ''', flagType="saturate", buildNonCc=False) buildRegDataInst("qsub", ''' @@ -377,8 +378,9 @@ let {{ ''', flagType="none", buildCc=False) buildRegDataInst("qdsub", ''' int32_t midRes; - resTemp = saturateOp<32>(midRes, Op2_sw, Op2_sw) | - saturateOp<32>(midRes, Op1_sw, midRes, true); + const bool res0 = saturateOp<32>(midRes, Op2_sw, Op2_sw); + const bool res1 = saturateOp<32>(midRes, Op1_sw, midRes, true); + resTemp = (res0 || res1) ? 1 : 0; Dest = midRes; ''', flagType="saturate", buildNonCc=False) buildRegDataInst("qasx", ''' diff --git a/src/arch/arm/isa/insts/data64.isa b/src/arch/arm/isa/insts/data64.isa index 922e92358e..a617dc3ebb 100644 --- a/src/arch/arm/isa/insts/data64.isa +++ b/src/arch/arm/isa/insts/data64.isa @@ -1,6 +1,6 @@ // -*- mode:c++ -*- -// Copyright (c) 2011-2013, 2016-2021 Arm Limited +// Copyright (c) 2011-2013, 2016-2022 Arm Limited // All rights reserved // // The license below extends only to copyright in the software and shall @@ -311,34 +311,19 @@ let {{ ''') msrMrs64EnabledCheckCode = ''' - // Check for read/write access right - if (!can%sAArch64SysReg(flat_idx, Hcr64, Scr64, cpsr, xc->tcBase())) { - return std::make_shared(machInst, false, - mnemonic); - } + auto pre_flat = (MiscRegIndex)snsBankedIndex64(%s, xc->tcBase()); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto flat_idx = (MiscRegIndex)isa->flattenMiscIndex(pre_flat); - fault = this->trap(xc->tcBase(), flat_idx, el, imm); - if (fault != NoFault) return fault; + // Check for read/write access right + if (fault = checkFaultAccessAArch64SysReg(flat_idx, Cpsr, + xc->tcBase(), *this); fault != NoFault) { + return fault; + } ''' - msr_check_code = ''' - auto pre_flat = (MiscRegIndex)snsBankedIndex64(dest, xc->tcBase()); - MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()-> - 
flattenRegId(RegId(MiscRegClass, pre_flat)).index(); - CPSR cpsr = Cpsr; - ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el; - %s - ''' % (msrMrs64EnabledCheckCode % ('Write'),) - - mrs_check_code = ''' - auto pre_flat = (MiscRegIndex)snsBankedIndex64(op1, xc->tcBase()); - MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()-> - flattenRegId(RegId(MiscRegClass, pre_flat)).index(); - CPSR cpsr = Cpsr; - ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el; - %s - ''' % (msrMrs64EnabledCheckCode % ('Read'),) - + msr_check_code = msrMrs64EnabledCheckCode % 'dest' + mrs_check_code = msrMrs64EnabledCheckCode % 'op1' mrsCode = mrs_check_code + ''' XDest = MiscOp1_ud; @@ -517,6 +502,8 @@ let {{ faultAddr = EA; HCR hcr = Hcr64; SCR scr = Scr64; + CPSR cpsr = Cpsr; + ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el; if (el == EL1 && ArmSystem::haveEL(xc->tcBase(), EL2) && hcr.vm && (scr.ns || !ArmSystem::haveEL(xc->tcBase(), EL3))) { memAccessFlags = memAccessFlags | Request::CLEAN; @@ -544,21 +531,12 @@ let {{ msrImmPermission = ''' auto pre_flat = (MiscRegIndex)snsBankedIndex64(dest, xc->tcBase()); - MiscRegIndex misc_index = (MiscRegIndex) xc->tcBase()-> - flattenRegId(RegId(MiscRegClass, pre_flat)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto misc_index = (MiscRegIndex)isa->flattenMiscIndex(pre_flat); - if (!miscRegInfo[misc_index][MISCREG_IMPLEMENTED]) { - return std::make_shared( - machInst, false, - mnemonic); - } - - if (!canWriteAArch64SysReg(misc_index, Hcr64, - Scr64, Cpsr, xc->tcBase())) { - - return std::make_shared( - machInst, 0, EC_TRAPPED_MSR_MRS_64, - mnemonic); + if (fault = checkFaultAccessAArch64SysReg(misc_index, + Cpsr, xc->tcBase(), *this); fault != NoFault) { + return fault; } ''' diff --git a/src/arch/arm/isa/insts/fp.isa b/src/arch/arm/isa/insts/fp.isa index 00f2b8095d..1d54324f1b 100644 --- a/src/arch/arm/isa/insts/fp.isa +++ b/src/arch/arm/isa/insts/fp.isa @@ -213,7 +213,8 @@ let {{ if 
(!isSecure(xc->tcBase()) && (cpsr.mode != MODE_HYP)) { HCR hcr = Hcr; bool hypTrap = false; - switch(xc->tcBase()->flattenRegId(RegId(MiscRegClass, op1)).index()) { + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + switch (isa->flattenMiscIndex(op1)) { case MISCREG_FPSID: hypTrap = hcr.tid0; break; @@ -224,7 +225,7 @@ let {{ } if (hypTrap) { return std::make_shared(machInst, imm, - EC_TRAPPED_CP10_MRC_VMRS); + ExceptionClass::TRAPPED_CP10_MRC_VMRS); } } Dest = MiscOp1; diff --git a/src/arch/arm/isa/insts/mem.isa b/src/arch/arm/isa/insts/mem.isa index a7add8a6d6..5c9273851e 100644 --- a/src/arch/arm/isa/insts/mem.isa +++ b/src/arch/arm/isa/insts/mem.isa @@ -229,4 +229,3 @@ let {{ raise Exception("Illegal combination of post and writeback") return base }}; - diff --git a/src/arch/arm/isa/insts/misc.isa b/src/arch/arm/isa/insts/misc.isa index 608340747b..bfcb69340d 100644 --- a/src/arch/arm/isa/insts/misc.isa +++ b/src/arch/arm/isa/insts/misc.isa @@ -96,8 +96,9 @@ let {{ if ((cpsr.mode != MODE_USER) && FullSystem) { if (EL2Enabled(xc->tcBase()) && (cpsr.mode != MODE_HYP) && hcr.tsc) { - fault = std::make_shared(machInst, 0, - EC_SMC_TO_HYP); + fault = std::make_shared( + machInst, 0, + ExceptionClass::SMC_TO_HYP); } else { if (scr.scd) { fault = disabledFault(); @@ -889,8 +890,8 @@ let {{ exec_output += PredOpExecute.subst(bfiIop) mrc14code = ''' - MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenRegId( - RegId(MiscRegClass, op1)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto miscReg = (MiscRegIndex)isa->flattenMiscIndex(op1); auto [can_read, undefined] = canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase()); if (!can_read || undefined) { @@ -898,8 +899,9 @@ let {{ mnemonic); } if (mcrMrc14TrapToHyp((MiscRegIndex) op1, xc->tcBase(), imm)) { - return std::make_shared(machInst, imm, - EC_TRAPPED_CP14_MCR_MRC); + return std::make_shared( + machInst, imm, + ExceptionClass::TRAPPED_CP14_MCR_MRC); } Dest = MiscOp1; ''' @@ -913,8 +915,8 
@@ let {{ mcr14code = ''' - MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenRegId( - RegId(MiscRegClass, dest)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto miscReg = (MiscRegIndex)isa->flattenMiscIndex(dest); auto [can_write, undefined] = canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase()); if (undefined || !can_write) { @@ -922,8 +924,9 @@ let {{ mnemonic); } if (mcrMrc14TrapToHyp(miscReg, xc->tcBase(), imm)) { - return std::make_shared(machInst, imm, - EC_TRAPPED_CP14_MCR_MRC); + return std::make_shared( + machInst, imm, + ExceptionClass::TRAPPED_CP14_MCR_MRC); } MiscDest = Op1; ''' @@ -937,9 +940,8 @@ let {{ mrc15code = ''' int preFlatOp1 = snsBankedIndex(op1, xc->tcBase()); - MiscRegIndex miscReg = (MiscRegIndex) - xc->tcBase()->flattenRegId(RegId(MiscRegClass, - preFlatOp1)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto miscReg = (MiscRegIndex)isa->flattenMiscIndex(preFlatOp1); Fault fault = mcrMrc15Trap(miscReg, machInst, xc->tcBase(), imm); @@ -969,9 +971,8 @@ let {{ mcr15CheckCode = ''' int preFlatDest = snsBankedIndex(dest, xc->tcBase()); - MiscRegIndex miscReg = (MiscRegIndex) - xc->tcBase()->flattenRegId(RegId(MiscRegClass, - preFlatDest)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto miscReg = (MiscRegIndex)isa->flattenMiscIndex(preFlatDest); Fault fault = mcrMrc15Trap(miscReg, machInst, xc->tcBase(), imm); @@ -1015,9 +1016,8 @@ let {{ mrrc15code = ''' int preFlatOp1 = snsBankedIndex(op1, xc->tcBase()); - MiscRegIndex miscReg = (MiscRegIndex) - xc->tcBase()->flattenRegId(RegId(MiscRegClass, - preFlatOp1)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto miscReg = (MiscRegIndex)isa->flattenMiscIndex(preFlatOp1); Fault fault = mcrrMrrc15Trap(miscReg, machInst, xc->tcBase(), imm); @@ -1047,9 +1047,8 @@ let {{ mcrr15code = ''' int preFlatDest = snsBankedIndex(dest, xc->tcBase()); - MiscRegIndex miscReg = (MiscRegIndex) - 
xc->tcBase()->flattenRegId(RegId(MiscRegClass, - preFlatDest)).index(); + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + auto miscReg = (MiscRegIndex)isa->flattenMiscIndex(preFlatDest); Fault fault = mcrrMrrc15Trap(miscReg, machInst, xc->tcBase(), imm); @@ -1201,7 +1200,7 @@ let {{ if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15ISB, xc->tcBase(), imm)) { return std::make_shared(machInst, imm, - EC_TRAPPED_CP15_MCR_MRC); + ExceptionClass::TRAPPED_CP15_MCR_MRC); } ''' isbIop = ArmInstObjParams("isb", "Isb", "ImmOp", @@ -1217,7 +1216,7 @@ let {{ if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15DSB, xc->tcBase(), imm)) { return std::make_shared(machInst, imm, - EC_TRAPPED_CP15_MCR_MRC); + ExceptionClass::TRAPPED_CP15_MCR_MRC); } ''' dsbIop = ArmInstObjParams("dsb", "Dsb", "ImmOp", @@ -1234,7 +1233,7 @@ let {{ if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15DMB, xc->tcBase(), imm)) { return std::make_shared(machInst, imm, - EC_TRAPPED_CP15_MCR_MRC); + ExceptionClass::TRAPPED_CP15_MCR_MRC); } ''' dmbIop = ArmInstObjParams("dmb", "Dmb", "ImmOp", diff --git a/src/arch/arm/isa/insts/pauth.isa b/src/arch/arm/isa/insts/pauth.isa index ea16ab999d..830991deae 100644 --- a/src/arch/arm/isa/insts/pauth.isa +++ b/src/arch/arm/isa/insts/pauth.isa @@ -90,7 +90,7 @@ let {{ code = pacEnabledCode(hint) + """ uint64_t res; - fault = stripPAC(xc->tcBase(), XDest, data, &res); + stripPAC(xc->tcBase(), XDest, data, &res); XDest = res; """ regoptype = 'RegOp' diff --git a/src/arch/arm/isa/operands.isa b/src/arch/arm/isa/operands.isa index 9c8deaf009..2addd10def 100644 --- a/src/arch/arm/isa/operands.isa +++ b/src/arch/arm/isa/operands.isa @@ -103,7 +103,7 @@ let {{ @overrideInOperand def regId(self): return f'gem5::ArmISA::couldBeZero({self.reg_spec}) ? 
RegId() : ' \ - f'RegId({self.reg_class}, {self.reg_spec})' + f'{self.reg_class}[{self.reg_spec}]' def __init__(self, idx, ctype='uw', id=srtNormal): super().__init__(ctype, idx, 'IsInteger', id) @@ -123,7 +123,7 @@ let {{ else xc->setRegOperand(this, {self.dest_reg_idx}, {self.base_name}); if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' class PIntReg(IntReg): @@ -148,7 +148,7 @@ let {{ else xc->setRegOperand(this, {self.dest_reg_idx}, {self.base_name}); if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' class IntRegAIWPC(IntReg): @@ -165,14 +165,14 @@ let {{ xc->setRegOperand(this, {self.dest_reg_idx}, {self.base_name}); {"}"} if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' class IntReg64(IntRegOp): @overrideInOperand def regId(self): return f'gem5::ArmISA::couldBeZero({self.reg_spec}) ? RegId() : ' \ - f'RegId({self.reg_class}, {self.reg_spec})' + f'{self.reg_class}[{self.reg_spec}]' @overrideInOperand def makeRead(self): '''aarch64 read''' @@ -186,7 +186,7 @@ let {{ xc->setRegOperand(this, {self.dest_reg_idx}, {self.base_name} & mask(intWidth)); if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' def __init__(self, idx, id=srtNormal): super().__init__('ud', idx, 'IsInteger', id) @@ -205,7 +205,7 @@ let {{ xc->setRegOperand(this, {self.dest_reg_idx}, {self.base_name} & mask(aarch64 ? 
64 : 32)); if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' class IntRegW64(IntReg64): @@ -222,7 +222,7 @@ let {{ xc->setRegOperand(this, {self.dest_reg_idx}, {self.base_name} & mask(32)); if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' class CCReg(CCRegOp): @@ -248,7 +248,7 @@ let {{ xc->setMiscReg(snsBankedIndex(dest, xc->tcBase()), {self.base_name}); if (traceData) - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); ''' def __init__(self, idx, id=srtNormal, ctype='uw'): super().__init__(idx, id, ctype, (None, None, 'IsControl')) diff --git a/src/arch/arm/isa/templates/basic.isa b/src/arch/arm/isa/templates/basic.isa index d0bc82da23..3620c0084c 100644 --- a/src/arch/arm/isa/templates/basic.isa +++ b/src/arch/arm/isa/templates/basic.isa @@ -51,7 +51,7 @@ def template BasicDeclare {{ public: /// Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -84,7 +84,7 @@ def template BasicConstructor64 {{ def template BasicExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; diff --git a/src/arch/arm/isa/templates/branch.isa b/src/arch/arm/isa/templates/branch.isa index b886a97402..7d0efbafce 100644 --- a/src/arch/arm/isa/templates/branch.isa +++ b/src/arch/arm/isa/templates/branch.isa @@ -45,7 +45,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, int32_t _imm, ConditionCode _condCode); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::unique_ptr branchTarget( const PCStateBase &branch_pc) const override; @@ -82,7 +82,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, ConditionCode _condCode); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -115,10 +115,10 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, RegIndex _op2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -149,7 +149,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst 
machInst, int32_t imm, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::unique_ptr branchTarget( const PCStateBase &branch_pc) const override; @@ -186,5 +186,3 @@ def template BranchTarget {{ return std::unique_ptr{pc_ptr}; } }}; - - diff --git a/src/arch/arm/isa/templates/branch64.isa b/src/arch/arm/isa/templates/branch64.isa index b3914e0e42..99edc8ee36 100644 --- a/src/arch/arm/isa/templates/branch64.isa +++ b/src/arch/arm/isa/templates/branch64.isa @@ -44,7 +44,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, int64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -67,7 +67,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, int64_t _imm, ConditionCode _condCode); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -90,7 +90,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -112,7 +112,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, RegIndex _op2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -135,7 +135,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, int64_t imm, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) 
const override; }; }}; @@ -159,7 +159,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, int64_t _imm1, int64_t _imm2, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/arm/isa/templates/crypto.isa b/src/arch/arm/isa/templates/crypto.isa index 417d643222..f2080a27df 100644 --- a/src/arch/arm/isa/templates/crypto.isa +++ b/src/arch/arm/isa/templates/crypto.isa @@ -39,7 +39,7 @@ // storage/extraction here is fixed as constants. def template CryptoPredOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; diff --git a/src/arch/arm/isa/templates/data64.isa b/src/arch/arm/isa/templates/data64.isa index 69c4b64b4c..99bda0b9a2 100644 --- a/src/arch/arm/isa/templates/data64.isa +++ b/src/arch/arm/isa/templates/data64.isa @@ -45,7 +45,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -72,7 +72,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, int32_t _shiftAmt, ArmShiftType _shiftType); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -100,7 +100,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, ArmExtendType _extendType, int32_t _shiftAmt); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; 
}}; @@ -126,7 +126,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -150,7 +150,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -175,7 +175,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -201,7 +201,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, RegIndex _op3); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -227,7 +227,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, uint64_t _imm, ConditionCode _condCode, uint8_t _defCc); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -253,7 +253,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, RegIndex _op2, ConditionCode _condCode, uint8_t _defCc); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -280,7 +280,7 @@ class %(class_name)s : public %(base_class)s 
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, ConditionCode _condCode); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/arm/isa/templates/macromem.isa b/src/arch/arm/isa/templates/macromem.isa index dfda85ccdd..51380019ad 100644 --- a/src/arch/arm/isa/templates/macromem.isa +++ b/src/arch/arm/isa/templates/macromem.isa @@ -53,10 +53,10 @@ def template MicroMemDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _ura, RegIndex _urb, bool _up, uint8_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -90,10 +90,10 @@ def template MicroMemPairDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _dreg1, RegIndex _dreg2, RegIndex _base, bool _up, uint8_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -145,10 +145,10 @@ def template MicroNeonMemDeclare {{ } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - 
Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -169,7 +169,7 @@ def template MicroSetPCCPSRDeclare {{ RegIndex _ura, RegIndex _urb, RegIndex _urc); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -202,19 +202,19 @@ def template MicroSetPCCPSRConstructor {{ def template MicroNeonMemExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; template Fault %(class_name)s<%(targs)s>::initiateAcc( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; template Fault %(class_name)s<%(targs)s>::completeAcc(PacketPtr, - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; def template MicroNeonExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; //////////////////////////////////////////////////////////////////// @@ -244,14 +244,14 @@ def template MicroNeonMixDeclare {{ } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template MicroNeonMixExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t resTemp = 0; @@ -299,7 +299,7 @@ def template MicroNeonMixLaneDeclare {{ } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -317,7 +317,7 @@ def template MicroIntMovDeclare {{ public: %(class_name)s(ExtMachInst machInst, RegIndex _ura, RegIndex _urb); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, 
trace::InstRecord *) const override; }; }}; def template MicroIntMovConstructor {{ @@ -352,7 +352,7 @@ def template MicroIntImmDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _ura, RegIndex _urb, int32_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -397,7 +397,7 @@ def template MicroIntRegDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _ura, RegIndex _urb, RegIndex _urc, int32_t _shiftAmt, ArmShiftType _shiftType); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -423,7 +423,7 @@ def template MicroIntXERegDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _ura, RegIndex _urb, RegIndex _urc, ArmExtendType _type, uint32_t _shiftAmt); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/arm/isa/templates/mem.isa b/src/arch/arm/isa/templates/mem.isa index 35af77577f..43969b62f1 100644 --- a/src/arch/arm/isa/templates/mem.isa +++ b/src/arch/arm/isa/templates/mem.isa @@ -42,7 +42,7 @@ def template PanicExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("Execute function executed when it shouldn't be!\n"); return NoFault; @@ -52,7 +52,7 @@ def template PanicExecute {{ def template PanicInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("InitiateAcc function executed when it shouldn't be!\n"); return NoFault; @@ -62,7 +62,7 @@ def template PanicInitiateAcc {{ def template PanicCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("CompleteAcc 
function executed when it shouldn't be!\n"); return NoFault; @@ -73,7 +73,7 @@ def template PanicCompleteAcc {{ def template SwapExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -109,7 +109,7 @@ def template SwapExecute {{ def template SwapInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -137,7 +137,7 @@ def template SwapInitiateAcc {{ def template SwapCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -163,7 +163,7 @@ def template SwapCompleteAcc {{ def template LoadExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -194,7 +194,7 @@ def template NeonLoadExecute {{ template Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -230,7 +230,7 @@ def template NeonLoadExecute {{ def template StoreExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -264,7 +264,7 @@ def template NeonStoreExecute {{ template Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -303,7 +303,7 @@ def template NeonStoreExecute {{ def template StoreExExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -342,7 +342,7 @@ 
def template StoreExExecute {{ def template StoreExInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -371,7 +371,7 @@ def template StoreExInitiateAcc {{ def template StoreInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -401,7 +401,7 @@ def template NeonStoreInitiateAcc {{ template Fault %(class_name)s::initiateAcc( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -434,7 +434,7 @@ def template NeonStoreInitiateAcc {{ def template LoadInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -460,7 +460,7 @@ def template NeonLoadInitiateAcc {{ template Fault %(class_name)s::initiateAcc( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -487,7 +487,7 @@ def template NeonLoadInitiateAcc {{ def template LoadCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -515,7 +515,7 @@ def template NeonLoadCompleteAcc {{ template Fault %(class_name)s::completeAcc( - PacketPtr pkt, ExecContext *xc, Trace::InstRecord *traceData) const + PacketPtr pkt, ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -543,7 +543,7 @@ def template NeonLoadCompleteAcc {{ def template StoreCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } @@ -553,7 
+553,7 @@ def template NeonStoreCompleteAcc {{ template Fault %(class_name)s::completeAcc( - PacketPtr pkt, ExecContext *xc, Trace::InstRecord *traceData) const + PacketPtr pkt, ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } @@ -562,7 +562,7 @@ def template NeonStoreCompleteAcc {{ def template StoreExCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -596,10 +596,10 @@ def template RfeDeclare {{ %(class_name)s(ExtMachInst machInst, uint32_t _base, int _mode, bool _wb); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -617,10 +617,10 @@ def template SrsDeclare {{ %(class_name)s(ExtMachInst machInst, uint32_t _regMode, int _mode, bool _wb); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -638,10 +638,10 @@ def template SwapDeclare {{ %(class_name)s(ExtMachInst machInst, uint32_t _dest, uint32_t _op1, uint32_t _base); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault 
completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -660,10 +660,10 @@ def template LoadStoreDImmDeclare {{ uint32_t _dest, uint32_t _dest2, uint32_t _base, bool _add, int32_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -682,10 +682,10 @@ def template StoreExDImmDeclare {{ uint32_t _result, uint32_t _dest, uint32_t _dest2, uint32_t _base, bool _add, int32_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -703,10 +703,10 @@ def template LoadStoreImmDeclare {{ %(class_name)s(ExtMachInst machInst, uint32_t _dest, uint32_t _base, bool _add, int32_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -731,10 +731,10 @@ def template StoreExImmDeclare {{ uint32_t _result, uint32_t _dest, uint32_t _base, bool _add, int32_t _imm); - Fault execute(ExecContext *, 
Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -755,10 +755,10 @@ def template StoreDRegDeclare {{ int32_t _shiftAmt, uint32_t _shiftType, uint32_t _index); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -778,10 +778,10 @@ def template StoreRegDeclare {{ int32_t _shiftAmt, uint32_t _shiftType, uint32_t _index); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -808,10 +808,10 @@ def template LoadDRegDeclare {{ int32_t _shiftAmt, uint32_t _shiftType, uint32_t _index); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -831,10 
+831,10 @@ def template LoadRegDeclare {{ int32_t _shiftAmt, uint32_t _shiftType, uint32_t _index); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -858,10 +858,10 @@ def template LoadImmDeclare {{ %(class_name)s(ExtMachInst machInst, uint32_t _dest, uint32_t _base, bool _add, int32_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -1288,4 +1288,3 @@ def template LoadImmConstructor {{ #endif } }}; - diff --git a/src/arch/arm/isa/templates/mem64.isa b/src/arch/arm/isa/templates/mem64.isa index 5097fb0fff..991e97caaa 100644 --- a/src/arch/arm/isa/templates/mem64.isa +++ b/src/arch/arm/isa/templates/mem64.isa @@ -1,6 +1,6 @@ // -*- mode:c++ -*- -// Copyright (c) 2011-2014, 2017, 2019 ARM Limited +// Copyright (c) 2011-2014, 2017, 2019, 2022 Arm Limited // All rights reserved // // The license below extends only to copyright in the software and shall @@ -47,7 +47,7 @@ let {{ def template Load64Execute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -72,7 +72,7 @@ def template Load64Execute {{ def template Load64FpExecute {{ Fault 
%(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -97,7 +97,7 @@ def template Load64FpExecute {{ def template Store64Execute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -126,7 +126,7 @@ def template Store64Execute {{ def template Store64InitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -151,7 +151,7 @@ def template Store64InitiateAcc {{ def template StoreEx64Execute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -185,7 +185,7 @@ def template StoreEx64Execute {{ def template StoreEx64InitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -210,7 +210,7 @@ def template StoreEx64InitiateAcc {{ def template Load64InitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -230,7 +230,7 @@ def template Load64InitiateAcc {{ def template Load64CompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -255,7 +255,7 @@ def template Load64CompleteAcc {{ def template Store64CompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } @@ -264,7 +264,7 @@ def template Store64CompleteAcc {{ def template StoreEx64CompleteAcc {{ Fault 
%(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -291,12 +291,12 @@ def template DCStore64Declare {{ public: /// Constructor. %(class_name)s(ExtMachInst machInst, RegIndex _base, - MiscRegIndex _dest, uint64_t _imm); + MiscRegIndex _dest); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -308,9 +308,9 @@ def template DCStore64Declare {{ def template DCStore64Constructor {{ %(class_name)s::%(class_name)s(ExtMachInst machInst, RegIndex _base, - MiscRegIndex _dest, uint64_t _imm) : + MiscRegIndex _dest) : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, - _base, _dest, _imm) + _base, _dest) { %(set_reg_idx_arr)s; %(constructor)s; @@ -321,7 +321,7 @@ def template DCStore64Constructor {{ def template DCStore64Execute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -352,7 +352,7 @@ def template DCStore64Execute {{ def template DCStore64InitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -387,10 +387,10 @@ def template LoadStoreImm64Declare {{ %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _base, int64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) 
const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -413,10 +413,10 @@ def template LoadStoreImmU64Declare {{ bool noAlloc = false, bool exclusive = false, bool acrel = false); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -439,10 +439,10 @@ def template LoadStoreImmDU64Declare {{ int64_t _imm = 0, bool noAlloc = false, bool exclusive = false, bool acrel = false); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -467,10 +467,10 @@ def template StoreImmDEx64Declare {{ RegIndex _result, RegIndex _dest, RegIndex _dest2, RegIndex _base, int64_t _imm = 0); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; 
}}; @@ -487,10 +487,10 @@ def template LoadStoreReg64Declare {{ RegIndex _dest, RegIndex _base, RegIndex _offset, ArmExtendType _type, uint32_t _shiftAmt); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -514,10 +514,10 @@ def template LoadStoreRegU64Declare {{ bool noAlloc = false, bool exclusive = false, bool acrel = false); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -538,10 +538,10 @@ def template LoadStoreRaw64Declare {{ %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _base); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -562,10 +562,10 @@ def template LoadStoreEx64Declare {{ %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _base, RegIndex _result); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - 
Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -585,10 +585,10 @@ def template LoadStoreLit64Declare {{ /// Constructor. %(class_name)s(ExtMachInst machInst, RegIndex _dest, int64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -610,10 +610,10 @@ def template LoadStoreLitU64Declare {{ bool noAlloc = false, bool exclusive = false, bool acrel = false); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -782,7 +782,7 @@ def template LoadStoreLitU64Constructor {{ def template AmoOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -812,7 +812,7 @@ def template AmoOpExecute {{ def template AmoOpInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) 
const { Addr EA; @@ -834,7 +834,7 @@ def template AmoOpInitiateAcc {{ def template AmoOpCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -862,10 +862,10 @@ def template AmoOpDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _base, RegIndex _result); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -901,10 +901,10 @@ def template AmoArithmeticOpDeclare {{ %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _base, RegIndex _result); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -923,7 +923,7 @@ def template AmoArithmeticOpConstructor {{ %(set_reg_idx_arr)s; %(constructor)s; isXZR = false; - uint32_t r2 = RegId(IntRegClass, dest).index() ; + uint32_t r2 = dest; flags[IsStore] = false; flags[IsLoad] = false; if (r2 == 31) { diff --git a/src/arch/arm/isa/templates/misc.isa b/src/arch/arm/isa/templates/misc.isa index 36c78f696b..3bb3afafd4 100644 --- a/src/arch/arm/isa/templates/misc.isa +++ b/src/arch/arm/isa/templates/misc.isa @@ -44,7 +44,7 @@ class %(class_name)s : public %(base_class)s public: 
// Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -76,7 +76,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, uint8_t _sysM, bool _r); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -109,7 +109,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, uint8_t _sysM, bool _r); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -138,7 +138,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, uint8_t mask); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -166,7 +166,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, uint32_t imm, uint8_t mask); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -195,7 +195,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, MiscRegIndex _op1, RegIndex _dest, RegIndex _dest2, uint32_t imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -226,7 +226,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _op1, RegIndex _op2, MiscRegIndex _dest, uint32_t imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault 
execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -256,7 +256,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -283,7 +283,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -311,7 +311,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -340,7 +340,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -371,7 +371,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, RegIndex _op3); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -402,7 +402,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -432,7 +432,7 @@ class %(class_name)s : public %(base_class)s // Constructor 
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -462,7 +462,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, RegIndex _op1, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -492,7 +492,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, MiscRegIndex _op1, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -522,7 +522,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, uint64_t _imm1, uint64_t _imm2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -552,7 +552,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, uint64_t _imm1, uint64_t _imm2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -583,7 +583,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, uint64_t _imm, RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -614,7 +614,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, uint64_t _imm, RegIndex _op1, int32_t _shiftAmt, ArmShiftType _shiftType); - Fault 
execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -646,17 +646,17 @@ def template MiscRegRegImmMemOpDeclare {{ // Constructor %(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, RegIndex _op1, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; def template Mcr15Execute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -687,7 +687,7 @@ def template Mcr15Execute {{ def template Mcr15InitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -718,7 +718,7 @@ def template Mcr15InitiateAcc {{ def template Mcr15CompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } diff --git a/src/arch/arm/isa/templates/misc64.isa b/src/arch/arm/isa/templates/misc64.isa index 7c15c39639..af6b4c6888 100644 --- a/src/arch/arm/isa/templates/misc64.isa +++ b/src/arch/arm/isa/templates/misc64.isa @@ -45,7 +45,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst,uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -69,7 +69,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, 
RegIndex _op1, uint64_t _imm1, uint64_t _imm2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -95,7 +95,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -121,7 +121,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, uint64_t _imm); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -144,17 +144,17 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, - RegIndex _op1, uint64_t _imm); + RegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template MiscRegRegOp64Constructor {{ %(class_name)s::%(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, - RegIndex _op1, uint64_t _imm) : + RegIndex _op1) : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, - _dest, _op1, _imm) + _dest, _op1) { %(set_reg_idx_arr)s; %(constructor)s; @@ -170,17 +170,17 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, - MiscRegIndex _op1, uint64_t _imm); + MiscRegIndex _op1); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template RegMiscRegOp64Constructor {{ %(class_name)s::%(class_name)s(ExtMachInst machInst, RegIndex _dest, - MiscRegIndex _op1, uint64_t _imm) : + MiscRegIndex _op1) : %(base_class)s("%(mnemonic)s", 
machInst, %(op_class)s, - _dest, _op1, _imm) + _dest, _op1) { %(set_reg_idx_arr)s; %(constructor)s; @@ -197,7 +197,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -221,7 +221,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest); - Fault execute(ExecContext *, Trace::InstRecord *) const; + Fault execute(ExecContext *, trace::InstRecord *) const; }; }}; @@ -244,12 +244,12 @@ class %(class_name)s : public %(base_class)s public: // Constructor %(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, - RegIndex _op1, uint64_t _imm, bool dvm_enabled); + RegIndex _op1, bool dvm_enabled); - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; - Fault execute(ExecContext *, Trace::InstRecord *) const override; + trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -266,19 +266,18 @@ def template DvmDeclare {{ public: /// Constructor. 
%(class_name)s(ExtMachInst machInst, bool dvm_enabled); - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; - Fault execute(ExecContext *, Trace::InstRecord *) const override; + trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template DvmTlbiConstructor {{ %(class_name)s::%(class_name)s(ExtMachInst machInst, MiscRegIndex _dest, - RegIndex _op1, uint64_t _imm, - bool dvm_enabled) : + RegIndex _op1, bool dvm_enabled) : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, - _dest, _op1, _imm), + _dest, _op1), dvmEnabled(dvm_enabled) { %(set_reg_idx_arr)s; @@ -307,7 +306,7 @@ def template DvmConstructor {{ def template DvmInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -327,7 +326,7 @@ def template DvmInitiateAcc {{ def template DvmCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } diff --git a/src/arch/arm/isa/templates/mult.isa b/src/arch/arm/isa/templates/mult.isa index e94cc933be..7a68c8a8c4 100644 --- a/src/arch/arm/isa/templates/mult.isa +++ b/src/arch/arm/isa/templates/mult.isa @@ -45,7 +45,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _reg0, RegIndex _reg1, RegIndex _reg2); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -76,7 +76,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _reg0, RegIndex _reg1, RegIndex _reg2, RegIndex _reg3); - Fault execute(ExecContext *, 
Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/arm/isa/templates/neon.isa b/src/arch/arm/isa/templates/neon.isa index 2a6442452b..682061c44c 100644 --- a/src/arch/arm/isa/templates/neon.isa +++ b/src/arch/arm/isa/templates/neon.isa @@ -74,7 +74,7 @@ class %(class_name)s : public %(base_class)s } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -105,7 +105,7 @@ class %(class_name)s : public %(base_class)s } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -135,7 +135,7 @@ class %(class_name)s : public %(base_class)s } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -163,7 +163,7 @@ class %(class_name)s : public %(base_class)s } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -193,14 +193,14 @@ class %(class_name)s : public %(base_class)s } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template NeonExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; output header {{ @@ -233,7 +233,7 @@ def template NeonEqualRegExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -285,7 +285,7 @@ def template NeonUnequalRegExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { typedef 
typename bigger_type_t::type BigElement; Fault fault = NoFault; diff --git a/src/arch/arm/isa/templates/neon64.isa b/src/arch/arm/isa/templates/neon64.isa index f315f1e43a..b8fb6db8f8 100644 --- a/src/arch/arm/isa/templates/neon64.isa +++ b/src/arch/arm/isa/templates/neon64.isa @@ -60,7 +60,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -86,7 +86,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -109,7 +109,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -134,7 +134,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -160,7 +160,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -183,21 +183,21 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template NeonXExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; def template NeonXEqualRegOpExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault 
fault = NoFault; %(op_decl)s; @@ -232,7 +232,7 @@ def template NeonXUnequalRegOpExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { typedef typename bigger_type_t::type BigElement; Fault fault = NoFault; @@ -299,17 +299,17 @@ def template MicroNeonMemDeclare64 {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; def template NeonLoadExecute64 {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -340,7 +340,7 @@ def template NeonLoadExecute64 {{ def template NeonLoadInitiateAcc64 {{ Fault %(class_name)s::initiateAcc( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -362,7 +362,7 @@ def template NeonLoadInitiateAcc64 {{ def template NeonLoadCompleteAcc64 {{ Fault %(class_name)s::completeAcc( - PacketPtr pkt, ExecContext *xc, Trace::InstRecord *traceData) const + PacketPtr pkt, ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -388,7 +388,7 @@ def template NeonLoadCompleteAcc64 {{ def template NeonStoreExecute64 {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -422,7 +422,7 @@ def template NeonStoreExecute64 {{ def template NeonStoreInitiateAcc64 {{ Fault %(class_name)s::initiateAcc( - ExecContext *xc, Trace::InstRecord 
*traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -450,7 +450,7 @@ def template NeonStoreInitiateAcc64 {{ def template NeonStoreCompleteAcc64 {{ Fault %(class_name)s::completeAcc( - PacketPtr pkt, ExecContext *xc, Trace::InstRecord *traceData) const + PacketPtr pkt, ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } @@ -533,7 +533,7 @@ def template MicroNeonMixDeclare64 {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -556,14 +556,14 @@ def template MicroNeonMixLaneDeclare64 {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template MicroNeonMixExecute64 {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t resTemp = 0; diff --git a/src/arch/arm/isa/templates/pred.isa b/src/arch/arm/isa/templates/pred.isa index 3acc724c2a..176d03ac75 100644 --- a/src/arch/arm/isa/templates/pred.isa +++ b/src/arch/arm/isa/templates/pred.isa @@ -58,7 +58,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, uint32_t _imm, bool _rotC=true); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -99,7 +99,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, int32_t _shiftAmt, ArmShiftType _shiftType); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -146,7 +146,7 @@ class %(class_name)s : public %(base_class)s 
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, RegIndex _shift, ArmShiftType _shiftType); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -171,7 +171,7 @@ def template DataRegRegConstructor {{ def template PredOpExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t resTemp = 0; @@ -195,7 +195,7 @@ def template PredOpExecute {{ def template QuiescePredOpExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t resTemp = 0; @@ -221,7 +221,7 @@ def template QuiescePredOpExecute {{ def template QuiescePredOpExecuteWithFixup {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; uint64_t resTemp = 0; diff --git a/src/arch/arm/isa/templates/sve.isa b/src/arch/arm/isa/templates/sve.isa index b52a189b89..87316f1440 100644 --- a/src/arch/arm/isa/templates/sve.isa +++ b/src/arch/arm/isa/templates/sve.isa @@ -69,7 +69,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -95,7 +95,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -119,7 +119,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ 
-144,7 +144,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -168,7 +168,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -195,7 +195,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -221,7 +221,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -247,7 +247,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -273,7 +273,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -300,7 +300,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -326,7 +326,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -352,7 +352,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, 
trace::InstRecord *) const override; }; }}; @@ -379,7 +379,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -405,7 +405,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -431,7 +431,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -457,7 +457,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -483,7 +483,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -509,7 +509,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -535,7 +535,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -564,7 +564,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -590,7 +590,7 @@ class SveIndexII : public SveIndexIIOp %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const 
override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -616,7 +616,7 @@ class SveIndexIR : public SveIndexIROp %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -642,7 +642,7 @@ class SveIndexRI : public SveIndexRIOp %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -668,7 +668,7 @@ class SveIndexRR : public SveIndexRROp %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -692,7 +692,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -717,7 +717,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -742,7 +742,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -769,7 +769,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -794,7 +794,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -817,7 +817,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, 
Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -842,7 +842,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -868,7 +868,7 @@ class %(class_name)s : public %(base_class)s esize = sizeof(Element); } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -888,7 +888,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -910,7 +910,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -936,7 +936,7 @@ class %(class_name)s : public %(base_class)s scalar_width = (sizeof(Element) == 8) ? 
64 : 32; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -962,7 +962,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -980,7 +980,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -998,7 +998,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1016,7 +1016,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1034,7 +1034,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1052,7 +1052,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1081,7 +1081,7 @@ class %(class_name)s : public %(base_class)s esize = sizeof(Element); } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1110,7 +1110,7 @@ class %(class_name)s : public %(base_class)s esize = sizeof(Element); } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ 
-1137,7 +1137,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1164,7 +1164,7 @@ class %(class_name)s : public %(base_class)s %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -1172,7 +1172,7 @@ def template SveWideningOpExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -1190,7 +1190,7 @@ def template SveWideningOpExecute {{ def template SveNonTemplatedOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -1209,7 +1209,7 @@ def template SveOpExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -1227,5 +1227,5 @@ def template SveOpExecute {{ def template SveOpExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; diff --git a/src/arch/arm/isa/templates/sve_mem.isa b/src/arch/arm/isa/templates/sve_mem.isa index 5eb89755cd..aa131f8a4b 100644 --- a/src/arch/arm/isa/templates/sve_mem.isa +++ b/src/arch/arm/isa/templates/sve_mem.isa @@ -54,10 +54,10 @@ def template SveMemFillSpillOpDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault 
completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -88,10 +88,10 @@ def template SveContigMemSSOpDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -122,10 +122,10 @@ def template SveContigMemSIOpDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -138,22 +138,22 @@ def template SveContigMemSIOpDeclare {{ def template SveContigMemExecDeclare {{ template Fault %(class_name)s%(tpl_args)s::execute(ExecContext *, - Trace::InstRecord *) const; + trace::InstRecord *) const; template Fault %(class_name)s%(tpl_args)s::initiateAcc(ExecContext *, - Trace::InstRecord *) const; + trace::InstRecord *) const; template Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr, - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; def template SveContigLoadExecute {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -188,7 +188,7 @@ 
def template SveContigLoadInitiateAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -215,7 +215,7 @@ def template SveContigLoadCompleteAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { [[maybe_unused]] bool aarch64 = true; unsigned eCount = @@ -243,7 +243,7 @@ def template SveContigStoreExecute {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -281,7 +281,7 @@ def template SveContigStoreInitiateAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -315,7 +315,7 @@ def template SveContigStoreCompleteAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } @@ -325,7 +325,7 @@ def template SveLoadAndReplExecute {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -357,7 +357,7 @@ def template SveLoadAndReplInitiateAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -383,7 +383,7 @@ def template SveLoadAndReplCompleteAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, 
trace::InstRecord *traceData) const { Fault fault = NoFault; [[maybe_unused]] bool aarch64 = true; @@ -454,10 +454,10 @@ def template SveIndexedMemVIMicroopDeclare {{ } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -542,10 +542,10 @@ def template SveIndexedMemSVMicroopDeclare {{ } } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -581,7 +581,7 @@ def template SveGatherLoadMicroopExecute {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -630,7 +630,7 @@ def template SveGatherLoadMicroopInitiateAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -673,7 +673,7 @@ def template SveGatherLoadMicroopCompleteAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { [[maybe_unused]] bool aarch64 = true; @@ -698,7 +698,7 @@ def template 
SveScatterStoreMicroopExecute {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -729,7 +729,7 @@ def template SveScatterStoreMicroopInitiateAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -758,7 +758,7 @@ def template SveScatterStoreMicroopCompleteAcc {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } @@ -786,7 +786,7 @@ def template SveFirstFaultWritebackMicroopDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, @@ -804,7 +804,7 @@ def template SveFirstFaultWritebackMicroopExecute {{ %(tpl_header)s Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { [[maybe_unused]] bool aarch64 = true; @@ -847,7 +847,7 @@ def template SveGatherLoadCpySrcVecMicroopDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, @@ -864,7 +864,7 @@ def template SveGatherLoadCpySrcVecMicroopDeclare {{ def template SveGatherLoadCpySrcVecMicroopExecute {{ Fault SveGatherLoadCpySrcVecMicroop::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -916,10 +916,10 @@ def template SveStructMemSIMicroopDeclare {{ baseIsSP = isSP(_base); } - Fault execute(ExecContext *, Trace::InstRecord *) 
const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -970,22 +970,22 @@ def template SveStructMemSIMicroopDeclare {{ def template SveStructMemExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute(ExecContext *, - Trace::InstRecord *) const; + trace::InstRecord *) const; template Fault %(class_name)s<%(targs)s>::initiateAcc(ExecContext *, - Trace::InstRecord *) const; + trace::InstRecord *) const; template Fault %(class_name)s<%(targs)s>::completeAcc(PacketPtr, - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; def template SveStructLoadExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -1019,7 +1019,7 @@ def template SveStructLoadInitiateAcc {{ template Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -1046,7 +1046,7 @@ def template SveStructLoadCompleteAcc {{ template Fault %(class_name)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; [[maybe_unused]] bool aarch64 = true; @@ -1078,7 +1078,7 @@ def template SveStructStoreExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -1116,7 +1116,7 @@ def template SveStructStoreInitiateAcc {{ template Fault %(class_name)s::initiateAcc(ExecContext 
*xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -1150,7 +1150,7 @@ def template SveStructStoreCompleteAcc {{ template Fault %(class_name)s::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } @@ -1193,10 +1193,10 @@ def template SveStructMemSSMicroopDeclare {{ baseIsSP = isSP(_base); } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; void annotateFault(ArmISA::ArmFault *fault) override @@ -1271,7 +1271,7 @@ def template SveIntrlvMicroopDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, @@ -1312,7 +1312,7 @@ def template SveDeIntrlvMicroopDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, @@ -1329,14 +1329,14 @@ def template SveDeIntrlvMicroopDeclare {{ def template SveIntrlvMicroopExecDeclare {{ template Fault %(class_name)s<%(targs)s>::execute( - ExecContext *, Trace::InstRecord *) const; + ExecContext *, trace::InstRecord *) const; }}; def template SveIntrlvMicroopExecute {{ template Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; diff --git a/src/arch/arm/isa/templates/vfp.isa b/src/arch/arm/isa/templates/vfp.isa index 
2f59f4fae2..40dfcacaac 100644 --- a/src/arch/arm/isa/templates/vfp.isa +++ b/src/arch/arm/isa/templates/vfp.isa @@ -106,7 +106,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, VfpMicroMode mode = VfpNotAMicroop); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -137,7 +137,7 @@ class %(class_name)s : public %(base_class)s // Constructor %(class_name)s(ExtMachInst machInst, RegIndex _dest, uint64_t _imm, VfpMicroMode mode = VfpNotAMicroop); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -168,7 +168,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, uint64_t _imm, VfpMicroMode mode = VfpNotAMicroop); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -202,7 +202,7 @@ class %(class_name)s : public %(base_class)s %(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, VfpMicroMode mode = VfpNotAMicroop); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -237,7 +237,7 @@ class %(class_name)s : public %(base_class)s RegIndex _dest, RegIndex _op1, RegIndex _op2, ConditionCode _cond, VfpMicroMode mode = VfpNotAMicroop); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/arm/isa/templates/vfp64.isa b/src/arch/arm/isa/templates/vfp64.isa index 883fd43913..0d7a447c33 100644 --- a/src/arch/arm/isa/templates/vfp64.isa +++ b/src/arch/arm/isa/templates/vfp64.isa @@ -93,7 +93,7 @@ class %(class_name)s : public %(base_class)s 
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1, RegIndex _op2, RegIndex _op3, VfpMicroMode mode=VfpNotAMicroop); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/arm/kvm/ArmKvmCPU.py b/src/arch/arm/kvm/ArmKvmCPU.py index 4c1f62d6f2..56770a5b0a 100644 --- a/src/arch/arm/kvm/ArmKvmCPU.py +++ b/src/arch/arm/kvm/ArmKvmCPU.py @@ -36,7 +36,8 @@ from m5.params import * from m5.objects.BaseKvmCPU import BaseKvmCPU + class ArmKvmCPU(BaseKvmCPU): - type = 'ArmKvmCPU' + type = "ArmKvmCPU" cxx_header = "arch/arm/kvm/arm_cpu.hh" - cxx_class = 'gem5::ArmKvmCPU' + cxx_class = "gem5::ArmKvmCPU" diff --git a/src/arch/arm/kvm/ArmV8KvmCPU.py b/src/arch/arm/kvm/ArmV8KvmCPU.py index 2f4ddd362b..a6d83bb610 100644 --- a/src/arch/arm/kvm/ArmV8KvmCPU.py +++ b/src/arch/arm/kvm/ArmV8KvmCPU.py @@ -36,7 +36,8 @@ from m5.params import * from m5.objects.BaseArmKvmCPU import BaseArmKvmCPU + class ArmV8KvmCPU(BaseArmKvmCPU): - type = 'ArmV8KvmCPU' + type = "ArmV8KvmCPU" cxx_header = "arch/arm/kvm/armv8_cpu.hh" - cxx_class = 'gem5::ArmV8KvmCPU' + cxx_class = "gem5::ArmV8KvmCPU" diff --git a/src/arch/arm/kvm/BaseArmKvmCPU.py b/src/arch/arm/kvm/BaseArmKvmCPU.py index 364fe447a1..f896256063 100644 --- a/src/arch/arm/kvm/BaseArmKvmCPU.py +++ b/src/arch/arm/kvm/BaseArmKvmCPU.py @@ -38,10 +38,11 @@ from m5.objects.ArmCPU import ArmCPU from m5.objects.ArmMMU import ArmMMU from m5.objects.BaseKvmCPU import BaseKvmCPU + class BaseArmKvmCPU(BaseKvmCPU, ArmCPU): - type = 'BaseArmKvmCPU' + type = "BaseArmKvmCPU" cxx_header = "arch/arm/kvm/base_cpu.hh" - cxx_class = 'gem5::BaseArmKvmCPU' + cxx_class = "gem5::BaseArmKvmCPU" abstract = True mmu = ArmMMU() diff --git a/src/arch/arm/kvm/KvmGic.py b/src/arch/arm/kvm/KvmGic.py index de86653486..1002d3c2f5 100644 --- a/src/arch/arm/kvm/KvmGic.py +++ b/src/arch/arm/kvm/KvmGic.py @@ -38,20 +38,26 @@ from m5.proxy import * from 
m5.objects.Gic import GicV2, Gicv3 -class MuxingKvmGicV2(GicV2): - type = 'MuxingKvmGicV2' - cxx_header = "arch/arm/kvm/gic.hh" - cxx_class = 'gem5::MuxingKvmGic' - cxx_template_params = [ 'class Types' ] - simulate_gic = Param.Bool(False, - "Forcing the simulation to use the gem5 GIC instead of the host GIC") +class MuxingKvmGicV2(GicV2): + type = "MuxingKvmGicV2" + cxx_header = "arch/arm/kvm/gic.hh" + cxx_class = "gem5::MuxingKvmGic" + cxx_template_params = ["class Types"] + + simulate_gic = Param.Bool( + False, + "Forcing the simulation to use the gem5 GIC instead of the host GIC", + ) + class MuxingKvmGicV3(Gicv3): - type = 'MuxingKvmGicV3' + type = "MuxingKvmGicV3" cxx_header = "arch/arm/kvm/gic.hh" - cxx_class = 'gem5::MuxingKvmGic' - cxx_template_params = [ 'class Types' ] + cxx_class = "gem5::MuxingKvmGic" + cxx_template_params = ["class Types"] - simulate_gic = Param.Bool(False, - "Forcing the simulation to use the gem5 GIC instead of the host GIC") + simulate_gic = Param.Bool( + False, + "Forcing the simulation to use the gem5 GIC instead of the host GIC", + ) diff --git a/src/arch/arm/kvm/SConscript b/src/arch/arm/kvm/SConscript index 131c4578bf..06b7efe063 100644 --- a/src/arch/arm/kvm/SConscript +++ b/src/arch/arm/kvm/SConscript @@ -37,22 +37,20 @@ Import('*') -import platform -host_isa = platform.machine() - -if not (env['CONF']['USE_KVM'] and env['CONF']['KVM_ISA'] == 'arm'): - Return() +if env['CONF']['KVM_ISA'] == 'arm': + import platform + host_isa = platform.machine() + env.TagImplies(f'{host_isa} kvm', 'arm kvm') SimObject('KvmGic.py', - sim_objects=['MuxingKvmGicV2', 'MuxingKvmGicV3'], tags='arm isa') -Source('gic.cc', tags='arm isa') + sim_objects=['MuxingKvmGicV2', 'MuxingKvmGicV3'], tags='arm kvm') +Source('gic.cc', tags='arm kvm') -SimObject('BaseArmKvmCPU.py', sim_objects=['BaseArmKvmCPU'], tags='arm isa') -Source('base_cpu.cc', tags='arm isa') +SimObject('BaseArmKvmCPU.py', sim_objects=['BaseArmKvmCPU'], tags='arm kvm') 
+Source('base_cpu.cc', tags='arm kvm') -if host_isa == "armv7l": - SimObject('ArmKvmCPU.py', sim_objects=['ArmKvmCPU'], tags='arm isa') - Source('arm_cpu.cc', tags='arm isa') -elif host_isa == "aarch64": - SimObject('ArmV8KvmCPU.py', sim_objects=['ArmV8KvmCPU'], tags='arm isa') - Source('armv8_cpu.cc', tags='arm isa') +SimObject('ArmKvmCPU.py', sim_objects=['ArmKvmCPU'], tags='armv71 kvm') +Source('arm_cpu.cc', tags='armv71 kvm') + +SimObject('ArmV8KvmCPU.py', sim_objects=['ArmV8KvmCPU'], tags='aarch64 kvm') +Source('armv8_cpu.cc', tags='aarch64 kvm') diff --git a/src/arch/mips/O3CPU.py b/src/arch/arm/kvm/SConsopts similarity index 89% rename from src/arch/mips/O3CPU.py rename to src/arch/arm/kvm/SConsopts index 8f7b14cfc6..000bff7ef7 100644 --- a/src/arch/mips/O3CPU.py +++ b/src/arch/arm/kvm/SConsopts @@ -23,9 +23,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from m5.objects.MipsCPU import MipsO3CPU +Import('*') -O3CPU = MipsO3CPU +host_isa = None +try: + import platform + host_isa = platform.machine() +except: + pass -# Deprecated -DerivO3CPU = O3CPU +if host_isa in ('armv7l', 'aarch64'): + main['CONF']['KVM_ISA'] = 'arm' diff --git a/src/arch/arm/kvm/arm_cpu.cc b/src/arch/arm/kvm/arm_cpu.cc index 42aeea603e..1063c7c27f 100644 --- a/src/arch/arm/kvm/arm_cpu.cc +++ b/src/arch/arm/kvm/arm_cpu.cc @@ -664,7 +664,7 @@ ArmKvmCPU::updateKvmStateCore() for (const KvmIntRegInfo *ri(kvmIntRegs); ri->idx != init_reg::NumRegs; ++ri) { - uint64_t value = tc->getRegFlat(RegId(IntRegClass, ri->idx)); + uint64_t value = tc->getReg(flatIntRegClass[ri->idx]); DPRINTF(KvmContext, "kvm(%s) := 0x%x\n", ri->name, value); setOneReg(ri->id, value); } @@ -772,9 +772,8 @@ ArmKvmCPU::updateKvmStateVFP(uint64_t id, bool show_warnings) const unsigned idx_base = idx << 1; const unsigned idx_hi = idx_base + 1; const unsigned idx_lo = idx_base + 0; - uint64_t value = - 
((uint64_t)tc->getRegFlat(RegId(FloatRegClass, idx_hi)) << 32) | - tc->getRegFlat(RegId(FloatRegClass, idx_lo)); + uint64_t value = (tc->getReg(floatRegClass[idx_hi]) << 32) | + tc->getReg(floatRegClass[idx_lo]); setOneReg(id, value); } else if (regIsVfpCtrl(id)) { @@ -804,7 +803,7 @@ ArmKvmCPU::updateTCStateCore() for (const KvmIntRegInfo *ri(kvmIntRegs); ri->idx != int_reg::NumRegs; ++ri) { - tc->setRegFlat(RegId(IntRegClass, ri->idx), getOneRegU32(ri->id)); + tc->setReg(intRegClass[ri->idx], getOneRegU32(ri->id)); } for (const KvmCoreMiscRegInfo *ri(kvmCoreMiscRegs); @@ -915,9 +914,8 @@ ArmKvmCPU::updateTCStateVFP(uint64_t id, bool show_warnings) const unsigned idx_lo = idx_base + 0; uint64_t value = getOneRegU64(id); - tc->setRegFlat(RegId(FloatRegClass, idx_hi), - (value >> 32) & 0xFFFFFFFF); - tc->setRegFlat(RegId(FloatRegClass, idx_lo), value & 0xFFFFFFFF); + tc->setReg(floatRegClass[idx_hi], (value >> 32) & 0xFFFFFFFF); + tc->setReg(floatRegClass[idx_lo], value & 0xFFFFFFFF); } else if (regIsVfpCtrl(id)) { MiscRegIndex idx = decodeVFPCtrlReg(id); if (idx == NUM_MISCREGS) { diff --git a/src/arch/arm/kvm/armv8_cpu.cc b/src/arch/arm/kvm/armv8_cpu.cc index a059e14da0..6aa0f57156 100644 --- a/src/arch/arm/kvm/armv8_cpu.cc +++ b/src/arch/arm/kvm/armv8_cpu.cc @@ -39,6 +39,9 @@ #include +#include "arch/arm/regs/int.hh" +#include "arch/arm/regs/vec.hh" +#include "arch/arm/utility.hh" #include "debug/KvmContext.hh" #include "params/ArmV8KvmCPU.hh" @@ -249,7 +252,7 @@ ArmV8KvmCPU::updateKvmState() } for (const auto &ri : intRegMap) { - const uint64_t value = tc->getReg(RegId(IntRegClass, ri.idx)); + const uint64_t value = tc->getReg(intRegClass[ri.idx]); DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); setOneReg(ri.kvm, value); } @@ -259,7 +262,7 @@ ArmV8KvmCPU::updateKvmState() if (!inAArch64(tc)) syncVecElemsToRegs(tc); ArmISA::VecRegContainer vc; - tc->getReg(RegId(VecRegClass, i), &vc); + tc->getReg(vecRegClass[i], &vc); auto v = vc.as(); for (int j = 0; j < 
FP_REGS_PER_VFP_REG; j++) reg.s[j].i = v[j]; @@ -320,21 +323,23 @@ ArmV8KvmCPU::updateThreadContext() if (inAArch64(tc)) { tc->setReg(int_reg::x(i), value); } else { - tc->setRegFlat(int_reg::x(i), value); + tc->setReg(flatIntRegClass[i], value); } } for (const auto &ri : intRegMap) { const auto value(getOneRegU64(ri.kvm)); DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); - tc->setReg(RegId(IntRegClass, ri.idx), value); + tc->setReg(intRegClass[ri.idx], value); } for (int i = 0; i < NUM_QREGS; ++i) { KvmFPReg reg; DPRINTF(KvmContext, " Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i))); getOneReg(kvmFPReg(i), reg.data); - auto v = tc->getWritableVecReg(RegId(VecRegClass, i)).as(); + auto *vc = static_cast( + tc->getWritableReg(vecRegClass[i])); + auto v = vc->as(); for (int j = 0; j < FP_REGS_PER_VFP_REG; j++) v[j] = reg.s[j].i; if (!inAArch64(tc)) @@ -389,7 +394,7 @@ ArmV8KvmCPU::getSysRegMap() const const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM)); const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2)); const MiscRegIndex idx(decodeAArch64SysReg(op0, op1, crn, crm, op2)); - const auto &info(miscRegInfo[idx]); + const auto &info(lookUpMiscReg[idx].info); const bool writeable( info[MISCREG_USR_NS_WR] || info[MISCREG_USR_S_WR] || info[MISCREG_PRI_S_WR] || info[MISCREG_PRI_NS_WR] || diff --git a/src/arch/arm/kvm/gic.cc b/src/arch/arm/kvm/gic.cc index e87bdde969..0fd73c4b28 100644 --- a/src/arch/arm/kvm/gic.cc +++ b/src/arch/arm/kvm/gic.cc @@ -96,8 +96,11 @@ KvmKernelGic::setIntState(unsigned type, unsigned vcpu, unsigned irq, static const bool vcpu2_enabled = vm.kvm->capIRQLineLayout2(); uint32_t kvm_vcpu = (vcpu_index << KVM_ARM_IRQ_VCPU_SHIFT); + +#if defined(KVM_ARM_IRQ_VCPU2_SHIFT) if (vcpu2_enabled) kvm_vcpu |= vcpu2_index << KVM_ARM_IRQ_VCPU2_SHIFT; +#endif panic_if((!vcpu2_enabled && vcpu2_index) || kvm_vcpu > 0xffff, "VCPU out of range"); diff --git a/src/arch/arm/mmu.cc b/src/arch/arm/mmu.cc index ff55c3cf1e..824974ab21 100644 --- 
a/src/arch/arm/mmu.cc +++ b/src/arch/arm/mmu.cc @@ -619,7 +619,7 @@ std::pair MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x) { - bool grant = false, grant_read = true; + bool grant = false, grant_read = true, grant_write = true, grant_exec = true; const uint8_t ap = te->ap & 0b11; // 2-bit access protection field const bool is_priv = state.isPriv && !(req->getFlags() & UserMode); @@ -628,11 +628,6 @@ MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, uint8_t xn = te->xn; uint8_t pxn = te->pxn; - if (ArmSystem::haveEL(tc, EL3) && state.isSecure && - te->ns && state.scr.sif) { - xn = true; - } - DPRINTF(TLBVerbose, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, " "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn, pxn, r, w, x, is_priv, wxn); @@ -642,101 +637,90 @@ MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, } ExceptionLevel regime = !is_priv ? EL0 : state.aarch64EL; - switch (regime) { - case EL0: - { - grant_read = ap & 0x1; - uint8_t perm = (ap << 2) | (xn << 1) | pxn; - switch (perm) { - case 0: - case 1: - case 8: - case 9: - grant = x; - break; - case 4: - case 5: - grant = r || w || (x && !wxn); - break; - case 6: - case 7: - grant = r || w; - break; - case 12: - case 13: - grant = r || x; - break; - case 14: - case 15: - grant = r; - break; - default: - grant = false; - } + if (hasUnprivRegime(regime, state)) { + bool pr = false; + bool pw = false; + bool ur = false; + bool uw = false; + // Apply leaf permissions + switch (ap) { + case 0b00: // Privileged access + pr = 1; pw = 1; ur = 0; uw = 0; + break; + case 0b01: // No effect + pr = 1; pw = 1; ur = 1; uw = 1; + break; + case 0b10: // Read-only, privileged access + pr = 1; pw = 0; ur = 0; uw = 0; + break; + case 0b11: // Read-only + pr = 1; pw = 0; ur = 1; uw = 0; + break; } - break; - case EL1: - { - uint8_t perm = (ap << 2) | (xn << 1) | pxn; - switch (perm) { - case 0: - case 2: - 
grant = r || w || (x && !wxn); - break; - case 1: - case 3: - case 4: - case 5: - case 6: - case 7: - // regions that are writeable at EL0 should not be - // executable at EL1 - grant = r || w; - break; - case 8: - case 10: - case 12: - case 14: - grant = r || x; - break; - case 9: - case 11: - case 13: - case 15: - grant = r; - break; - default: - grant = false; - } + + // Locations writable by unprivileged cannot be executed by privileged + const bool px = !(pxn || uw); + const bool ux = !xn; + + grant_read = is_priv ? pr : ur; + grant_write = is_priv ? pw : uw; + grant_exec = is_priv ? px : ux; + } else { + switch (bits(ap, 1)) { + case 0b0: // No effect + grant_read = 1; grant_write = 1; + break; + case 0b1: // Read-Only + grant_read = 1; grant_write = 0; + break; } - break; - case EL2: - case EL3: - { - uint8_t perm = (ap & 0x2) | xn; - switch (perm) { - case 0: - grant = r || w || (x && !wxn); - break; - case 1: - grant = r || w; - break; - case 2: - grant = r || x; - break; - case 3: - grant = r; - break; - default: - grant = false; - } - } - break; + grant_exec = !xn; + } + + // Do not allow execution from writable location + // if wxn is set + grant_exec = grant_exec && !(wxn && grant_write); + + if (ArmSystem::haveEL(tc, EL3) && state.isSecure && te->ns) { + grant_exec = grant_exec && !state.scr.sif; + } + + if (x) { + grant = grant_exec; + } else if (req->isAtomic()) { + grant = grant_read && grant_write; + } else if (w) { + grant = grant_write; + } else { + grant = grant_read; } return std::make_pair(grant, grant_read); } +bool +MMU::hasUnprivRegime(ExceptionLevel el, bool e2h) +{ + switch (el) { + case EL0: + case EL1: + // EL1&0 + return true; + case EL2: + // EL2&0 or EL2 + return e2h; + case EL3: + default: + return false; + } +} + +bool +MMU::hasUnprivRegime(ExceptionLevel el, CachedState &state) +{ + return hasUnprivRegime(el, state.hcr.e2h); +} + bool MMU::faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode, const bool 
is_priv, CachedState &state) @@ -1332,7 +1316,7 @@ MMU::CachedState::updateMiscReg(ThreadContext *tc, !isSecure)); ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc, !isSecure)); - scr = tc->readMiscReg(MISCREG_SCR); + scr = tc->readMiscReg(MISCREG_SCR_EL3); isPriv = cpsr.mode != MODE_USER; if (longDescFormatInUse(tc)) { uint64_t ttbr_asid = tc->readMiscReg( @@ -1351,7 +1335,7 @@ MMU::CachedState::updateMiscReg(ThreadContext *tc, !isSecure)); dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc, !isSecure)); - hcr = tc->readMiscReg(MISCREG_HCR); + hcr = tc->readMiscReg(MISCREG_HCR_EL2); if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) { vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48); diff --git a/src/arch/arm/mmu.hh b/src/arch/arm/mmu.hh index 171dbf5f82..089edbd0ed 100644 --- a/src/arch/arm/mmu.hh +++ b/src/arch/arm/mmu.hh @@ -390,6 +390,8 @@ class MMU : public BaseMMU */ static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type); + static bool hasUnprivRegime(ExceptionLevel el, bool e2h); + public: /** Lookup an entry in the TLB * @param vpn virtual address @@ -446,6 +448,8 @@ class MMU : public BaseMMU bool faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode, const bool is_priv, CachedState &state); + bool hasUnprivRegime(ExceptionLevel el, CachedState &state); + std::pair s1PermBits64( TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x); diff --git a/src/arch/arm/nativetrace.cc b/src/arch/arm/nativetrace.cc index 0303ff6bf1..1b8f286f5e 100644 --- a/src/arch/arm/nativetrace.cc +++ b/src/arch/arm/nativetrace.cc @@ -41,7 +41,9 @@ #include "arch/arm/nativetrace.hh" #include "arch/arm/regs/cc.hh" +#include "arch/arm/regs/int.hh" #include "arch/arm/regs/misc.hh" +#include "arch/arm/regs/vec.hh" #include "base/compiler.hh" #include "cpu/thread_context.hh" #include "debug/ExecRegDelta.hh" @@ -53,7 +55,7 @@ namespace gem5 using namespace ArmISA; -namespace Trace 
{ +namespace trace { [[maybe_unused]] static const char *regNames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -66,7 +68,7 @@ namespace Trace { }; void -Trace::ArmNativeTrace::ThreadState::update(NativeTrace *parent) +ArmNativeTrace::ThreadState::update(NativeTrace *parent) { oldState = state[current]; current = (current + 1) % 2; @@ -101,7 +103,7 @@ Trace::ArmNativeTrace::ThreadState::update(NativeTrace *parent) } void -Trace::ArmNativeTrace::ThreadState::update(ThreadContext *tc) +ArmNativeTrace::ThreadState::update(ThreadContext *tc) { oldState = state[current]; current = (current + 1) % 2; @@ -109,7 +111,7 @@ Trace::ArmNativeTrace::ThreadState::update(ThreadContext *tc) // Regular int regs for (int i = 0; i < 15; i++) { - newState[i] = tc->getReg(RegId(IntRegClass, i)); + newState[i] = tc->getReg(intRegClass[i]); changed[i] = (oldState[i] != newState[i]); } @@ -129,7 +131,7 @@ Trace::ArmNativeTrace::ThreadState::update(ThreadContext *tc) for (int i = 0; i < NumVecV7ArchRegs; i++) { ArmISA::VecRegContainer vec_container; - tc->getReg(RegId(VecRegClass, i), &vec_container); + tc->getReg(vecRegClass[i], &vec_container); auto *vec = vec_container.as(); newState[STATE_F0 + 2*i] = vec[0]; newState[STATE_F0 + 2*i + 1] = vec[1]; @@ -139,7 +141,7 @@ Trace::ArmNativeTrace::ThreadState::update(ThreadContext *tc) } void -Trace::ArmNativeTrace::check(NativeTraceRecord *record) +ArmNativeTrace::check(NativeTraceRecord *record) { ThreadContext *tc = record->getThread(); // This area is read only on the target. 
It can't stop there to tell us @@ -221,5 +223,5 @@ Trace::ArmNativeTrace::check(NativeTraceRecord *record) } } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/arm/nativetrace.hh b/src/arch/arm/nativetrace.hh index c5abd44ca8..509567a4f8 100644 --- a/src/arch/arm/nativetrace.hh +++ b/src/arch/arm/nativetrace.hh @@ -36,7 +36,7 @@ namespace gem5 { -namespace Trace { +namespace trace { class ArmNativeTrace : public NativeTrace { @@ -110,7 +110,7 @@ class ArmNativeTrace : public NativeTrace void check(NativeTraceRecord *record); }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __ARCH_ARM_NATIVETRACE_HH__ diff --git a/src/arch/arm/pauth_helpers.cc b/src/arch/arm/pauth_helpers.cc index c204a07808..3e8f18e03a 100644 --- a/src/arch/arm/pauth_helpers.cc +++ b/src/arch/arm/pauth_helpers.cc @@ -1,5 +1,6 @@ // -*- mode:c++ -*- +// Copyright (c) 2020 ARM Limited // Copyright (c) 2020 Metempsy Technology Consulting // All rights reserved // @@ -120,9 +121,11 @@ ArmISA::trapPACUse(ThreadContext *tc, ExceptionLevel target_el) switch (target_el) { case EL2: - return std::make_shared(0x0, 0, EC_TRAPPED_PAC); + return std::make_shared( + 0x0, 0, ExceptionClass::TRAPPED_PAC); case EL3: - return std::make_shared(0x0, 0, EC_TRAPPED_PAC); + return std::make_shared( + 0x0, 0, ExceptionClass::TRAPPED_PAC); default: return NoFault; } @@ -859,13 +862,9 @@ ArmISA::addPACIB(ThreadContext* tc, uint64_t X, uint64_t Y, uint64_t* out){ -Fault -ArmISA::stripPAC(ThreadContext* tc, uint64_t A, bool data, uint64_t* out){ - bool trapEL2 = false; - bool trapEL3 = false; - - uint64_t ptr; - +void +ArmISA::stripPAC(ThreadContext* tc, uint64_t A, bool data, uint64_t* out) +{ ExceptionLevel el = currEL(tc); bool tbi = calculateTBI(tc, el, A, data); @@ -873,52 +872,15 @@ ArmISA::stripPAC(ThreadContext* tc, uint64_t A, bool data, uint64_t* out){ int bottom_PAC_bit = calculateBottomPACBit(tc, el, selbit); int top_bit = tbi ? 
55 : 63; - uint32_t nbits = (top_bit+1) - bottom_PAC_bit; + uint32_t nbits = (top_bit + 1) - bottom_PAC_bit; uint64_t pacbits = ((uint64_t)0x1 << nbits) -1; // 2^n -1; uint64_t mask = pacbits << bottom_PAC_bit; // creates mask - if (selbit) { - ptr = A | mask; + *out = A | mask; } else { - ptr = A & ~mask; + *out = A & ~mask; } - - SCR scr3 = tc->readMiscReg(MISCREG_SCR_EL3); - HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); - bool have_el3 = ArmSystem::haveEL(tc, EL3); - - switch (el) - { - case EL0: - trapEL2 = (EL2Enabled(tc) && hcr.api == 0 && - (hcr.tge == 0 || hcr.e2h == 0)); - trapEL3 = have_el3 && scr3.api == 0; - break; - case EL1: - trapEL2 = EL2Enabled(tc) && hcr.api == 0; - trapEL3 = have_el3 && scr3.api == 0; - break; - case EL2: - trapEL2 = false; - trapEL3 = have_el3 && scr3.api == 0; - break; - case EL3: - trapEL2 = false; - trapEL3 = false; - break; - default: - // Unnaccessible - break; - } - if (trapEL2) - return trapPACUse(tc, EL2); - else if (trapEL3) - return trapPACUse(tc, EL3); - else - *out = ptr; - - return NoFault; } } // namespace gem5 diff --git a/src/arch/arm/pauth_helpers.hh b/src/arch/arm/pauth_helpers.hh index 11aec267b8..9316c962ed 100644 --- a/src/arch/arm/pauth_helpers.hh +++ b/src/arch/arm/pauth_helpers.hh @@ -1,5 +1,6 @@ // -*- mode:c++ -*- +// Copyright (c) 2020 ARM Limited // Copyright (c) 2020 Metempsy Technology Consulting // All rights reserved // @@ -113,15 +114,12 @@ namespace ArmISA Fault addPACIB(ThreadContext* tc, uint64_t X, uint64_t Y, uint64_t* out); - // Strip() - // ======= - // Strip() returns a 64-bit value containing A, but replacing the + // stripPAC returns a 64-bit value containing A, but replacing the // pointer authentication code field bits with the extension of the // address bits. This can apply to either instructions or data, where, // as the use of tagged pointers is distinct, it might be // handled differently. 
- - Fault + void stripPAC(ThreadContext* tc, uint64_t A, bool data, uint64_t* out); } // namespace ArmISA diff --git a/src/arch/arm/reg_abi.cc b/src/arch/arm/reg_abi.cc index 3422eb1c41..9c89b7d072 100644 --- a/src/arch/arm/reg_abi.cc +++ b/src/arch/arm/reg_abi.cc @@ -26,6 +26,7 @@ */ #include "arch/arm/reg_abi.hh" +#include "arch/arm/regs/int.hh" namespace gem5 { @@ -33,8 +34,14 @@ namespace gem5 namespace ArmISA { -const std::vector RegABI32::ArgumentRegs = {0, 1, 2, 3, 4, 5, 6}; -const std::vector RegABI64::ArgumentRegs = {0, 1, 2, 3, 4, 5, 6}; +const std::vector RegABI32::ArgumentRegs = { + int_reg::R0, int_reg::R1, int_reg::R2, int_reg::R3, + int_reg::R4, int_reg::R5, int_reg::R6 +}; +const std::vector RegABI64::ArgumentRegs = { + int_reg::X0, int_reg::X1, int_reg::X2, int_reg::X3, + int_reg::X4, int_reg::X5, int_reg::X6 +}; } // namespace ArmISA } // namespace gem5 diff --git a/src/arch/arm/reg_abi.hh b/src/arch/arm/reg_abi.hh index d8a0ffae8a..1d5272c66d 100644 --- a/src/arch/arm/reg_abi.hh +++ b/src/arch/arm/reg_abi.hh @@ -41,12 +41,12 @@ namespace ArmISA struct RegABI32 : public GenericSyscallABI32 { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; struct RegABI64 : public GenericSyscallABI64 { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; } // namespace ArmISA diff --git a/src/arch/arm/regs/cc.hh b/src/arch/arm/regs/cc.hh index 2aa55faa01..ba7552799a 100644 --- a/src/arch/arm/regs/cc.hh +++ b/src/arch/arm/regs/cc.hh @@ -39,6 +39,7 @@ #define __ARCH_ARM_REGS_CC_HH__ #include "cpu/reg_class.hh" +#include "debug/CCRegs.hh" namespace gem5 { @@ -60,13 +61,21 @@ enum : RegIndex NumRegs }; +} // namespace cc_reg + +inline constexpr RegClass ccRegClass(CCRegClass, CCRegClassName, + cc_reg::NumRegs, debug::CCRegs); + +namespace cc_reg +{ + inline constexpr RegId - Nz(CCRegClass, _NzIdx), - C(CCRegClass, _CIdx), - V(CCRegClass, _VIdx), - Ge(CCRegClass, _GeIdx), - Fp(CCRegClass, _FpIdx), - 
Zero(CCRegClass, _ZeroIdx); + Nz = ccRegClass[_NzIdx], + C = ccRegClass[_CIdx], + V = ccRegClass[_VIdx], + Ge = ccRegClass[_GeIdx], + Fp = ccRegClass[_FpIdx], + Zero = ccRegClass[_ZeroIdx]; const char * const RegName[NumRegs] = { "nz", diff --git a/src/arch/riscv/vecregs.hh b/src/arch/arm/regs/int.cc similarity index 59% rename from src/arch/riscv/vecregs.hh rename to src/arch/arm/regs/int.cc index a6c11e1121..d1a7edd190 100644 --- a/src/arch/riscv/vecregs.hh +++ b/src/arch/arm/regs/int.cc @@ -1,9 +1,5 @@ /* - * Copyright (c) 2013 ARM Limited - * Copyright (c) 2014-2015 Sven Karlsson - * Copyright (c) 2019 Yifei Liu - * Copyright (c) 2020 Barkhausen Institut - * Copyright (c) 2021 StreamComputing Corp + * Copyright (c) 2010-2014 ARM Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -15,8 +11,7 @@ * unmodified and in its entirety in all distributions of the software, * modified or unmodified, in source code or in binary form. * - * Copyright (c) 2016 RISC-V Foundation - * Copyright (c) 2016 The University of Virginia + * Copyright (c) 2009 The Regents of The University of Michigan * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -43,23 +38,54 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __ARCH_RISCV_VECREGS_HH__ -#define __ARCH_RISCV_VECREGS_HH__ +#include "arch/arm/regs/int.hh" -#include "arch/generic/vec_pred_reg.hh" -#include "arch/generic/vec_reg.hh" +#include "arch/arm/isa.hh" +#include "arch/arm/regs/misc.hh" +#include "arch/arm/utility.hh" +#include "base/logging.hh" namespace gem5 { -namespace RiscvISA +namespace ArmISA { -// Not applicable to RISC-V -using VecRegContainer = ::gem5::DummyVecRegContainer; -using VecPredRegContainer = ::gem5::DummyVecPredRegContainer; +RegId +IntRegClassOps::flatten(const BaseISA &isa, const RegId &id) const +{ + const RegIndex reg_idx = id.index(); -} // namespace RiscvISA + auto &arm_isa = static_cast(isa); + + if (reg_idx < int_reg::NumArchRegs) { + return {flatIntRegClass, arm_isa.mapIntRegId(reg_idx)}; + } else if (reg_idx < int_reg::NumRegs) { + return {flatIntRegClass, id}; + } else if (reg_idx == int_reg::Spx) { + auto &arm_isa = static_cast(isa); + CPSR cpsr = arm_isa.readMiscRegNoEffect(MISCREG_CPSR); + ExceptionLevel el = opModeToEL((OperatingMode)(uint8_t)cpsr.mode); + + if (!cpsr.sp && el != EL0) + return {flatIntRegClass, int_reg::Sp0}; + + switch (el) { + case EL3: + return {flatIntRegClass, int_reg::Sp3}; + case EL2: + return {flatIntRegClass, int_reg::Sp2}; + case EL1: + return {flatIntRegClass, int_reg::Sp1}; + case EL0: + return {flatIntRegClass, int_reg::Sp0}; + default: + panic("Invalid exception level"); + } + } else { + return {flatIntRegClass, flattenIntRegModeIndex(reg_idx)}; + } +} + +} // namespace ArmISA } // namespace gem5 - -#endif // __ARCH_RISCV_VECREGS_HH__ diff --git a/src/arch/arm/regs/int.hh b/src/arch/arm/regs/int.hh index 34c2685fe7..5489ef8420 100644 --- a/src/arch/arm/regs/int.hh +++ b/src/arch/arm/regs/int.hh @@ -46,6 +46,7 @@ #include "arch/arm/types.hh" #include "base/logging.hh" #include "cpu/reg_class.hh" +#include "debug/IntRegs.hh" #include "sim/core.hh" namespace gem5 @@ -160,94 +161,114 @@ enum : RegIndex _X31Idx }; +} // namespace int_reg + +class 
IntRegClassOps : public RegClassOps +{ + RegId flatten(const BaseISA &isa, const RegId &id) const override; +}; + +inline constexpr IntRegClassOps intRegClassOps; + +inline constexpr RegClass intRegClass = + RegClass(IntRegClass, IntRegClassName, int_reg::NumRegs, debug::IntRegs). + ops(intRegClassOps). + needsFlattening(); + +inline constexpr RegClass flatIntRegClass = + RegClass(IntRegClass, IntRegClassName, int_reg::NumRegs, debug::IntRegs); + +namespace int_reg +{ + inline constexpr RegId /* All the unique register indices. */ - R0(IntRegClass, _R0Idx), - R1(IntRegClass, _R1Idx), - R2(IntRegClass, _R2Idx), - R3(IntRegClass, _R3Idx), - R4(IntRegClass, _R4Idx), - R5(IntRegClass, _R5Idx), - R6(IntRegClass, _R6Idx), - R7(IntRegClass, _R7Idx), - R8(IntRegClass, _R8Idx), - R9(IntRegClass, _R9Idx), - R10(IntRegClass, _R10Idx), - R11(IntRegClass, _R11Idx), - R12(IntRegClass, _R12Idx), - R13(IntRegClass, _R13Idx), - R14(IntRegClass, _R14Idx), - R15(IntRegClass, _R15Idx), + R0 = intRegClass[_R0Idx], + R1 = intRegClass[_R1Idx], + R2 = intRegClass[_R2Idx], + R3 = intRegClass[_R3Idx], + R4 = intRegClass[_R4Idx], + R5 = intRegClass[_R5Idx], + R6 = intRegClass[_R6Idx], + R7 = intRegClass[_R7Idx], + R8 = intRegClass[_R8Idx], + R9 = intRegClass[_R9Idx], + R10 = intRegClass[_R10Idx], + R11 = intRegClass[_R11Idx], + R12 = intRegClass[_R12Idx], + R13 = intRegClass[_R13Idx], + R14 = intRegClass[_R14Idx], + R15 = intRegClass[_R15Idx], - R13Svc(IntRegClass, _R13SvcIdx), - R14Svc(IntRegClass, _R14SvcIdx), + R13Svc = intRegClass[_R13SvcIdx], + R14Svc = intRegClass[_R14SvcIdx], - R13Mon(IntRegClass, _R13MonIdx), - R14Mon(IntRegClass, _R14MonIdx), + R13Mon = intRegClass[_R13MonIdx], + R14Mon = intRegClass[_R14MonIdx], - R13Hyp(IntRegClass, _R13HypIdx), + R13Hyp = intRegClass[_R13HypIdx], - R13Abt(IntRegClass, _R13AbtIdx), - R14Abt(IntRegClass, _R14AbtIdx), + R13Abt = intRegClass[_R13AbtIdx], + R14Abt = intRegClass[_R14AbtIdx], - R13Und(IntRegClass, _R13UndIdx), - R14Und(IntRegClass, 
_R14UndIdx), + R13Und = intRegClass[_R13UndIdx], + R14Und = intRegClass[_R14UndIdx], - R13Irq(IntRegClass, _R13IrqIdx), - R14Irq(IntRegClass, _R14IrqIdx), + R13Irq = intRegClass[_R13IrqIdx], + R14Irq = intRegClass[_R14IrqIdx], - R8Fiq(IntRegClass, _R8FiqIdx), - R9Fiq(IntRegClass, _R9FiqIdx), - R10Fiq(IntRegClass, _R10FiqIdx), - R11Fiq(IntRegClass, _R11FiqIdx), - R12Fiq(IntRegClass, _R12FiqIdx), - R13Fiq(IntRegClass, _R13FiqIdx), - R14Fiq(IntRegClass, _R14FiqIdx), + R8Fiq = intRegClass[_R8FiqIdx], + R9Fiq = intRegClass[_R9FiqIdx], + R10Fiq = intRegClass[_R10FiqIdx], + R11Fiq = intRegClass[_R11FiqIdx], + R12Fiq = intRegClass[_R12FiqIdx], + R13Fiq = intRegClass[_R13FiqIdx], + R14Fiq = intRegClass[_R14FiqIdx], - Zero(IntRegClass, _ZeroIdx), - Ureg0(IntRegClass, _Ureg0Idx), - Ureg1(IntRegClass, _Ureg1Idx), - Ureg2(IntRegClass, _Ureg2Idx), + Zero = intRegClass[_ZeroIdx], + Ureg0 = intRegClass[_Ureg0Idx], + Ureg1 = intRegClass[_Ureg1Idx], + Ureg2 = intRegClass[_Ureg2Idx], - Sp0(IntRegClass, _Sp0Idx), - Sp1(IntRegClass, _Sp1Idx), - Sp2(IntRegClass, _Sp2Idx), - Sp3(IntRegClass, _Sp3Idx), + Sp0 = intRegClass[_Sp0Idx], + Sp1 = intRegClass[_Sp1Idx], + Sp2 = intRegClass[_Sp2Idx], + Sp3 = intRegClass[_Sp3Idx], - Spx(IntRegClass, _SpxIdx), + Spx = intRegClass[_SpxIdx], - X0(IntRegClass, _X0Idx), - X1(IntRegClass, _X1Idx), - X2(IntRegClass, _X2Idx), - X3(IntRegClass, _X3Idx), - X4(IntRegClass, _X4Idx), - X5(IntRegClass, _X5Idx), - X6(IntRegClass, _X6Idx), - X7(IntRegClass, _X7Idx), - X8(IntRegClass, _X8Idx), - X9(IntRegClass, _X9Idx), - X10(IntRegClass, _X10Idx), - X11(IntRegClass, _X11Idx), - X12(IntRegClass, _X12Idx), - X13(IntRegClass, _X13Idx), - X14(IntRegClass, _X14Idx), - X15(IntRegClass, _X15Idx), - X16(IntRegClass, _X16Idx), - X17(IntRegClass, _X17Idx), - X18(IntRegClass, _X18Idx), - X19(IntRegClass, _X19Idx), - X20(IntRegClass, _X20Idx), - X21(IntRegClass, _X21Idx), - X22(IntRegClass, _X22Idx), - X23(IntRegClass, _X23Idx), - X24(IntRegClass, _X24Idx), - X25(IntRegClass, 
_X25Idx), - X26(IntRegClass, _X26Idx), - X27(IntRegClass, _X27Idx), - X28(IntRegClass, _X28Idx), - X29(IntRegClass, _X29Idx), - X30(IntRegClass, _X30Idx), - X31(IntRegClass, _X31Idx); + X0 = intRegClass[_X0Idx], + X1 = intRegClass[_X1Idx], + X2 = intRegClass[_X2Idx], + X3 = intRegClass[_X3Idx], + X4 = intRegClass[_X4Idx], + X5 = intRegClass[_X5Idx], + X6 = intRegClass[_X6Idx], + X7 = intRegClass[_X7Idx], + X8 = intRegClass[_X8Idx], + X9 = intRegClass[_X9Idx], + X10 = intRegClass[_X10Idx], + X11 = intRegClass[_X11Idx], + X12 = intRegClass[_X12Idx], + X13 = intRegClass[_X13Idx], + X14 = intRegClass[_X14Idx], + X15 = intRegClass[_X15Idx], + X16 = intRegClass[_X16Idx], + X17 = intRegClass[_X17Idx], + X18 = intRegClass[_X18Idx], + X19 = intRegClass[_X19Idx], + X20 = intRegClass[_X20Idx], + X21 = intRegClass[_X21Idx], + X22 = intRegClass[_X22Idx], + X23 = intRegClass[_X23Idx], + X24 = intRegClass[_X24Idx], + X25 = intRegClass[_X25Idx], + X26 = intRegClass[_X26Idx], + X27 = intRegClass[_X27Idx], + X28 = intRegClass[_X28Idx], + X29 = intRegClass[_X29Idx], + X30 = intRegClass[_X30Idx], + X31 = intRegClass[_X31Idx]; inline constexpr auto &Sp = R13, @@ -424,7 +445,7 @@ static inline RegId x(unsigned index) { assert(index < NumArchRegs); - return RegId(IntRegClass, _X0Idx + index); + return intRegClass[_X0Idx + index]; } const RegMap RegUsrMap = { @@ -550,7 +571,7 @@ regInMode(OperatingMode mode, int reg) } // namespace int_reg -static inline int +static inline const RegId & flattenIntRegModeIndex(int reg) { int mode = reg / int_reg::regsPerMode; diff --git a/src/arch/arm/regs/misc.cc b/src/arch/arm/regs/misc.cc index 00d5501d06..142b25f8c4 100644 --- a/src/arch/arm/regs/misc.cc +++ b/src/arch/arm/regs/misc.cc @@ -39,9 +39,11 @@ #include +#include "arch/arm/insts/misc64.hh" #include "arch/arm/isa.hh" #include "base/logging.hh" #include "cpu/thread_context.hh" +#include "dev/arm/gic_v3_cpu_interface.hh" #include "sim/full_system.hh" namespace gem5 @@ -562,13 +564,14 @@ 
std::tuple canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc) { bool secure = !scr.ns; - bool canRead = false; + bool can_read = false; bool undefined = false; + auto& miscreg_info = lookUpMiscReg[reg].info; switch (cpsr.mode) { case MODE_USER: - canRead = secure ? miscRegInfo[reg][MISCREG_USR_S_RD] : - miscRegInfo[reg][MISCREG_USR_NS_RD]; + can_read = secure ? miscreg_info[MISCREG_USR_S_RD] : + miscreg_info[MISCREG_USR_NS_RD]; break; case MODE_FIQ: case MODE_IRQ: @@ -576,15 +579,15 @@ canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc) case MODE_ABORT: case MODE_UNDEFINED: case MODE_SYSTEM: - canRead = secure ? miscRegInfo[reg][MISCREG_PRI_S_RD] : - miscRegInfo[reg][MISCREG_PRI_NS_RD]; + can_read = secure ? miscreg_info[MISCREG_PRI_S_RD] : + miscreg_info[MISCREG_PRI_NS_RD]; break; case MODE_MON: - canRead = secure ? miscRegInfo[reg][MISCREG_MON_NS0_RD] : - miscRegInfo[reg][MISCREG_MON_NS1_RD]; + can_read = secure ? miscreg_info[MISCREG_MON_NS0_RD] : + miscreg_info[MISCREG_MON_NS1_RD]; break; case MODE_HYP: - canRead = miscRegInfo[reg][MISCREG_HYP_NS_RD]; + can_read = miscreg_info[MISCREG_HYP_NS_RD]; break; default: undefined = true; @@ -600,21 +603,22 @@ canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc) } // can't do permissions checkes on the root of a banked pair of regs - assert(!miscRegInfo[reg][MISCREG_BANKED]); - return std::make_tuple(canRead, undefined); + assert(!miscreg_info[MISCREG_BANKED]); + return std::make_tuple(can_read, undefined); } std::tuple canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc) { bool secure = !scr.ns; - bool canWrite = false; + bool can_write = false; bool undefined = false; + const auto& miscreg_info = lookUpMiscReg[reg].info; switch (cpsr.mode) { case MODE_USER: - canWrite = secure ? miscRegInfo[reg][MISCREG_USR_S_WR] : - miscRegInfo[reg][MISCREG_USR_NS_WR]; + can_write = secure ? 
miscreg_info[MISCREG_USR_S_WR] : + miscreg_info[MISCREG_USR_NS_WR]; break; case MODE_FIQ: case MODE_IRQ: @@ -622,15 +626,15 @@ canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc) case MODE_ABORT: case MODE_UNDEFINED: case MODE_SYSTEM: - canWrite = secure ? miscRegInfo[reg][MISCREG_PRI_S_WR] : - miscRegInfo[reg][MISCREG_PRI_NS_WR]; + can_write = secure ? miscreg_info[MISCREG_PRI_S_WR] : + miscreg_info[MISCREG_PRI_NS_WR]; break; case MODE_MON: - canWrite = secure ? miscRegInfo[reg][MISCREG_MON_NS0_WR] : - miscRegInfo[reg][MISCREG_MON_NS1_WR]; + can_write = secure ? miscreg_info[MISCREG_MON_NS0_WR] : + miscreg_info[MISCREG_MON_NS1_WR]; break; case MODE_HYP: - canWrite = miscRegInfo[reg][MISCREG_HYP_NS_WR]; + can_write = miscreg_info[MISCREG_HYP_NS_WR]; break; default: undefined = true; @@ -646,8 +650,8 @@ canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc) } // can't do permissions checkes on the root of a banked pair of regs - assert(!miscRegInfo[reg][MISCREG_BANKED]); - return std::make_tuple(canWrite, undefined); + assert(!miscreg_info[MISCREG_BANKED]); + return std::make_tuple(can_write, undefined); } bool @@ -665,7 +669,7 @@ AArch32isUndefinedGenericTimer(MiscRegIndex reg, ThreadContext *tc) int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc) { - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); return snsBankedIndex(reg, tc, scr.ns); } @@ -673,7 +677,7 @@ int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc, bool ns) { int reg_as_int = static_cast(reg); - if (miscRegInfo[reg][MISCREG_BANKED]) { + if (lookUpMiscReg[reg].info[MISCREG_BANKED]) { reg_as_int += (ArmSystem::haveEL(tc, EL3) && !ArmSystem::highestELIs64(tc) && !ns) ? 
2 : 1; } @@ -684,7 +688,7 @@ int snsBankedIndex64(MiscRegIndex reg, ThreadContext *tc) { auto *isa = static_cast(tc->getIsaPtr()); - SCR scr = tc->readMiscReg(MISCREG_SCR); + SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); return isa->snsBankedIndex64(reg, scr.ns); } @@ -703,9 +707,9 @@ preUnflattenMiscReg() { int reg = -1; for (int i = 0 ; i < NUM_MISCREGS; i++){ - if (miscRegInfo[i][MISCREG_BANKED]) + if (lookUpMiscReg[i].info[MISCREG_BANKED]) reg = i; - if (miscRegInfo[i][MISCREG_BANKED_CHILD]) + if (lookUpMiscReg[i].info[MISCREG_BANKED_CHILD]) unflattenResultMiscReg[i] = reg; else unflattenResultMiscReg[i] = i; @@ -720,90 +724,14 @@ unflattenMiscReg(int reg) return unflattenResultMiscReg[reg]; } -bool -canReadAArch64SysReg(MiscRegIndex reg, HCR hcr, SCR scr, CPSR cpsr, - ThreadContext *tc) +Fault +checkFaultAccessAArch64SysReg(MiscRegIndex reg, CPSR cpsr, + ThreadContext *tc, const MiscRegOp64 &inst) { - // Check for SP_EL0 access while SPSEL == 0 - if ((reg == MISCREG_SP_EL0) && (tc->readMiscReg(MISCREG_SPSEL) == 0)) - return false; - - // Check for RVBAR access - if (reg == MISCREG_RVBAR_EL1) { - ExceptionLevel highest_el = ArmSystem::highestEL(tc); - if (highest_el == EL2 || highest_el == EL3) - return false; - } - if (reg == MISCREG_RVBAR_EL2) { - ExceptionLevel highest_el = ArmSystem::highestEL(tc); - if (highest_el == EL3) - return false; - } - - bool secure = ArmSystem::haveEL(tc, EL3) && !scr.ns; - bool el2_host = EL2Enabled(tc) && hcr.e2h; - - switch (currEL(cpsr)) { - case EL0: - return secure ? miscRegInfo[reg][MISCREG_USR_S_RD] : - miscRegInfo[reg][MISCREG_USR_NS_RD]; - case EL1: - return secure ? miscRegInfo[reg][MISCREG_PRI_S_RD] : - miscRegInfo[reg][MISCREG_PRI_NS_RD]; - case EL2: - if (el2_host) { - return secure ? miscRegInfo[reg][MISCREG_HYP_E2H_S_RD] : - miscRegInfo[reg][MISCREG_HYP_E2H_NS_RD]; - } else { - return secure ? miscRegInfo[reg][MISCREG_HYP_S_RD] : - miscRegInfo[reg][MISCREG_HYP_NS_RD]; - } - case EL3: - return el2_host ? 
miscRegInfo[reg][MISCREG_MON_E2H_RD] : - secure ? miscRegInfo[reg][MISCREG_MON_NS0_RD] : - miscRegInfo[reg][MISCREG_MON_NS1_RD]; - default: - panic("Invalid exception level"); - } + return lookUpMiscReg[reg].checkFault(tc, inst, currEL(cpsr)); } -bool -canWriteAArch64SysReg(MiscRegIndex reg, HCR hcr, SCR scr, CPSR cpsr, - ThreadContext *tc) -{ - // Check for SP_EL0 access while SPSEL == 0 - if ((reg == MISCREG_SP_EL0) && (tc->readMiscReg(MISCREG_SPSEL) == 0)) - return false; - ExceptionLevel el = currEL(cpsr); - - bool secure = ArmSystem::haveEL(tc, EL3) && !scr.ns; - bool el2_host = EL2Enabled(tc) && hcr.e2h; - - switch (el) { - case EL0: - return secure ? miscRegInfo[reg][MISCREG_USR_S_WR] : - miscRegInfo[reg][MISCREG_USR_NS_WR]; - case EL1: - return secure ? miscRegInfo[reg][MISCREG_PRI_S_WR] : - miscRegInfo[reg][MISCREG_PRI_NS_WR]; - case EL2: - if (el2_host) { - return secure ? miscRegInfo[reg][MISCREG_HYP_E2H_S_WR] : - miscRegInfo[reg][MISCREG_HYP_E2H_NS_WR]; - } else { - return secure ? miscRegInfo[reg][MISCREG_HYP_S_WR] : - miscRegInfo[reg][MISCREG_HYP_NS_WR]; - } - case EL3: - return el2_host ? miscRegInfo[reg][MISCREG_MON_E2H_WR] : - secure ? 
miscRegInfo[reg][MISCREG_MON_NS0_WR] : - miscRegInfo[reg][MISCREG_MON_NS1_WR]; - default: - panic("Invalid exception level"); - } -} - -std::bitset miscRegInfo[NUM_MISCREGS]; // initialized below +std::vector lookUpMiscReg(NUM_MISCREGS); namespace { @@ -1289,6 +1217,758 @@ std::unordered_map miscRegNumToIdx{ { MiscRegNum64(3, 7, 14, 2, 2), MISCREG_CNTPS_CVAL_EL1 } }; +Fault +faultSpEL0(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + if (tc->readMiscReg(MISCREG_SPSEL) == 0) + return inst.undefined(); + else + return NoFault; +} + +Fault +faultDaif(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + const bool el2_enabled = EL2Enabled(tc); + const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); + const SCTLR sctlr = tc->readMiscRegNoEffect(MISCREG_SCTLR_EL1); + if ((el2_enabled && hcr.e2h && hcr.tge) || sctlr.uma == 0) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2); + } else { + return inst.generateTrap(EL1); + } + } else { + return NoFault; + } +} + +Fault +faultDczvaEL0(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + if (!FullSystem) + return NoFault; + + const SCTLR sctlr = tc->readMiscRegNoEffect(MISCREG_SCTLR_EL1); + const SCTLR sctlr2 = tc->readMiscRegNoEffect(MISCREG_SCTLR_EL2); + const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); + + const bool el2_enabled = EL2Enabled(tc); + const bool in_host = hcr.e2h && hcr.tge; + if (!(el2_enabled && in_host) && !sctlr.dze) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2); + } else { + return inst.generateTrap(EL1); + } + } else if (el2_enabled && !in_host && hcr.tdz) { + return inst.generateTrap(EL2); + } else if (el2_enabled && in_host && !sctlr2.dze) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCvacEL0(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); + 
const SCTLR sctlr2 = tc->readMiscReg(MISCREG_SCTLR_EL2); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + + const bool el2_enabled = EL2Enabled(tc); + const bool in_host = hcr.e2h && hcr.tge; + if (!(el2_enabled && in_host) && !sctlr.uci) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2); + } else { + return inst.generateTrap(EL1); + } + } else if (el2_enabled && !in_host && hcr.tpc) { + return inst.generateTrap(EL2); + } else if (el2_enabled && in_host && !sctlr2.uci) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultFpcrEL0(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + const CPACR cpacr = tc->readMiscReg(MISCREG_CPACR_EL1); + const CPTR cptr_el2 = tc->readMiscReg(MISCREG_CPTR_EL2); + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool el2_enabled = EL2Enabled(tc); + const bool in_host = hcr.e2h && hcr.tge; + if (!(el2_enabled && in_host) && cpacr.fpen != 0b11) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2, ExceptionClass::UNKNOWN, inst.iss()); + } else { + return inst.generateTrap(EL1, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } + } else if (el2_enabled && in_host && cptr_el2.fpen != 0b11) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (el2_enabled && hcr.e2h && ((cptr_el2.fpen & 0b1) == 0b0)) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (el2_enabled && !hcr.e2h && cptr_el2.tfp) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (ArmSystem::haveEL(tc, EL3) && cptr_el3.tfp) { + return inst.generateTrap(EL3, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else { + return NoFault; + } +} + +Fault +faultFpcrEL1(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + const CPACR cpacr = 
tc->readMiscReg(MISCREG_CPACR_EL1); + const CPTR cptr_el2 = tc->readMiscReg(MISCREG_CPTR_EL2); + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool el2_enabled = EL2Enabled(tc); + if ((cpacr.fpen & 0b1) == 0b0) { + return inst.generateTrap(EL1, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (el2_enabled && !hcr.e2h && cptr_el2.tfp) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (el2_enabled && hcr.e2h && ((cptr_el2.fpen & 0b1) == 0b0)) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (ArmSystem::haveEL(tc, EL3) && cptr_el3.tfp) { + return inst.generateTrap(EL3, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else { + return NoFault; + } +} + +Fault +faultFpcrEL2(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) +{ + const CPTR cptr_el2 = tc->readMiscReg(MISCREG_CPTR_EL2); + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (!hcr.e2h && cptr_el2.tfp) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (hcr.e2h && ((cptr_el2.fpen & 0b1) == 0b0)) { + return inst.generateTrap(EL2, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else if (ArmSystem::haveEL(tc, EL3) && cptr_el3.tfp) { + return inst.generateTrap(EL3, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else { + return NoFault; + } +} + +Fault +faultFpcrEL3(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + if (cptr_el3.tfp) { + return inst.generateTrap(EL3, + ExceptionClass::TRAPPED_SIMD_FP, 0x1E00000); + } else { + return NoFault; + } +} + +Fault +faultPouEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); + const SCTLR 
sctlr2 = tc->readMiscReg(MISCREG_SCTLR_EL2); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + + const bool el2_enabled = EL2Enabled(tc); + const bool in_host = hcr.e2h && hcr.tge; + if (!(el2_enabled && in_host) && !sctlr.uci) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2); + } else { + return inst.generateTrap(EL1); + } + } else if (el2_enabled && !in_host && hcr.tpu) { + return inst.generateTrap(EL2); + } else if (el2_enabled && !in_host && hcr.tocu) { + return inst.generateTrap(EL2); + } else if (el2_enabled && in_host && !sctlr2.uci) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultPouEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool el2_enabled = EL2Enabled(tc); + if (el2_enabled && hcr.tpu) { + return inst.generateTrap(EL2); + } else if (el2_enabled && hcr.tocu) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultPouIsEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool el2_enabled = EL2Enabled(tc); + if (el2_enabled && hcr.tpu) { + return inst.generateTrap(EL2); + } else if (el2_enabled && hcr.ticab) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCtrEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); + const SCTLR sctlr2 = tc->readMiscReg(MISCREG_SCTLR_EL2); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + + const bool el2_enabled = EL2Enabled(tc); + const bool in_host = hcr.e2h && hcr.tge; + if (!(el2_enabled && in_host) && !sctlr.uct) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2); + } else { + return inst.generateTrap(EL1); + } + } else if (el2_enabled && !in_host && hcr.tid2) { + return inst.generateTrap(EL2); + } else if 
(el2_enabled && in_host && !sctlr2.uct) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultMdccsrEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const DBGDS32 mdscr = tc->readMiscReg(MISCREG_MDSCR_EL1); + const HDCR mdcr_el2 = tc->readMiscReg(MISCREG_MDCR_EL2); + const HDCR mdcr_el3 = tc->readMiscReg(MISCREG_MDCR_EL3); + + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool el2_enabled = EL2Enabled(tc); + if (mdscr.tdcc) { + if (el2_enabled && hcr.tge) { + return inst.generateTrap(EL2); + } else { + return inst.generateTrap(EL1); + } + } else if (el2_enabled && mdcr_el2.tdcc) { + return inst.generateTrap(EL2); + } else if (el2_enabled && (hcr.tge || (mdcr_el2.tde || mdcr_el2.tda))) { + return inst.generateTrap(EL2); + } else if (ArmSystem::haveEL(tc, EL3) && (mdcr_el3.tdcc || mdcr_el3.tda)) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultMdccsrEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HDCR mdcr_el2 = tc->readMiscReg(MISCREG_MDCR_EL2); + const HDCR mdcr_el3 = tc->readMiscReg(MISCREG_MDCR_EL3); + + const bool el2_enabled = EL2Enabled(tc); + if (el2_enabled && mdcr_el2.tdcc) { + return inst.generateTrap(EL2); + } else if (el2_enabled && (mdcr_el2.tde || mdcr_el2.tda)) { + return inst.generateTrap(EL2); + } else if (ArmSystem::haveEL(tc, EL3) && (mdcr_el3.tdcc || mdcr_el3.tda)) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultMdccsrEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HDCR mdcr_el3 = tc->readMiscReg(MISCREG_MDCR_EL3); + if (ArmSystem::haveEL(tc, EL3) && (mdcr_el3.tdcc || mdcr_el3.tda)) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultDebugEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HDCR mdcr_el2 = 
tc->readMiscReg(MISCREG_MDCR_EL2); + const HDCR mdcr_el3 = tc->readMiscReg(MISCREG_MDCR_EL3); + + const bool el2_enabled = EL2Enabled(tc); + if (el2_enabled && (mdcr_el2.tde || mdcr_el2.tda)) { + return inst.generateTrap(EL2); + } else if (ArmSystem::haveEL(tc, EL3) && mdcr_el3.tda) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultDebugEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HDCR mdcr_el3 = tc->readMiscReg(MISCREG_MDCR_EL3); + if (ArmSystem::haveEL(tc, EL3) && mdcr_el3.tda) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultZcrEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CPACR cpacr_el1 = tc->readMiscReg(MISCREG_CPACR_EL1); + const CPTR cptr_el2 = tc->readMiscReg(MISCREG_CPTR_EL2); + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool el2_enabled = EL2Enabled(tc); + if (!(cpacr_el1.zen & 0x1)) { + return inst.generateTrap(EL1, ExceptionClass::TRAPPED_SVE, 0); + } else if (el2_enabled && !hcr.e2h && cptr_el2.tz) { + return inst.generateTrap(EL2, ExceptionClass::TRAPPED_SVE, 0); + } else if (el2_enabled && hcr.e2h && !(cptr_el2.zen & 0x1)) { + return inst.generateTrap(EL2, ExceptionClass::TRAPPED_SVE, 0); + } else if (ArmSystem::haveEL(tc, EL3) && !cptr_el3.ez) { + return inst.generateTrap(EL3, ExceptionClass::TRAPPED_SVE, 0); + } else { + return NoFault; + } +} + +Fault +faultZcrEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CPTR cptr_el2 = tc->readMiscReg(MISCREG_CPTR_EL2); + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (!hcr.e2h && cptr_el2.tz) { + return inst.generateTrap(EL2, ExceptionClass::TRAPPED_SVE, 0); + } else if (hcr.e2h && !(cptr_el2.zen & 0x1)) { + return inst.generateTrap(EL2, 
ExceptionClass::TRAPPED_SVE, 0); + } else if (ArmSystem::haveEL(tc, EL3) && !cptr_el3.ez) { + return inst.generateTrap(EL3, ExceptionClass::TRAPPED_SVE, 0); + } else { + return NoFault; + } +} + +Fault +faultZcrEL3(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + if (!cptr_el3.ez) { + return inst.generateTrap(EL3, ExceptionClass::TRAPPED_SVE, 0); + } else { + return NoFault; + } +} + +Fault +faultGicv3(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + auto gic = static_cast(tc->getSystemPtr())->getGIC(); + if (!gic->supportsVersion(BaseGic::GicVersion::GIC_V3)) { + return inst.undefined(); + } else { + return NoFault; + } +} + +Fault +faultIccSgiEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + if (auto fault = faultGicv3(entry, tc, inst); fault != NoFault) { + return fault; + } + + const Gicv3CPUInterface::ICH_HCR_EL2 ich_hcr = + tc->readMiscReg(MISCREG_ICH_HCR_EL2); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + if (EL2Enabled(tc) && (hcr.fmo || hcr.imo || ich_hcr.TC)) { + return inst.generateTrap(EL2); + } else if (ArmSystem::haveEL(tc, EL3) && scr.irq && scr.fiq) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultIccSgiEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + if (auto fault = faultGicv3(entry, tc, inst); fault != NoFault) { + return fault; + } + + const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + if (ArmSystem::haveEL(tc, EL3) && scr.irq && scr.fiq) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultCpacrEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CPTR cptr_el2 = tc->readMiscReg(MISCREG_CPTR_EL2); + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + if (EL2Enabled(tc) && 
cptr_el2.tcpac) { + return inst.generateTrap(EL2); + } else if (ArmSystem::haveEL(tc, EL3) && cptr_el3.tcpac) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultCpacrEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CPTR cptr_el3 = tc->readMiscReg(MISCREG_CPTR_EL3); + if (ArmSystem::haveEL(tc, EL3) && cptr_el3.tcpac) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultCpacrVheEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); + if (hcr.e2h) { + return faultCpacrEL2(entry, tc, inst); + } else { + return inst.undefined(); + } +} + +#define HCR_TRAP(bitfield) [] (const MiscRegLUTEntry &entry, \ + ThreadContext *tc, const MiscRegOp64 &inst) -> Fault \ +{ \ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); \ + if (EL2Enabled(tc) && hcr.bitfield) { \ + return inst.generateTrap(EL2); \ + } else { \ + return NoFault; \ + } \ +} + +Fault +faultPauthEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + if (EL2Enabled(tc) && !hcr.apk) { + return inst.generateTrap(EL2); + } else if (ArmSystem::haveEL(tc, EL3) && !scr.apk) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultPauthEL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + if (ArmSystem::haveEL(tc, EL3) && !scr.apk) { + return inst.generateTrap(EL3); + } else { + return NoFault; + } +} + +Fault +faultGenericTimerEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool in_host = el2_enabled && hcr.e2h && hcr.tge; + const CNTKCTL cntkctl_el1 
= tc->readMiscReg(MISCREG_CNTKCTL_EL1); + const CNTHCTL_E2H cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (!(in_host) && !cntkctl_el1.el0pcten && !cntkctl_el1.el0vcten) { + if (el2_enabled && hcr.tge) + return inst.generateTrap(EL2); + else + return inst.generateTrap(EL1); + } else if (in_host && !cnthctl_el2.el0pcten && !cnthctl_el2.el0vcten) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntpctEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool in_host = el2_enabled && hcr.e2h && hcr.tge; + const CNTKCTL cntkctl_el1 = tc->readMiscReg(MISCREG_CNTKCTL_EL1); + const RegVal cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (!(in_host) && !cntkctl_el1.el0pcten) { + if (el2_enabled && hcr.tge) + return inst.generateTrap(EL2); + else + return inst.generateTrap(EL1); + } else if (el2_enabled && !hcr.e2h && + !static_cast(cnthctl_el2).el1pcten) { + return inst.generateTrap(EL2); + } else if (el2_enabled && hcr.e2h && !hcr.tge && + !static_cast(cnthctl_el2).el1pcten) { + return inst.generateTrap(EL2); + } else if (in_host && + !static_cast(cnthctl_el2).el0pcten) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntpctEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const RegVal cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (el2_enabled && hcr.e2h && + !static_cast(cnthctl_el2).el1pcten) { + return inst.generateTrap(EL2); + } else if (el2_enabled && !hcr.e2h && + !static_cast(cnthctl_el2).el1pcten) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntvctEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = 
EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool in_host = el2_enabled && hcr.e2h && hcr.tge; + const CNTKCTL cntkctl_el1 = tc->readMiscReg(MISCREG_CNTKCTL_EL1); + const CNTHCTL_E2H cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (!(in_host) && !cntkctl_el1.el0vcten) { + if (el2_enabled && hcr.tge) + return inst.generateTrap(EL2); + else + return inst.generateTrap(EL1); + } else if (in_host && !cnthctl_el2.el0vcten) { + return inst.generateTrap(EL2); + } else if (el2_enabled && !(hcr.e2h && hcr.tge) && cnthctl_el2.el1tvct) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntvctEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CNTHCTL cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (EL2Enabled(tc) && cnthctl_el2.el1tvct) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +//TODO: See faultCntpctEL0 +Fault +faultCntpCtlEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool in_host = el2_enabled && hcr.e2h && hcr.tge; + const CNTKCTL cntkctl_el1 = tc->readMiscReg(MISCREG_CNTKCTL_EL1); + const RegVal cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (!(in_host) && !cntkctl_el1.el0pten) { + if (el2_enabled && hcr.tge) + return inst.generateTrap(EL2); + else + return inst.generateTrap(EL1); + } else if (el2_enabled && !hcr.e2h && + !static_cast(cnthctl_el2).el1pcen) { + return inst.generateTrap(EL2); + } else if (el2_enabled && hcr.e2h && !hcr.tge && + !static_cast(cnthctl_el2).el1pten) { + return inst.generateTrap(EL2); + } else if (in_host && + !static_cast(cnthctl_el2).el0pten) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntpCtlEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = 
EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const RegVal cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (el2_enabled && !hcr.e2h && + !static_cast(cnthctl_el2).el1pcen) { + return inst.generateTrap(EL2); + } else if (el2_enabled && hcr.e2h && + !static_cast(cnthctl_el2).el1pten) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +// TODO: see faultCntvctEL0 +Fault +faultCntvCtlEL0(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const bool el2_enabled = EL2Enabled(tc); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const bool in_host = el2_enabled && hcr.e2h && hcr.tge; + const CNTKCTL cntkctl_el1 = tc->readMiscReg(MISCREG_CNTKCTL_EL1); + const CNTHCTL_E2H cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (!(in_host) && !cntkctl_el1.el0vten) { + if (el2_enabled && hcr.tge) + return inst.generateTrap(EL2); + else + return inst.generateTrap(EL1); + } else if (in_host && !cnthctl_el2.el0vten) { + return inst.generateTrap(EL2); + } else if (el2_enabled && !(hcr.e2h && hcr.tge) && cnthctl_el2.el1tvt) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntvCtlEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const CNTHCTL cnthctl_el2 = tc->readMiscReg(MISCREG_CNTHCTL_EL2); + if (EL2Enabled(tc) && cnthctl_el2.el1tvt) { + return inst.generateTrap(EL2); + } else { + return NoFault; + } +} + +Fault +faultCntpsCtlEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const SCR scr = tc->readMiscReg(MISCREG_SCR_EL3); + if (ArmSystem::haveEL(tc, EL3) && !scr.ns) { + if (scr.eel2) + return inst.undefined(); + else if (!scr.st) + return inst.generateTrap(EL3); + else + return NoFault; + } else { + return inst.undefined(); + } +} + +Fault +faultUnimplemented(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + if (entry.info[MISCREG_WARN_NOT_FAIL]) { + 
return NoFault; + } else { + return inst.undefined(); + } +} + +Fault +faultImpdefUnimplEL1(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + if (EL2Enabled(tc) && hcr.tidcp) { + return inst.generateTrap(EL2); + } else { + return faultUnimplemented(entry, tc, inst); + } +} + } MiscRegIndex @@ -1297,12 +1977,19 @@ decodeAArch64SysReg(unsigned op0, unsigned op1, unsigned op2) { MiscRegNum64 sys_reg(op0, op1, crn, crm, op2); + return decodeAArch64SysReg(sys_reg); +} + +MiscRegIndex +decodeAArch64SysReg(const MiscRegNum64 &sys_reg) +{ auto it = miscRegNumToIdx.find(sys_reg); if (it != miscRegNumToIdx.end()) { return it->second; } else { // Check for a pseudo register before returning MISCREG_UNKNOWN - if ((op0 == 1 || op0 == 3) && (crn == 11 || crn == 15)) { + if ((sys_reg.op0 == 1 || sys_reg.op0 == 3) && + (sys_reg.crn == 11 || sys_reg.crn == 15)) { return MISCREG_IMPDEF_UNIMPL; } else { return MISCREG_UNKNOWN; @@ -1317,10 +2004,68 @@ encodeAArch64SysReg(MiscRegIndex misc_reg) it != idxToMiscRegNum.end()) { return it->second; } else { - panic("Invalid MiscRegIndex: %n\n", misc_reg); + panic("Invalid MiscRegIndex: %d\n", misc_reg); } } +Fault +MiscRegLUTEntry::checkFault(ThreadContext *tc, + const MiscRegOp64 &inst, ExceptionLevel el) +{ + return !inst.miscRead() ? faultWrite[el](*this, tc, inst) : + faultRead[el](*this, tc, inst); +} + +template +Fault +MiscRegLUTEntry::defaultFault(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + if (isSecureBelowEL3(tc) ? 
entry.info[Sec] : entry.info[NonSec]) { + return NoFault; + } else { + return inst.undefined(); + } +} + +static Fault +defaultFaultE2H_EL2(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); + if (hcr.e2h) { + return NoFault; + } else { + return inst.undefined(); + } +} + +static Fault +defaultFaultE2H_EL3(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst) +{ + const HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); + const bool el2_host = EL2Enabled(tc) && hcr.e2h; + if (el2_host) { + return NoFault; + } else { + return inst.undefined(); + } +} + +MiscRegLUTEntryInitializer::chain +MiscRegLUTEntryInitializer::highest(ArmSystem *const sys) const +{ + switch (FullSystem ? sys->highestEL() : EL1) { + case EL0: + case EL1: priv(); break; + case EL2: hyp(); break; + case EL3: mon(); break; + } + return *this; +} + + void ISA::initializeMiscRegMetadata() { @@ -1357,6 +2102,8 @@ ISA::initializeMiscRegMetadata() const bool vhe_implemented = release->has(ArmExtension::FEAT_VHE); const bool sel2_implemented = release->has(ArmExtension::FEAT_SEL2); + + /** * Some registers alias with others, and therefore need to be translated. 
* When two mapping registers are given, they are the 32b lower and @@ -2369,6 +3116,8 @@ ISA::initializeMiscRegMetadata() // AArch64 registers (Op0=2); InitReg(MISCREG_MDCCINT_EL1) + .fault(EL1, faultMdccsrEL1) + .fault(EL2, faultMdccsrEL2) .allPrivileges(); InitReg(MISCREG_OSDTRRX_EL1) .allPrivileges() @@ -2384,199 +3133,329 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_DBGOSECCR); InitReg(MISCREG_DBGBVR0_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR0, MISCREG_DBGBXVR0); InitReg(MISCREG_DBGBVR1_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR1, MISCREG_DBGBXVR1); InitReg(MISCREG_DBGBVR2_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR2, MISCREG_DBGBXVR2); InitReg(MISCREG_DBGBVR3_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR3, MISCREG_DBGBXVR3); InitReg(MISCREG_DBGBVR4_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR4, MISCREG_DBGBXVR4); InitReg(MISCREG_DBGBVR5_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR5, MISCREG_DBGBXVR5); InitReg(MISCREG_DBGBVR6_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR6, MISCREG_DBGBXVR6); InitReg(MISCREG_DBGBVR7_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR7, MISCREG_DBGBXVR7); InitReg(MISCREG_DBGBVR8_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR8, MISCREG_DBGBXVR8); InitReg(MISCREG_DBGBVR9_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) 
.mapsTo(MISCREG_DBGBVR9, MISCREG_DBGBXVR9); InitReg(MISCREG_DBGBVR10_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR10, MISCREG_DBGBXVR10); InitReg(MISCREG_DBGBVR11_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR11, MISCREG_DBGBXVR11); InitReg(MISCREG_DBGBVR12_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR12, MISCREG_DBGBXVR12); InitReg(MISCREG_DBGBVR13_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR13, MISCREG_DBGBXVR13); InitReg(MISCREG_DBGBVR14_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR14, MISCREG_DBGBXVR14); InitReg(MISCREG_DBGBVR15_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBVR15, MISCREG_DBGBXVR15); InitReg(MISCREG_DBGBCR0_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR0); InitReg(MISCREG_DBGBCR1_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR1); InitReg(MISCREG_DBGBCR2_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR2); InitReg(MISCREG_DBGBCR3_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR3); InitReg(MISCREG_DBGBCR4_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR4); InitReg(MISCREG_DBGBCR5_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR5); InitReg(MISCREG_DBGBCR6_EL1) .allPrivileges().exceptUserMode() + 
.fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR6); InitReg(MISCREG_DBGBCR7_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR7); InitReg(MISCREG_DBGBCR8_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR8); InitReg(MISCREG_DBGBCR9_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR9); InitReg(MISCREG_DBGBCR10_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR10); InitReg(MISCREG_DBGBCR11_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR11); InitReg(MISCREG_DBGBCR12_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR12); InitReg(MISCREG_DBGBCR13_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR13); InitReg(MISCREG_DBGBCR14_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR14); InitReg(MISCREG_DBGBCR15_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGBCR15); InitReg(MISCREG_DBGWVR0_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR0); InitReg(MISCREG_DBGWVR1_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR1); InitReg(MISCREG_DBGWVR2_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR2); InitReg(MISCREG_DBGWVR3_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) 
.mapsTo(MISCREG_DBGWVR3); InitReg(MISCREG_DBGWVR4_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR4); InitReg(MISCREG_DBGWVR5_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR5); InitReg(MISCREG_DBGWVR6_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR6); InitReg(MISCREG_DBGWVR7_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR7); InitReg(MISCREG_DBGWVR8_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR8); InitReg(MISCREG_DBGWVR9_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR9); InitReg(MISCREG_DBGWVR10_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR10); InitReg(MISCREG_DBGWVR11_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR11); InitReg(MISCREG_DBGWVR12_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR12); InitReg(MISCREG_DBGWVR13_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR13); InitReg(MISCREG_DBGWVR14_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR14); InitReg(MISCREG_DBGWVR15_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWVR15); InitReg(MISCREG_DBGWCR0_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR0); InitReg(MISCREG_DBGWCR1_EL1) 
.allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR1); InitReg(MISCREG_DBGWCR2_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR2); InitReg(MISCREG_DBGWCR3_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR3); InitReg(MISCREG_DBGWCR4_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR4); InitReg(MISCREG_DBGWCR5_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR5); InitReg(MISCREG_DBGWCR6_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR6); InitReg(MISCREG_DBGWCR7_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR7); InitReg(MISCREG_DBGWCR8_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR8); InitReg(MISCREG_DBGWCR9_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR9); InitReg(MISCREG_DBGWCR10_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR10); InitReg(MISCREG_DBGWCR11_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR11); InitReg(MISCREG_DBGWCR12_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR12); InitReg(MISCREG_DBGWCR13_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR13); InitReg(MISCREG_DBGWCR14_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, 
faultDebugEL2) .mapsTo(MISCREG_DBGWCR14); InitReg(MISCREG_DBGWCR15_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultDebugEL1) + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGWCR15); InitReg(MISCREG_MDCCSR_EL0) .allPrivileges().writes(0) - //monSecureWrite(0).monNonSecureWrite(0) + .faultRead(EL0, faultMdccsrEL0) + .faultRead(EL1, faultMdccsrEL1) + .faultRead(EL2, faultMdccsrEL2) .mapsTo(MISCREG_DBGDSCRint); InitReg(MISCREG_MDDTR_EL0) .allPrivileges(); @@ -2586,6 +3465,7 @@ ISA::initializeMiscRegMetadata() .allPrivileges(); InitReg(MISCREG_DBGVCR32_EL2) .hyp().mon() + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_DBGVCR); InitReg(MISCREG_MDRAR_EL1) .allPrivileges().monSecureWrite(0).monNonSecureWrite(0) @@ -2620,115 +3500,172 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_MPIDR_EL1) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_REVIDR_EL1) + .faultRead(EL1, HCR_TRAP(tid1)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_PFR0_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_PFR0); InitReg(MISCREG_ID_PFR1_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_PFR1); InitReg(MISCREG_ID_DFR0_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_DFR0); InitReg(MISCREG_ID_AFR0_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_AFR0); InitReg(MISCREG_ID_MMFR0_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_MMFR0); InitReg(MISCREG_ID_MMFR1_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_MMFR1); InitReg(MISCREG_ID_MMFR2_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_MMFR2); InitReg(MISCREG_ID_MMFR3_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, 
HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_MMFR3); InitReg(MISCREG_ID_MMFR4_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_MMFR4); InitReg(MISCREG_ID_ISAR0_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR0); InitReg(MISCREG_ID_ISAR1_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR1); InitReg(MISCREG_ID_ISAR2_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR2); InitReg(MISCREG_ID_ISAR3_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR3); InitReg(MISCREG_ID_ISAR4_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR4); InitReg(MISCREG_ID_ISAR5_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR5); InitReg(MISCREG_ID_ISAR6_EL1) .allPrivileges().exceptUserMode().writes(0) + .faultRead(EL1, HCR_TRAP(tid3)) .mapsTo(MISCREG_ID_ISAR6); InitReg(MISCREG_MVFR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_MVFR1_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_MVFR2_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64PFR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64PFR1_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64DFR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64DFR1_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64AFR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) 
.allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64AFR1_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64ISAR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64ISAR1_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64MMFR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64MMFR1_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ID_AA64MMFR2_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_APDAKeyHi_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APDAKeyLo_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APDBKeyHi_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APDBKeyLo_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APGAKeyHi_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APGAKeyLo_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APIAKeyHi_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APIAKeyLo_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APIBKeyHi_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_APIBKeyLo_EL1) + .fault(EL1, faultPauthEL1) + .fault(EL2, faultPauthEL2) .allPrivileges().exceptUserMode(); InitReg(MISCREG_CCSIDR_EL1) + 
.faultRead(EL1, HCR_TRAP(tid2)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_CLIDR_EL1) + .faultRead(EL1, HCR_TRAP(tid2)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_AIDR_EL1) + .faultRead(EL1, HCR_TRAP(tid1)) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_CSSELR_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, HCR_TRAP(tid2)) .mapsTo(MISCREG_CSSELR_NS); InitReg(MISCREG_CTR_EL0) + .faultRead(EL0, faultCtrEL0) + .faultRead(EL1, HCR_TRAP(tid2)) .reads(1); InitReg(MISCREG_DCZID_EL0) .reads(1); @@ -2740,6 +3677,8 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_VMPIDR); InitReg(MISCREG_SCTLR_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .res0( 0x20440 | (EnDB ? 0 : 0x2000) | (IESB ? 0 : 0x200000) | (EnDA ? 0 : 0x8000000) @@ -2750,8 +3689,8 @@ ISA::initializeMiscRegMetadata() | (LSMAOE ? 0 : 0x10000000)) .mapsTo(MISCREG_SCTLR_NS); InitReg(MISCREG_SCTLR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .res0( 0x20440 | (EnDB ? 0 : 0x2000) | (IESB ? 0 : 0x200000) | (EnDA ? 
0 : 0x8000000) @@ -2763,13 +3702,16 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_SCTLR_EL1); InitReg(MISCREG_ACTLR_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, HCR_TRAP(tacr)) .mapsTo(MISCREG_ACTLR_NS); InitReg(MISCREG_CPACR_EL1) .allPrivileges().exceptUserMode() + .fault(EL1, faultCpacrEL1) + .fault(EL2, faultCpacrEL2) .mapsTo(MISCREG_CPACR); InitReg(MISCREG_CPACR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, faultCpacrVheEL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_CPACR_EL1); InitReg(MISCREG_SCTLR_EL2) .hyp().mon() @@ -2788,9 +3730,11 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_HCR, MISCREG_HCR2); InitReg(MISCREG_MDCR_EL2) .hyp().mon() + .fault(EL2, faultDebugEL2) .mapsTo(MISCREG_HDCR); InitReg(MISCREG_CPTR_EL2) .hyp().mon() + .fault(EL2, faultCpacrEL2) .mapsTo(MISCREG_HCPTR); InitReg(MISCREG_HSTR_EL2) .hyp().mon() @@ -2821,24 +3765,30 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_SDCR); InitReg(MISCREG_TTBR0_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_TTBR0_NS); InitReg(MISCREG_TTBR0_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_TTBR0_EL1); InitReg(MISCREG_TTBR1_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_TTBR1_NS); InitReg(MISCREG_TTBR1_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_TTBR1_EL1); InitReg(MISCREG_TCR_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_TTBCR_NS); InitReg(MISCREG_TCR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_TTBCR_NS); InitReg(MISCREG_TTBR0_EL2) .hyp().mon() @@ -2869,34 +3819,46 @@ ISA::initializeMiscRegMetadata() .allPrivileges().exceptUserMode() 
.mapsTo(MISCREG_SPSR_SVC); // NAM C5.2.17 SPSR_EL1 InitReg(MISCREG_SPSR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_SPSR_SVC); InitReg(MISCREG_ELR_EL1) .allPrivileges().exceptUserMode(); InitReg(MISCREG_ELR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_ELR_EL1); InitReg(MISCREG_SP_EL0) - .allPrivileges().exceptUserMode(); + .allPrivileges().exceptUserMode() + .fault(EL1, faultSpEL0) + .fault(EL2, faultSpEL0) + .fault(EL3, faultSpEL0); InitReg(MISCREG_SPSEL) .allPrivileges().exceptUserMode(); InitReg(MISCREG_CURRENTEL) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_PAN) - .allPrivileges().exceptUserMode() - .implemented(release->has(ArmExtension::FEAT_PAN)); + .allPrivileges(release->has(ArmExtension::FEAT_PAN)) + .exceptUserMode(); InitReg(MISCREG_UAO) .allPrivileges().exceptUserMode(); InitReg(MISCREG_NZCV) .allPrivileges(); InitReg(MISCREG_DAIF) - .allPrivileges(); + .allPrivileges() + .fault(EL0, faultDaif); InitReg(MISCREG_FPCR) - .allPrivileges(); + .allPrivileges() + .fault(EL0, faultFpcrEL0) + .fault(EL1, faultFpcrEL1) + .fault(EL2, faultFpcrEL2) + .fault(EL3, faultFpcrEL3); InitReg(MISCREG_FPSR) - .allPrivileges(); + .allPrivileges() + .fault(EL0, faultFpcrEL0) + .fault(EL1, faultFpcrEL1) + .fault(EL2, faultFpcrEL2) + .fault(EL3, faultFpcrEL3); InitReg(MISCREG_DSPSR_EL0) .allPrivileges(); InitReg(MISCREG_DLR_EL0) @@ -2925,23 +3887,29 @@ ISA::initializeMiscRegMetadata() .mon(); InitReg(MISCREG_AFSR0_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_ADFSR_NS); InitReg(MISCREG_AFSR0_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_ADFSR_NS); InitReg(MISCREG_AFSR1_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) 
.mapsTo(MISCREG_AIFSR_NS); InitReg(MISCREG_AFSR1_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_AIFSR_NS); InitReg(MISCREG_ESR_EL1) + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .allPrivileges().exceptUserMode(); InitReg(MISCREG_ESR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_ESR_EL1); InitReg(MISCREG_IFSR32_EL2) .hyp().mon() @@ -2956,7 +3924,9 @@ ISA::initializeMiscRegMetadata() .hyp().mon() .mapsTo(MISCREG_HSR); InitReg(MISCREG_FPEXC32_EL2) - .hyp().mon().mapsTo(MISCREG_FPEXC); + .fault(EL2, faultFpcrEL2) + .fault(EL3, faultFpcrEL3) + .mapsTo(MISCREG_FPEXC); InitReg(MISCREG_AFSR0_EL3) .mon(); InitReg(MISCREG_AFSR1_EL3) @@ -2965,10 +3935,12 @@ ISA::initializeMiscRegMetadata() .mon(); InitReg(MISCREG_FAR_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_DFAR_NS, MISCREG_IFAR_NS); InitReg(MISCREG_FAR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_DFAR_NS, MISCREG_IFAR_NS); InitReg(MISCREG_FAR_EL2) .hyp().mon() @@ -2980,46 +3952,61 @@ ISA::initializeMiscRegMetadata() .mon(); InitReg(MISCREG_IC_IALLUIS) .warnNotFail() + .faultWrite(EL1, faultPouIsEL1) .writes(1).exceptUserMode(); InitReg(MISCREG_PAR_EL1) .allPrivileges().exceptUserMode() .mapsTo(MISCREG_PAR_NS); InitReg(MISCREG_IC_IALLU) .warnNotFail() + .faultWrite(EL1, faultPouEL1) .writes(1).exceptUserMode(); InitReg(MISCREG_DC_IVAC_Xt) - .warnNotFail() + .faultWrite(EL1, HCR_TRAP(tpc)) .writes(1).exceptUserMode(); InitReg(MISCREG_DC_ISW_Xt) .warnNotFail() + .faultWrite(EL1, HCR_TRAP(tsw)) .writes(1).exceptUserMode(); InitReg(MISCREG_AT_S1E1R_Xt) + .faultWrite(EL1, HCR_TRAP(at)) .writes(1).exceptUserMode(); InitReg(MISCREG_AT_S1E1W_Xt) + .faultWrite(EL1, HCR_TRAP(at)) .writes(1).exceptUserMode(); 
InitReg(MISCREG_AT_S1E0R_Xt) + .faultWrite(EL1, HCR_TRAP(at)) .writes(1).exceptUserMode(); InitReg(MISCREG_AT_S1E0W_Xt) + .faultWrite(EL1, HCR_TRAP(at)) .writes(1).exceptUserMode(); InitReg(MISCREG_DC_CSW_Xt) .warnNotFail() + .faultWrite(EL1, HCR_TRAP(tsw)) .writes(1).exceptUserMode(); InitReg(MISCREG_DC_CISW_Xt) .warnNotFail() + .faultWrite(EL1, HCR_TRAP(tsw)) .writes(1).exceptUserMode(); InitReg(MISCREG_DC_ZVA_Xt) - .warnNotFail() - .writes(1).userSecureWrite(0); + .writes(1) + .faultWrite(EL0, faultDczvaEL0) + .faultWrite(EL1, HCR_TRAP(tdz)); InitReg(MISCREG_IC_IVAU_Xt) + .faultWrite(EL0, faultPouEL0) + .faultWrite(EL1, faultPouEL1) .writes(1); InitReg(MISCREG_DC_CVAC_Xt) - .warnNotFail() + .faultWrite(EL0, faultCvacEL0) + .faultWrite(EL1, HCR_TRAP(tpc)) .writes(1); InitReg(MISCREG_DC_CVAU_Xt) - .warnNotFail() + .faultWrite(EL0, faultPouEL0) + .faultWrite(EL1, faultPouEL1) .writes(1); InitReg(MISCREG_DC_CIVAC_Xt) - .warnNotFail() + .faultWrite(EL0, faultCvacEL0) + .faultWrite(EL1, HCR_TRAP(tpc)) .writes(1); InitReg(MISCREG_AT_S1E2R_Xt) .monNonSecureWrite().hypWrite(); @@ -3038,28 +4025,40 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_AT_S1E3W_Xt) .monSecureWrite().monNonSecureWrite(); InitReg(MISCREG_TLBI_VMALLE1IS) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VAE1IS_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_ASIDE1IS_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VAAE1IS_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VALE1IS_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VAALE1IS_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VMALLE1) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VAE1_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); 
InitReg(MISCREG_TLBI_ASIDE1_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VAAE1_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VALE1_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_VAALE1_Xt) + .faultWrite(EL1, HCR_TRAP(ttlb)) .writes(1).exceptUserMode(); InitReg(MISCREG_TLBI_IPAS2E1IS_Xt) .hypWrite().monSecureWrite().monNonSecureWrite(); @@ -3150,17 +4149,21 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_PMOVSSET); InitReg(MISCREG_MAIR_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_PRRR_NS, MISCREG_NMRR_NS); InitReg(MISCREG_MAIR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_PRRR_NS, MISCREG_NMRR_NS); InitReg(MISCREG_AMAIR_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_AMAIR0_NS, MISCREG_AMAIR1_NS); InitReg(MISCREG_AMAIR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_AMAIR0_NS, MISCREG_AMAIR1_NS); InitReg(MISCREG_MAIR_EL2) .hyp().mon() @@ -3180,11 +4183,11 @@ ISA::initializeMiscRegMetadata() .allPrivileges().exceptUserMode() .mapsTo(MISCREG_VBAR_NS); InitReg(MISCREG_VBAR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_VBAR_NS); InitReg(MISCREG_RVBAR_EL1) - .allPrivileges().exceptUserMode().writes(0); + .privRead(FullSystem && system->highestEL() == EL1); InitReg(MISCREG_ISR_EL1) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_VBAR_EL2) @@ -3192,7 +4195,7 @@ ISA::initializeMiscRegMetadata() .res0(0x7ff) .mapsTo(MISCREG_HVBAR); InitReg(MISCREG_RVBAR_EL2) - .mon().hyp().writes(0); + .hypRead(FullSystem && system->highestEL() == EL2); InitReg(MISCREG_VBAR_EL3) .mon(); 
InitReg(MISCREG_RVBAR_EL3) @@ -3201,10 +4204,12 @@ ISA::initializeMiscRegMetadata() .mon(); InitReg(MISCREG_CONTEXTIDR_EL1) .allPrivileges().exceptUserMode() + .faultRead(EL1, HCR_TRAP(trvm)) + .faultWrite(EL1, HCR_TRAP(tvm)) .mapsTo(MISCREG_CONTEXTIDR_NS); InitReg(MISCREG_CONTEXTIDR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_CONTEXTIDR_NS); InitReg(MISCREG_TPIDR_EL1) .allPrivileges().exceptUserMode() @@ -3223,65 +4228,82 @@ ISA::initializeMiscRegMetadata() // BEGIN Generic Timer (AArch64) InitReg(MISCREG_CNTFRQ_EL0) .reads(1) + .faultRead(EL0, faultGenericTimerEL0) .highest(system) .privSecureWrite(aarch32EL3) .mapsTo(MISCREG_CNTFRQ); InitReg(MISCREG_CNTPCT_EL0) .unverifiable() + .faultRead(EL0, faultCntpctEL0) + .faultRead(EL1, faultCntpctEL1) .reads(1) .mapsTo(MISCREG_CNTPCT); InitReg(MISCREG_CNTVCT_EL0) .unverifiable() + .faultRead(EL0, faultCntvctEL0) + .faultRead(EL1, faultCntvctEL1) .reads(1) .mapsTo(MISCREG_CNTVCT); InitReg(MISCREG_CNTP_CTL_EL0) .allPrivileges() + .fault(EL0, faultCntpCtlEL0) + .fault(EL1, faultCntpCtlEL1) .res0(0xfffffffffffffff8) .mapsTo(MISCREG_CNTP_CTL_NS); InitReg(MISCREG_CNTP_CVAL_EL0) .allPrivileges() + .fault(EL0, faultCntpCtlEL0) + .fault(EL1, faultCntpCtlEL1) .mapsTo(MISCREG_CNTP_CVAL_NS); InitReg(MISCREG_CNTP_TVAL_EL0) .allPrivileges() + .fault(EL0, faultCntpCtlEL0) + .fault(EL1, faultCntpCtlEL1) .res0(0xffffffff00000000) .mapsTo(MISCREG_CNTP_TVAL_NS); InitReg(MISCREG_CNTV_CTL_EL0) .allPrivileges() + .fault(EL0, faultCntvCtlEL0) + .fault(EL1, faultCntvCtlEL1) .res0(0xfffffffffffffff8) .mapsTo(MISCREG_CNTV_CTL); InitReg(MISCREG_CNTV_CVAL_EL0) .allPrivileges() + .fault(EL0, faultCntvCtlEL0) + .fault(EL1, faultCntvCtlEL1) .mapsTo(MISCREG_CNTV_CVAL); InitReg(MISCREG_CNTV_TVAL_EL0) .allPrivileges() + .fault(EL0, faultCntvCtlEL0) + .fault(EL1, faultCntvCtlEL1) .res0(0xffffffff00000000) .mapsTo(MISCREG_CNTV_TVAL); InitReg(MISCREG_CNTP_CTL_EL02) - .monE2H() - 
.hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .res0(0xfffffffffffffff8) .mapsTo(MISCREG_CNTP_CTL_NS); InitReg(MISCREG_CNTP_CVAL_EL02) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_CNTP_CVAL_NS); InitReg(MISCREG_CNTP_TVAL_EL02) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .res0(0xffffffff00000000) .mapsTo(MISCREG_CNTP_TVAL_NS); InitReg(MISCREG_CNTV_CTL_EL02) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .res0(0xfffffffffffffff8) .mapsTo(MISCREG_CNTV_CTL); InitReg(MISCREG_CNTV_CVAL_EL02) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_CNTV_CVAL); InitReg(MISCREG_CNTV_TVAL_EL02) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .res0(0xffffffff00000000) .mapsTo(MISCREG_CNTV_TVAL); InitReg(MISCREG_CNTKCTL_EL1) @@ -3290,20 +4312,23 @@ ISA::initializeMiscRegMetadata() .res0(0xfffffffffffdfc00) .mapsTo(MISCREG_CNTKCTL); InitReg(MISCREG_CNTKCTL_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .res0(0xfffffffffffdfc00) .mapsTo(MISCREG_CNTKCTL); InitReg(MISCREG_CNTPS_CTL_EL1) .mon() .privSecure() + .fault(EL1, faultCntpsCtlEL1) .res0(0xfffffffffffffff8); InitReg(MISCREG_CNTPS_CVAL_EL1) .mon() - .privSecure(); + .privSecure() + .fault(EL1, faultCntpsCtlEL1); InitReg(MISCREG_CNTPS_TVAL_EL1) .mon() .privSecure() + .fault(EL1, faultCntpsCtlEL1) .res0(0xffffffff00000000); InitReg(MISCREG_CNTHCTL_EL2) .mon() @@ -3325,47 +4350,38 @@ ISA::initializeMiscRegMetadata() .res0(0xffffffff00000000) .mapsTo(MISCREG_CNTHP_TVAL); InitReg(MISCREG_CNTHPS_CTL_EL2) - .mon() - .hypSecure() - .res0(0xfffffffffffffff8) - .implemented(sel2_implemented); + .mon(sel2_implemented) + .hypSecure(sel2_implemented) + .res0(0xfffffffffffffff8); 
InitReg(MISCREG_CNTHPS_CVAL_EL2) - .mon() - .hypSecure() - .implemented(sel2_implemented); + .mon(sel2_implemented) + .hypSecure(sel2_implemented); InitReg(MISCREG_CNTHPS_TVAL_EL2) - .mon() - .hypSecure() - .res0(0xffffffff00000000) - .implemented(sel2_implemented); + .mon(sel2_implemented) + .hypSecure(sel2_implemented) + .res0(0xffffffff00000000); InitReg(MISCREG_CNTHV_CTL_EL2) - .mon() + .mon(vhe_implemented) .hyp() - .res0(0xfffffffffffffff8) - .implemented(vhe_implemented); + .res0(0xfffffffffffffff8); InitReg(MISCREG_CNTHV_CVAL_EL2) - .mon() - .hyp() - .implemented(vhe_implemented); + .mon(vhe_implemented) + .hyp(vhe_implemented); InitReg(MISCREG_CNTHV_TVAL_EL2) - .mon() - .hyp() - .res0(0xffffffff00000000) - .implemented(vhe_implemented); + .mon(vhe_implemented) + .hyp(vhe_implemented) + .res0(0xffffffff00000000); InitReg(MISCREG_CNTHVS_CTL_EL2) - .mon() - .hypSecure() - .res0(0xfffffffffffffff8) - .implemented(vhe_implemented && sel2_implemented); + .mon(vhe_implemented && sel2_implemented) + .hypSecure(vhe_implemented && sel2_implemented) + .res0(0xfffffffffffffff8); InitReg(MISCREG_CNTHVS_CVAL_EL2) - .mon() - .hypSecure() - .implemented(vhe_implemented && sel2_implemented); + .mon(vhe_implemented && sel2_implemented) + .hypSecure(vhe_implemented && sel2_implemented); InitReg(MISCREG_CNTHVS_TVAL_EL2) - .mon() - .hypSecure() - .res0(0xffffffff00000000) - .implemented(vhe_implemented && sel2_implemented); + .mon(vhe_implemented && sel2_implemented) + .hypSecure(vhe_implemented && sel2_implemented) + .res0(0xffffffff00000000); // ENDIF Armv8.1-VHE InitReg(MISCREG_CNTVOFF_EL2) .mon() @@ -3435,9 +4451,8 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_CPUMERRSR_EL1) .allPrivileges().exceptUserMode(); InitReg(MISCREG_L2MERRSR_EL1) - .unimplemented() .warnNotFail() - .allPrivileges().exceptUserMode(); + .fault(faultUnimplemented); InitReg(MISCREG_CBAR_EL1) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_CONTEXTIDR_EL2) @@ -3526,12 +4541,18 @@ 
ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_ICC_RPR); InitReg(MISCREG_ICC_SGI1R_EL1) .allPrivileges().exceptUserMode().reads(0) + .faultWrite(EL1, faultIccSgiEL1) + .faultWrite(EL2, faultIccSgiEL2) .mapsTo(MISCREG_ICC_SGI1R); InitReg(MISCREG_ICC_ASGI1R_EL1) .allPrivileges().exceptUserMode().reads(0) + .faultWrite(EL1, faultIccSgiEL1) + .faultWrite(EL2, faultIccSgiEL2) .mapsTo(MISCREG_ICC_ASGI1R); InitReg(MISCREG_ICC_SGI0R_EL1) .allPrivileges().exceptUserMode().reads(0) + .faultWrite(EL1, faultIccSgiEL1) + .faultWrite(EL2, faultIccSgiEL2) .mapsTo(MISCREG_ICC_SGI0R); InitReg(MISCREG_ICC_IAR1_EL1) .allPrivileges().exceptUserMode().writes(0) @@ -3603,13 +4624,13 @@ ISA::initializeMiscRegMetadata() .hyp().mon() .mapsTo(MISCREG_ICC_HSRE); InitReg(MISCREG_ICC_CTLR_EL3) - .allPrivileges().exceptUserMode() + .mon() .mapsTo(MISCREG_ICC_MCTLR); InitReg(MISCREG_ICC_SRE_EL3) - .allPrivileges().exceptUserMode() + .mon() .mapsTo(MISCREG_ICC_MSRE); InitReg(MISCREG_ICC_IGRPEN1_EL3) - .allPrivileges().exceptUserMode() + .mon() .mapsTo(MISCREG_ICC_MGRPEN1); InitReg(MISCREG_ICH_AP0R0_EL2) @@ -3617,30 +4638,24 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_ICH_AP0R0); InitReg(MISCREG_ICH_AP0R1_EL2) .hyp().mon() - .unimplemented() .mapsTo(MISCREG_ICH_AP0R1); InitReg(MISCREG_ICH_AP0R2_EL2) .hyp().mon() - .unimplemented() .mapsTo(MISCREG_ICH_AP0R2); InitReg(MISCREG_ICH_AP0R3_EL2) .hyp().mon() - .unimplemented() .mapsTo(MISCREG_ICH_AP0R3); InitReg(MISCREG_ICH_AP1R0_EL2) .hyp().mon() .mapsTo(MISCREG_ICH_AP1R0); InitReg(MISCREG_ICH_AP1R1_EL2) .hyp().mon() - .unimplemented() .mapsTo(MISCREG_ICH_AP1R1); InitReg(MISCREG_ICH_AP1R2_EL2) .hyp().mon() - .unimplemented() .mapsTo(MISCREG_ICH_AP1R2); InitReg(MISCREG_ICH_AP1R3_EL2) .hyp().mon() - .unimplemented() .mapsTo(MISCREG_ICH_AP1R3); InitReg(MISCREG_ICH_HCR_EL2) .hyp().mon() @@ -3662,52 +4677,52 @@ ISA::initializeMiscRegMetadata() .mapsTo(MISCREG_ICH_VMCR); InitReg(MISCREG_ICH_LR0_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); 
+ .mapsTo(MISCREG_ICH_LR0, MISCREG_ICH_LRC0); InitReg(MISCREG_ICH_LR1_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR1, MISCREG_ICH_LRC1); InitReg(MISCREG_ICH_LR2_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR2, MISCREG_ICH_LRC2); InitReg(MISCREG_ICH_LR3_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR3, MISCREG_ICH_LRC3); InitReg(MISCREG_ICH_LR4_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR4, MISCREG_ICH_LRC4); InitReg(MISCREG_ICH_LR5_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR5, MISCREG_ICH_LRC5); InitReg(MISCREG_ICH_LR6_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR6, MISCREG_ICH_LRC6); InitReg(MISCREG_ICH_LR7_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR7, MISCREG_ICH_LRC7); InitReg(MISCREG_ICH_LR8_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR8, MISCREG_ICH_LRC8); InitReg(MISCREG_ICH_LR9_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR9, MISCREG_ICH_LRC9); InitReg(MISCREG_ICH_LR10_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR10, MISCREG_ICH_LRC10); InitReg(MISCREG_ICH_LR11_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR11, MISCREG_ICH_LRC11); InitReg(MISCREG_ICH_LR12_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR12, MISCREG_ICH_LRC12); InitReg(MISCREG_ICH_LR13_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR13, MISCREG_ICH_LRC13); InitReg(MISCREG_ICH_LR14_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR14, MISCREG_ICH_LRC14); InitReg(MISCREG_ICH_LR15_EL2) .hyp().mon() - .allPrivileges().exceptUserMode(); + .mapsTo(MISCREG_ICH_LR15, MISCREG_ICH_LRC15); // GICv3 AArch32 InitReg(MISCREG_ICC_AP0R0) @@ -3769,7 
+4784,7 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_ICC_HPPIR1) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ICC_HSRE) - .allPrivileges().exceptUserMode(); + .hyp().mon(); InitReg(MISCREG_ICC_IAR0) .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ICC_IAR1) @@ -3783,11 +4798,11 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_ICC_IGRPEN1_S) .allPrivileges().exceptUserMode(); InitReg(MISCREG_ICC_MCTLR) - .allPrivileges().exceptUserMode(); + .mon(); InitReg(MISCREG_ICC_MGRPEN1) - .allPrivileges().exceptUserMode(); + .mon(); InitReg(MISCREG_ICC_MSRE) - .allPrivileges().exceptUserMode(); + .mon(); InitReg(MISCREG_ICC_PMR) .allPrivileges().exceptUserMode(); InitReg(MISCREG_ICC_RPR) @@ -3864,66 +4879,57 @@ ISA::initializeMiscRegMetadata() InitReg(MISCREG_ICH_LR15) .hyp().mon(); InitReg(MISCREG_ICH_LRC0) - .mapsTo(MISCREG_ICH_LR0) .hyp().mon(); InitReg(MISCREG_ICH_LRC1) - .mapsTo(MISCREG_ICH_LR1) .hyp().mon(); InitReg(MISCREG_ICH_LRC2) - .mapsTo(MISCREG_ICH_LR2) .hyp().mon(); InitReg(MISCREG_ICH_LRC3) - .mapsTo(MISCREG_ICH_LR3) .hyp().mon(); InitReg(MISCREG_ICH_LRC4) - .mapsTo(MISCREG_ICH_LR4) .hyp().mon(); InitReg(MISCREG_ICH_LRC5) - .mapsTo(MISCREG_ICH_LR5) .hyp().mon(); InitReg(MISCREG_ICH_LRC6) - .mapsTo(MISCREG_ICH_LR6) .hyp().mon(); InitReg(MISCREG_ICH_LRC7) - .mapsTo(MISCREG_ICH_LR7) .hyp().mon(); InitReg(MISCREG_ICH_LRC8) - .mapsTo(MISCREG_ICH_LR8) .hyp().mon(); InitReg(MISCREG_ICH_LRC9) - .mapsTo(MISCREG_ICH_LR9) .hyp().mon(); InitReg(MISCREG_ICH_LRC10) - .mapsTo(MISCREG_ICH_LR10) .hyp().mon(); InitReg(MISCREG_ICH_LRC11) - .mapsTo(MISCREG_ICH_LR11) .hyp().mon(); InitReg(MISCREG_ICH_LRC12) - .mapsTo(MISCREG_ICH_LR12) .hyp().mon(); InitReg(MISCREG_ICH_LRC13) - .mapsTo(MISCREG_ICH_LR13) .hyp().mon(); InitReg(MISCREG_ICH_LRC14) - .mapsTo(MISCREG_ICH_LR14) .hyp().mon(); InitReg(MISCREG_ICH_LRC15) - .mapsTo(MISCREG_ICH_LR15) .hyp().mon(); // SVE InitReg(MISCREG_ID_AA64ZFR0_EL1) + .faultRead(EL1, HCR_TRAP(tid3)) 
.allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_ZCR_EL3) + .fault(EL3, faultZcrEL3) .mon(); InitReg(MISCREG_ZCR_EL2) + .fault(EL2, faultZcrEL2) + .fault(EL3, faultZcrEL3) .hyp().mon(); InitReg(MISCREG_ZCR_EL12) - .monE2H() - .hypE2H() + .fault(EL2, defaultFaultE2H_EL2) + .fault(EL3, defaultFaultE2H_EL3) .mapsTo(MISCREG_ZCR_EL1); InitReg(MISCREG_ZCR_EL1) + .fault(EL1, faultZcrEL1) + .fault(EL2, faultZcrEL2) + .fault(EL3, faultZcrEL3) .allPrivileges().exceptUserMode(); // Dummy registers @@ -3933,43 +4939,45 @@ ISA::initializeMiscRegMetadata() .allPrivileges().exceptUserMode().writes(0); InitReg(MISCREG_UNKNOWN); InitReg(MISCREG_IMPDEF_UNIMPL) - .unimplemented() + .fault(EL1, faultImpdefUnimplEL1) + .fault(EL2, faultUnimplemented) + .fault(EL3, faultUnimplemented) .warnNotFail(impdefAsNop); // RAS extension (unimplemented) InitReg(MISCREG_ERRIDR_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERRSELR_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERXFR_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERXCTLR_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERXSTATUS_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERXADDR_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERXMISC0_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_ERXMISC1_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_DISR_EL1) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_VSESR_EL2) - .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); InitReg(MISCREG_VDISR_EL2) 
- .unimplemented() - .warnNotFail(); + .warnNotFail() + .fault(faultUnimplemented); // Register mappings for some unimplemented registers: // ESR_EL1 -> DFSR diff --git a/src/arch/arm/regs/misc.hh b/src/arch/arm/regs/misc.hh index 9b517a7277..0b61eec2d6 100644 --- a/src/arch/arm/regs/misc.hh +++ b/src/arch/arm/regs/misc.hh @@ -45,14 +45,18 @@ #include #include "arch/arm/regs/misc_types.hh" +#include "arch/arm/types.hh" #include "base/compiler.hh" +#include "cpu/reg_class.hh" +#include "debug/MiscRegs.hh" #include "dev/arm/generic_timer_miscregs_types.hh" namespace gem5 { +class ArmSystem; class ThreadContext; - +class MiscRegOp64; namespace ArmISA { @@ -1132,25 +1136,444 @@ namespace ArmISA MISCREG_HYP_NS_WR, MISCREG_HYP_S_RD, MISCREG_HYP_S_WR, - // Hypervisor mode, HCR_EL2.E2H == 1 - MISCREG_HYP_E2H_NS_RD, - MISCREG_HYP_E2H_NS_WR, - MISCREG_HYP_E2H_S_RD, - MISCREG_HYP_E2H_S_WR, // Monitor mode, SCR.NS == 0 MISCREG_MON_NS0_RD, MISCREG_MON_NS0_WR, // Monitor mode, SCR.NS == 1 MISCREG_MON_NS1_RD, MISCREG_MON_NS1_WR, - // Monitor mode, HCR_EL2.E2H == 1 - MISCREG_MON_E2H_RD, - MISCREG_MON_E2H_WR, NUM_MISCREG_INFOS }; - extern std::bitset miscRegInfo[NUM_MISCREGS]; + /** MiscReg metadata **/ + struct MiscRegLUTEntry + { + uint32_t lower; // Lower half mapped to this register + uint32_t upper; // Upper half mapped to this register + uint64_t _reset; // value taken on reset (i.e. 
initialization) + uint64_t _res0; // reserved + uint64_t _res1; // reserved + uint64_t _raz; // read as zero (fixed at 0) + uint64_t _rao; // read as one (fixed at 1) + std::bitset info; + + using FaultCB = std::function< + Fault(const MiscRegLUTEntry &entry, ThreadContext *tc, + const MiscRegOp64 &inst) + >; + + std::array faultRead; + std::array faultWrite; + + Fault checkFault(ThreadContext *tc, const MiscRegOp64 &inst, + ExceptionLevel el); + + protected: + template + static Fault defaultFault(const MiscRegLUTEntry &entry, + ThreadContext *tc, const MiscRegOp64 &inst); + + public: + MiscRegLUTEntry() : + lower(0), upper(0), + _reset(0), _res0(0), _res1(0), _raz(0), _rao(0), info(0), + faultRead({defaultFault, + defaultFault, + defaultFault, + defaultFault}), + faultWrite({defaultFault, + defaultFault, + defaultFault, + defaultFault}) + {} + uint64_t reset() const { return _reset; } + uint64_t res0() const { return _res0; } + uint64_t res1() const { return _res1; } + uint64_t raz() const { return _raz; } + uint64_t rao() const { return _rao; } + // raz/rao implies writes ignored + uint64_t wi() const { return _raz | _rao; } + }; + + /** Metadata table accessible via the value of the register */ + class MiscRegLUTEntryInitializer + { + struct MiscRegLUTEntry &entry; + typedef const MiscRegLUTEntryInitializer& chain; + public: + chain + mapsTo(uint32_t l, uint32_t u = 0) const + { + entry.lower = l; + entry.upper = u; + return *this; + } + chain + res0(uint64_t mask) const + { + entry._res0 = mask; + return *this; + } + chain + res1(uint64_t mask) const + { + entry._res1 = mask; + return *this; + } + chain + raz(uint64_t mask) const + { + entry._raz = mask; + return *this; + } + chain + rao(uint64_t mask) const + { + entry._rao = mask; + return *this; + } + chain + implemented(bool v = true) const + { + entry.info[MISCREG_IMPLEMENTED] = v; + return *this; + } + chain + unimplemented() const + { + return implemented(false); + } + chain + unverifiable(bool v = true) 
const + { + entry.info[MISCREG_UNVERIFIABLE] = v; + return *this; + } + chain + warnNotFail(bool v = true) const + { + entry.info[MISCREG_WARN_NOT_FAIL] = v; + return *this; + } + chain + mutex(bool v = true) const + { + entry.info[MISCREG_MUTEX] = v; + return *this; + } + chain + banked(bool v = true) const + { + entry.info[MISCREG_BANKED] = v; + return *this; + } + chain + banked64(bool v = true) const + { + entry.info[MISCREG_BANKED64] = v; + return *this; + } + chain + bankedChild(bool v = true) const + { + entry.info[MISCREG_BANKED_CHILD] = v; + return *this; + } + chain + userNonSecureRead(bool v = true) const + { + entry.info[MISCREG_USR_NS_RD] = v; + return *this; + } + chain + userNonSecureWrite(bool v = true) const + { + entry.info[MISCREG_USR_NS_WR] = v; + return *this; + } + chain + userSecureRead(bool v = true) const + { + entry.info[MISCREG_USR_S_RD] = v; + return *this; + } + chain + userSecureWrite(bool v = true) const + { + entry.info[MISCREG_USR_S_WR] = v; + return *this; + } + chain + user(bool v = true) const + { + userNonSecureRead(v); + userNonSecureWrite(v); + userSecureRead(v); + userSecureWrite(v); + return *this; + } + chain + privNonSecureRead(bool v = true) const + { + entry.info[MISCREG_PRI_NS_RD] = v; + return *this; + } + chain + privNonSecureWrite(bool v = true) const + { + entry.info[MISCREG_PRI_NS_WR] = v; + return *this; + } + chain + privNonSecure(bool v = true) const + { + privNonSecureRead(v); + privNonSecureWrite(v); + return *this; + } + chain + privSecureRead(bool v = true) const + { + entry.info[MISCREG_PRI_S_RD] = v; + return *this; + } + chain + privSecureWrite(bool v = true) const + { + entry.info[MISCREG_PRI_S_WR] = v; + return *this; + } + chain + privSecure(bool v = true) const + { + privSecureRead(v); + privSecureWrite(v); + return *this; + } + chain + priv(bool v = true) const + { + privSecure(v); + privNonSecure(v); + return *this; + } + chain + privRead(bool v = true) const + { + privSecureRead(v); + 
privNonSecureRead(v); + return *this; + } + chain + hypSecureRead(bool v = true) const + { + entry.info[MISCREG_HYP_S_RD] = v; + return *this; + } + chain + hypNonSecureRead(bool v = true) const + { + entry.info[MISCREG_HYP_NS_RD] = v; + return *this; + } + chain + hypRead(bool v = true) const + { + hypSecureRead(v); + hypNonSecureRead(v); + return *this; + } + chain + hypSecureWrite(bool v = true) const + { + entry.info[MISCREG_HYP_S_WR] = v; + return *this; + } + chain + hypNonSecureWrite(bool v = true) const + { + entry.info[MISCREG_HYP_NS_WR] = v; + return *this; + } + chain + hypWrite(bool v = true) const + { + hypSecureWrite(v); + hypNonSecureWrite(v); + return *this; + } + chain + hypSecure(bool v = true) const + { + hypSecureRead(v); + hypSecureWrite(v); + return *this; + } + chain + hyp(bool v = true) const + { + hypRead(v); + hypWrite(v); + return *this; + } + chain + monSecureRead(bool v = true) const + { + entry.info[MISCREG_MON_NS0_RD] = v; + return *this; + } + chain + monSecureWrite(bool v = true) const + { + entry.info[MISCREG_MON_NS0_WR] = v; + return *this; + } + chain + monNonSecureRead(bool v = true) const + { + entry.info[MISCREG_MON_NS1_RD] = v; + return *this; + } + chain + monNonSecureWrite(bool v = true) const + { + entry.info[MISCREG_MON_NS1_WR] = v; + return *this; + } + chain + mon(bool v = true) const + { + monSecureRead(v); + monSecureWrite(v); + monNonSecureRead(v); + monNonSecureWrite(v); + return *this; + } + chain + monSecure(bool v = true) const + { + monSecureRead(v); + monSecureWrite(v); + return *this; + } + chain + monNonSecure(bool v = true) const + { + monNonSecureRead(v); + monNonSecureWrite(v); + return *this; + } + chain + allPrivileges(bool v = true) const + { + userNonSecureRead(v); + userNonSecureWrite(v); + userSecureRead(v); + userSecureWrite(v); + privNonSecureRead(v); + privNonSecureWrite(v); + privSecureRead(v); + privSecureWrite(v); + hypRead(v); + hypWrite(v); + monSecureRead(v); + monSecureWrite(v); + 
monNonSecureRead(v); + monNonSecureWrite(v); + return *this; + } + chain + nonSecure(bool v = true) const + { + userNonSecureRead(v); + userNonSecureWrite(v); + privNonSecureRead(v); + privNonSecureWrite(v); + hypRead(v); + hypWrite(v); + monNonSecureRead(v); + monNonSecureWrite(v); + return *this; + } + chain + secure(bool v = true) const + { + userSecureRead(v); + userSecureWrite(v); + privSecureRead(v); + privSecureWrite(v); + monSecureRead(v); + monSecureWrite(v); + return *this; + } + chain + reads(bool v) const + { + userNonSecureRead(v); + userSecureRead(v); + privNonSecureRead(v); + privSecureRead(v); + hypRead(v); + monSecureRead(v); + monNonSecureRead(v); + return *this; + } + chain + writes(bool v) const + { + userNonSecureWrite(v); + userSecureWrite(v); + privNonSecureWrite(v); + privSecureWrite(v); + hypWrite(v); + monSecureWrite(v); + monNonSecureWrite(v); + return *this; + } + chain + exceptUserMode() const + { + user(0); + return *this; + } + chain highest(ArmSystem *const sys) const; + + chain + faultRead(ExceptionLevel el, MiscRegLUTEntry::FaultCB cb) const + { + entry.faultRead[el] = cb; + return *this; + } + + chain + faultWrite(ExceptionLevel el, MiscRegLUTEntry::FaultCB cb) const + { + entry.faultWrite[el] = cb; + return *this; + } + + chain + fault(ExceptionLevel el, MiscRegLUTEntry::FaultCB cb) const + { + return faultRead(el, cb).faultWrite(el, cb); + } + + chain + fault(MiscRegLUTEntry::FaultCB cb) const + { + return fault(EL0, cb).fault(EL1, cb).fault(EL2, cb).fault(EL3, cb); + } + + MiscRegLUTEntryInitializer(struct MiscRegLUTEntry &e) + : entry(e) + { + // force unimplemented registers to be thusly declared + implemented(1); + } + }; + + extern std::vector lookUpMiscReg; struct MiscRegNum32 { @@ -1257,6 +1680,7 @@ namespace ArmISA MiscRegIndex decodeAArch64SysReg(unsigned op0, unsigned op1, unsigned crn, unsigned crm, unsigned op2); + MiscRegIndex decodeAArch64SysReg(const MiscRegNum64 &misc_reg); MiscRegNum64 
encodeAArch64SysReg(MiscRegIndex misc_reg); // Whether a particular AArch64 system register is -always- read only. @@ -2287,6 +2711,23 @@ namespace ArmISA static_assert(sizeof(miscRegName) / sizeof(*miscRegName) == NUM_MISCREGS, "The miscRegName array and NUM_MISCREGS are inconsistent."); + class MiscRegClassOps : public RegClassOps + { + public: + std::string + regName(const RegId &id) const override + { + return miscRegName[id.index()]; + } + }; + + static inline MiscRegClassOps miscRegClassOps; + + inline constexpr RegClass miscRegClass = + RegClass(MiscRegClass, MiscRegClassName, NUM_MISCREGS, + debug::MiscRegs). + ops(miscRegClassOps); + // This mask selects bits of the CPSR that actually go in the CondCodes // integer register to allow renaming. static const uint32_t CondCodesMask = 0xF00F0000; @@ -2351,13 +2792,9 @@ namespace ArmISA // Generic Timer system registers bool AArch32isUndefinedGenericTimer(MiscRegIndex reg, ThreadContext *tc); - // Checks read access permissions to AArch64 system registers - bool canReadAArch64SysReg(MiscRegIndex reg, HCR hcr, SCR scr, CPSR cpsr, - ThreadContext *tc); - - // Checks write access permissions to AArch64 system registers - bool canWriteAArch64SysReg(MiscRegIndex reg, HCR hcr, SCR scr, CPSR cpsr, - ThreadContext *tc); + // Checks access permissions to AArch64 system registers + Fault checkFaultAccessAArch64SysReg(MiscRegIndex reg, CPSR cpsr, + ThreadContext *tc, const MiscRegOp64 &inst); // Uses just the scr.ns bit to pre flatten the misc regs. 
This is useful // for MCR/MRC instructions diff --git a/src/arch/arm/regs/misc_types.hh b/src/arch/arm/regs/misc_types.hh index d44651ca75..05bf19bf5a 100644 --- a/src/arch/arm/regs/misc_types.hh +++ b/src/arch/arm/regs/misc_types.hh @@ -182,6 +182,7 @@ namespace ArmISA EndBitUnion(AA64PFR0) BitUnion32(HDCR) + Bitfield<27> tdcc; Bitfield<11> tdra; Bitfield<10> tdosa; Bitfield<9> tda; @@ -235,6 +236,11 @@ namespace ArmISA EndBitUnion(HSTR) BitUnion64(HCR) + Bitfield<55> ttlbos; + Bitfield<54> ttlbis; + Bitfield<52> tocu; + Bitfield<50> ticab; + Bitfield<49> tid4; Bitfield<47> fien; Bitfield<46> fwb; Bitfield<45> nv2; diff --git a/src/arch/arm/regs/vec.hh b/src/arch/arm/regs/vec.hh index 6a7d6e4703..00ab87fbcb 100644 --- a/src/arch/arm/regs/vec.hh +++ b/src/arch/arm/regs/vec.hh @@ -44,6 +44,9 @@ #include "arch/arm/types.hh" #include "arch/generic/vec_pred_reg.hh" #include "arch/generic/vec_reg.hh" +#include "cpu/reg_class.hh" +#include "debug/VecPredRegs.hh" +#include "debug/VecRegs.hh" namespace gem5 { @@ -90,6 +93,25 @@ const int VECREG_UREG0 = 32; const int PREDREG_FFR = 16; const int PREDREG_UREG0 = 17; +static inline VecElemRegClassOps + vecRegElemClassOps(NumVecElemPerVecReg); +static inline TypedRegClassOps vecRegClassOps; +static inline TypedRegClassOps vecPredRegClassOps; + +inline constexpr RegClass vecRegClass = + RegClass(VecRegClass, VecRegClassName, NumVecRegs, debug::VecRegs). + ops(vecRegClassOps). + regType(); +inline constexpr RegClass vecElemClass = + RegClass(VecElemClass, VecElemClassName, NumVecRegs * NumVecElemPerVecReg, + debug::VecRegs). + ops(vecRegElemClassOps); +inline constexpr RegClass vecPredRegClass = + RegClass(VecPredRegClass, VecPredRegClassName, NumVecPredRegs, + debug::VecPredRegs). + ops(vecPredRegClassOps). 
+ regType(); + } // namespace ArmISA } // namespace gem5 diff --git a/src/arch/arm/remote_gdb.cc b/src/arch/arm/remote_gdb.cc index 7b91fab786..c357f02e08 100644 --- a/src/arch/arm/remote_gdb.cc +++ b/src/arch/arm/remote_gdb.cc @@ -243,7 +243,7 @@ RemoteGDB::AArch64GdbRegCache::getRegs(ThreadContext *context) size_t base = 0; for (int i = 0; i < NumVecV8ArchRegs; i++) { ArmISA::VecRegContainer vc; - context->getReg(RegId(VecRegClass, i), &vc); + context->getReg(vecRegClass[i], &vc); auto v = vc.as(); for (size_t j = 0; j < NumVecElemPerNeonVecReg; j++) { r.v[base] = v[j]; @@ -273,7 +273,7 @@ RemoteGDB::AArch64GdbRegCache::setRegs(ThreadContext *context) const size_t base = 0; for (int i = 0; i < NumVecV8ArchRegs; i++) { auto *vc = static_cast( - context->getWritableReg(RegId(VecRegClass, i))); + context->getWritableReg(vecRegClass[i])); auto v = vc->as(); for (size_t j = 0; j < NumVecElemPerNeonVecReg; j++) { v[j] = r.v[base]; diff --git a/src/arch/arm/se_workload.hh b/src/arch/arm/se_workload.hh index deb5d3b906..f0bf0eb7d9 100644 --- a/src/arch/arm/se_workload.hh +++ b/src/arch/arm/se_workload.hh @@ -42,7 +42,7 @@ namespace ArmISA class SEWorkload : public gem5::SEWorkload { public: - using Params = ArmSEWorkloadParams; + PARAMS(ArmSEWorkload); SEWorkload(const Params &p, Addr page_shift) : gem5::SEWorkload(p, page_shift) @@ -52,7 +52,8 @@ class SEWorkload : public gem5::SEWorkload setSystem(System *sys) override { gem5::SEWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } loader::Arch getArch() const override { return loader::Arm64; } diff --git a/src/arch/arm/self_debug.cc b/src/arch/arm/self_debug.cc index 86da693b21..27064cd7be 100644 --- a/src/arch/arm/self_debug.cc +++ b/src/arch/arm/self_debug.cc @@ -82,8 +82,6 @@ SelfDebug::testBreakPoints(ThreadContext *tc, Addr vaddr) to32 = targetAArch32(tc); - init(tc); - if (!isDebugEnabled(tc)) return NoFault; @@ -127,8 +125,6 @@ 
SelfDebug::testWatchPoints(ThreadContext *tc, Addr vaddr, bool write, { setAArch32(tc); to32 = targetAArch32(tc); - if (!initialized) - init(tc); if (!isDebugEnabled(tc) || !mde) return NoFault; @@ -330,8 +326,6 @@ BrkPoint::test(ThreadContext *tc, Addr pc, ExceptionLevel el, DBGBCR ctr, void SelfDebug::init(ThreadContext *tc) { - if (initialized) - return; CPSR cpsr = tc->readMiscReg(MISCREG_CPSR); aarch32 = cpsr.width == 1; @@ -353,17 +347,15 @@ SelfDebug::init(ThreadContext *tc) } for (int i = 0; i <= dfr.wrps; i++) { - WatchPoint wtp = WatchPoint((MiscRegIndex)(MISCREG_DBGWCR0 + i), - (MiscRegIndex)(MISCREG_DBGWVR0 + i), + WatchPoint wtp = WatchPoint((MiscRegIndex)(MISCREG_DBGWCR0_EL1 + i), + (MiscRegIndex)(MISCREG_DBGWVR0_EL1 + i), this, (bool)mm_fr2.varange, aarch32); - const DBGWCR ctr = tc->readMiscReg(MISCREG_DBGWCR0 + i); + const DBGWCR ctr = tc->readMiscReg(MISCREG_DBGWCR0_EL1 + i); wtp.updateControl(ctr); arWatchPoints.push_back(wtp); } - initialized = true; - RegVal oslar_el1 = tc->readMiscReg(MISCREG_OSLAR_EL1); updateOSLock(oslar_el1); // Initialize preloaded control booleans @@ -721,8 +713,6 @@ SelfDebug::testVectorCatch(ThreadContext *tc, Addr addr, setAArch32(tc); to32 = targetAArch32(tc); - if (!initialized) - init(tc); if (!isDebugEnabled(tc) || !mde || !aarch32) return NoFault; @@ -735,7 +725,7 @@ SelfDebug::testVectorCatch(ThreadContext *tc, Addr addr, if (do_debug) { if (enableTdeTge) { return std::make_shared(0, 0x22, - EC_PREFETCH_ABORT_TO_HYP); + ExceptionClass::PREFETCH_ABORT_TO_HYP); } else { return std::make_shared(addr, ArmFault::DebugEvent, false, diff --git a/src/arch/arm/self_debug.hh b/src/arch/arm/self_debug.hh index c73782e966..5ad0d0991f 100644 --- a/src/arch/arm/self_debug.hh +++ b/src/arch/arm/self_debug.hh @@ -282,7 +282,6 @@ class SelfDebug SoftwareStep * softStep; VectorCatch * vcExcpt; - bool initialized; bool enableTdeTge; // MDCR_EL2.TDE || HCR_EL2.TGE bool mde; // MDSCR_EL1.MDE, DBGDSCRext.MDBGen @@ -295,7 +294,7 @@ 
class SelfDebug public: SelfDebug() - : initialized(false), enableTdeTge(false), + : softStep(nullptr), vcExcpt(nullptr), enableTdeTge(false), mde(false), sdd(false), kde(false), oslk(false) { softStep = new SoftwareStep(this); @@ -449,8 +448,6 @@ class SelfDebug VectorCatch* getVectorCatch(ThreadContext *tc) { - if (!initialized) - init(tc); return vcExcpt; } diff --git a/src/arch/arm/system.hh b/src/arch/arm/system.hh index 013f7ca32a..a712615b6e 100644 --- a/src/arch/arm/system.hh +++ b/src/arch/arm/system.hh @@ -47,6 +47,7 @@ #include #include "arch/arm/page_size.hh" +#include "arch/arm/types.hh" #include "kern/linux/events.hh" #include "params/ArmSystem.hh" #include "sim/full_system.hh" diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc index 96d50369ce..bbf102dad7 100644 --- a/src/arch/arm/table_walker.cc +++ b/src/arch/arm/table_walker.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012-2019, 2021 Arm Limited + * Copyright (c) 2010, 2012-2019, 2021-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -2296,7 +2296,10 @@ TableWalker::insertPartialTableEntry(LongDescriptor &descriptor) te.valid = true; te.longDescFormat = true; te.partial = true; - te.global = false; + // The entry is global if there is no address space identifier + // to differentiate translation contexts + te.global = !mmu->hasUnprivRegime( + currState->el, currState->hcr.e2h); te.isHyp = currState->isHyp; te.asid = currState->asid; te.vmid = currState->vmid; diff --git a/src/arch/arm/table_walker.hh b/src/arch/arm/table_walker.hh index 28d3d4da52..6ba7ffcd73 100644 --- a/src/arch/arm/table_walker.hh +++ b/src/arch/arm/table_walker.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2016, 2019, 2021 Arm Limited + * Copyright (c) 2010-2016, 2019, 2021-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -647,9 +647,11 @@ class TableWalker : public 
ClockedObject !currState->secureLookup)) { return false; // ARM ARM issue C B3.6.3 } else if (currState->aarch64) { - if (currState->el == EL2 || currState->el == EL3) { - return true; // By default translations are treated as global - // in AArch64 EL2 and EL3 + if (!MMU::hasUnprivRegime(currState->el, currState->hcr.e2h)) { + // By default translations are treated as global + // in AArch64 for regimes without an unpriviledged + // component + return true; } else if (currState->isSecure && !currState->secureLookup) { return false; } diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc index a7c3f1270d..e2979f5c7c 100644 --- a/src/arch/arm/tlb.cc +++ b/src/arch/arm/tlb.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2013, 2016-2021 Arm Limited + * Copyright (c) 2010-2013, 2016-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -315,19 +315,13 @@ TLB::flushAll() } void -TLB::flush(const TLBIALL& tlbi_op) +TLB::flush(const TLBIOp& tlbi_op) { - DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", - (tlbi_op.secureLookup ? "secure" : "non-secure")); int x = 0; TlbEntry *te; while (x < size) { te = &table[x]; - const bool el_match = te->checkELMatch( - tlbi_op.targetEL, tlbi_op.inHost); - if (te->valid && tlbi_op.secureLookup == !te->nstid && - (te->vmid == vmid || tlbi_op.el2Enabled) && el_match) { - + if (tlbi_op.match(te, vmid)) { DPRINTF(TLB, " - %s\n", te->print()); te->valid = false; stats.flushedEntries++; @@ -338,292 +332,6 @@ TLB::flush(const TLBIALL& tlbi_op) stats.flushTlb++; } -void -TLB::flush(const ITLBIALL& tlbi_op) -{ - DPRINTF(TLB, "Flushing all ITLB entries (%s lookup)\n", - (tlbi_op.secureLookup ? 
"secure" : "non-secure")); - int x = 0; - TlbEntry *te; - while (x < size) { - te = &table[x]; - const bool el_match = te->checkELMatch( - tlbi_op.targetEL, tlbi_op.inHost); - if (te->type & TypeTLB::instruction && te->valid && - tlbi_op.secureLookup == !te->nstid && - (te->vmid == vmid || tlbi_op.el2Enabled) && el_match) { - - DPRINTF(TLB, " - %s\n", te->print()); - te->valid = false; - stats.flushedEntries++; - } - ++x; - } - - stats.flushTlb++; -} - -void -TLB::flush(const DTLBIALL& tlbi_op) -{ - DPRINTF(TLB, "Flushing all DTLB entries (%s lookup)\n", - (tlbi_op.secureLookup ? "secure" : "non-secure")); - int x = 0; - TlbEntry *te; - while (x < size) { - te = &table[x]; - const bool el_match = te->checkELMatch( - tlbi_op.targetEL, tlbi_op.inHost); - if (te->type & TypeTLB::data && te->valid && - tlbi_op.secureLookup == !te->nstid && - (te->vmid == vmid || tlbi_op.el2Enabled) && el_match) { - - DPRINTF(TLB, " - %s\n", te->print()); - te->valid = false; - stats.flushedEntries++; - } - ++x; - } - - stats.flushTlb++; -} - -void -TLB::flush(const TLBIALLEL &tlbi_op) -{ - DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", - (tlbi_op.secureLookup ? "secure" : "non-secure")); - int x = 0; - TlbEntry *te; - while (x < size) { - te = &table[x]; - const bool el_match = te->checkELMatch( - tlbi_op.targetEL, tlbi_op.inHost); - if (te->valid && tlbi_op.secureLookup == !te->nstid && el_match) { - - DPRINTF(TLB, " - %s\n", te->print()); - te->valid = false; - stats.flushedEntries++; - } - ++x; - } - - stats.flushTlb++; -} - -void -TLB::flush(const TLBIVMALL &tlbi_op) -{ - DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", - (tlbi_op.secureLookup ? 
"secure" : "non-secure")); - int x = 0; - TlbEntry *te; - while (x < size) { - te = &table[x]; - const bool el_match = te->checkELMatch( - tlbi_op.targetEL, tlbi_op.inHost); - - const bool vmid_match = - te->vmid == vmid || - !tlbi_op.el2Enabled || - (!tlbi_op.stage2Flush() && tlbi_op.inHost); - - if (te->valid && tlbi_op.secureLookup == !te->nstid && - el_match && vmid_match) { - - DPRINTF(TLB, " - %s\n", te->print()); - te->valid = false; - stats.flushedEntries++; - } - ++x; - } - - stats.flushTlb++; -} - -void -TLB::flush(const TLBIALLN &tlbi_op) -{ - bool hyp = tlbi_op.targetEL == EL2; - - DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n", - (hyp ? "hyp" : "non-hyp")); - int x = 0; - TlbEntry *te; - while (x < size) { - te = &table[x]; - const bool el_match = te->checkELMatch(tlbi_op.targetEL, false); - - if (te->valid && te->nstid && te->isHyp == hyp && el_match) { - - DPRINTF(TLB, " - %s\n", te->print()); - stats.flushedEntries++; - te->valid = false; - } - ++x; - } - - stats.flushTlb++; -} - -void -TLB::flush(const TLBIMVA &tlbi_op) -{ - DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x " - "(%s lookup)\n", tlbi_op.addr, tlbi_op.asid, - (tlbi_op.secureLookup ? "secure" : "non-secure")); - _flushMva(tlbi_op.addr, tlbi_op.asid, tlbi_op.secureLookup, false, - tlbi_op.targetEL, tlbi_op.inHost, TypeTLB::unified); - stats.flushTlbMvaAsid++; -} - -void -TLB::flush(const ITLBIMVA &tlbi_op) -{ - DPRINTF(TLB, "Flushing ITLB entries with mva: %#x, asid: %#x " - "(%s lookup)\n", tlbi_op.addr, tlbi_op.asid, - (tlbi_op.secureLookup ? "secure" : "non-secure")); - _flushMva(tlbi_op.addr, tlbi_op.asid, tlbi_op.secureLookup, false, - tlbi_op.targetEL, tlbi_op.inHost, TypeTLB::instruction); - stats.flushTlbMvaAsid++; -} - -void -TLB::flush(const DTLBIMVA &tlbi_op) -{ - DPRINTF(TLB, "Flushing DTLB entries with mva: %#x, asid: %#x " - "(%s lookup)\n", tlbi_op.addr, tlbi_op.asid, - (tlbi_op.secureLookup ? 
"secure" : "non-secure")); - _flushMva(tlbi_op.addr, tlbi_op.asid, tlbi_op.secureLookup, false, - tlbi_op.targetEL, tlbi_op.inHost, TypeTLB::data); - stats.flushTlbMvaAsid++; -} - -void -TLB::flush(const TLBIASID &tlbi_op) -{ - DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", - tlbi_op.asid, (tlbi_op.secureLookup ? "secure" : "non-secure")); - - int x = 0 ; - TlbEntry *te; - - while (x < size) { - te = &table[x]; - - const bool el_match = te->checkELMatch( - tlbi_op.targetEL, tlbi_op.inHost); - - const bool vmid_match = - te->vmid == vmid || !tlbi_op.el2Enabled || tlbi_op.inHost; - - if (te->valid && te->asid == tlbi_op.asid && - tlbi_op.secureLookup == !te->nstid && - vmid_match && el_match) { - - te->valid = false; - DPRINTF(TLB, " - %s\n", te->print()); - stats.flushedEntries++; - } - ++x; - } - stats.flushTlbAsid++; -} - -void -TLB::flush(const ITLBIASID &tlbi_op) -{ - DPRINTF(TLB, "Flushing ITLB entries with asid: %#x (%s lookup)\n", - tlbi_op.asid, (tlbi_op.secureLookup ? "secure" : "non-secure")); - - int x = 0 ; - TlbEntry *te; - - while (x < size) { - te = &table[x]; - if (te->type & TypeTLB::instruction && - te->valid && te->asid == tlbi_op.asid && - tlbi_op.secureLookup == !te->nstid && - (te->vmid == vmid || tlbi_op.el2Enabled) && - te->checkELMatch(tlbi_op.targetEL, tlbi_op.inHost)) { - - te->valid = false; - DPRINTF(TLB, " - %s\n", te->print()); - stats.flushedEntries++; - } - ++x; - } - stats.flushTlbAsid++; -} - -void -TLB::flush(const DTLBIASID &tlbi_op) -{ - DPRINTF(TLB, "Flushing DTLB entries with asid: %#x (%s lookup)\n", - tlbi_op.asid, (tlbi_op.secureLookup ? 
"secure" : "non-secure")); - - int x = 0 ; - TlbEntry *te; - - while (x < size) { - te = &table[x]; - if (te->type & TypeTLB::data && - te->valid && te->asid == tlbi_op.asid && - tlbi_op.secureLookup == !te->nstid && - (te->vmid == vmid || tlbi_op.el2Enabled) && - te->checkELMatch(tlbi_op.targetEL, tlbi_op.inHost)) { - - te->valid = false; - DPRINTF(TLB, " - %s\n", te->print()); - stats.flushedEntries++; - } - ++x; - } - stats.flushTlbAsid++; -} - -void -TLB::flush(const TLBIMVAA &tlbi_op) { - - DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", - tlbi_op.addr, - (tlbi_op.secureLookup ? "secure" : "non-secure")); - _flushMva(tlbi_op.addr, 0xbeef, tlbi_op.secureLookup, true, - tlbi_op.targetEL, tlbi_op.inHost, TypeTLB::unified); - stats.flushTlbMva++; -} - -void -TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, - bool ignore_asn, ExceptionLevel target_el, bool in_host, - TypeTLB entry_type) -{ - TlbEntry *te; - Lookup lookup_data; - - lookup_data.va = sext<56>(mva); - lookup_data.asn = asn; - lookup_data.ignoreAsn = ignore_asn; - lookup_data.vmid = vmid; - lookup_data.hyp = target_el == EL2; - lookup_data.secure = secure_lookup; - lookup_data.functional = true; - lookup_data.targetEL = target_el; - lookup_data.inHost = in_host; - lookup_data.mode = BaseMMU::Read; - - te = lookup(lookup_data); - while (te != NULL) { - bool matching_type = (te->type & entry_type); - if (matching_type && secure_lookup == !te->nstid) { - DPRINTF(TLB, " - %s\n", te->print()); - te->valid = false; - stats.flushedEntries++; - } - te = lookup(lookup_data); - } -} - void TLB::takeOverFrom(BaseTLB *_otlb) { @@ -642,13 +350,7 @@ TLB::TlbStats::TlbStats(TLB &parent) ADD_STAT(inserts, statistics::units::Count::get(), "Number of times an entry is inserted into the TLB"), ADD_STAT(flushTlb, statistics::units::Count::get(), - "Number of times complete TLB was flushed"), - ADD_STAT(flushTlbMva, statistics::units::Count::get(), - "Number of times TLB was flushed by MVA"), - 
ADD_STAT(flushTlbMvaAsid, statistics::units::Count::get(), - "Number of times TLB was flushed by MVA & ASID"), - ADD_STAT(flushTlbAsid, statistics::units::Count::get(), - "Number of times TLB was flushed by ASID"), + "Number of times a TLB invalidation was requested"), ADD_STAT(flushedEntries, statistics::units::Count::get(), "Number of entries that have been flushed from TLB"), ADD_STAT(readAccesses, statistics::units::Count::get(), "Read accesses", diff --git a/src/arch/arm/tlb.hh b/src/arch/arm/tlb.hh index 40ad76ca76..fc9b68f0be 100644 --- a/src/arch/arm/tlb.hh +++ b/src/arch/arm/tlb.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2013, 2016, 2019-2021 Arm Limited + * Copyright (c) 2010-2013, 2016, 2019-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -61,20 +61,7 @@ namespace ArmISA { class TableWalker; class TLB; - -class TLBIALL; -class ITLBIALL; -class DTLBIALL; -class TLBIALLEL; -class TLBIVMALL; -class TLBIALLN; -class TLBIMVA; -class ITLBIMVA; -class DTLBIMVA; -class TLBIASID; -class ITLBIASID; -class DTLBIASID; -class TLBIMVAA; +class TLBIOp; class TlbTestInterface { @@ -153,9 +140,6 @@ class TLB : public BaseTLB mutable statistics::Scalar writeMisses; mutable statistics::Scalar inserts; mutable statistics::Scalar flushTlb; - mutable statistics::Scalar flushTlbMva; - mutable statistics::Scalar flushTlbMvaAsid; - mutable statistics::Scalar flushTlbAsid; mutable statistics::Scalar flushedEntries; statistics::Formula readAccesses; @@ -219,42 +203,9 @@ class TLB : public BaseTLB void flushAll() override; - /** Reset the entire TLB + /** Flush TLB entries */ - void flush(const TLBIALL &tlbi_op); - void flush(const ITLBIALL &tlbi_op); - void flush(const DTLBIALL &tlbi_op); - - /** Implementaton of AArch64 TLBI ALLE1(IS), ALLE2(IS), ALLE3(IS) - * instructions - */ - void flush(const TLBIALLEL &tlbi_op); - - /** Implementaton of AArch64 TLBI VMALLE1(IS)/VMALLS112E1(IS) - * instructions - */ - void 
flush(const TLBIVMALL &tlbi_op); - - /** Remove all entries in the non secure world, depending on whether they - * were allocated in hyp mode or not - */ - void flush(const TLBIALLN &tlbi_op); - - /** Remove any entries that match both a va and asn - */ - void flush(const TLBIMVA &tlbi_op); - void flush(const ITLBIMVA &tlbi_op); - void flush(const DTLBIMVA &tlbi_op); - - /** Remove any entries that match the asn - */ - void flush(const TLBIASID &tlbi_op); - void flush(const ITLBIASID &tlbi_op); - void flush(const DTLBIASID &tlbi_op); - - /** Remove all entries that match the va regardless of asn - */ - void flush(const TLBIMVAA &tlbi_op); + void flush(const TLBIOp &tlbi_op); Fault trickBoxCheck(const RequestPtr &req, BaseMMU::Mode mode, TlbEntry::DomainType domain); diff --git a/src/arch/arm/tlbi_op.cc b/src/arch/arm/tlbi_op.cc index 5c595a8ebb..e89f411384 100644 --- a/src/arch/arm/tlbi_op.cc +++ b/src/arch/arm/tlbi_op.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited + * Copyright (c) 2018-2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -62,6 +62,14 @@ TLBIALL::operator()(ThreadContext* tc) } } +bool +TLBIALL::match(TlbEntry* te, vmid_t vmid) const +{ + return te->valid && secureLookup == !te->nstid && + (te->vmid == vmid || el2Enabled) && + te->checkELMatch(targetEL, inHost); +} + void ITLBIALL::operator()(ThreadContext* tc) { @@ -69,6 +77,12 @@ ITLBIALL::operator()(ThreadContext* tc) getMMUPtr(tc)->iflush(*this); } +bool +ITLBIALL::match(TlbEntry* te, vmid_t vmid) const +{ + return TLBIALL::match(te, vmid) && (te->type & TypeTLB::instruction); +} + void DTLBIALL::operator()(ThreadContext* tc) { @@ -76,6 +90,12 @@ DTLBIALL::operator()(ThreadContext* tc) getMMUPtr(tc)->dflush(*this); } +bool +DTLBIALL::match(TlbEntry* te, vmid_t vmid) const +{ + return TLBIALL::match(te, vmid) && (te->type & TypeTLB::data); +} + void TLBIALLEL::operator()(ThreadContext* tc) { @@ -90,6 +110,13 @@ 
TLBIALLEL::operator()(ThreadContext* tc) } } +bool +TLBIALLEL::match(TlbEntry* te, vmid_t vmid) const +{ + return te->valid && secureLookup == !te->nstid && + te->checkELMatch(targetEL, inHost); +} + void TLBIVMALL::operator()(ThreadContext* tc) { @@ -106,6 +133,14 @@ TLBIVMALL::operator()(ThreadContext* tc) } } +bool +TLBIVMALL::match(TlbEntry* te, vmid_t vmid) const +{ + return te->valid && secureLookup == !te->nstid && + te->checkELMatch(targetEL, inHost) && + (te->vmid == vmid || !el2Enabled || (!stage2Flush() && inHost)); +} + void TLBIASID::operator()(ThreadContext* tc) { @@ -120,6 +155,15 @@ TLBIASID::operator()(ThreadContext* tc) } } +bool +TLBIASID::match(TlbEntry* te, vmid_t vmid) const +{ + return te->valid && te->asid == asid && + secureLookup == !te->nstid && + te->checkELMatch(targetEL, inHost) && + (te->vmid == vmid || !el2Enabled || inHost); +} + void ITLBIASID::operator()(ThreadContext* tc) { @@ -127,6 +171,12 @@ ITLBIASID::operator()(ThreadContext* tc) getMMUPtr(tc)->iflush(*this); } +bool +ITLBIASID::match(TlbEntry* te, vmid_t vmid) const +{ + return TLBIASID::match(te, vmid) && (te->type & TypeTLB::instruction); +} + void DTLBIASID::operator()(ThreadContext* tc) { @@ -134,6 +184,12 @@ DTLBIASID::operator()(ThreadContext* tc) getMMUPtr(tc)->dflush(*this); } +bool +DTLBIASID::match(TlbEntry* te, vmid_t vmid) const +{ + return TLBIASID::match(te, vmid) && (te->type & TypeTLB::data); +} + void TLBIALLN::operator()(ThreadContext* tc) { @@ -145,6 +201,14 @@ TLBIALLN::operator()(ThreadContext* tc) } } +bool +TLBIALLN::match(TlbEntry* te, vmid_t vmid) const +{ + return te->valid && te->nstid && + te->isHyp == (targetEL == EL2) && + te->checkELMatch(targetEL, false); +} + void TLBIMVAA::operator()(ThreadContext* tc) { @@ -158,6 +222,23 @@ TLBIMVAA::operator()(ThreadContext* tc) } } +bool +TLBIMVAA::match(TlbEntry* te, vmid_t vmid) const +{ + TlbEntry::Lookup lookup_data; + lookup_data.va = sext<56>(addr); + lookup_data.ignoreAsn = true; + 
lookup_data.vmid = vmid; + lookup_data.hyp = targetEL == EL2; + lookup_data.secure = secureLookup; + lookup_data.functional = true; + lookup_data.targetEL = targetEL; + lookup_data.inHost = inHost; + lookup_data.mode = BaseMMU::Read; + + return te->match(lookup_data) && (!lastLevel || !te->partial); +} + void TLBIMVA::operator()(ThreadContext* tc) { @@ -171,18 +252,48 @@ TLBIMVA::operator()(ThreadContext* tc) } } +bool +TLBIMVA::match(TlbEntry* te, vmid_t vmid) const +{ + TlbEntry::Lookup lookup_data; + lookup_data.va = sext<56>(addr); + lookup_data.asn = asid; + lookup_data.ignoreAsn = false; + lookup_data.vmid = vmid; + lookup_data.hyp = targetEL == EL2; + lookup_data.secure = secureLookup; + lookup_data.functional = true; + lookup_data.targetEL = targetEL; + lookup_data.inHost = inHost; + lookup_data.mode = BaseMMU::Read; + + return te->match(lookup_data) && (!lastLevel || !te->partial); +} + void ITLBIMVA::operator()(ThreadContext* tc) { getMMUPtr(tc)->iflush(*this); } +bool +ITLBIMVA::match(TlbEntry* te, vmid_t vmid) const +{ + return TLBIMVA::match(te, vmid) && (te->type & TypeTLB::instruction); +} + void DTLBIMVA::operator()(ThreadContext* tc) { getMMUPtr(tc)->dflush(*this); } +bool +DTLBIMVA::match(TlbEntry* te, vmid_t vmid) const +{ + return TLBIMVA::match(te, vmid) && (te->type & TypeTLB::data); +} + void TLBIIPA::operator()(ThreadContext* tc) { diff --git a/src/arch/arm/tlbi_op.hh b/src/arch/arm/tlbi_op.hh index 0151ff92f7..4f4ea09ec8 100644 --- a/src/arch/arm/tlbi_op.hh +++ b/src/arch/arm/tlbi_op.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 ARM Limited + * Copyright (c) 2018-2020, 2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -76,6 +76,8 @@ class TLBIOp (*this)(oc); } + virtual bool match(TlbEntry *entry, vmid_t curr_vmid) const = 0; + /** * Return true if the TLBI op needs to flush stage1 * entries, Defaulting to true in the TLBIOp abstract @@ -113,6 +115,8 @@ class TLBIALL : 
public TLBIOp void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + bool stage2Flush() const override { @@ -143,6 +147,8 @@ class ITLBIALL : public TLBIALL void broadcast(ThreadContext *tc) = delete; void operator()(ThreadContext* tc) override; + + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; }; /** Data TLB Invalidate All */ @@ -156,6 +162,8 @@ class DTLBIALL : public TLBIALL void broadcast(ThreadContext *tc) = delete; void operator()(ThreadContext* tc) override; + + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; }; /** Implementaton of AArch64 TLBI ALLE(1,2,3)(IS) instructions */ @@ -168,6 +176,8 @@ class TLBIALLEL : public TLBIOp void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + bool stage2Flush() const override { @@ -195,6 +205,8 @@ class TLBIVMALL : public TLBIOp void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + bool stage2Flush() const override { @@ -223,6 +235,8 @@ class TLBIASID : public TLBIOp void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + uint16_t asid; bool inHost; bool el2Enabled; @@ -239,6 +253,8 @@ class ITLBIASID : public TLBIASID void broadcast(ThreadContext *tc) = delete; void operator()(ThreadContext* tc) override; + + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; }; /** Data TLB Invalidate by ASID match */ @@ -252,6 +268,8 @@ class DTLBIASID : public TLBIASID void broadcast(ThreadContext *tc) = delete; void operator()(ThreadContext* tc) override; + + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; }; /** TLB Invalidate All, Non-Secure */ @@ -264,6 +282,8 @@ class TLBIALLN : public TLBIOp void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + bool stage2Flush() const override { @@ -282,14 
+302,18 @@ class TLBIMVAA : public TLBIOp { public: TLBIMVAA(ExceptionLevel _targetEL, bool _secure, - Addr _addr) - : TLBIOp(_targetEL, _secure), addr(_addr), inHost(false) + Addr _addr, bool last_level) + : TLBIOp(_targetEL, _secure), addr(_addr), inHost(false), + lastLevel(last_level) {} void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + Addr addr; bool inHost; + bool lastLevel; }; /** TLB Invalidate by VA */ @@ -297,16 +321,19 @@ class TLBIMVA : public TLBIOp { public: TLBIMVA(ExceptionLevel _targetEL, bool _secure, - Addr _addr, uint16_t _asid) + Addr _addr, uint16_t _asid, bool last_level) : TLBIOp(_targetEL, _secure), addr(_addr), asid(_asid), - inHost(false) + inHost(false), lastLevel(last_level) {} void operator()(ThreadContext* tc) override; + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; + Addr addr; uint16_t asid; bool inHost; + bool lastLevel; }; /** Instruction TLB Invalidate by VA */ @@ -315,12 +342,14 @@ class ITLBIMVA : public TLBIMVA public: ITLBIMVA(ExceptionLevel _targetEL, bool _secure, Addr _addr, uint16_t _asid) - : TLBIMVA(_targetEL, _secure, _addr, _asid) + : TLBIMVA(_targetEL, _secure, _addr, _asid, false) {} void broadcast(ThreadContext *tc) = delete; void operator()(ThreadContext* tc) override; + + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; }; /** Data TLB Invalidate by VA */ @@ -329,24 +358,33 @@ class DTLBIMVA : public TLBIMVA public: DTLBIMVA(ExceptionLevel _targetEL, bool _secure, Addr _addr, uint16_t _asid) - : TLBIMVA(_targetEL, _secure, _addr, _asid) + : TLBIMVA(_targetEL, _secure, _addr, _asid, false) {} void broadcast(ThreadContext *tc) = delete; void operator()(ThreadContext* tc) override; + + bool match(TlbEntry *entry, vmid_t curr_vmid) const override; }; /** TLB Invalidate by Intermediate Physical Address */ class TLBIIPA : public TLBIOp { public: - TLBIIPA(ExceptionLevel _targetEL, bool _secure, Addr _addr) - : TLBIOp(_targetEL, 
_secure), addr(_addr) + TLBIIPA(ExceptionLevel _targetEL, bool _secure, Addr _addr, + bool last_level) + : TLBIOp(_targetEL, _secure), addr(_addr), lastLevel(last_level) {} void operator()(ThreadContext* tc) override; + bool + match(TlbEntry *entry, vmid_t curr_vmid) const override + { + panic("This shouldn't be called\n"); + } + bool stage1Flush() const override { @@ -357,10 +395,11 @@ class TLBIIPA : public TLBIOp TLBIMVAA makeStage2() const { - return TLBIMVAA(EL1, secureLookup, addr); + return TLBIMVAA(EL1, secureLookup, addr, lastLevel); } Addr addr; + bool lastLevel; }; } // namespace ArmISA diff --git a/src/arch/arm/tracers/SConscript b/src/arch/arm/tracers/SConscript index a509b220ed..15945a4ac4 100644 --- a/src/arch/arm/tracers/SConscript +++ b/src/arch/arm/tracers/SConscript @@ -36,7 +36,7 @@ Import('*') SimObject('TarmacTrace.py', sim_objects=['TarmacParser', 'TarmacTracer'], - tags='arm isa') + enums=['TarmacDump'], tags='arm isa') Source('tarmac_base.cc', tags='arm isa') Source('tarmac_parser.cc', tags='arm isa') Source('tarmac_tracer.cc', tags='arm isa') diff --git a/src/arch/arm/tracers/TarmacTrace.py b/src/arch/arm/tracers/TarmacTrace.py index faf2db0fea..82c447aada 100644 --- a/src/arch/arm/tracers/TarmacTrace.py +++ b/src/arch/arm/tracers/TarmacTrace.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 ARM Limited +# Copyright (c) 2018, 2022 Arm Limited # All rights reserved. 
# # The license below extends only to copyright in the software and shall @@ -37,38 +37,59 @@ from m5.SimObject import SimObject from m5.params import * from m5.objects.InstTracer import InstTracer + class TarmacParser(InstTracer): - type = 'TarmacParser' - cxx_class = 'gem5::Trace::TarmacParser' + type = "TarmacParser" + cxx_class = "gem5::trace::TarmacParser" cxx_header = "arch/arm/tracers/tarmac_parser.hh" path_to_trace = Param.String("tarmac.log", "path to TARMAC trace") start_pc = Param.Int( - 0x0, "tracing starts when the PC gets this value; ignored if 0x0") + 0x0, "tracing starts when the PC gets this value; ignored if 0x0" + ) - exit_on_diff = Param.Bool(False, - "stop simulation after first mismatch is detected") + exit_on_diff = Param.Bool( + False, "stop simulation after first mismatch is detected" + ) - exit_on_insn_diff = Param.Bool(False, - "stop simulation after first mismatch on PC or opcode is detected") + exit_on_insn_diff = Param.Bool( + False, + "stop simulation after first mismatch on PC or opcode is detected", + ) - mem_wr_check = Param.Bool(False, - "enable check of memory write accesses") + mem_wr_check = Param.Bool(False, "enable check of memory write accesses") - cpu_id = Param.Bool(False, - "true if trace format includes the CPU id") + cpu_id = Param.Bool(False, "true if trace format includes the CPU id") + + ignore_mem_addr = Param.AddrRange( + AddrRange(0, size=0), "Range of unverifiable memory addresses" + ) + + +class TarmacDump(ScopedEnum): + vals = ["stdoutput", "stderror", "file"] - ignore_mem_addr = Param.AddrRange(AddrRange(0, size=0), - "Range of unverifiable memory addresses") class TarmacTracer(InstTracer): - type = 'TarmacTracer' - cxx_class = 'gem5::Trace::TarmacTracer' + type = "TarmacTracer" + cxx_class = "gem5::trace::TarmacTracer" cxx_header = "arch/arm/tracers/tarmac_tracer.hh" - start_tick = Param.Tick(0, - "tracing starts when the tick time gets this value") + start_tick = Param.Tick( + 0, "tracing starts when the 
tick time gets this value" + ) - end_tick = Param.Tick(MaxTick, - "tracing ends when the tick time gets this value") + end_tick = Param.Tick( + MaxTick, "tracing ends when the tick time gets this value" + ) + outfile = Param.TarmacDump( + "stdoutput", + "Selects where the tracer is dumping its output" + "Current options are:" + "1) stdoutput = dump to standard output" + "2) stderror = dump to standard error" + "3) file = dump to a file. As there is one tracer per CPU," + "this means every CPU will dump its trace to a different file," + "name after the tracer name (e.g. cpu0.tracer, cpu1.tracer)", + ) diff --git a/src/arch/arm/tracers/tarmac_base.cc b/src/arch/arm/tracers/tarmac_base.cc index c8a8619b3c..99ed3bb0f1 100644 --- a/src/arch/arm/tracers/tarmac_base.cc +++ b/src/arch/arm/tracers/tarmac_base.cc @@ -50,7 +50,7 @@ namespace gem5 using namespace ArmISA; -namespace Trace { +namespace trace { TarmacBaseRecord::TarmacBaseRecord(Tick _when, ThreadContext *_thread, const StaticInstPtr _staticInst, @@ -118,5 +118,5 @@ TarmacBaseRecord::pcToISetState(const PCStateBase &pc) return isetstate; } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/arm/tracers/tarmac_base.hh b/src/arch/arm/tracers/tarmac_base.hh index cbf87a3c36..501eb1b008 100644 --- a/src/arch/arm/tracers/tarmac_base.hh +++ b/src/arch/arm/tracers/tarmac_base.hh @@ -49,6 +49,7 @@ #ifndef __ARCH_ARM_TRACERS_TARMAC_BASE_HH__ #define __ARCH_ARM_TRACERS_TARMAC_BASE_HH__ +#include "arch/arm/types.hh" #include "base/trace.hh" #include "base/types.hh" #include "cpu/static_inst.hh" @@ -59,7 +60,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { class TarmacBaseRecord : public InstRecord { @@ -146,7 +147,7 @@ class TarmacBaseRecord : public InstRecord }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __ARCH_ARM_TRACERS_TARMAC_BASE_HH__ diff --git a/src/arch/arm/tracers/tarmac_parser.cc b/src/arch/arm/tracers/tarmac_parser.cc 
index 34d7ca7cbb..cb2d9e31c1 100644 --- a/src/arch/arm/tracers/tarmac_parser.cc +++ b/src/arch/arm/tracers/tarmac_parser.cc @@ -45,6 +45,8 @@ #include "arch/arm/insts/static_inst.hh" #include "arch/arm/mmu.hh" +#include "arch/arm/regs/int.hh" +#include "arch/arm/regs/vec.hh" #include "cpu/static_inst.hh" #include "cpu/thread_context.hh" #include "mem/packet.hh" @@ -62,7 +64,7 @@ namespace gem5 using namespace ArmISA; -namespace Trace { +namespace trace { // TARMAC Parser static variables const int TarmacParserRecord::MaxLineLength; @@ -743,7 +745,7 @@ TarmacParserRecord::MiscRegMap TarmacParserRecord::miscRegMap = { void TarmacParserRecord::TarmacParserRecordEvent::process() { - std::ostream &outs = Trace::output(); + std::ostream &outs = trace::output(); std::list::iterator it = destRegRecords.begin(), end = destRegRecords.end(); @@ -755,31 +757,28 @@ TarmacParserRecord::TarmacParserRecordEvent::process() switch (it->type) { case REG_R: case REG_X: - values.push_back(thread->getReg(RegId(IntRegClass, it->index))); + values.push_back(thread->getReg(intRegClass[it->index])); break; case REG_S: if (instRecord.isetstate == ISET_A64) { ArmISA::VecRegContainer vc; - thread->getReg(RegId(VecRegClass, it->index), &vc); + thread->getReg(vecRegClass[it->index], &vc); auto vv = vc.as(); values.push_back(vv[0]); } else { - const VecElem elem = thread->getReg( - RegId(VecElemClass, it->index)); + const VecElem elem = thread->getReg(vecElemClass[it->index]); values.push_back(elem); } break; case REG_D: if (instRecord.isetstate == ISET_A64) { ArmISA::VecRegContainer vc; - thread->getReg(RegId(VecRegClass, it->index), &vc); + thread->getReg(vecRegClass[it->index], &vc); auto vv = vc.as(); values.push_back(vv[0]); } else { - const VecElem w0 = thread->getReg( - RegId(VecElemClass, it->index)); - const VecElem w1 = thread->getReg( - RegId(VecElemClass, it->index + 1)); + const VecElem w0 = thread->getReg(vecElemClass[it->index]); + const VecElem w1 = 
thread->getReg(vecElemClass[it->index + 1]); values.push_back((uint64_t)(w1) << 32 | w0); } @@ -787,7 +786,7 @@ TarmacParserRecord::TarmacParserRecordEvent::process() case REG_P: { ArmISA::VecPredRegContainer pc; - thread->getReg(RegId(VecPredRegClass, it->index), &pc); + thread->getReg(vecPredRegClass[it->index], &pc); auto pv = pc.as(); uint64_t p = 0; for (int i = maxVectorLength * 8; i > 0; ) { @@ -799,19 +798,15 @@ TarmacParserRecord::TarmacParserRecordEvent::process() case REG_Q: if (instRecord.isetstate == ISET_A64) { ArmISA::VecRegContainer vc; - thread->getReg(RegId(VecRegClass, it->index), &vc); + thread->getReg(vecRegClass[it->index], &vc); auto vv = vc.as(); values.push_back(vv[0]); values.push_back(vv[1]); } else { - const VecElem w0 = thread->getReg( - RegId(VecElemClass, it->index)); - const VecElem w1 = thread->getReg( - RegId(VecElemClass, it->index + 1)); - const VecElem w2 = thread->getReg( - RegId(VecElemClass, it->index + 2)); - const VecElem w3 = thread->getReg( - RegId(VecElemClass, it->index + 3)); + const VecElem w0 = thread->getReg(vecElemClass[it->index]); + const VecElem w1 = thread->getReg(vecElemClass[it->index + 1]); + const VecElem w2 = thread->getReg(vecElemClass[it->index + 2]); + const VecElem w3 = thread->getReg(vecElemClass[it->index + 3]); values.push_back((uint64_t)(w1) << 32 | w0); values.push_back((uint64_t)(w3) << 32 | w2); @@ -821,7 +816,7 @@ TarmacParserRecord::TarmacParserRecordEvent::process() { int8_t i = maxVectorLength; ArmISA::VecRegContainer vc; - thread->getReg(RegId(VecRegClass, it->index), &vc); + thread->getReg(vecRegClass[it->index], &vc); auto vv = vc.as(); while (i > 0) { values.push_back(vv[--i]); @@ -939,7 +934,7 @@ void TarmacParserRecord::printMismatchHeader(const StaticInstPtr staticInst, const PCStateBase &pc) { - std::ostream &outs = Trace::output(); + std::ostream &outs = trace::output(); outs << "\nMismatch between gem5 and TARMAC trace @ " << std::dec << curTick() << " ticks\n" << "[seq_num: " << 
std::dec << instRecord.seq_num @@ -968,7 +963,7 @@ TarmacParserRecord::TarmacParserRecord(Tick _when, ThreadContext *_thread, void TarmacParserRecord::dump() { - std::ostream &outs = Trace::output(); + std::ostream &outs = trace::output(); uint64_t written_data = 0; unsigned mem_flags = 3 | ArmISA::MMU::AllowUnaligned; @@ -1300,8 +1295,11 @@ TarmacParserRecord::readMemNoEffect(Addr addr, uint8_t *data, unsigned size, return false; // the translating proxy will perform the virtual to physical // translation again - (FullSystem ? TranslatingPortProxy(thread) : - SETranslatingPortProxy(thread)).readBlob(addr, data, size); + TranslatingPortProxy fs_proxy(thread); + SETranslatingPortProxy se_proxy(thread); + PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy; + + virt_proxy.readBlob(addr, data, size); } else { return false; } @@ -1359,5 +1357,5 @@ TarmacParserRecord::iSetStateToStr(ISetState isetstate) const } } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/arm/tracers/tarmac_parser.hh b/src/arch/arm/tracers/tarmac_parser.hh index 39cf5c38a6..41c4e78f08 100644 --- a/src/arch/arm/tracers/tarmac_parser.hh +++ b/src/arch/arm/tracers/tarmac_parser.hh @@ -62,7 +62,7 @@ namespace gem5 { -namespace Trace { +namespace trace { class TarmacParserRecord : public TarmacBaseRecord { @@ -300,7 +300,7 @@ class TarmacParser : public InstTracer bool macroopInProgress; }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __ARCH_ARM_TRACERS_TARMAC_PARSER_HH__ diff --git a/src/arch/arm/tracers/tarmac_record.cc b/src/arch/arm/tracers/tarmac_record.cc index 94dbc518e6..59d6a18b39 100644 --- a/src/arch/arm/tracers/tarmac_record.cc +++ b/src/arch/arm/tracers/tarmac_record.cc @@ -47,7 +47,7 @@ namespace gem5 using namespace ArmISA; -namespace Trace { +namespace trace { // TARMAC Instruction Record static variables uint64_t TarmacTracerRecord::TraceInstEntry::instCount = 0; @@ -158,36 +158,33 @@ 
TarmacTracerRecord::TraceRegEntry::TraceRegEntry( const RegId& reg) : RegEntry(*tarmCtx.pc), regValid(false), - regClass(reg.classValue()), - regRel(reg.index()) + regId(reg) { } void -TarmacTracerRecord::TraceRegEntry::update( - const TarmacContext& tarmCtx -) +TarmacTracerRecord::TraceRegEntry::update(const TarmacContext& tarmCtx) { // Fill the register entry data, according to register // class. - switch (regClass) { + switch (regId.classValue()) { case CCRegClass: - updateCC(tarmCtx, regRel); + updateCC(tarmCtx); break; case FloatRegClass: - updateFloat(tarmCtx, regRel); + updateFloat(tarmCtx); break; case IntRegClass: - updateInt(tarmCtx, regRel); + updateInt(tarmCtx); break; case MiscRegClass: - updateMisc(tarmCtx, regRel); + updateMisc(tarmCtx); break; case VecRegClass: - updateVec(tarmCtx, regRel); + updateVec(tarmCtx); break; case VecPredRegClass: - updatePred(tarmCtx, regRel); + updatePred(tarmCtx); break; default: // If unsupported format, do nothing: non updating @@ -197,21 +194,18 @@ TarmacTracerRecord::TraceRegEntry::update( } void -TarmacTracerRecord::TraceRegEntry::updateMisc( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) +TarmacTracerRecord::TraceRegEntry::updateMisc(const TarmacContext& tarmCtx) { auto thread = tarmCtx.thread; regValid = true; - regName = miscRegName[regRelIdx]; - values[Lo] = thread->readMiscRegNoEffect(regRelIdx); + regName = miscRegName[regId.index()]; + values[Lo] = thread->readMiscRegNoEffect(regId.index()); // If it is the CPSR: // update the value of the CPSR register and add // the CC flags on top of the value - if (regRelIdx == MISCREG_CPSR) { + if (regId.index() == MISCREG_CPSR) { CPSR cpsr = thread->readMiscRegNoEffect(MISCREG_CPSR); cpsr.nz = thread->getReg(cc_reg::Nz); cpsr.c = thread->getReg(cc_reg::C); @@ -224,37 +218,25 @@ TarmacTracerRecord::TraceRegEntry::updateMisc( } void -TarmacTracerRecord::TraceRegEntry::updateCC( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) 
+TarmacTracerRecord::TraceRegEntry::updateCC(const TarmacContext& tarmCtx) { auto thread = tarmCtx.thread; regValid = true; - regName = cc_reg::RegName[regRelIdx]; - values[Lo] = thread->getReg(RegId(CCRegClass, regRelIdx)); + regName = cc_reg::RegName[regId.index()]; + values[Lo] = thread->getReg(regId); } void -TarmacTracerRecord::TraceRegEntry::updateFloat( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) +TarmacTracerRecord::TraceRegEntry::updateFloat(const TarmacContext& tarmCtx) { - auto thread = tarmCtx.thread; - regValid = true; - regName = "f" + std::to_string(regRelIdx); - RegId reg(FloatRegClass, regRelIdx); - values[Lo] = bitsToFloat32(thread->getReg(reg)); + regName = "f" + std::to_string(regId.index()); + panic("ARM doesn't support float registers."); } void -TarmacTracerRecord::TraceRegEntry::updateInt( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) +TarmacTracerRecord::TraceRegEntry::updateInt(const TarmacContext& tarmCtx) { auto thread = tarmCtx.thread; @@ -270,7 +252,7 @@ TarmacTracerRecord::TraceRegEntry::updateInt( } regValid = true; - switch (regRelIdx) { + switch (regId.index()) { case int_reg::Pc: regName = "pc"; break; @@ -284,10 +266,10 @@ TarmacTracerRecord::TraceRegEntry::updateInt( regName = "lr" + reg_suffix; break; default: - regName = "r" + std::to_string(regRelIdx); + regName = "r" + std::to_string(regId.index()); break; } - values[Lo] = thread->getReg(RegId(IntRegClass, regRelIdx)); + values[Lo] = thread->getReg(regId); } void @@ -392,7 +374,7 @@ template void TarmacTracerRecord::flushQueues(Queue& queue) { - std::ostream &outs = Trace::output(); + std::ostream &outs = tracer.output(); for (const auto &single_entry : queue) { single_entry->print(outs); @@ -464,5 +446,5 @@ TarmacTracerRecord::TraceRegEntry::print( values[Lo]); /* Register value */ } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/arm/tracers/tarmac_record.hh b/src/arch/arm/tracers/tarmac_record.hh index 
197034f2c1..009df5db29 100644 --- a/src/arch/arm/tracers/tarmac_record.hh +++ b/src/arch/arm/tracers/tarmac_record.hh @@ -54,7 +54,7 @@ namespace gem5 { -namespace Trace { +namespace trace { class TarmacContext; @@ -140,31 +140,18 @@ class TarmacTracerRecord : public TarmacBaseRecord protected: /** Register update functions. */ - virtual void - updateMisc(const TarmacContext& tarmCtx, RegIndex regRelIdx); - - virtual void - updateCC(const TarmacContext& tarmCtx, RegIndex regRelIdx); - - virtual void - updateFloat(const TarmacContext& tarmCtx, RegIndex regRelIdx); - - virtual void - updateInt(const TarmacContext& tarmCtx, RegIndex regRelIdx); - - virtual void - updateVec(const TarmacContext& tarmCtx, RegIndex regRelIdx) {}; - - virtual void - updatePred(const TarmacContext& tarmCtx, RegIndex regRelIdx) {}; + virtual void updateMisc(const TarmacContext& tarmCtx); + virtual void updateCC(const TarmacContext& tarmCtx); + virtual void updateFloat(const TarmacContext& tarmCtx); + virtual void updateInt(const TarmacContext& tarmCtx); + virtual void updateVec(const TarmacContext& tarmCtx) {}; + virtual void updatePred(const TarmacContext& tarmCtx) {}; public: /** True if register entry is valid */ bool regValid; - /** Register class */ - RegClassType regClass; - /** Register arch number */ - RegIndex regRel; + /** Register ID */ + RegId regId; /** Register name to be printed */ std::string regName; }; @@ -229,7 +216,9 @@ class TarmacTracerRecord : public TarmacBaseRecord // Find all CC Entries and move them at the end of the queue auto it = std::remove_if( queue.begin(), queue.end(), - [] (RegPtr& reg) ->bool { return (reg->regClass == CCRegClass); } + [] (RegPtr& reg) ->bool { + return (reg->regId.classValue() == CCRegClass); + } ); if (it != queue.end()) { @@ -238,8 +227,8 @@ class TarmacTracerRecord : public TarmacBaseRecord auto is_cpsr = [] (RegPtr& reg) ->bool { - return (reg->regClass == MiscRegClass) && - (reg->regRel == ArmISA::MISCREG_CPSR); + return 
(reg->regId.classValue()== MiscRegClass) && + (reg->regId.index() == ArmISA::MISCREG_CPSR); }; // Looking for the presence of a CPSR register entry. @@ -249,7 +238,7 @@ class TarmacTracerRecord : public TarmacBaseRecord // If CPSR entry not present, generate one if (cpsr_it == queue.end()) { - RegId reg(MiscRegClass, ArmISA::MISCREG_CPSR); + RegId reg = ArmISA::miscRegClass[ArmISA::MISCREG_CPSR]; queue.push_back( std::make_unique( genRegister(tarmCtx, reg)) @@ -269,7 +258,7 @@ class TarmacTracerRecord : public TarmacBaseRecord TarmacTracer& tracer; }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __ARCH_ARM_TRACERS_TARMAC_RECORD_HH__ diff --git a/src/arch/arm/tracers/tarmac_record_v8.cc b/src/arch/arm/tracers/tarmac_record_v8.cc index 3c4a1525bd..29606c3f82 100644 --- a/src/arch/arm/tracers/tarmac_record_v8.cc +++ b/src/arch/arm/tracers/tarmac_record_v8.cc @@ -48,7 +48,7 @@ namespace gem5 using namespace ArmISA; -namespace Trace { +namespace trace { TarmacTracerRecordV8::TraceInstEntryV8::TraceInstEntryV8( const TarmacContext& tarmCtx, @@ -90,22 +90,19 @@ TarmacTracerRecordV8::TraceRegEntryV8::TraceRegEntryV8( } void -TarmacTracerRecordV8::TraceRegEntryV8::updateInt( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) +TarmacTracerRecordV8::TraceRegEntryV8::updateInt(const TarmacContext& tarmCtx) { // Do not trace pseudo register accesses: invalid // register entry. 
- if (regRelIdx > int_reg::NumArchRegs) { + if (regId.index() > int_reg::NumArchRegs) { regValid = false; return; } - TraceRegEntry::updateInt(tarmCtx, regRelIdx); + TraceRegEntry::updateInt(tarmCtx); - if ((regRelIdx != int_reg::Pc) || (regRelIdx != StackPointerReg) || - (regRelIdx != FramePointerReg) || (regRelIdx != ReturnAddressReg)) { + if ((regId != int_reg::Pc) || (regId != StackPointerReg) || + (regId != FramePointerReg) || (regId != ReturnAddressReg)) { const auto* arm_inst = static_cast( tarmCtx.staticInst.get() @@ -113,33 +110,27 @@ TarmacTracerRecordV8::TraceRegEntryV8::updateInt( regWidth = (arm_inst->getIntWidth()); if (regWidth == 32) { - regName = "W" + std::to_string(regRelIdx); + regName = "W" + std::to_string(regId.index()); } else { - regName = "X" + std::to_string(regRelIdx); + regName = "X" + std::to_string(regId.index()); } } } void -TarmacTracerRecordV8::TraceRegEntryV8::updateMisc( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) +TarmacTracerRecordV8::TraceRegEntryV8::updateMisc(const TarmacContext& tarmCtx) { - TraceRegEntry::updateMisc(tarmCtx, regRelIdx); + TraceRegEntry::updateMisc(tarmCtx); // System registers are 32bit wide regWidth = 32; } void -TarmacTracerRecordV8::TraceRegEntryV8::updateVec( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) +TarmacTracerRecordV8::TraceRegEntryV8::updateVec(const TarmacContext& tarmCtx) { auto thread = tarmCtx.thread; ArmISA::VecRegContainer vec_container; - thread->getReg(RegId(regClass, regRelIdx), &vec_container); + thread->getReg(regId, &vec_container); auto vv = vec_container.as(); regWidth = ArmStaticInst::getCurSveVecLenInBits(thread); @@ -153,18 +144,15 @@ TarmacTracerRecordV8::TraceRegEntryV8::updateVec( } regValid = true; - regName = "Z" + std::to_string(regRelIdx); + regName = "Z" + std::to_string(regId.index()); } void -TarmacTracerRecordV8::TraceRegEntryV8::updatePred( - const TarmacContext& tarmCtx, - RegIndex regRelIdx -) 
+TarmacTracerRecordV8::TraceRegEntryV8::updatePred(const TarmacContext& tarmCtx) { auto thread = tarmCtx.thread; ArmISA::VecPredRegContainer pred_container; - thread->getReg(RegId(regClass, regRelIdx), &pred_container); + thread->getReg(regId, &pred_container); // Predicate registers are always 1/8 the size of related vector // registers. (getCurSveVecLenInBits(thread) / 8) @@ -181,7 +169,7 @@ TarmacTracerRecordV8::TraceRegEntryV8::updatePred( } regValid = true; - regName = "P" + std::to_string(regRelIdx); + regName = "P" + std::to_string(regId.index()); } void @@ -325,5 +313,5 @@ TarmacTracerRecordV8::TraceRegEntryV8::formatReg() const } } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/arm/tracers/tarmac_record_v8.hh b/src/arch/arm/tracers/tarmac_record_v8.hh index 231945fd9e..ae972b0040 100644 --- a/src/arch/arm/tracers/tarmac_record_v8.hh +++ b/src/arch/arm/tracers/tarmac_record_v8.hh @@ -48,7 +48,7 @@ namespace gem5 { -namespace Trace { +namespace trace { /** * TarmacTracer record for ARMv8 CPUs: @@ -103,17 +103,10 @@ class TarmacTracerRecordV8 : public TarmacTracerRecord const std::string &prefix = "") const override; protected: - void updateInt(const TarmacContext& tarmCtx, - RegIndex regRelIdx) override; - - void updateMisc(const TarmacContext& tarmCtx, - RegIndex regRelIdx) override; - - void updateVec(const TarmacContext& tarmCtx, - RegIndex regRelIdx) override; - - void updatePred(const TarmacContext& tarmCtx, - RegIndex regRelIdx) override; + void updateInt(const TarmacContext& tarmCtx) override; + void updateMisc(const TarmacContext& tarmCtx) override; + void updateVec(const TarmacContext& tarmCtx) override; + void updatePred(const TarmacContext& tarmCtx) override; /** * Returning a string which contains the formatted @@ -165,7 +158,7 @@ class TarmacTracerRecordV8 : public TarmacTracerRecord void addRegEntry(std::vector& queue, const TarmacContext& ptr); }; -} // namespace Trace +} // namespace trace } // namespace 
gem5 #endif // __ARCH_ARM_TRACERS_TARMAC_RECORD_V8_HH__ diff --git a/src/arch/arm/tracers/tarmac_tracer.cc b/src/arch/arm/tracers/tarmac_tracer.cc index e017cc41f5..aa454f7539 100644 --- a/src/arch/arm/tracers/tarmac_tracer.cc +++ b/src/arch/arm/tracers/tarmac_tracer.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited + * Copyright (c) 2017-2018, 2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -40,12 +40,15 @@ #include #include "arch/arm/system.hh" +#include "base/output.hh" #include "cpu/base.hh" +#include "enums/TarmacDump.hh" + namespace gem5 { -namespace Trace { +namespace trace { std::string TarmacContext::tarmacCpuName() const @@ -54,8 +57,28 @@ TarmacContext::tarmacCpuName() const return "cpu" + std::to_string(id); } +namespace { + +OutputStream * +tarmacDump(const TarmacTracerParams &p) +{ + switch (p.outfile) { + case TarmacDump::stdoutput: + return simout.findOrCreate("stdout"); + case TarmacDump::stderror: + return simout.findOrCreate("stderr"); + case TarmacDump::file: + return simout.findOrCreate(p.name); + default: + panic("Invalid option\n"); + } +} + +} + TarmacTracer::TarmacTracer(const Params &p) : InstTracer(p), + outstream(tarmacDump(p)), startTick(p.start_tick), endTick(p.end_tick) { @@ -95,5 +118,11 @@ TarmacTracer::getInstRecord(Tick when, ThreadContext *tc, } } -} // namespace Trace +std::ostream& +TarmacTracer::output() +{ + return *(outstream->stream()); +} + +} // namespace trace } // namespace gem5 diff --git a/src/arch/arm/tracers/tarmac_tracer.hh b/src/arch/arm/tracers/tarmac_tracer.hh index 29e82ff0cc..f8c7b5ca53 100644 --- a/src/arch/arm/tracers/tarmac_tracer.hh +++ b/src/arch/arm/tracers/tarmac_tracer.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited + * Copyright (c) 2017-2018, 2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -54,8 +54,9 @@ namespace gem5 { class ThreadContext; 
+class OutputStream; -namespace Trace { +namespace trace { /** * This object type is encapsulating the informations needed by @@ -104,12 +105,16 @@ class TarmacTracer : public InstTracer const StaticInstPtr staticInst, const PCStateBase &pc, const StaticInstPtr macroStaticInst=nullptr) override; + std::ostream& output(); + protected: typedef std::unique_ptr PEntryPtr; typedef TarmacTracerRecord::InstPtr InstPtr; typedef TarmacTracerRecord::MemPtr MemPtr; typedef TarmacTracerRecord::RegPtr RegPtr; + OutputStream *outstream; + /** * startTick and endTick allow to trace a specific window of ticks * rather than the entire CPU execution. @@ -129,7 +134,7 @@ class TarmacTracer : public InstTracer std::vector regQueue; }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __ARCH_ARM_TRACERS_TARMAC_TRACER_HH__ diff --git a/src/arch/arm/types.hh b/src/arch/arm/types.hh index 734fe6fd7e..44b20476d2 100644 --- a/src/arch/arm/types.hh +++ b/src/arch/arm/types.hh @@ -297,57 +297,57 @@ namespace ArmISA MODE_MAXMODE = MODE_SYSTEM }; - enum ExceptionClass + enum class ExceptionClass { - EC_INVALID = -1, - EC_UNKNOWN = 0x0, - EC_TRAPPED_WFI_WFE = 0x1, - EC_TRAPPED_CP15_MCR_MRC = 0x3, - EC_TRAPPED_CP15_MCRR_MRRC = 0x4, - EC_TRAPPED_CP14_MCR_MRC = 0x5, - EC_TRAPPED_CP14_LDC_STC = 0x6, - EC_TRAPPED_HCPTR = 0x7, - EC_TRAPPED_SIMD_FP = 0x7, // AArch64 alias - EC_TRAPPED_CP10_MRC_VMRS = 0x8, - EC_TRAPPED_PAC = 0x9, - EC_TRAPPED_BXJ = 0xA, - EC_TRAPPED_CP14_MCRR_MRRC = 0xC, - EC_ILLEGAL_INST = 0xE, - EC_SVC_TO_HYP = 0x11, - EC_SVC = 0x11, // AArch64 alias - EC_HVC = 0x12, - EC_SMC_TO_HYP = 0x13, - EC_SMC = 0x13, // AArch64 alias - EC_SVC_64 = 0x15, - EC_HVC_64 = 0x16, - EC_SMC_64 = 0x17, - EC_TRAPPED_MSR_MRS_64 = 0x18, - EC_TRAPPED_SVE = 0x19, - EC_PREFETCH_ABORT_TO_HYP = 0x20, - EC_PREFETCH_ABORT_LOWER_EL = 0x20, // AArch64 alias - EC_PREFETCH_ABORT_FROM_HYP = 0x21, - EC_PREFETCH_ABORT_CURR_EL = 0x21, // AArch64 alias - EC_PC_ALIGNMENT = 0x22, - 
EC_DATA_ABORT_TO_HYP = 0x24, - EC_DATA_ABORT_LOWER_EL = 0x24, // AArch64 alias - EC_DATA_ABORT_FROM_HYP = 0x25, - EC_DATA_ABORT_CURR_EL = 0x25, // AArch64 alias - EC_STACK_PTR_ALIGNMENT = 0x26, - EC_FP_EXCEPTION = 0x28, - EC_FP_EXCEPTION_64 = 0x2C, - EC_SERROR = 0x2F, - EC_HW_BREAKPOINT = 0x30, - EC_HW_BREAKPOINT_LOWER_EL = 0x30, - EC_HW_BREAKPOINT_CURR_EL = 0x31, - EC_SOFTWARE_STEP = 0x32, - EC_SOFTWARE_STEP_LOWER_EL = 0x32, - EC_SOFTWARE_STEP_CURR_EL = 0x33, - EC_WATCHPOINT = 0x34, - EC_WATCHPOINT_LOWER_EL = 0x34, - EC_WATCHPOINT_CURR_EL = 0x35, - EC_SOFTWARE_BREAKPOINT = 0x38, - EC_VECTOR_CATCH = 0x3A, - EC_SOFTWARE_BREAKPOINT_64 = 0x3C, + INVALID = -1, + UNKNOWN = 0x0, + TRAPPED_WFI_WFE = 0x1, + TRAPPED_CP15_MCR_MRC = 0x3, + TRAPPED_CP15_MCRR_MRRC = 0x4, + TRAPPED_CP14_MCR_MRC = 0x5, + TRAPPED_CP14_LDC_STC = 0x6, + TRAPPED_HCPTR = 0x7, + TRAPPED_SIMD_FP = 0x7, // AArch64 alias + TRAPPED_CP10_MRC_VMRS = 0x8, + TRAPPED_PAC = 0x9, + TRAPPED_BXJ = 0xA, + TRAPPED_CP14_MCRR_MRRC = 0xC, + ILLEGAL_INST = 0xE, + SVC_TO_HYP = 0x11, + SVC = 0x11, // AArch64 alias + HVC = 0x12, + SMC_TO_HYP = 0x13, + SMC = 0x13, // AArch64 alias + SVC_64 = 0x15, + HVC_64 = 0x16, + SMC_64 = 0x17, + TRAPPED_MSR_MRS_64 = 0x18, + TRAPPED_SVE = 0x19, + PREFETCH_ABORT_TO_HYP = 0x20, + PREFETCH_ABORT_LOWER_EL = 0x20, // AArch64 alias + PREFETCH_ABORT_FROM_HYP = 0x21, + PREFETCH_ABORT_CURR_EL = 0x21, // AArch64 alias + PC_ALIGNMENT = 0x22, + DATA_ABORT_TO_HYP = 0x24, + DATA_ABORT_LOWER_EL = 0x24, // AArch64 alias + DATA_ABORT_FROM_HYP = 0x25, + DATA_ABORT_CURR_EL = 0x25, // AArch64 alias + STACK_PTR_ALIGNMENT = 0x26, + FP_EXCEPTION = 0x28, + FP_EXCEPTION_64 = 0x2C, + SERROR = 0x2F, + HW_BREAKPOINT = 0x30, + HW_BREAKPOINT_LOWER_EL = 0x30, + HW_BREAKPOINT_CURR_EL = 0x31, + SOFTWARE_STEP = 0x32, + SOFTWARE_STEP_LOWER_EL = 0x32, + SOFTWARE_STEP_CURR_EL = 0x33, + WATCHPOINT = 0x34, + WATCHPOINT_LOWER_EL = 0x34, + WATCHPOINT_CURR_EL = 0x35, + SOFTWARE_BREAKPOINT = 0x38, + VECTOR_CATCH = 0x3A, + 
SOFTWARE_BREAKPOINT_64 = 0x3C, }; /** diff --git a/src/arch/arm/utility.cc b/src/arch/arm/utility.cc index 1c54c88c57..67645691df 100644 --- a/src/arch/arm/utility.cc +++ b/src/arch/arm/utility.cc @@ -41,6 +41,7 @@ #include "arch/arm/faults.hh" #include "arch/arm/interrupts.hh" +#include "arch/arm/isa.hh" #include "arch/arm/mmu.hh" #include "arch/arm/page_size.hh" #include "arch/arm/regs/cc.hh" @@ -85,7 +86,7 @@ bool isSecureBelowEL3(ThreadContext *tc) { return ArmSystem::haveEL(tc, EL3) && - static_cast(tc->readMiscRegNoEffect(MISCREG_SCR)).ns == 0; + static_cast(tc->readMiscRegNoEffect(MISCREG_SCR_EL3)).ns == 0; } ExceptionLevel @@ -94,16 +95,10 @@ debugTargetFrom(ThreadContext *tc, bool secure) bool route_to_el2; if (ArmSystem::haveEL(tc, EL2) && (!secure || HaveExt(tc, ArmExtension::FEAT_SEL2))) { - if (ELIs32(tc, EL2)) { - const HCR hcr = tc->readMiscReg(MISCREG_HCR); - const HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR); - route_to_el2 = (hdcr.tde == 1 || hcr.tge == 1); - } else { - const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); - const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2); - route_to_el2 = (mdcr.tde == 1 || hcr.tge == 1); - } - }else{ + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); + const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2); + route_to_el2 = (mdcr.tde == 1 || hcr.tge == 1); + } else { route_to_el2 = false; } ExceptionLevel target; @@ -128,8 +123,8 @@ inAArch64(ThreadContext *tc) ExceptionLevel currEL(const ThreadContext *tc) { - return static_cast( - const_cast(tc)->getIsaPtr())->currEL(); + CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR); + return opModeToEL((OperatingMode)(uint8_t)cpsr.mode); } bool @@ -238,7 +233,8 @@ s1TranslationRegime(ThreadContext* tc, ExceptionLevel el) if (el != EL0) return el; else if (ArmSystem::haveEL(tc, EL3) && ELIs32(tc, EL3) && - static_cast(tc->readMiscRegNoEffect(MISCREG_SCR)).ns == 0) + static_cast( + tc->readMiscRegNoEffect(MISCREG_SCR_EL3)).ns == 0) return EL3; else if 
(HaveExt(tc, ArmExtension::FEAT_VHE) && ELIsInHost(tc, el)) return EL2; @@ -501,7 +497,7 @@ Fault mcrMrc15Trap(const MiscRegIndex misc_reg, ExtMachInst mach_inst, ThreadContext *tc, uint32_t imm) { - ExceptionClass ec = EC_TRAPPED_CP15_MCR_MRC; + ExceptionClass ec = ExceptionClass::TRAPPED_CP15_MCR_MRC; if (mcrMrc15TrapToHyp(misc_reg, tc, imm, &ec)) return std::make_shared(mach_inst, imm, ec); return AArch64AArch32SystemAccessTrap(misc_reg, mach_inst, tc, imm, ec); @@ -519,7 +515,7 @@ mcrMrc15TrapToHyp(const MiscRegIndex misc_reg, ThreadContext *tc, uint32_t iss, uint32_t opc2; bool trap_to_hyp = false; - const HCR hcr = tc->readMiscReg(MISCREG_HCR); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); const HDCR hdcr = tc->readMiscReg(MISCREG_HDCR); const HSTR hstr = tc->readMiscReg(MISCREG_HSTR); const HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR); @@ -630,19 +626,11 @@ mcrMrc15TrapToHyp(const MiscRegIndex misc_reg, ThreadContext *tc, uint32_t iss, break; // GICv3 regs case MISCREG_ICC_SGI0R: - { - auto *isa = static_cast(tc->getIsaPtr()); - if (isa->haveGICv3CpuIfc()) - trap_to_hyp = hcr.fmo; - } + trap_to_hyp = hcr.fmo; break; case MISCREG_ICC_SGI1R: case MISCREG_ICC_ASGI1R: - { - auto *isa = static_cast(tc->getIsaPtr()); - if (isa->haveGICv3CpuIfc()) - trap_to_hyp = hcr.imo; - } + trap_to_hyp = hcr.imo; break; case MISCREG_CNTFRQ ... 
MISCREG_CNTV_TVAL: // CNTFRQ may be trapped only on reads @@ -672,7 +660,7 @@ mcrMrc14TrapToHyp(const MiscRegIndex misc_reg, ThreadContext *tc, uint32_t iss) uint32_t opc1; uint32_t opc2; - const HCR hcr = tc->readMiscReg(MISCREG_HCR); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); const HDCR hdcr = tc->readMiscReg(MISCREG_HDCR); const HSTR hstr = tc->readMiscReg(MISCREG_HSTR); const HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR); @@ -721,7 +709,7 @@ Fault mcrrMrrc15Trap(const MiscRegIndex misc_reg, ExtMachInst mach_inst, ThreadContext *tc, uint32_t imm) { - ExceptionClass ec = EC_TRAPPED_CP15_MCRR_MRRC; + ExceptionClass ec = ExceptionClass::TRAPPED_CP15_MCRR_MRRC; if (mcrrMrrc15TrapToHyp(misc_reg, tc, imm, &ec)) return std::make_shared(mach_inst, imm, ec); return AArch64AArch32SystemAccessTrap(misc_reg, mach_inst, tc, imm, ec); @@ -739,7 +727,7 @@ mcrrMrrc15TrapToHyp(const MiscRegIndex misc_reg, ThreadContext *tc, bool is_read; bool trap_to_hyp = false; - const HCR hcr = tc->readMiscReg(MISCREG_HCR); + const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2); const HSTR hstr = tc->readMiscReg(MISCREG_HSTR); if (EL2Enabled(tc) && (currEL(tc) < EL2)) { @@ -851,7 +839,7 @@ isGenericTimerCommonEL0HypTrap(const MiscRegIndex misc_reg, ThreadContext *tc, // As per the architecture, this hyp trap should have uncategorized // exception class if (ec) - *ec = EC_UNKNOWN; + *ec = ExceptionClass::UNKNOWN; return true; } return false; @@ -1340,13 +1328,10 @@ syncVecRegsToElems(ThreadContext *tc) { int ei = 0; for (int ri = 0; ri < NumVecRegs; ri++) { - RegId reg_id(VecRegClass, ri); VecRegContainer reg; - tc->getReg(reg_id, ®); - for (int j = 0; j < NumVecElemPerVecReg; j++, ei++) { - RegId elem_id(VecElemClass, ei); - tc->setReg(elem_id, reg.as()[j]); - } + tc->getReg(vecRegClass[ri], ®); + for (int j = 0; j < NumVecElemPerVecReg; j++, ei++) + tc->setReg(vecElemClass[ei], reg.as()[j]); } } @@ -1357,11 +1342,10 @@ syncVecElemsToRegs(ThreadContext *tc) for (int ri = 0; ri < 
NumVecRegs; ri++) { VecRegContainer reg; for (int j = 0; j < NumVecElemPerVecReg; j++, ei++) { - RegId elem_id(VecElemClass, ei); + RegId elem_id = vecElemClass[ei]; reg.as()[j] = tc->getReg(elem_id); } - RegId reg_id(VecRegClass, ri); - tc->setReg(reg_id, ®); + tc->setReg(vecRegClass[ri], ®); } } diff --git a/src/arch/arm/utility.hh b/src/arch/arm/utility.hh index 2064fef403..b5a5dd72dd 100644 --- a/src/arch/arm/utility.hh +++ b/src/arch/arm/utility.hh @@ -265,19 +265,6 @@ mcrrMrrcIssBuild(bool isRead, uint32_t crm, RegIndex rt, RegIndex rt2, (opc1 << 16); } -static inline uint32_t -msrMrs64IssBuild(bool isRead, uint32_t op0, uint32_t op1, uint32_t crn, - uint32_t crm, uint32_t op2, RegIndex rt) -{ - return isRead | - (crm << 1) | - (rt << 5) | - (crn << 10) | - (op1 << 14) | - (op2 << 17) | - (op0 << 20); -} - Fault mcrMrc15Trap(const MiscRegIndex miscReg, ExtMachInst machInst, ThreadContext *tc, uint32_t imm); bool mcrMrc15TrapToHyp(const MiscRegIndex miscReg, ThreadContext *tc, diff --git a/src/arch/generic/BaseISA.py b/src/arch/generic/BaseISA.py index fa07d732c4..1395eb35a3 100644 --- a/src/arch/generic/BaseISA.py +++ b/src/arch/generic/BaseISA.py @@ -26,8 +26,9 @@ from m5.params import * from m5.SimObject import SimObject + class BaseISA(SimObject): - type = 'BaseISA' + type = "BaseISA" abstract = True cxx_header = "arch/generic/isa.hh" - cxx_class = 'gem5::BaseISA' + cxx_class = "gem5::BaseISA" diff --git a/src/arch/generic/BaseInterrupts.py b/src/arch/generic/BaseInterrupts.py index ab26323982..ef71a1934a 100644 --- a/src/arch/generic/BaseInterrupts.py +++ b/src/arch/generic/BaseInterrupts.py @@ -26,8 +26,9 @@ from m5.params import * from m5.SimObject import SimObject + class BaseInterrupts(SimObject): - type = 'BaseInterrupts' + type = "BaseInterrupts" abstract = True cxx_header = "arch/generic/interrupts.hh" - cxx_class = 'gem5::BaseInterrupts' + cxx_class = "gem5::BaseInterrupts" diff --git a/src/arch/generic/BaseMMU.py b/src/arch/generic/BaseMMU.py 
index c9ea25af34..757a0d2ad7 100644 --- a/src/arch/generic/BaseMMU.py +++ b/src/arch/generic/BaseMMU.py @@ -39,11 +39,12 @@ from m5.objects.BaseTLB import BaseTLB from m5.params import * from m5.SimObject import SimObject + class BaseMMU(SimObject): - type = 'BaseMMU' + type = "BaseMMU" abstract = True cxx_header = "arch/generic/mmu.hh" - cxx_class = 'gem5::BaseMMU' + cxx_class = "gem5::BaseMMU" itb = Param.BaseTLB("Instruction TLB") dtb = Param.BaseTLB("Data TLB") diff --git a/src/arch/generic/BaseTLB.py b/src/arch/generic/BaseTLB.py index cbc296b0d6..e70bf1f477 100644 --- a/src/arch/generic/BaseTLB.py +++ b/src/arch/generic/BaseTLB.py @@ -40,6 +40,7 @@ from m5.params import * from m5.SimObject import SimObject + class TypeTLB(ScopedEnum): """ instruction: TLB contains instruction entries only @@ -53,25 +54,25 @@ class TypeTLB(ScopedEnum): bool has_data = tlb->type() & TypeTLB::data; """ - map = { - 'instruction' : 0x1, - 'data' : 0x2, - 'unified' : 0x3, - } + + map = {"instruction": 0x1, "data": 0x2, "unified": 0x3} + class BaseTLB(SimObject): - type = 'BaseTLB' + type = "BaseTLB" abstract = True cxx_header = "arch/generic/tlb.hh" - cxx_class = 'gem5::BaseTLB' + cxx_class = "gem5::BaseTLB" # Ports to connect with other TLB levels - cpu_side_ports = VectorResponsePort("Ports closer to the CPU side") - slave = DeprecatedParam(cpu_side_ports, - '`slave` is now called `cpu_side_ports`') + cpu_side_ports = VectorResponsePort("Ports closer to the CPU side") + slave = DeprecatedParam( + cpu_side_ports, "`slave` is now called `cpu_side_ports`" + ) mem_side_port = RequestPort("Port closer to memory side") - master = DeprecatedParam(mem_side_port, - '`master` is now called `mem_side_port`') + master = DeprecatedParam( + mem_side_port, "`master` is now called `mem_side_port`" + ) entry_type = Param.TypeTLB("Instruction/Data/Unified TLB entries") diff --git a/src/arch/generic/InstDecoder.py b/src/arch/generic/InstDecoder.py index 7f3b7fd4e0..4c0098080f 100644 --- 
a/src/arch/generic/InstDecoder.py +++ b/src/arch/generic/InstDecoder.py @@ -26,10 +26,11 @@ from m5.params import * from m5.SimObject import SimObject + class InstDecoder(SimObject): - type = 'InstDecoder' + type = "InstDecoder" abstract = True cxx_header = "arch/generic/decoder.hh" - cxx_class = 'gem5::InstDecoder' + cxx_class = "gem5::InstDecoder" isa = Param.BaseISA(NULL, "ISA object for this context") diff --git a/src/arch/generic/isa.hh b/src/arch/generic/isa.hh index a49b17a606..e4e7929385 100644 --- a/src/arch/generic/isa.hh +++ b/src/arch/generic/isa.hh @@ -57,7 +57,7 @@ class ExecContext; class BaseISA : public SimObject { public: - typedef std::vector RegClasses; + typedef std::vector RegClasses; protected: using SimObject::SimObject; @@ -68,6 +68,14 @@ class BaseISA : public SimObject public: virtual PCStateBase *newPCState(Addr new_inst_addr=0) const = 0; + virtual void clear() {} + + virtual RegVal readMiscRegNoEffect(RegIndex idx) const = 0; + virtual RegVal readMiscReg(RegIndex idx) = 0; + + virtual void setMiscRegNoEffect(RegIndex idx, RegVal val) = 0; + virtual void setMiscReg(RegIndex idx, RegVal val) = 0; + virtual void takeOverFrom(ThreadContext *new_tc, ThreadContext *old_tc) {} virtual void setThreadContext(ThreadContext *_tc) { tc = _tc; } diff --git a/src/arch/generic/memhelpers.hh b/src/arch/generic/memhelpers.hh index b07a654388..d5684a6af9 100644 --- a/src/arch/generic/memhelpers.hh +++ b/src/arch/generic/memhelpers.hh @@ -64,7 +64,7 @@ initiateMemRead(XC *xc, Addr addr, std::size_t size, /// to determine the size of the access. template Fault -initiateMemRead(XC *xc, Trace::InstRecord *traceData, Addr addr, +initiateMemRead(XC *xc, trace::InstRecord *traceData, Addr addr, MemT &mem, Request::Flags flags) { static const std::vector byte_enable(sizeof(MemT), true); @@ -75,7 +75,7 @@ initiateMemRead(XC *xc, Trace::InstRecord *traceData, Addr addr, /// Extract the data returned from a timing mode read. 
template void -getMem(PacketPtr pkt, MemT &mem, Trace::InstRecord *traceData) +getMem(PacketPtr pkt, MemT &mem, trace::InstRecord *traceData) { mem = pkt->get(Order); if (traceData) @@ -84,14 +84,14 @@ getMem(PacketPtr pkt, MemT &mem, Trace::InstRecord *traceData) template void -getMemLE(PacketPtr pkt, MemT &mem, Trace::InstRecord *traceData) +getMemLE(PacketPtr pkt, MemT &mem, trace::InstRecord *traceData) { getMem(pkt, mem, traceData); } template void -getMemBE(PacketPtr pkt, MemT &mem, Trace::InstRecord *traceData) +getMemBE(PacketPtr pkt, MemT &mem, trace::InstRecord *traceData) { getMem(pkt, mem, traceData); } @@ -109,7 +109,7 @@ readMemAtomic(XC *xc, Addr addr, uint8_t *mem, /// Read from memory in atomic mode. template Fault -readMemAtomic(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT &mem, +readMemAtomic(XC *xc, trace::InstRecord *traceData, Addr addr, MemT &mem, Request::Flags flags) { memset(&mem, 0, sizeof(mem)); @@ -126,7 +126,7 @@ readMemAtomic(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT &mem, template Fault -readMemAtomicLE(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT &mem, +readMemAtomicLE(XC *xc, trace::InstRecord *traceData, Addr addr, MemT &mem, Request::Flags flags) { return readMemAtomic( @@ -135,7 +135,7 @@ readMemAtomicLE(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT &mem, template Fault -readMemAtomicBE(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT &mem, +readMemAtomicBE(XC *xc, trace::InstRecord *traceData, Addr addr, MemT &mem, Request::Flags flags) { return readMemAtomic(xc, traceData, addr, mem, flags); @@ -153,7 +153,7 @@ writeMemTiming(XC *xc, uint8_t *mem, Addr addr, template Fault -writeMemTiming(XC *xc, Trace::InstRecord *traceData, MemT mem, Addr addr, +writeMemTiming(XC *xc, trace::InstRecord *traceData, MemT mem, Addr addr, Request::Flags flags, uint64_t *res) { if (traceData) { @@ -167,7 +167,7 @@ writeMemTiming(XC *xc, Trace::InstRecord *traceData, MemT mem, Addr addr, template Fault 
-writeMemTimingLE(XC *xc, Trace::InstRecord *traceData, MemT mem, Addr addr, +writeMemTimingLE(XC *xc, trace::InstRecord *traceData, MemT mem, Addr addr, Request::Flags flags, uint64_t *res) { return writeMemTiming( @@ -176,7 +176,7 @@ writeMemTimingLE(XC *xc, Trace::InstRecord *traceData, MemT mem, Addr addr, template Fault -writeMemTimingBE(XC *xc, Trace::InstRecord *traceData, MemT mem, Addr addr, +writeMemTimingBE(XC *xc, trace::InstRecord *traceData, MemT mem, Addr addr, Request::Flags flags, uint64_t *res) { return writeMemTiming( @@ -195,7 +195,7 @@ writeMemAtomic(XC *xc, uint8_t *mem, Addr addr, template Fault -writeMemAtomic(XC *xc, Trace::InstRecord *traceData, const MemT &mem, +writeMemAtomic(XC *xc, trace::InstRecord *traceData, const MemT &mem, Addr addr, Request::Flags flags, uint64_t *res) { if (traceData) { @@ -216,7 +216,7 @@ writeMemAtomic(XC *xc, Trace::InstRecord *traceData, const MemT &mem, template Fault -writeMemAtomicLE(XC *xc, Trace::InstRecord *traceData, const MemT &mem, +writeMemAtomicLE(XC *xc, trace::InstRecord *traceData, const MemT &mem, Addr addr, Request::Flags flags, uint64_t *res) { return writeMemAtomic( @@ -225,7 +225,7 @@ writeMemAtomicLE(XC *xc, Trace::InstRecord *traceData, const MemT &mem, template Fault -writeMemAtomicBE(XC *xc, Trace::InstRecord *traceData, const MemT &mem, +writeMemAtomicBE(XC *xc, trace::InstRecord *traceData, const MemT &mem, Addr addr, Request::Flags flags, uint64_t *res) { return writeMemAtomic( @@ -235,7 +235,7 @@ writeMemAtomicBE(XC *xc, Trace::InstRecord *traceData, const MemT &mem, /// Do atomic read-modify-write (AMO) in atomic mode template Fault -amoMemAtomic(XC *xc, Trace::InstRecord *traceData, MemT &mem, Addr addr, +amoMemAtomic(XC *xc, trace::InstRecord *traceData, MemT &mem, Addr addr, Request::Flags flags, AtomicOpFunctor *_amo_op) { assert(_amo_op); @@ -257,7 +257,7 @@ amoMemAtomic(XC *xc, Trace::InstRecord *traceData, MemT &mem, Addr addr, template Fault -amoMemAtomicLE(XC *xc, 
Trace::InstRecord *traceData, MemT &mem, Addr addr, +amoMemAtomicLE(XC *xc, trace::InstRecord *traceData, MemT &mem, Addr addr, Request::Flags flags, AtomicOpFunctor *_amo_op) { return amoMemAtomic( @@ -266,7 +266,7 @@ amoMemAtomicLE(XC *xc, Trace::InstRecord *traceData, MemT &mem, Addr addr, template Fault -amoMemAtomicBE(XC *xc, Trace::InstRecord *traceData, MemT &mem, Addr addr, +amoMemAtomicBE(XC *xc, trace::InstRecord *traceData, MemT &mem, Addr addr, Request::Flags flags, AtomicOpFunctor *_amo_op) { return amoMemAtomic( @@ -276,7 +276,7 @@ amoMemAtomicBE(XC *xc, Trace::InstRecord *traceData, MemT &mem, Addr addr, /// Do atomic read-modify-wrote (AMO) in timing mode template Fault -initiateMemAMO(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT& mem, +initiateMemAMO(XC *xc, trace::InstRecord *traceData, Addr addr, MemT& mem, Request::Flags flags, AtomicOpFunctor *_amo_op) { assert(_amo_op); diff --git a/src/arch/generic/vec_reg.hh b/src/arch/generic/vec_reg.hh index fecd5c6272..349811f1f0 100644 --- a/src/arch/generic/vec_reg.hh +++ b/src/arch/generic/vec_reg.hh @@ -110,7 +110,7 @@ namespace gem5 { -constexpr unsigned MaxVecRegLenInBytes = 4096; +constexpr unsigned MaxVecRegLenInBytes = 1ULL << 16; // 2^16 bytes /** * Vector Register Abstraction diff --git a/src/arch/isa_parser/isa_parser.py b/src/arch/isa_parser/isa_parser.py index d33bcc4be7..62f33828a1 100755 --- a/src/arch/isa_parser/isa_parser.py +++ b/src/arch/isa_parser/isa_parser.py @@ -41,6 +41,7 @@ import os import re import sys import traceback + # get type names from types import * @@ -49,7 +50,7 @@ from .operand_list import * from .operand_types import * from .util import * -debug=False +debug = False #################### # Template objects. @@ -57,7 +58,8 @@ debug=False # Template objects are format strings that allow substitution from # the attribute spaces of other objects (e.g. InstObjParams instances). 
-labelRE = re.compile(r'(?( &std::remove_pointer_t::srcRegIdxArr), reinterpret_cast( &std::remove_pointer_t::destRegIdxArr)); - ''' + """ - pcstate_decl = f'{self.parser.namespace}::PCState ' \ - '__parserAutoPCState;\n' - myDict['op_decl'] = operands.concatAttrStrings('op_decl') + pcstate_decl = ( + f"{self.parser.namespace}::PCState " "__parserAutoPCState;\n" + ) + myDict["op_decl"] = operands.concatAttrStrings("op_decl") if operands.readPC or operands.setPC: - myDict['op_decl'] += pcstate_decl + myDict["op_decl"] += pcstate_decl is_src = lambda op: op.is_src is_dest = lambda op: op.is_dest - myDict['op_src_decl'] = \ - operands.concatSomeAttrStrings(is_src, 'op_src_decl') - myDict['op_dest_decl'] = \ - operands.concatSomeAttrStrings(is_dest, 'op_dest_decl') + myDict["op_src_decl"] = operands.concatSomeAttrStrings( + is_src, "op_src_decl" + ) + myDict["op_dest_decl"] = operands.concatSomeAttrStrings( + is_dest, "op_dest_decl" + ) if operands.readPC: - myDict['op_src_decl'] += pcstate_decl + myDict["op_src_decl"] += pcstate_decl if operands.setPC: - myDict['op_dest_decl'] += pcstate_decl + myDict["op_dest_decl"] += pcstate_decl - myDict['op_rd'] = operands.concatAttrStrings('op_rd') + myDict["op_rd"] = operands.concatAttrStrings("op_rd") if operands.readPC: - myDict['op_rd'] = \ - 'set(__parserAutoPCState, xc->pcState());\n' + \ - myDict['op_rd'] + myDict["op_rd"] = ( + "set(__parserAutoPCState, xc->pcState());\n" + + myDict["op_rd"] + ) # Compose the op_wb string. If we're going to write back the # PC state because we changed some of its elements, we'll need to @@ -151,20 +166,20 @@ class Template(object): # modifications to the PC to layer appropriately. 
reordered = list(operands.items) reordered.reverse() - op_wb_str = '' - pcWbStr = 'xc->pcState(__parserAutoPCState);\n' + op_wb_str = "" + pcWbStr = "xc->pcState(__parserAutoPCState);\n" for op_desc in reordered: if op_desc.isPCPart() and op_desc.is_dest: op_wb_str = op_desc.op_wb + pcWbStr + op_wb_str - pcWbStr = '' + pcWbStr = "" else: op_wb_str = op_desc.op_wb + op_wb_str - myDict['op_wb'] = op_wb_str + myDict["op_wb"] = op_wb_str elif isinstance(d, dict): # if the argument is a dictionary, we just use it. myDict.update(d) - elif hasattr(d, '__dict__'): + elif hasattr(d, "__dict__"): # if the argument is an object, we use its attribute map. myDict.update(d.__dict__) else: @@ -175,6 +190,7 @@ class Template(object): def __str__(self): return self.template + ################ # Format object. # @@ -182,18 +198,22 @@ class Template(object): # a defineInst() method that generates the code for an instruction # definition. + class Format(object): def __init__(self, id, params, code): self.id = id self.params = params - label = 'def format ' + id - self.user_code = compile(fixPythonIndentation(code), label, 'exec') + label = "def format " + id + self.user_code = compile(fixPythonIndentation(code), label, "exec") param_list = ", ".join(params) - f = '''def defInst(_code, _context, %s): + f = ( + """def defInst(_code, _context, %s): my_locals = vars().copy() exec(_code, _context, my_locals) - return my_locals\n''' % param_list - c = compile(f, label + ' wrapper', 'exec') + return my_locals\n""" + % param_list + ) + c = compile(f, label + " wrapper", "exec") exec(c, globals()) self.func = defInst @@ -204,7 +224,7 @@ class Format(object): Name = name[0].upper() if len(name) > 1: Name += name[1:] - context.update({ 'name' : name, 'Name' : Name }) + context.update({"name": name, "Name": Name}) try: vars = self.func(self.user_code, context, *args[0], **args[1]) except Exception as exc: @@ -212,20 +232,27 @@ class Format(object): raise error(lineno, 'error defining "%s": %s.' 
% (name, exc)) for k in list(vars.keys()): - if k not in ('header_output', 'decoder_output', - 'exec_output', 'decode_block'): + if k not in ( + "header_output", + "decoder_output", + "exec_output", + "decode_block", + ): del vars[k] return GenCode(parser, **vars) + # Special null format to catch an implicit-format instruction # definition outside of any format block. class NoFormat(object): def __init__(self): - self.defaultInst = '' + self.defaultInst = "" def defineInst(self, parser, name, args, lineno): - error(lineno, - 'instruction definition "%s" with no active format!' % name) + error( + lineno, 'instruction definition "%s" with no active format!' % name + ) + ############### # GenCode class @@ -239,11 +266,18 @@ class NoFormat(object): # exec.cc file. The has_decode_default attribute is used in the decode block # to allow explicit default clauses to override default default clauses. + class GenCode(object): # Constructor. - def __init__(self, parser, - header_output = '', decoder_output = '', exec_output = '', - decode_block = '', has_decode_default = False): + def __init__( + self, + parser, + header_output="", + decoder_output="", + exec_output="", + decode_block="", + has_decode_default=False, + ): self.parser = parser self.header_output = header_output self.decoder_output = decoder_output @@ -255,54 +289,58 @@ class GenCode(object): # interwoven by the write_top_level_files(). 
def emit(self): if self.header_output: - self.parser.get_file('header').write(self.header_output) + self.parser.get_file("header").write(self.header_output) if self.decoder_output: - self.parser.get_file('decoder').write(self.decoder_output) + self.parser.get_file("decoder").write(self.decoder_output) if self.exec_output: - self.parser.get_file('exec').write(self.exec_output) + self.parser.get_file("exec").write(self.exec_output) if self.decode_block: - self.parser.get_file('decode_block').write(self.decode_block) + self.parser.get_file("decode_block").write(self.decode_block) # Override '+' operator: generate a new GenCode object that # concatenates all the individual strings in the operands. def __add__(self, other): - return GenCode(self.parser, - self.header_output + other.header_output, - self.decoder_output + other.decoder_output, - self.exec_output + other.exec_output, - self.decode_block + other.decode_block, - self.has_decode_default or other.has_decode_default) + return GenCode( + self.parser, + self.header_output + other.header_output, + self.decoder_output + other.decoder_output, + self.exec_output + other.exec_output, + self.decode_block + other.decode_block, + self.has_decode_default or other.has_decode_default, + ) # Prepend a string (typically a comment) to all the strings. def prepend_all(self, pre): self.header_output = pre + self.header_output - self.decoder_output = pre + self.decoder_output + self.decoder_output = pre + self.decoder_output self.decode_block = pre + self.decode_block - self.exec_output = pre + self.exec_output + self.exec_output = pre + self.exec_output # Wrap the decode block in a pair of strings (e.g., 'case foo:' # and 'break;'). Used to build the big nested switch statement. 
- def wrap_decode_block(self, pre, post = ''): + def wrap_decode_block(self, pre, post=""): self.decode_block = pre + indent(self.decode_block) + post + ##################################################################### # # Bitfield Operator Support # ##################################################################### -bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>') +bitOp1ArgRE = re.compile(r"<\s*(\w+)\s*:\s*>") + +bitOpWordRE = re.compile(r"(?") +bitOpExprRE = re.compile(r"\)<\s*(\w+)\s*:\s*(\w+)\s*>") -bitOpWordRE = re.compile(r'(?') -bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>') def substBitOps(code): # first convert single-bit selectors to two-index form # i.e., --> - code = bitOp1ArgRE.sub(r'<\1:\1>', code) + code = bitOp1ArgRE.sub(r"<\1:\1>", code) # simple case: selector applied to ID (name) # i.e., foo --> bits(foo, a, b) - code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code) + code = bitOpWordRE.sub(r"bits(\1, \2, \3)", code) # if selector is applied to expression (ending in ')'), # we need to search backward for matching '(' match = bitOpExprRE.search(code) @@ -311,17 +349,20 @@ def substBitOps(code): here = exprEnd - 1 nestLevel = 1 while nestLevel > 0: - if code[here] == '(': + if code[here] == "(": nestLevel -= 1 - elif code[here] == ')': + elif code[here] == ")": nestLevel += 1 here -= 1 if here < 0: sys.exit("Didn't find '('!") - exprStart = here+1 - newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1], - match.group(1), match.group(2)) - code = code[:exprStart] + newExpr + code[match.end():] + exprStart = here + 1 + newExpr = r"bits(%s, %s, %s)" % ( + code[exprStart : exprEnd + 1], + match.group(1), + match.group(2), + ) + code = code[:exprStart] + newExpr + code[match.end() :] match = bitOpExprRE.search(code) return code @@ -346,39 +387,43 @@ def makeList(arg): elif not arg: return [] else: - return [ arg ] + return [arg] + def makeFlagConstructor(flag_list): if len(flag_list) == 0: - return '' + return "" # filter out repeated 
flags flag_list.sort() i = 1 while i < len(flag_list): - if flag_list[i] == flag_list[i-1]: + if flag_list[i] == flag_list[i - 1]: del flag_list[i] else: i += 1 - pre = '\n\tflags[' - post = '] = true;' + pre = "\n\tflags[" + post = "] = true;" code = pre + (post + pre).join(flag_list) + post return code + # Assume all instruction flags are of the form 'IsFoo' -instFlagRE = re.compile(r'Is.*') +instFlagRE = re.compile(r"Is.*") # OpClass constants end in 'Op' except No_OpClass -opClassRE = re.compile(r'.*Op|No_OpClass') +opClassRE = re.compile(r".*Op|No_OpClass") + class InstObjParams(object): - def __init__(self, parser, mnem, class_name, base_class = '', - snippets = {}, opt_args = []): + def __init__( + self, parser, mnem, class_name, base_class="", snippets={}, opt_args=[] + ): self.mnemonic = mnem self.class_name = class_name self.base_class = base_class if not isinstance(snippets, dict): - snippets = {'code' : snippets} - compositeCode = ' '.join(list(map(str, snippets.values()))) + snippets = {"code": snippets} + compositeCode = " ".join(list(map(str, snippets.values()))) self.snippets = snippets self.operands = OperandList(parser, compositeCode) @@ -388,12 +433,13 @@ class InstObjParams(object): # The header of the constructor declares the variables to be used # in the body of the constructor. - header = '' + header = "" - self.constructor = header + \ - self.operands.concatAttrStrings('constructor') + self.constructor = header + self.operands.concatAttrStrings( + "constructor" + ) - self.flags = self.operands.concatAttrLists('flags') + self.flags = self.operands.concatAttrLists("flags") self.op_class = None @@ -407,34 +453,36 @@ class InstObjParams(object): elif opClassRE.match(oa): self.op_class = oa else: - error('InstObjParams: optional arg "%s" not recognized ' - 'as StaticInst::Flag or OpClass.' % oa) + error( + 'InstObjParams: optional arg "%s" not recognized ' + "as StaticInst::Flag or OpClass." 
% oa + ) # Make a basic guess on the operand class if not set. # These are good enough for most cases. if not self.op_class: - if 'IsStore' in self.flags: + if "IsStore" in self.flags: # The order matters here: 'IsFloating' and 'IsInteger' are # usually set in FP instructions because of the base # register - if 'IsFloating' in self.flags: - self.op_class = 'FloatMemWriteOp' + if "IsFloating" in self.flags: + self.op_class = "FloatMemWriteOp" else: - self.op_class = 'MemWriteOp' - elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags: + self.op_class = "MemWriteOp" + elif "IsLoad" in self.flags or "IsPrefetch" in self.flags: # The order matters here: 'IsFloating' and 'IsInteger' are # usually set in FP instructions because of the base # register - if 'IsFloating' in self.flags: - self.op_class = 'FloatMemReadOp' + if "IsFloating" in self.flags: + self.op_class = "FloatMemReadOp" else: - self.op_class = 'MemReadOp' - elif 'IsFloating' in self.flags: - self.op_class = 'FloatAddOp' - elif 'IsVector' in self.flags: - self.op_class = 'SimdAddOp' + self.op_class = "MemReadOp" + elif "IsFloating" in self.flags: + self.op_class = "FloatAddOp" + elif "IsVector" in self.flags: + self.op_class = "SimdAddOp" else: - self.op_class = 'IntAluOp' + self.op_class = "IntAluOp" # add flag initialization to contructor here to include # any flags added via opt_args @@ -444,10 +492,10 @@ class InstObjParams(object): # function (which should be provided by isa_desc via a declare) # if 'IsVector' is set, add call to the Vector enable check # function (which should be provided by isa_desc via a declare) - if 'IsFloating' in self.flags: - self.fp_enable_check = 'fault = checkFpEnableFault(xc);' + if "IsFloating" in self.flags: + self.fp_enable_check = "fault = checkFpEnableFault(xc);" else: - self.fp_enable_check = '' + self.fp_enable_check = "" def padSrcRegIdx(self, padding): self.srcRegIdxPadding = padding @@ -462,12 +510,13 @@ class InstObjParams(object): # parses ISA DSL and emits 
C++ headers and source # + class ISAParser(Grammar): def __init__(self, output_dir): super().__init__() self.output_dir = output_dir - self.filename = None # for output file watermarking/scaremongering + self.filename = None # for output file watermarking/scaremongering # variable to hold templates self.templateMap = {} @@ -504,20 +553,22 @@ class ISAParser(Grammar): # file where it was included. self.fileNameStack = Stack() - symbols = ('makeList', 're') + symbols = ("makeList", "re") self.exportContext = dict([(s, eval(s)) for s in symbols]) - self.exportContext.update({ - 'overrideInOperand': overrideInOperand, - 'IntRegOp': IntRegOperandDesc, - 'FloatRegOp': FloatRegOperandDesc, - 'CCRegOp': CCRegOperandDesc, - 'VecElemOp': VecElemOperandDesc, - 'VecRegOp': VecRegOperandDesc, - 'VecPredRegOp': VecPredRegOperandDesc, - 'ControlRegOp': ControlRegOperandDesc, - 'MemOp': MemOperandDesc, - 'PCStateOp': PCStateOperandDesc, - }) + self.exportContext.update( + { + "overrideInOperand": overrideInOperand, + "IntRegOp": IntRegOperandDesc, + "FloatRegOp": FloatRegOperandDesc, + "CCRegOp": CCRegOperandDesc, + "VecElemOp": VecElemOperandDesc, + "VecRegOp": VecRegOperandDesc, + "VecPredRegOp": VecPredRegOperandDesc, + "ControlRegOp": ControlRegOperandDesc, + "MemOp": MemOperandDesc, + "PCStateOp": PCStateOperandDesc, + } + ) self.maxMiscDestRegs = 0 @@ -531,32 +582,33 @@ class ISAParser(Grammar): self.buildOperandREs() return self._operandsWithExtRE - def __getitem__(self, i): # Allow object (self) to be + def __getitem__(self, i): # Allow object (self) to be return getattr(self, i) # passed to %-substitutions # Change the file suffix of a base filename: # (e.g.) 
decoder.cc -> decoder-g.cc.inc for 'global' outputs def suffixize(self, s, sec): - extn = re.compile('(\.[^\.]+)$') # isolate extension + extn = re.compile("(\.[^\.]+)$") # isolate extension if self.namespace: - return extn.sub(r'-ns\1.inc', s) # insert some text on either side + return extn.sub(r"-ns\1.inc", s) # insert some text on either side else: - return extn.sub(r'-g\1.inc', s) + return extn.sub(r"-g\1.inc", s) # Get the file object for emitting code into the specified section # (header, decoder, exec, decode_block). def get_file(self, section): - if section == 'decode_block': - filename = 'decode-method.cc.inc' + if section == "decode_block": + filename = "decode-method.cc.inc" else: - if section == 'header': - file = 'decoder.hh' + if section == "header": + file = "decoder.hh" else: - file = '%s.cc' % section + file = "%s.cc" % section filename = self.suffixize(file, section) try: return self.files[filename] - except KeyError: pass + except KeyError: + pass f = self.open(filename) self.files[filename] = f @@ -573,11 +625,11 @@ class ISAParser(Grammar): # thereof) of the __SPLIT definition during C preprocessing will # select the different chunks. If no 'split' directives are used, # the cpp emissions have no effect. - if re.search('-ns.cc.inc$', filename): - print('#if !defined(__SPLIT) || (__SPLIT == 1)', file=f) + if re.search("-ns.cc.inc$", filename): + print("#if !defined(__SPLIT) || (__SPLIT == 1)", file=f) self.splits[f] = 1 # ensure requisite #include's - elif filename == 'decoder-g.hh.inc': + elif filename == "decoder-g.hh.inc": print('#include "base/bitfield.hh"', file=f) return f @@ -588,97 +640,103 @@ class ISAParser(Grammar): # you directly see the chunks emitted as files that are #include'd. 
def write_top_level_files(self): # decoder header - everything depends on this - file = 'decoder.hh' + file = "decoder.hh" with self.open(file) as f: - f.write('#ifndef __ARCH_%(isa)s_GENERATED_DECODER_HH__\n' - '#define __ARCH_%(isa)s_GENERATED_DECODER_HH__\n\n' % - {'isa': self.isa_name.upper()}) - fn = 'decoder-g.hh.inc' - assert(fn in self.files) + f.write( + "#ifndef __ARCH_%(isa)s_GENERATED_DECODER_HH__\n" + "#define __ARCH_%(isa)s_GENERATED_DECODER_HH__\n\n" + % {"isa": self.isa_name.upper()} + ) + fn = "decoder-g.hh.inc" + assert fn in self.files f.write('#include "%s"\n' % fn) - fn = 'decoder-ns.hh.inc' - assert(fn in self.files) - f.write('namespace gem5\n{\n') - f.write('namespace %s {\n#include "%s"\n} // namespace %s\n' - % (self.namespace, fn, self.namespace)) - f.write('} // namespace gem5') - f.write('\n#endif // __ARCH_%s_GENERATED_DECODER_HH__\n' % - self.isa_name.upper()) + fn = "decoder-ns.hh.inc" + assert fn in self.files + f.write("namespace gem5\n{\n") + f.write( + 'namespace %s {\n#include "%s"\n} // namespace %s\n' + % (self.namespace, fn, self.namespace) + ) + f.write("} // namespace gem5") + f.write( + "\n#endif // __ARCH_%s_GENERATED_DECODER_HH__\n" + % self.isa_name.upper() + ) # decoder method - cannot be split - file = 'decoder.cc' + file = "decoder.cc" with self.open(file) as f: - fn = 'base/compiler.hh' + fn = "base/compiler.hh" f.write('#include "%s"\n' % fn) - fn = 'decoder-g.cc.inc' - assert(fn in self.files) + fn = "decoder-g.cc.inc" + assert fn in self.files f.write('#include "%s"\n' % fn) - fn = 'decoder.hh' + fn = "decoder.hh" f.write('#include "%s"\n' % fn) - fn = 'decode-method.cc.inc' + fn = "decode-method.cc.inc" # is guaranteed to have been written for parse to complete f.write('#include "%s"\n' % fn) - extn = re.compile('(\.[^\.]+)$') + extn = re.compile("(\.[^\.]+)$") # instruction constructors - splits = self.splits[self.get_file('decoder')] - file_ = 'inst-constrs.cc' - for i in range(1, splits+1): + splits = 
self.splits[self.get_file("decoder")] + file_ = "inst-constrs.cc" + for i in range(1, splits + 1): if splits > 1: - file = extn.sub(r'-%d\1' % i, file_) + file = extn.sub(r"-%d\1" % i, file_) else: file = file_ with self.open(file) as f: - fn = 'decoder-g.cc.inc' - assert(fn in self.files) + fn = "decoder-g.cc.inc" + assert fn in self.files f.write('#include "%s"\n' % fn) - fn = 'decoder.hh' + fn = "decoder.hh" f.write('#include "%s"\n' % fn) - fn = 'decoder-ns.cc.inc' - assert(fn in self.files) - print('namespace gem5\n{\n', file=f) - print('namespace %s {' % self.namespace, file=f) + fn = "decoder-ns.cc.inc" + assert fn in self.files + print("namespace gem5\n{\n", file=f) + print("namespace %s {" % self.namespace, file=f) if splits > 1: - print('#define __SPLIT %u' % i, file=f) + print("#define __SPLIT %u" % i, file=f) print('#include "%s"' % fn, file=f) - print('} // namespace %s' % self.namespace, file=f) - print('} // namespace gem5', file=f) + print("} // namespace %s" % self.namespace, file=f) + print("} // namespace gem5", file=f) # instruction execution - splits = self.splits[self.get_file('exec')] - for i in range(1, splits+1): - file = 'generic_cpu_exec.cc' + splits = self.splits[self.get_file("exec")] + for i in range(1, splits + 1): + file = "generic_cpu_exec.cc" if splits > 1: - file = extn.sub(r'_%d\1' % i, file) + file = extn.sub(r"_%d\1" % i, file) with self.open(file) as f: - fn = 'exec-g.cc.inc' - assert(fn in self.files) + fn = "exec-g.cc.inc" + assert fn in self.files f.write('#include "%s"\n' % fn) f.write('#include "cpu/exec_context.hh"\n') f.write('#include "decoder.hh"\n') - fn = 'exec-ns.cc.inc' - assert(fn in self.files) - print('namespace gem5\n{\n', file=f) - print('namespace %s {' % self.namespace, file=f) + fn = "exec-ns.cc.inc" + assert fn in self.files + print("namespace gem5\n{\n", file=f) + print("namespace %s {" % self.namespace, file=f) if splits > 1: - print('#define __SPLIT %u' % i, file=f) + print("#define __SPLIT %u" % i, 
file=f) print('#include "%s"' % fn, file=f) - print('} // namespace %s' % self.namespace, file=f) - print('} // namespace gem5', file=f) + print("} // namespace %s" % self.namespace, file=f) + print("} // namespace gem5", file=f) - scaremonger_template ='''// DO NOT EDIT + scaremonger_template = """// DO NOT EDIT // This file was automatically generated from an ISA description: // %(filename)s -'''; +""" ##################################################################### # @@ -699,77 +757,92 @@ class ISAParser(Grammar): # using the same regexp as generic IDs, but distinguished in the # t_ID() function. The PLY documentation suggests this approach. reserved = ( - 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT', - 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS', - 'OUTPUT', 'SIGNED', 'SPLIT', 'TEMPLATE' - ) + "BITFIELD", + "DECODE", + "DECODER", + "DEFAULT", + "DEF", + "EXEC", + "FORMAT", + "HEADER", + "LET", + "NAMESPACE", + "OPERAND_TYPES", + "OPERANDS", + "OUTPUT", + "SIGNED", + "SPLIT", + "TEMPLATE", + ) # List of tokens. The lex module requires this. tokens = reserved + ( # identifier - 'ID', - + "ID", # integer literal - 'INTLIT', - + "INTLIT", # string literal - 'STRLIT', - + "STRLIT", # code literal - 'CODELIT', - + "CODELIT", # ( ) [ ] { } < > , ; . : :: * - 'LPAREN', 'RPAREN', - 'LBRACKET', 'RBRACKET', - 'LBRACE', 'RBRACE', - 'LESS', 'GREATER', 'EQUALS', - 'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON', - 'ASTERISK', - + "LPAREN", + "RPAREN", + "LBRACKET", + "RBRACKET", + "LBRACE", + "RBRACE", + "LESS", + "GREATER", + "EQUALS", + "COMMA", + "SEMI", + "DOT", + "COLON", + "DBLCOLON", + "ASTERISK", # C preprocessor directives - 'CPPDIRECTIVE' - - # The following are matched but never returned. commented out to - # suppress PLY warning + "CPPDIRECTIVE" + # The following are matched but never returned. 
commented out to + # suppress PLY warning # newfile directive - # 'NEWFILE', - + # 'NEWFILE', # endfile directive - # 'ENDFILE' + # 'ENDFILE' ) # Regular expressions for token matching - t_LPAREN = r'\(' - t_RPAREN = r'\)' - t_LBRACKET = r'\[' - t_RBRACKET = r'\]' - t_LBRACE = r'\{' - t_RBRACE = r'\}' - t_LESS = r'\<' - t_GREATER = r'\>' - t_EQUALS = r'=' - t_COMMA = r',' - t_SEMI = r';' - t_DOT = r'\.' - t_COLON = r':' - t_DBLCOLON = r'::' - t_ASTERISK = r'\*' + t_LPAREN = r"\(" + t_RPAREN = r"\)" + t_LBRACKET = r"\[" + t_RBRACKET = r"\]" + t_LBRACE = r"\{" + t_RBRACE = r"\}" + t_LESS = r"\<" + t_GREATER = r"\>" + t_EQUALS = r"=" + t_COMMA = r"," + t_SEMI = r";" + t_DOT = r"\." + t_COLON = r":" + t_DBLCOLON = r"::" + t_ASTERISK = r"\*" # Identifiers and reserved words - reserved_map = { } + reserved_map = {} for r in reserved: reserved_map[r.lower()] = r def t_ID(self, t): - r'[A-Za-z_]\w*' - t.type = self.reserved_map.get(t.value, 'ID') + r"[A-Za-z_]\w*" + t.type = self.reserved_map.get(t.value, "ID") return t # Integer literal def t_INTLIT(self, t): - r'-?(0x[\da-fA-F]+)|\d+' + r"-?(0x[\da-fA-F]+)|\d+" try: - t.value = int(t.value,0) + t.value = int(t.value, 0) except ValueError: error(t.lexer.lineno, 'Integer value "%s" too large' % t.value) t.value = 0 @@ -781,22 +854,21 @@ class ISAParser(Grammar): r"(?m)'([^'])+'" # strip off quotes t.value = t.value[1:-1] - t.lexer.lineno += t.value.count('\n') + t.lexer.lineno += t.value.count("\n") return t - # "Code literal"... 
like a string literal, but delimiters are # '{{' and '}}' so they get formatted nicely under emacs c-mode def t_CODELIT(self, t): r"(?m)\{\{([^\}]|}(?!\}))+\}\}" # strip off {{ & }} t.value = t.value[2:-2] - t.lexer.lineno += t.value.count('\n') + t.lexer.lineno += t.value.count("\n") return t def t_CPPDIRECTIVE(self, t): - r'^\#[^\#].*\n' - t.lexer.lineno += t.value.count('\n') + r"^\#[^\#].*\n" + t.lexer.lineno += t.value.count("\n") return t def t_NEWFILE(self, t): @@ -805,7 +877,7 @@ class ISAParser(Grammar): t.lexer.lineno = LineTracker(t.value[11:-2]) def t_ENDFILE(self, t): - r'^\#\#endfile\n' + r"^\#\#endfile\n" t.lexer.lineno = self.fileNameStack.pop() # @@ -815,15 +887,15 @@ class ISAParser(Grammar): # Newlines def t_NEWLINE(self, t): - r'\n+' - t.lexer.lineno += t.value.count('\n') + r"\n+" + t.lexer.lineno += t.value.count("\n") # Comments def t_comment(self, t): - r'//.*' + r"//.*" # Completely ignored characters - t_ignore = ' \t\x0c' + t_ignore = " \t\x0c" # Error handler def t_error(self, t): @@ -854,13 +926,13 @@ class ISAParser(Grammar): # after will be inside. The decoder function is always inside the # namespace. def p_specification(self, t): - 'specification : opt_defs_and_outputs top_level_decode_block' + "specification : opt_defs_and_outputs top_level_decode_block" for f in self.splits.keys(): - f.write('\n#endif\n') + f.write("\n#endif\n") - for f in self.files.values(): # close ALL the files; - f.close() # not doing so can cause compilation to fail + for f in self.files.values(): # close ALL the files; + f.close() # not doing so can cause compilation to fail self.write_top_level_files() @@ -872,39 +944,39 @@ class ISAParser(Grammar): # as soon as possible, except for the decode_block, which has to be # accumulated into one large function of nested switch/case blocks. 
def p_opt_defs_and_outputs_0(self, t): - 'opt_defs_and_outputs : empty' + "opt_defs_and_outputs : empty" def p_opt_defs_and_outputs_1(self, t): - 'opt_defs_and_outputs : defs_and_outputs' + "opt_defs_and_outputs : defs_and_outputs" def p_defs_and_outputs_0(self, t): - 'defs_and_outputs : def_or_output' + "defs_and_outputs : def_or_output" def p_defs_and_outputs_1(self, t): - 'defs_and_outputs : defs_and_outputs def_or_output' + "defs_and_outputs : defs_and_outputs def_or_output" # The list of possible definition/output statements. # They are all processed as they are seen. def p_def_or_output(self, t): - '''def_or_output : name_decl - | def_format - | def_bitfield - | def_bitfield_struct - | def_template - | def_operand_types - | def_operands - | output - | global_let - | split''' + """def_or_output : name_decl + | def_format + | def_bitfield + | def_bitfield_struct + | def_template + | def_operand_types + | def_operands + | output + | global_let + | split""" # Utility function used by both invocations of splitting - explicit # 'split' keyword and split() function inside "let {{ }};" blocks. 
def split(self, sec, write=False): - assert(sec != 'header' and "header cannot be split") + assert sec != "header" and "header cannot be split" f = self.get_file(sec) self.splits[f] += 1 - s = '\n#endif\n#if __SPLIT == %u\n' % self.splits[f] + s = "\n#endif\n#if __SPLIT == %u\n" % self.splits[f] if write: f.write(s) else: @@ -912,23 +984,23 @@ class ISAParser(Grammar): # split output file to reduce compilation time def p_split(self, t): - 'split : SPLIT output_type SEMI' - assert(self.isa_name and "'split' not allowed before namespace decl") + "split : SPLIT output_type SEMI" + assert self.isa_name and "'split' not allowed before namespace decl" self.split(t[2], True) def p_output_type(self, t): - '''output_type : DECODER - | HEADER - | EXEC''' + """output_type : DECODER + | HEADER + | EXEC""" t[0] = t[1] # ISA name declaration looks like "namespace ;" def p_name_decl(self, t): - 'name_decl : NAMESPACE ID SEMI' - assert(self.isa_name == None and "Only 1 namespace decl permitted") + "name_decl : NAMESPACE ID SEMI" + assert self.isa_name == None and "Only 1 namespace decl permitted" self.isa_name = t[2] - self.namespace = t[2] + 'Inst' + self.namespace = t[2] + "Inst" # Output blocks 'output {{...}}' (C++ code blocks) are copied # directly to the appropriate output section. @@ -942,13 +1014,14 @@ class ISAParser(Grammar): return substBitOps(s % self.templateMap) def p_output(self, t): - 'output : OUTPUT output_type CODELIT SEMI' - kwargs = { t[2]+'_output' : self.process_output(t[3]) } + "output : OUTPUT output_type CODELIT SEMI" + kwargs = {t[2] + "_output": self.process_output(t[3])} GenCode(self, **kwargs).emit() def make_split(self): def _split(sec): return self.split(sec) + return _split # global let blocks 'let {{...}}' (Python code blocks) are @@ -956,21 +1029,21 @@ class ISAParser(Grammar): # special variable context 'exportContext' to prevent the code # from polluting this script's namespace. 
def p_global_let(self, t): - 'global_let : LET CODELIT SEMI' + "global_let : LET CODELIT SEMI" self.updateExportContext() - self.exportContext["header_output"] = '' - self.exportContext["decoder_output"] = '' - self.exportContext["exec_output"] = '' - self.exportContext["decode_block"] = '' + self.exportContext["header_output"] = "" + self.exportContext["decoder_output"] = "" + self.exportContext["exec_output"] = "" + self.exportContext["decode_block"] = "" self.exportContext["split"] = self.make_split() - split_setup = ''' + split_setup = """ def wrap(func): def split(sec): globals()[sec + '_output'] += func(sec) return split split = wrap(split) del wrap -''' +""" # This tricky setup (immediately above) allows us to just write # (e.g.) "split('exec')" in the Python code and the split #ifdef's # will automatically be added to the exec_output variable. The inner @@ -979,94 +1052,97 @@ del wrap # next split's #define from the parser and add it to the current # emission-in-progress. try: - exec(split_setup+fixPythonIndentation(t[2]), self.exportContext) + exec(split_setup + fixPythonIndentation(t[2]), self.exportContext) except Exception as exc: traceback.print_exc(file=sys.stdout) if debug: raise - error(t.lineno(1), 'In global let block: %s' % exc) - GenCode(self, - header_output=self.exportContext["header_output"], - decoder_output=self.exportContext["decoder_output"], - exec_output=self.exportContext["exec_output"], - decode_block=self.exportContext["decode_block"]).emit() + error(t.lineno(1), "In global let block: %s" % exc) + GenCode( + self, + header_output=self.exportContext["header_output"], + decoder_output=self.exportContext["decoder_output"], + exec_output=self.exportContext["exec_output"], + decode_block=self.exportContext["decode_block"], + ).emit() # Define the mapping from operand type extensions to C++ types and # bit widths (stored in operandTypeMap). 
def p_def_operand_types(self, t): - 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI' + "def_operand_types : DEF OPERAND_TYPES CODELIT SEMI" try: - self.operandTypeMap = eval('{' + t[3] + '}') + self.operandTypeMap = eval("{" + t[3] + "}") except Exception as exc: if debug: raise - error(t.lineno(1), - 'In def operand_types: %s' % exc) + error(t.lineno(1), "In def operand_types: %s" % exc) # Define the mapping from operand names to operand classes and # other traits. Stored in operandNameMap. def p_def_operands(self, t): - 'def_operands : DEF OPERANDS CODELIT SEMI' - if not hasattr(self, 'operandTypeMap'): - error(t.lineno(1), - 'error: operand types must be defined before operands') + "def_operands : DEF OPERANDS CODELIT SEMI" + if not hasattr(self, "operandTypeMap"): + error( + t.lineno(1), + "error: operand types must be defined before operands", + ) try: - user_dict = eval('{' + t[3] + '}', self.exportContext) + user_dict = eval("{" + t[3] + "}", self.exportContext) except Exception as exc: if debug: raise - error(t.lineno(1), 'In def operands: %s' % exc) + error(t.lineno(1), "In def operands: %s" % exc) self.buildOperandNameMap(user_dict, t.lexer.lineno) # A bitfield definition looks like: # 'def [signed] bitfield [:]' # This generates a preprocessor macro in the output file. 
def p_def_bitfield_0(self, t): - 'def_bitfield : DEF opt_signed ' \ - 'BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI' - expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8]) - if (t[2] == 'signed'): - expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr) - hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr) + "def_bitfield : DEF opt_signed " "BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI" + expr = "bits(machInst, %2d, %2d)" % (t[6], t[8]) + if t[2] == "signed": + expr = "sext<%d>(%s)" % (t[6] - t[8] + 1, expr) + hash_define = "#undef %s\n#define %s\t%s\n" % (t[4], t[4], expr) GenCode(self, header_output=hash_define).emit() # alternate form for single bit: 'def [signed] bitfield []' def p_def_bitfield_1(self, t): - 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI' - expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6]) - if (t[2] == 'signed'): - expr = 'sext<%d>(%s)' % (1, expr) - hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr) + "def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI" + expr = "bits(machInst, %2d, %2d)" % (t[6], t[6]) + if t[2] == "signed": + expr = "sext<%d>(%s)" % (1, expr) + hash_define = "#undef %s\n#define %s\t%s\n" % (t[4], t[4], expr) GenCode(self, header_output=hash_define).emit() # alternate form for structure member: 'def bitfield ' def p_def_bitfield_struct(self, t): - 'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI' - if (t[2] != ''): - error(t.lineno(1), - 'error: structure bitfields are always unsigned.') - expr = 'machInst.%s' % t[5] - hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr) + "def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI" + if t[2] != "": + error( + t.lineno(1), "error: structure bitfields are always unsigned." 
+ ) + expr = "machInst.%s" % t[5] + hash_define = "#undef %s\n#define %s\t%s\n" % (t[4], t[4], expr) GenCode(self, header_output=hash_define).emit() def p_id_with_dot_0(self, t): - 'id_with_dot : ID' + "id_with_dot : ID" t[0] = t[1] def p_id_with_dot_1(self, t): - 'id_with_dot : ID DOT id_with_dot' + "id_with_dot : ID DOT id_with_dot" t[0] = t[1] + t[2] + t[3] def p_opt_signed_0(self, t): - 'opt_signed : SIGNED' + "opt_signed : SIGNED" t[0] = t[1] def p_opt_signed_1(self, t): - 'opt_signed : empty' - t[0] = '' + "opt_signed : empty" + t[0] = "" def p_def_template(self, t): - 'def_template : DEF TEMPLATE ID CODELIT SEMI' + "def_template : DEF TEMPLATE ID CODELIT SEMI" if t[3] in self.templateMap: print("warning: template %s already defined" % t[3]) self.templateMap[t[3]] = Template(self, t[4]) @@ -1074,7 +1150,7 @@ del wrap # An instruction format definition looks like # "def format () {{...}};" def p_def_format(self, t): - 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI' + "def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI" (id, params, code) = (t[3], t[5], t[7]) self.defFormat(id, params, code, t.lexer.lineno) @@ -1093,49 +1169,49 @@ del wrap # list of the positional params and the second element is a dict # containing the keyword params. 
def p_param_list_0(self, t): - 'param_list : positional_param_list COMMA nonpositional_param_list' + "param_list : positional_param_list COMMA nonpositional_param_list" t[0] = t[1] + t[3] def p_param_list_1(self, t): - '''param_list : positional_param_list - | nonpositional_param_list''' + """param_list : positional_param_list + | nonpositional_param_list""" t[0] = t[1] def p_positional_param_list_0(self, t): - 'positional_param_list : empty' + "positional_param_list : empty" t[0] = [] def p_positional_param_list_1(self, t): - 'positional_param_list : ID' + "positional_param_list : ID" t[0] = [t[1]] def p_positional_param_list_2(self, t): - 'positional_param_list : positional_param_list COMMA ID' + "positional_param_list : positional_param_list COMMA ID" t[0] = t[1] + [t[3]] def p_nonpositional_param_list_0(self, t): - 'nonpositional_param_list : keyword_param_list COMMA excess_args_param' + "nonpositional_param_list : keyword_param_list COMMA excess_args_param" t[0] = t[1] + t[3] def p_nonpositional_param_list_1(self, t): - '''nonpositional_param_list : keyword_param_list - | excess_args_param''' + """nonpositional_param_list : keyword_param_list + | excess_args_param""" t[0] = t[1] def p_keyword_param_list_0(self, t): - 'keyword_param_list : keyword_param' + "keyword_param_list : keyword_param" t[0] = [t[1]] def p_keyword_param_list_1(self, t): - 'keyword_param_list : keyword_param_list COMMA keyword_param' + "keyword_param_list : keyword_param_list COMMA keyword_param" t[0] = t[1] + [t[3]] def p_keyword_param(self, t): - 'keyword_param : ID EQUALS expr' - t[0] = t[1] + ' = ' + t[3].__repr__() + "keyword_param : ID EQUALS expr" + t[0] = t[1] + " = " + t[3].__repr__() def p_excess_args_param(self, t): - 'excess_args_param : ASTERISK ID' + "excess_args_param : ASTERISK ID" # Just concatenate them: '*ID'. Wrap in list to be consistent # with positional_param_list and keyword_param_list. 
t[0] = [t[1] + t[2]] @@ -1148,27 +1224,31 @@ del wrap # decode [, ]* [default ] { ... } # def p_top_level_decode_block(self, t): - 'top_level_decode_block : decode_block' + "top_level_decode_block : decode_block" codeObj = t[1] - codeObj.wrap_decode_block(''' + codeObj.wrap_decode_block( + """ using namespace gem5; StaticInstPtr %(isa_name)s::Decoder::decodeInst(%(isa_name)s::ExtMachInst machInst) { using namespace %(namespace)s; -''' % self, '}') +""" + % self, + "}", + ) codeObj.emit() def p_decode_block(self, t): - 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE' + "decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE" default_defaults = self.defaultStack.pop() codeObj = t[5] # use the "default defaults" only if there was no explicit # default statement in decode_stmt_list if not codeObj.has_decode_default: codeObj += default_defaults - codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n') + codeObj.wrap_decode_block("switch (%s) {\n" % t[2], "}\n") t[0] = codeObj # The opt_default statement serves only to push the "default @@ -1176,7 +1256,7 @@ StaticInstPtr # decode blocks, and used and popped off when the current # decode_block is processed (in p_decode_block() above). 
def p_opt_default_0(self, t): - 'opt_default : empty' + "opt_default : empty" # no default specified: reuse the one currently at the top of # the stack self.defaultStack.push(self.defaultStack.top()) @@ -1184,22 +1264,22 @@ StaticInstPtr t[0] = None def p_opt_default_1(self, t): - 'opt_default : DEFAULT inst' + "opt_default : DEFAULT inst" # push the new default codeObj = t[2] - codeObj.wrap_decode_block('\ndefault:\n', 'break;\n') + codeObj.wrap_decode_block("\ndefault:\n", "break;\n") self.defaultStack.push(codeObj) # no meaningful value returned t[0] = None def p_decode_stmt_list_0(self, t): - 'decode_stmt_list : decode_stmt' + "decode_stmt_list : decode_stmt" t[0] = t[1] def p_decode_stmt_list_1(self, t): - 'decode_stmt_list : decode_stmt decode_stmt_list' - if (t[1].has_decode_default and t[2].has_decode_default): - error(t.lineno(1), 'Two default cases in decode block') + "decode_stmt_list : decode_stmt decode_stmt_list" + if t[1].has_decode_default and t[2].has_decode_default: + error(t.lineno(1), "Two default cases in decode block") t[0] = t[1] + t[2] # @@ -1211,7 +1291,6 @@ StaticInstPtr # 3. Instruction definitions. # 4. C preprocessor directives. - # Preprocessor directives found in a decode statement list are # passed through to the output, replicated to all of the output # code streams. This works well for ifdefs, so we can ifdef out @@ -1220,7 +1299,7 @@ StaticInstPtr # makes it easy to keep them in the right place with respect to # the code generated by the other statements. def p_decode_stmt_cpp(self, t): - 'decode_stmt : CPPDIRECTIVE' + "decode_stmt : CPPDIRECTIVE" t[0] = GenCode(self, t[1], t[1], t[1], t[1]) # A format block 'format { ... }' sets the default @@ -1229,7 +1308,7 @@ StaticInstPtr # format on the instruction definition or with a nested format # block. 
def p_decode_stmt_format(self, t): - 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE' + "decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE" # The format will be pushed on the stack when 'push_format_id' # is processed (see below). Once the parser has recognized # the full production (though the right brace), we're done @@ -1241,33 +1320,34 @@ StaticInstPtr # stack) when we recognize the format name part of the format # block. def p_push_format_id(self, t): - 'push_format_id : ID' + "push_format_id : ID" try: self.formatStack.push(self.formatMap[t[1]]) - t[0] = ('', '// format %s' % t[1]) + t[0] = ("", "// format %s" % t[1]) except KeyError: error(t.lineno(1), 'instruction format "%s" not defined.' % t[1]) # Nested decode block: if the value of the current field matches # the specified constant(s), do a nested decode on some other field. def p_decode_stmt_decode(self, t): - 'decode_stmt : case_list COLON decode_block' + "decode_stmt : case_list COLON decode_block" case_list = t[1] codeObj = t[3] # just wrap the decoding code from the block as a case in the # outer switch statement. - codeObj.wrap_decode_block('\n%s\n' % ''.join(case_list), - 'GEM5_UNREACHABLE;\n') - codeObj.has_decode_default = (case_list == ['default:']) + codeObj.wrap_decode_block( + "\n%s\n" % "".join(case_list), "GEM5_UNREACHABLE;\n" + ) + codeObj.has_decode_default = case_list == ["default:"] t[0] = codeObj # Instruction definition (finally!). 
def p_decode_stmt_inst(self, t): - 'decode_stmt : case_list COLON inst SEMI' + "decode_stmt : case_list COLON inst SEMI" case_list = t[1] codeObj = t[3] - codeObj.wrap_decode_block('\n%s' % ''.join(case_list), 'break;\n') - codeObj.has_decode_default = (case_list == ['default:']) + codeObj.wrap_decode_block("\n%s" % "".join(case_list), "break;\n") + codeObj.has_decode_default = case_list == ["default:"] t[0] = codeObj # The constant list for a decode case label must be non-empty, and must @@ -1275,33 +1355,33 @@ StaticInstPtr # comma-separated integer literals or strings which evaluate to # constants when compiled as C++. def p_case_list_0(self, t): - 'case_list : DEFAULT' - t[0] = ['default:'] + "case_list : DEFAULT" + t[0] = ["default:"] def prep_int_lit_case_label(self, lit): if lit >= 2**32: - return 'case %#xULL: ' % lit + return "case %#xULL: " % lit else: - return 'case %#x: ' % lit + return "case %#x: " % lit def prep_str_lit_case_label(self, lit): - return 'case %s: ' % lit + return "case %s: " % lit def p_case_list_1(self, t): - 'case_list : INTLIT' + "case_list : INTLIT" t[0] = [self.prep_int_lit_case_label(t[1])] def p_case_list_2(self, t): - 'case_list : STRLIT' + "case_list : STRLIT" t[0] = [self.prep_str_lit_case_label(t[1])] def p_case_list_3(self, t): - 'case_list : case_list COMMA INTLIT' + "case_list : case_list COMMA INTLIT" t[0] = t[1] t[0].append(self.prep_int_lit_case_label(t[3])) def p_case_list_4(self, t): - 'case_list : case_list COMMA STRLIT' + "case_list : case_list COMMA STRLIT" t[0] = t[1] t[0].append(self.prep_str_lit_case_label(t[3])) @@ -1309,28 +1389,28 @@ StaticInstPtr # (specified by an enclosing format block). # "()" def p_inst_0(self, t): - 'inst : ID LPAREN arg_list RPAREN' + "inst : ID LPAREN arg_list RPAREN" # Pass the ID and arg list to the current format class to deal with. 
currentFormat = self.formatStack.top() codeObj = currentFormat.defineInst(self, t[1], t[3], t.lexer.lineno) - args = ','.join(list(map(str, t[3]))) - args = re.sub('(?m)^', '//', args) - args = re.sub('^//', '', args) - comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args) + args = ",".join(list(map(str, t[3]))) + args = re.sub("(?m)^", "//", args) + args = re.sub("^//", "", args) + comment = "\n// %s::%s(%s)\n" % (currentFormat.id, t[1], args) codeObj.prepend_all(comment) t[0] = codeObj # Define an instruction using an explicitly specified format: # "::()" def p_inst_1(self, t): - 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN' + "inst : ID DBLCOLON ID LPAREN arg_list RPAREN" try: format = self.formatMap[t[1]] except KeyError: error(t.lineno(1), 'instruction format "%s" not defined.' % t[1]) codeObj = format.defineInst(self, t[3], t[5], t.lexer.lineno) - comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5]) + comment = "\n// %s::%s(%s)\n" % (t[1], t[3], t[5]) codeObj.prepend_all(comment) t[0] = codeObj @@ -1338,41 +1418,41 @@ StaticInstPtr # list of the positional args and the second element is a dict # containing the keyword args. 
def p_arg_list_0(self, t): - 'arg_list : positional_arg_list COMMA keyword_arg_list' - t[0] = ( t[1], t[3] ) + "arg_list : positional_arg_list COMMA keyword_arg_list" + t[0] = (t[1], t[3]) def p_arg_list_1(self, t): - 'arg_list : positional_arg_list' - t[0] = ( t[1], {} ) + "arg_list : positional_arg_list" + t[0] = (t[1], {}) def p_arg_list_2(self, t): - 'arg_list : keyword_arg_list' - t[0] = ( [], t[1] ) + "arg_list : keyword_arg_list" + t[0] = ([], t[1]) def p_positional_arg_list_0(self, t): - 'positional_arg_list : empty' + "positional_arg_list : empty" t[0] = [] def p_positional_arg_list_1(self, t): - 'positional_arg_list : expr' + "positional_arg_list : expr" t[0] = [t[1]] def p_positional_arg_list_2(self, t): - 'positional_arg_list : positional_arg_list COMMA expr' + "positional_arg_list : positional_arg_list COMMA expr" t[0] = t[1] + [t[3]] def p_keyword_arg_list_0(self, t): - 'keyword_arg_list : keyword_arg' + "keyword_arg_list : keyword_arg" t[0] = t[1] def p_keyword_arg_list_1(self, t): - 'keyword_arg_list : keyword_arg_list COMMA keyword_arg' + "keyword_arg_list : keyword_arg_list COMMA keyword_arg" t[0] = t[1] t[0].update(t[3]) def p_keyword_arg(self, t): - 'keyword_arg : ID EQUALS expr' - t[0] = { t[1] : t[3] } + "keyword_arg : ID EQUALS expr" + t[0] = {t[1]: t[3]} # # Basic expressions. These constitute the argument values of @@ -1386,33 +1466,33 @@ StaticInstPtr # there isn't really a variable namespace to refer to). 
# def p_expr_0(self, t): - '''expr : ID - | INTLIT - | STRLIT - | CODELIT''' + """expr : ID + | INTLIT + | STRLIT + | CODELIT""" t[0] = t[1] def p_expr_1(self, t): - '''expr : LBRACKET list_expr RBRACKET''' + """expr : LBRACKET list_expr RBRACKET""" t[0] = t[2] def p_list_expr_0(self, t): - 'list_expr : expr' + "list_expr : expr" t[0] = [t[1]] def p_list_expr_1(self, t): - 'list_expr : list_expr COMMA expr' + "list_expr : list_expr COMMA expr" t[0] = t[1] + [t[3]] def p_list_expr_2(self, t): - 'list_expr : empty' + "list_expr : empty" t[0] = [] # # Empty production... use in other rules for readability. # def p_empty(self, t): - 'empty :' + "empty :" pass # Parse error handler. Note that the argument here is the @@ -1431,15 +1511,16 @@ StaticInstPtr class InstObjParamsWrapper(InstObjParams): def __init__(iop, *args, **kwargs): super().__init__(self, *args, **kwargs) - self.exportContext['InstObjParams'] = InstObjParamsWrapper + + self.exportContext["InstObjParams"] = InstObjParamsWrapper self.exportContext.update(self.templateMap) def defFormat(self, id, params, code, lineno): - '''Define a new format''' + """Define a new format""" # make sure we haven't already defined this one if id in self.formatMap: - error(lineno, 'format %s redefined.' % id) + error(lineno, "format %s redefined." % id) # create new object and store in global map self.formatMap[id] = Format(id, params, code) @@ -1447,14 +1528,14 @@ StaticInstPtr def buildOperandNameMap(self, user_dict, lineno): operand_name = {} for op_name, op_desc in user_dict.items(): - assert(isinstance(op_desc, OperandDesc)) + assert isinstance(op_desc, OperandDesc) - base_cls = op_desc.attrs['base_cls'] + base_cls = op_desc.attrs["base_cls"] op_desc.setName(op_name) # New class name will be e.g. 
"IntRegOperand_Ra" - cls_name = base_cls.__name__ + '_' + op_name + cls_name = base_cls.__name__ + "_" + op_name # The following statement creates a new class called # as a subclass of with the attributes # in op_desc.attrs, just as if we evaluated a class declaration. @@ -1469,63 +1550,71 @@ StaticInstPtr # build a map elem -> vector (used in OperandList) elem_to_vec = {} for op_name, op in self.operandNameMap.items(): - if hasattr(op, 'elems'): + if hasattr(op, "elems"): for elem in op.elems.keys(): operands.append(elem) elem_to_vec[elem] = op_name self.elemToVector = elem_to_vec extensions = self.operandTypeMap.keys() - operandsREString = r''' + operandsREString = r""" (?[^"]*)".*$', - re.MULTILINE) + includeRE = re.compile( + r'^\s*##include\s+"(?P[^"]*)".*$', re.MULTILINE + ) def replace_include(self, matchobj, dirname): """Function to replace a matched '##include' directive with the @@ -1534,10 +1623,12 @@ StaticInstPtr (from a match of includeRE) and 'dirname' is the directory relative to which the file path should be resolved.""" - fname = matchobj.group('filename') + fname = matchobj.group("filename") full_fname = os.path.normpath(os.path.join(dirname, fname)) - contents = '##newfile "%s"\n%s\n##endfile\n' % \ - (full_fname, self.read_and_flatten(full_fname)) + contents = '##newfile "%s"\n%s\n##endfile\n' % ( + full_fname, + self.read_and_flatten(full_fname), + ) return contents def read_and_flatten(self, filename): @@ -1554,6 +1645,7 @@ StaticInstPtr # Find any includes and include them def replace(matchobj): return self.replace_include(matchobj, current_dir) + contents = self.includeRE.sub(replace, contents) self.fileNameStack.pop() @@ -1562,7 +1654,7 @@ StaticInstPtr AlreadyGenerated = {} def _parse_isa_desc(self, isa_desc_file): - '''Read in and parse the ISA description.''' + """Read in and parse the ISA description.""" # The build system can end up running the ISA parser twice: once to # finalize the build dependencies, and then to actually generate 
@@ -1576,7 +1668,7 @@ StaticInstPtr return # grab the last three path components of isa_desc_file - self.filename = '/'.join(isa_desc_file.split('/')[-3:]) + self.filename = "/".join(isa_desc_file.split("/")[-3:]) # Read file and (recursively) all included files into a string. # PLY requires that the input be in a single string so we have to @@ -1600,7 +1692,8 @@ StaticInstPtr print(e) sys.exit(1) + # Called as script: get args from command line. # Args are: -if __name__ == '__main__': +if __name__ == "__main__": ISAParser(sys.argv[2]).parse_isa_desc(sys.argv[1]) diff --git a/src/arch/isa_parser/operand_list.py b/src/arch/isa_parser/operand_list.py index 34387774a1..8df36c711b 100755 --- a/src/arch/isa_parser/operand_list.py +++ b/src/arch/isa_parser/operand_list.py @@ -40,15 +40,17 @@ from .util import assignRE, commentRE, stringRE from .util import error + class OperandList(object): - '''Find all the operands in the given code block. Returns an operand - descriptor list (instance of class OperandList).''' + """Find all the operands in the given code block. 
Returns an operand + descriptor list (instance of class OperandList).""" + def __init__(self, parser, code): self.items = [] self.bases = {} # delete strings and comments so we don't match on operands inside for regEx in (stringRE, commentRE): - code = regEx.sub('', code) + code = regEx.sub("", code) # search for operands for match in parser.operandsRE().finditer(code): @@ -62,18 +64,20 @@ class OperandList(object): isElem = True elem_op = (op_base, op_ext) op_base = parser.elemToVector[op_base] - op_ext = '' # use the default one + op_ext = "" # use the default one # if the token following the operand is an assignment, this is # a destination (LHS), else it's a source (RHS) - is_dest = (assignRE.match(code, match.end()) != None) + is_dest = assignRE.match(code, match.end()) != None is_src = not is_dest # see if we've already seen this one op_desc = self.find_base(op_base) if op_desc: - if op_ext and op_ext != '' and op_desc.ext != op_ext: - error ('Inconsistent extensions for operand %s: %s - %s' \ - % (op_base, op_desc.ext, op_ext)) + if op_ext and op_ext != "" and op_desc.ext != op_ext: + error( + "Inconsistent extensions for operand %s: %s - %s" + % (op_base, op_desc.ext, op_ext) + ) op_desc.is_src = op_desc.is_src or is_src op_desc.is_dest = op_desc.is_dest or is_dest if isElem: @@ -83,16 +87,19 @@ class OperandList(object): (ae_base, ae_ext) = ae if ae_base == elem_base: if ae_ext != elem_ext: - error('Inconsistent extensions for elem' - ' operand %s' % elem_base) + error( + "Inconsistent extensions for elem" + " operand %s" % elem_base + ) else: found = True if not found: op_desc.active_elems.append(elem_op) else: # new operand: create new descriptor - op_desc = parser.operandNameMap[op_base](parser, - op_full, op_ext, is_src, is_dest) + op_desc = parser.operandNameMap[op_base]( + parser, op_full, op_ext, is_src, is_dest + ) # if operand is a vector elem, add the corresponding vector # operand if not already done if isElem: @@ -152,12 +159,12 @@ class 
OperandList(object): # return a single string that is the concatenation of the (string) # values of the specified attribute for all operands def concatAttrStrings(self, attr_name): - return self.__internalConcatAttrs(attr_name, lambda x: 1, '') + return self.__internalConcatAttrs(attr_name, lambda x: 1, "") # like concatAttrStrings, but only include the values for the operands # for which the provided filter function returns true def concatSomeAttrStrings(self, filter, attr_name): - return self.__internalConcatAttrs(attr_name, filter, '') + return self.__internalConcatAttrs(attr_name, filter, "") # return a single list that is the concatenation of the (list) # values of the specified attribute for all operands @@ -172,15 +179,17 @@ class OperandList(object): def sort(self): self.items.sort(key=lambda a: a.sort_pri) + class SubOperandList(OperandList): - '''Find all the operands in the given code block. Returns an operand - descriptor list (instance of class OperandList).''' + """Find all the operands in the given code block. Returns an operand + descriptor list (instance of class OperandList).""" + def __init__(self, parser, code, requestor_list): self.items = [] self.bases = {} # delete strings and comments so we don't match on operands inside for regEx in (stringRE, commentRE): - code = regEx.sub('', code) + code = regEx.sub("", code) # search for operands for match in parser.operandsRE().finditer(code): @@ -195,8 +204,10 @@ class SubOperandList(OperandList): # find this op in the requestor list op_desc = requestor_list.find_base(op_base) if not op_desc: - error('Found operand %s which is not in the requestor list!' - % op_base) + error( + "Found operand %s which is not in the requestor list!" + % op_base + ) else: # See if we've already found this operand op_desc = self.find_base(op_base) @@ -228,5 +239,7 @@ class SubOperandList(OperandList): # Whether this instruction manipulates the whole PC or parts of it. 
# Mixing the two is a bad idea and flagged as an error. self.pcPart = None - if part: self.pcPart = True - if whole: self.pcPart = False + if part: + self.pcPart = True + if whole: + self.pcPart = False diff --git a/src/arch/isa_parser/operand_types.py b/src/arch/isa_parser/operand_types.py index b699025fab..63ca765a09 100755 --- a/src/arch/isa_parser/operand_types.py +++ b/src/arch/isa_parser/operand_types.py @@ -37,15 +37,19 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + def overrideInOperand(func): func.override_in_operand = True return func + overrideInOperand.overrides = dict() + class OperandDesc(object): - def __init__(self, base_cls, dflt_ext, reg_spec, flags=None, - sort_pri=None): + def __init__( + self, base_cls, dflt_ext, reg_spec, flags=None, sort_pri=None + ): from .isa_parser import makeList @@ -56,19 +60,22 @@ class OperandDesc(object): # obvious shortcuts; we convert these to canonical form here. 
if not flags: # no flags specified (e.g., 'None') - flags = ( [], [], [] ) + flags = ([], [], []) elif isinstance(flags, str): # a single flag: assumed to be unconditional - flags = ( [ flags ], [], [] ) + flags = ([flags], [], []) elif isinstance(flags, list): # a list of flags: also assumed to be unconditional - flags = ( flags, [], [] ) + flags = (flags, [], []) elif isinstance(flags, tuple): # it's a tuple: it should be a triple, # but each item could be a single string or a list (uncond_flags, src_flags, dest_flags) = flags - flags = (makeList(uncond_flags), - makeList(src_flags), makeList(dest_flags)) + flags = ( + makeList(uncond_flags), + makeList(src_flags), + makeList(dest_flags), + ) attrs = {} # reg_spec is either just a string or a dictionary @@ -76,45 +83,47 @@ class OperandDesc(object): if isinstance(reg_spec, tuple): (reg_spec, elem_spec) = reg_spec if isinstance(elem_spec, str): - attrs['elem_spec'] = elem_spec + attrs["elem_spec"] = elem_spec else: - assert(isinstance(elem_spec, dict)) - attrs['elems'] = elem_spec + assert isinstance(elem_spec, dict) + attrs["elems"] = elem_spec for key in dir(self): val = getattr(self, key) # If this is a method, extract the function that implements it. - if hasattr(val, '__func__'): + if hasattr(val, "__func__"): val = val.__func__ # If this should override something in the operand - if getattr(val, 'override_in_operand', False): + if getattr(val, "override_in_operand", False): attrs[key] = val - attrs.update({ - 'base_cls': base_cls, - 'dflt_ext': dflt_ext, - 'reg_spec': reg_spec, - 'flags': flags, - 'sort_pri': sort_pri, - }) + attrs.update( + { + "base_cls": base_cls, + "dflt_ext": dflt_ext, + "reg_spec": reg_spec, + "flags": flags, + "sort_pri": sort_pri, + } + ) self.attrs = attrs def setName(self, name): - self.attrs['base_name'] = name + self.attrs["base_name"] = name class Operand(object): - '''Base class for operand descriptors. An instance of this class + """Base class for operand descriptors. 
An instance of this class (or actually a class derived from this one) represents a specific operand for a code block (e.g, "Rc.sq" as a dest). Intermediate derived classes encapsulates the traits of a particular operand - type (e.g., "32-bit integer register").''' + type (e.g., "32-bit integer register").""" - src_reg_constructor = '\n\tsetSrcRegIdx(_numSrcRegs++, %s);' - dst_reg_constructor = '\n\tsetDestRegIdx(_numDestRegs++, %s);' + src_reg_constructor = "\n\tsetSrcRegIdx(_numSrcRegs++, %s);" + dst_reg_constructor = "\n\tsetDestRegIdx(_numDestRegs++, %s);" def regId(self): - return f'RegId({self.reg_class}, {self.reg_spec})' + return f"{self.reg_class}[{self.reg_spec}]" def srcRegId(self): return self.regId() @@ -132,10 +141,10 @@ class Operand(object): # extension, if one was explicitly provided, or the default. if ext: self.eff_ext = ext - elif hasattr(self, 'dflt_ext'): + elif hasattr(self, "dflt_ext"): self.eff_ext = self.dflt_ext - if hasattr(self, 'eff_ext'): + if hasattr(self, "eff_ext"): self.ctype = parser.operandTypeMap[self.eff_ext] # Finalize additional fields (primarily code fields). This step @@ -151,15 +160,15 @@ class Operand(object): self.op_rd = self.makeRead() self.op_src_decl = self.makeDecl() else: - self.op_rd = '' - self.op_src_decl = '' + self.op_rd = "" + self.op_src_decl = "" if self.is_dest: self.op_wb = self.makeWrite() self.op_dest_decl = self.makeDecl() else: - self.op_wb = '' - self.op_dest_decl = '' + self.op_wb = "" + self.op_dest_decl = "" def isMem(self): return 0 @@ -186,76 +195,84 @@ class Operand(object): def makeDecl(self): # Note that initializations in the declarations are solely # to avoid 'uninitialized variable' errors from the compiler. 
- return self.ctype + ' ' + self.base_name + ' = 0;\n'; + return self.ctype + " " + self.base_name + " = 0;\n" + class RegOperand(Operand): def isReg(self): return 1 def makeConstructor(self): - c_src = '' - c_dest = '' + c_src = "" + c_dest = "" if self.is_src: c_src = self.src_reg_constructor % self.srcRegId() if self.is_dest: c_dest = self.dst_reg_constructor % self.destRegId() - c_dest += f'\n\t_numTypedDestRegs[{self.reg_class}]++;' + c_dest += f"\n\t_numTypedDestRegs[{self.reg_class}.type()]++;" return c_src + c_dest + class RegValOperand(RegOperand): def makeRead(self): - reg_val = f'xc->getRegOperand(this, {self.src_reg_idx})' + reg_val = f"xc->getRegOperand(this, {self.src_reg_idx})" - if self.ctype == 'float': - reg_val = f'bitsToFloat32({reg_val})' - elif self.ctype == 'double': - reg_val = f'bitsToFloat64({reg_val})' + if self.ctype == "float": + reg_val = f"bitsToFloat32({reg_val})" + elif self.ctype == "double": + reg_val = f"bitsToFloat64({reg_val})" - return f'{self.base_name} = {reg_val};\n' + return f"{self.base_name} = {reg_val};\n" def makeWrite(self): reg_val = self.base_name - if self.ctype == 'float': - reg_val = f'floatToBits32({reg_val})' - elif self.ctype == 'double': - reg_val = f'floatToBits64({reg_val})' + if self.ctype == "float": + reg_val = f"floatToBits32({reg_val})" + elif self.ctype == "double": + reg_val = f"floatToBits64({reg_val})" - return f''' + return f""" {{ RegVal final_val = {reg_val}; xc->setRegOperand(this, {self.dest_reg_idx}, final_val); if (traceData) {{ - traceData->setData(final_val); + traceData->setData({self.reg_class}, final_val); }} - }}''' + }}""" + class RegOperandDesc(OperandDesc): def __init__(self, reg_class, *args, **kwargs): super().__init__(*args, **kwargs) - self.attrs['reg_class'] = reg_class + self.attrs["reg_class"] = reg_class + class IntRegOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('IntRegClass', RegValOperand, *args, **kwargs) + 
super().__init__("intRegClass", RegValOperand, *args, **kwargs) + class FloatRegOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('FloatRegClass', RegValOperand, *args, **kwargs) + super().__init__("floatRegClass", RegValOperand, *args, **kwargs) + class CCRegOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('CCRegClass', RegValOperand, *args, **kwargs) + super().__init__("ccRegClass", RegValOperand, *args, **kwargs) + class VecElemOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('VecElemClass', RegValOperand, *args, **kwargs) + super().__init__("vecElemClass", RegValOperand, *args, **kwargs) + class VecRegOperand(RegOperand): - reg_class = 'VecRegClass' + reg_class = "vecRegClass" def __init__(self, parser, full_name, ext, is_src, is_dest): super().__init__(parser, full_name, ext, is_src, is_dest) @@ -269,18 +286,18 @@ class VecRegOperand(RegOperand): else: ext = dflt_elem_ext ctype = self.parser.operandTypeMap[ext] - return '\n\t%s %s = 0;' % (ctype, elem_name) + return "\n\t%s %s = 0;" % (ctype, elem_name) def makeDecl(self): if not self.is_dest and self.is_src: - c_decl = '\t/* Vars for %s*/' % (self.base_name) - if hasattr(self, 'active_elems'): + c_decl = "\t/* Vars for %s*/" % (self.base_name) + if hasattr(self, "active_elems"): if self.active_elems: for elem in self.active_elems: c_decl += self.makeDeclElem(elem) - return c_decl + '\t/* End vars for %s */\n' % (self.base_name) + return c_decl + "\t/* End vars for %s */\n" % (self.base_name) else: - return '' + return "" # Read destination register to write def makeReadWElem(self, elem_op): @@ -291,23 +308,29 @@ class VecRegOperand(RegOperand): else: ext = dflt_elem_ext ctype = self.parser.operandTypeMap[ext] - c_read = '\t\t%s& %s = %s[%s];\n' % \ - (ctype, elem_name, self.base_name, elem_spec) + c_read = "\t\t%s& %s = %s[%s];\n" % ( + ctype, + elem_name, + self.base_name, + elem_spec, + ) return c_read 
def makeReadW(self): - tmp_name = f'tmp_d{self.dest_reg_idx}' - c_readw = f'\t\tauto &{tmp_name} = \n' \ - f'\t\t *({self.parser.namespace}::VecRegContainer *)\n' \ - f'\t\t xc->getWritableRegOperand(\n' \ - f'\t\t this, {self.dest_reg_idx});\n' + tmp_name = f"tmp_d{self.dest_reg_idx}" + c_readw = ( + f"\t\tauto &{tmp_name} = \n" + f"\t\t *({self.parser.namespace}::VecRegContainer *)\n" + f"\t\t xc->getWritableRegOperand(\n" + f"\t\t this, {self.dest_reg_idx});\n" + ) if self.elemExt: - ext = f'{self.parser.operandTypeMap[self.elemExt]}' - c_readw += f'\t\tauto {self.base_name} = {tmp_name}.as<{ext}>();\n' + ext = f"{self.parser.operandTypeMap[self.elemExt]}" + c_readw += f"\t\tauto {self.base_name} = {tmp_name}.as<{ext}>();\n" if self.ext: - ext = f'{self.parser.operandTypeMap[self.ext]}' - c_readw += f'\t\tauto {self.base_name} = {tmp_name}.as<{ext}>();\n' - if hasattr(self, 'active_elems'): + ext = f"{self.parser.operandTypeMap[self.ext]}" + c_readw += f"\t\tauto {self.base_name} = {tmp_name}.as<{ext}>();\n" + if hasattr(self, "active_elems"): if self.active_elems: for elem in self.active_elems: c_readw += self.makeReadWElem(elem) @@ -323,96 +346,109 @@ class VecRegOperand(RegOperand): else: ext = dflt_elem_ext ctype = self.parser.operandTypeMap[ext] - c_read = '\t\t%s = %s[%s];\n' % \ - (elem_name, name, elem_spec) + c_read = "\t\t%s = %s[%s];\n" % (elem_name, name, elem_spec) return c_read def makeRead(self): name = self.base_name if self.is_dest and self.is_src: - name += '_merger' + name += "_merger" - tmp_name = f'tmp_s{self.src_reg_idx}' - c_read = f'\t\t{self.parser.namespace}::VecRegContainer ' \ - f'{tmp_name};\n' \ - f'\t\txc->getRegOperand(this, {self.src_reg_idx},\n' \ - f'\t\t &{tmp_name});\n' + tmp_name = f"tmp_s{self.src_reg_idx}" + c_read = ( + f"\t\t{self.parser.namespace}::VecRegContainer " + f"{tmp_name};\n" + f"\t\txc->getRegOperand(this, {self.src_reg_idx},\n" + f"\t\t &{tmp_name});\n" + ) # If the parser has detected that elements are being 
access, create # the appropriate view if self.elemExt: - ext = f'{self.parser.operandTypeMap[self.elemExt]}' - c_read += f'\t\tauto {name} = {tmp_name}.as<{ext}>();\n' + ext = f"{self.parser.operandTypeMap[self.elemExt]}" + c_read += f"\t\tauto {name} = {tmp_name}.as<{ext}>();\n" if self.ext: - ext = f'{self.parser.operandTypeMap[self.ext]}' - c_read += f'\t\tauto {name} = {tmp_name}.as<{ext}>();\n' - if hasattr(self, 'active_elems'): + ext = f"{self.parser.operandTypeMap[self.ext]}" + c_read += f"\t\tauto {name} = {tmp_name}.as<{ext}>();\n" + if hasattr(self, "active_elems"): if self.active_elems: for elem in self.active_elems: c_read += self.makeReadElem(elem, name) return c_read def makeWrite(self): - return f''' + return f""" if (traceData) {{ - traceData->setData(tmp_d{self.dest_reg_idx}); + traceData->setData({self.reg_class}, &tmp_d{self.dest_reg_idx}); }} - ''' + """ def finalize(self): super().finalize() if self.is_dest: self.op_rd = self.makeReadW() + self.op_rd + class VecRegOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('VecRegClass', VecRegOperand, *args, **kwargs) + super().__init__("vecRegClass", VecRegOperand, *args, **kwargs) + class VecPredRegOperand(RegOperand): - reg_class = 'VecPredRegClass' + reg_class = "vecPredRegClass" def makeDecl(self): - return '' + return "" def makeRead(self): - tmp_name = f'tmp_s{self.src_reg_idx}' - c_read = f'\t\t{self.parser.namespace}::VecPredRegContainer \n' \ - f'\t\t {tmp_name};\n' \ - f'xc->getRegOperand(this, {self.src_reg_idx}, ' \ - f'&{tmp_name});\n' + tmp_name = f"tmp_s{self.src_reg_idx}" + c_read = ( + f"\t\t{self.parser.namespace}::VecPredRegContainer \n" + f"\t\t {tmp_name};\n" + f"xc->getRegOperand(this, {self.src_reg_idx}, " + f"&{tmp_name});\n" + ) if self.ext: - c_read += f'\t\tauto {self.base_name} = {tmp_name}.as<' \ - f'{self.parser.operandTypeMap[self.ext]}>();\n' + c_read += ( + f"\t\tauto {self.base_name} = {tmp_name}.as<" + 
f"{self.parser.operandTypeMap[self.ext]}>();\n" + ) return c_read def makeReadW(self): - tmp_name = f'tmp_d{self.dest_reg_idx}' - c_readw = f'\t\tauto &{tmp_name} = \n' \ - f'\t\t *({self.parser.namespace}::' \ - f'VecPredRegContainer *)xc->getWritableRegOperand(' \ - f'this, {self.dest_reg_idx});\n' + tmp_name = f"tmp_d{self.dest_reg_idx}" + c_readw = ( + f"\t\tauto &{tmp_name} = \n" + f"\t\t *({self.parser.namespace}::" + f"VecPredRegContainer *)xc->getWritableRegOperand(" + f"this, {self.dest_reg_idx});\n" + ) if self.ext: - c_readw += f'\t\tauto {self.base_name} = {tmp_name}.as<' \ - f'{self.parser.operandTypeMap[self.ext]}>();\n' + c_readw += ( + f"\t\tauto {self.base_name} = {tmp_name}.as<" + f"{self.parser.operandTypeMap[self.ext]}>();\n" + ) return c_readw def makeWrite(self): - return f''' + return f""" if (traceData) {{ - traceData->setData(tmp_d{self.dest_reg_idx}); + traceData->setData({self.reg_class}, &tmp_d{self.dest_reg_idx}); }} - ''' + """ def finalize(self): super().finalize() if self.is_dest: self.op_rd = self.makeReadW() + self.op_rd + class VecPredRegOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('VecPredRegClass', VecPredRegOperand, *args, **kwargs) + super().__init__("vecPredRegClass", VecPredRegOperand, *args, **kwargs) + class ControlRegOperand(Operand): - reg_class = 'MiscRegClass' + reg_class = "miscRegClass" def isReg(self): return 1 @@ -421,8 +457,8 @@ class ControlRegOperand(Operand): return 1 def makeConstructor(self): - c_src = '' - c_dest = '' + c_src = "" + c_dest = "" if self.is_src: c_src = self.src_reg_constructor % self.srcRegId() @@ -434,88 +470,103 @@ class ControlRegOperand(Operand): def makeRead(self): bit_select = 0 - if (self.ctype == 'float' or self.ctype == 'double'): - error('Attempt to read control register as FP') + if self.ctype == "float" or self.ctype == "double": + error("Attempt to read control register as FP") - return f'{self.base_name} = ' \ - 
f'xc->readMiscRegOperand(this, {self.src_reg_idx});\n' + return ( + f"{self.base_name} = " + f"xc->readMiscRegOperand(this, {self.src_reg_idx});\n" + ) def makeWrite(self): - if (self.ctype == 'float' or self.ctype == 'double'): - error('Attempt to write control register as FP') - wb = f'xc->setMiscRegOperand(this, ' \ - f'{self.dest_reg_idx}, {self.base_name});\n' - wb += f''' + if self.ctype == "float" or self.ctype == "double": + error("Attempt to write control register as FP") + wb = ( + f"xc->setMiscRegOperand(this, " + f"{self.dest_reg_idx}, {self.base_name});\n" + ) + wb += f""" if (traceData) {{ - traceData->setData({self.base_name}); + traceData->setData({self.reg_class}, {self.base_name}); }} - ''' + """ return wb + class ControlRegOperandDesc(RegOperandDesc): def __init__(self, *args, **kwargs): - super().__init__('MiscRegClass', ControlRegOperand, *args, **kwargs) + super().__init__("miscRegClass", ControlRegOperand, *args, **kwargs) + class MemOperand(Operand): def isMem(self): return 1 def makeConstructor(self): - return '' + return "" def makeDecl(self): # Declare memory data variable. - return f'{self.ctype} {self.base_name} = {{}};\n' + return f"{self.ctype} {self.base_name} = {{}};\n" def makeRead(self): - return '' + return "" def makeWrite(self): - return '' + return "" + class MemOperandDesc(OperandDesc): def __init__(self, *args, **kwargs): super().__init__(MemOperand, *args, **kwargs) + class PCStateOperand(Operand): def __init__(self, parser, *args, **kwargs): super().__init__(parser, *args, **kwargs) self.parser = parser def makeConstructor(self): - return '' + return "" def makeRead(self): if self.reg_spec: # A component of the PC state. - return f'{self.base_name} = ' \ - f'__parserAutoPCState.{self.reg_spec}();\n' + return ( + f"{self.base_name} = " + f"__parserAutoPCState.{self.reg_spec}();\n" + ) else: # The whole PC state itself. 
- return f'{self.base_name} = ' \ - f'xc->pcState().as<{self.parser.namespace}::PCState>();\n' + return ( + f"{self.base_name} = " + f"xc->pcState().as<{self.parser.namespace}::PCState>();\n" + ) def makeWrite(self): if self.reg_spec: # A component of the PC state. - return '__parserAutoPCState.%s(%s);\n' % \ - (self.reg_spec, self.base_name) + return "__parserAutoPCState.%s(%s);\n" % ( + self.reg_spec, + self.base_name, + ) else: # The whole PC state itself. - return f'xc->pcState({self.base_name});\n' + return f"xc->pcState({self.base_name});\n" def makeDecl(self): - ctype = f'{self.parser.namespace}::PCState' + ctype = f"{self.parser.namespace}::PCState" if self.isPCPart(): ctype = self.ctype # Note that initializations in the declarations are solely # to avoid 'uninitialized variable' errors from the compiler. - return '%s %s = 0;\n' % (ctype, self.base_name) + return "%s %s = 0;\n" % (ctype, self.base_name) def isPCState(self): return 1 + class PCStateOperandDesc(OperandDesc): def __init__(self, *args, **kwargs): super().__init__(PCStateOperand, *args, **kwargs) diff --git a/src/arch/isa_parser/util.py b/src/arch/isa_parser/util.py index 7a000e8ac4..2cf0d82a7a 100755 --- a/src/arch/isa_parser/util.py +++ b/src/arch/isa_parser/util.py @@ -48,15 +48,18 @@ import re # Used to make nested code blocks look pretty. # def indent(s): - return re.sub(r'(?m)^(?!#)', ' ', s) + return re.sub(r"(?m)^(?!#)", " ", s) + # Regular expression object to match C++ strings stringRE = re.compile(r'"([^"\\]|\\.)*"') # Regular expression object to match C++ comments # (used in findOperands()) -commentRE = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?', - re.DOTALL | re.MULTILINE) +commentRE = re.compile( + r"(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?", + re.DOTALL | re.MULTILINE, +) # Regular expression object to match assignment statements (used in # findOperands()). 
If the code immediately following the first @@ -65,7 +68,7 @@ commentRE = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?', # destination. basically we're looking for an '=' that's not '=='. # The heinous tangle before that handles the case where the operand # has an array subscript. -assignRE = re.compile(r'(\[[^\]]+\])?\s*=(?!=)', re.MULTILINE) +assignRE = re.compile(r"(\[[^\]]+\])?\s*=(?!=)", re.MULTILINE) # # Munge a somewhat arbitrarily formatted piece of Python code @@ -84,15 +87,18 @@ assignRE = re.compile(r'(\[[^\]]+\])?\s*=(?!=)', re.MULTILINE) # We don't want to do this if (1) the code block is empty or (2) the # first line of the block doesn't have any whitespace at the front. + def fixPythonIndentation(s): # get rid of blank lines first - s = re.sub(r'(?m)^\s*\n', '', s); - if (s != '' and re.match(r'[ \t]', s[0])): - s = 'if 1:\n' + s + s = re.sub(r"(?m)^\s*\n", "", s) + if s != "" and re.match(r"[ \t]", s[0]): + s = "if 1:\n" + s return s + class ISAParserError(Exception): """Exception class for parser errors""" + def __init__(self, first, second=None): if second is None: self.lineno = 0 @@ -104,14 +110,17 @@ class ISAParserError(Exception): def __str__(self): return self.string + def error(*args): raise ISAParserError(*args) -def protectNonSubstPercents(s): - '''Protect any non-dict-substitution '%'s in a format string - (i.e. those not followed by '(')''' - return re.sub(r'%(?!\()', '%%', s) +def protectNonSubstPercents(s): + """Protect any non-dict-substitution '%'s in a format string + (i.e. those not followed by '(')""" + + return re.sub(r"%(?!\()", "%%", s) + ############## # Stack: a simple stack object. Used for both formats (formatStack) @@ -119,16 +128,18 @@ def protectNonSubstPercents(s): # stack-like syntax and enable initialization with an argument list # (as opposed to an argument that's a list). 
+ class Stack(list): def __init__(self, *items): list.__init__(self, items) def push(self, item): - self.append(item); + self.append(item) def top(self): return self[-1] + # Format a file include stack backtrace as a string def backtrace(filename_stack): fmt = "In file included from %s:" @@ -143,6 +154,7 @@ def backtrace(filename_stack): # minimum of disruption to existing increment code. # + class LineTracker(object): def __init__(self, filename, lineno=1): self.filename = filename diff --git a/src/arch/micro_asm.py b/src/arch/micro_asm.py index 54f7b61ec0..5b4378881e 100644 --- a/src/arch/micro_asm.py +++ b/src/arch/micro_asm.py @@ -28,6 +28,7 @@ import os import sys import re import traceback + # get type names from types import * @@ -40,6 +41,7 @@ from ply import yacc # ########################################################################## + class MicroContainer: def __init__(self, name): self.microops = [] @@ -59,9 +61,11 @@ class MicroContainer: string += " %s\n" % microop return string + class CombinationalMacroop(MicroContainer): pass + class RomMacroop: def __init__(self, name, target): self.name = name @@ -70,32 +74,38 @@ class RomMacroop: def __str__(self): return "%s: %s\n" % (self.name, self.target) + class Rom(MicroContainer): def __init__(self, name): super().__init__(name) self.externs = {} + ########################################################################## # # Support classes # ########################################################################## + class Label(object): def __init__(self): self.extern = False self.name = "" + class Block(object): def __init__(self): self.statements = [] + class Statement(object): def __init__(self): self.is_microop = False self.is_directive = False self.params = "" + class Microop(Statement): def __init__(self): super().__init__() @@ -103,36 +113,47 @@ class Microop(Statement): self.labels = [] self.is_microop = True + class Directive(Statement): def __init__(self): super().__init__() self.name = 
"" self.is_directive = True + ########################################################################## # # Functions that handle common tasks # ########################################################################## + def print_error(message): print() print("*** %s" % message) print() + def handle_statement(parser, container, statement): if statement.is_microop: if statement.mnemonic not in parser.microops.keys(): - raise Exception("Unrecognized mnemonic: {}".format( - statement.mnemonic)) - parser.symbols["__microopClassFromInsideTheAssembler"] = \ - parser.microops[statement.mnemonic] + raise Exception( + "Unrecognized mnemonic: {}".format(statement.mnemonic) + ) + parser.symbols[ + "__microopClassFromInsideTheAssembler" + ] = parser.microops[statement.mnemonic] try: - microop = eval('__microopClassFromInsideTheAssembler(%s)' % - statement.params, {}, parser.symbols) + microop = eval( + "__microopClassFromInsideTheAssembler(%s)" % statement.params, + {}, + parser.symbols, + ) except: - print_error("Error creating microop object with mnemonic %s." % \ - statement.mnemonic) + print_error( + "Error creating microop object with mnemonic %s." 
+ % statement.mnemonic + ) raise try: for label in statement.labels: @@ -145,20 +166,28 @@ def handle_statement(parser, container, statement): raise elif statement.is_directive: if statement.name not in container.directives.keys(): - raise Exception("Unrecognized directive: {}".format( - statement.name)) - parser.symbols["__directiveFunctionFromInsideTheAssembler"] = \ - container.directives[statement.name] + raise Exception( + "Unrecognized directive: {}".format(statement.name) + ) + parser.symbols[ + "__directiveFunctionFromInsideTheAssembler" + ] = container.directives[statement.name] try: - eval('__directiveFunctionFromInsideTheAssembler(%s)' % - statement.params, {}, parser.symbols) + eval( + "__directiveFunctionFromInsideTheAssembler(%s)" + % statement.params, + {}, + parser.symbols, + ) except: print_error("Error executing directive.") print(container.directives) raise else: - raise Exception("Didn't recognize the type of statement {}".format( - statement)) + raise Exception( + "Didn't recognize the type of statement {}".format(statement) + ) + ########################################################################## # @@ -170,9 +199,9 @@ def handle_statement(parser, container, statement): # Emacs compile-mode. Optional 'print_traceback' arg, if set to True, # prints a Python stack backtrace too (can be handy when trying to # debug the parser itself). -def error(lineno, string, print_traceback = False): +def error(lineno, string, print_traceback=False): # Print a Python stack backtrace if requested. 
- if (print_traceback): + if print_traceback: traceback.print_exc() if lineno != 0: line_str = "%d:" % lineno @@ -180,138 +209,159 @@ def error(lineno, string, print_traceback = False): line_str = "" sys.exit("%s %s" % (line_str, string)) -reserved = ('DEF', 'MACROOP', 'ROM', 'EXTERN') + +reserved = ("DEF", "MACROOP", "ROM", "EXTERN") tokens = reserved + ( - # identifier - 'ID', - # arguments for microops and directives - 'PARAMS', - - 'LPAREN', 'RPAREN', - 'LBRACE', 'RBRACE', - 'COLON', 'SEMI', 'DOT', - 'NEWLINE' - ) + # identifier + "ID", + # arguments for microops and directives + "PARAMS", + "LPAREN", + "RPAREN", + "LBRACE", + "RBRACE", + "COLON", + "SEMI", + "DOT", + "NEWLINE", +) # New lines are ignored at the top level, but they end statements in the # assembler states = ( - ('asm', 'exclusive'), - ('params', 'exclusive'), - ('header', 'exclusive'), + ("asm", "exclusive"), + ("params", "exclusive"), + ("header", "exclusive"), ) -reserved_map = { } +reserved_map = {} for r in reserved: reserved_map[r.lower()] = r # Ignore comments def t_ANY_COMMENT(t): - r'\#[^\n]*(?=\n)' + r"\#[^\n]*(?=\n)" + def t_ANY_MULTILINECOMMENT(t): - r'/\*([^/]|((?readMiscReg(MISCREG_STATUS); + StatusReg status = tc->readMiscReg(misc_reg::Status); if (status.exl != 1 && status.bev != 1) { // SRS Ctl is modified only if Status_EXL and Status_BEV are not set - SRSCtlReg srsCtl = tc->readMiscReg(MISCREG_SRSCTL); + SRSCtlReg srsCtl = tc->readMiscReg(misc_reg::Srsctl); srsCtl.pss = srsCtl.css; srsCtl.css = srsCtl.ess; - tc->setMiscRegNoEffect(MISCREG_SRSCTL, srsCtl); + tc->setMiscRegNoEffect(misc_reg::Srsctl, srsCtl); } // set EXL bit (don't care if it is already set!) 
status.exl = 1; - tc->setMiscRegNoEffect(MISCREG_STATUS, status); + tc->setMiscRegNoEffect(misc_reg::Status, status); // write EPC auto pc = tc->pcState().as(); DPRINTF(MipsPRA, "PC: %s\n", pc); bool delay_slot = pc.pc() + sizeof(MachInst) != pc.npc(); - tc->setMiscRegNoEffect(MISCREG_EPC, + tc->setMiscRegNoEffect(misc_reg::Epc, pc.pc() - (delay_slot ? sizeof(MachInst) : 0)); // Set Cause_EXCCODE field - CauseReg cause = tc->readMiscReg(MISCREG_CAUSE); + CauseReg cause = tc->readMiscReg(misc_reg::Cause); cause.excCode = excCode; cause.bd = delay_slot ? 1 : 0; cause.ce = 0; - tc->setMiscRegNoEffect(MISCREG_CAUSE, cause); + tc->setMiscRegNoEffect(misc_reg::Cause, cause); } void @@ -152,9 +152,9 @@ ResetFault::invoke(ThreadContext *tc, const StaticInstPtr &inst) } // Set Coprocessor 1 (Floating Point) To Usable - StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS); + StatusReg status = tc->readMiscRegNoEffect(misc_reg::Status); status.cu.cu1 = 1; - tc->setMiscReg(MISCREG_STATUS, status); + tc->setMiscReg(misc_reg::Status, status); } void diff --git a/src/arch/mips/faults.hh b/src/arch/mips/faults.hh index 7b86c33fb5..ba2f5ac9b8 100644 --- a/src/arch/mips/faults.hh +++ b/src/arch/mips/faults.hh @@ -89,9 +89,9 @@ class MipsFaultBase : public FaultBase virtual ExcCode code() const = 0; virtual FaultVect base(ThreadContext *tc) const { - StatusReg status = tc->readMiscReg(MISCREG_STATUS); + StatusReg status = tc->readMiscReg(misc_reg::Status); if (!status.bev) - return tc->readMiscReg(MISCREG_EBASE); + return tc->readMiscReg(misc_reg::Ebase); else return 0xbfc00200; } @@ -167,9 +167,9 @@ class CoprocessorUnusableFault : public MipsFault { MipsFault::invoke(tc, inst); if (FullSystem) { - CauseReg cause = tc->readMiscReg(MISCREG_CAUSE); + CauseReg cause = tc->readMiscReg(misc_reg::Cause); cause.ce = coProcID; - tc->setMiscRegNoEffect(MISCREG_CAUSE, cause); + tc->setMiscRegNoEffect(misc_reg::Cause, cause); } } }; @@ -180,7 +180,7 @@ class InterruptFault : public 
MipsFault FaultVect offset(ThreadContext *tc) const { - CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE); + CauseReg cause = tc->readMiscRegNoEffect(misc_reg::Cause); // offset 0x200 for release 2, 0x180 for release 1. return cause.iv ? 0x200 : 0x180; } @@ -202,7 +202,7 @@ class AddressFault : public MipsFault { MipsFault::invoke(tc, inst); if (FullSystem) - tc->setMiscRegNoEffect(MISCREG_BADVADDR, vaddr); + tc->setMiscRegNoEffect(misc_reg::Badvaddr, vaddr); } }; @@ -237,16 +237,16 @@ class TlbFault : public AddressFault { this->setExceptionState(tc, excCode); - tc->setMiscRegNoEffect(MISCREG_BADVADDR, this->vaddr); - EntryHiReg entryHi = tc->readMiscReg(MISCREG_ENTRYHI); + tc->setMiscRegNoEffect(misc_reg::Badvaddr, this->vaddr); + EntryHiReg entryHi = tc->readMiscReg(misc_reg::Entryhi); entryHi.asid = this->asid; entryHi.vpn2 = this->vpn >> 2; entryHi.vpn2x = this->vpn & 0x3; - tc->setMiscRegNoEffect(MISCREG_ENTRYHI, entryHi); + tc->setMiscRegNoEffect(misc_reg::Entryhi, entryHi); - ContextReg context = tc->readMiscReg(MISCREG_CONTEXT); + ContextReg context = tc->readMiscReg(misc_reg::Context); context.badVPN2 = this->vpn >> 2; - tc->setMiscRegNoEffect(MISCREG_CONTEXT, context); + tc->setMiscRegNoEffect(misc_reg::Context, context); } void @@ -280,7 +280,7 @@ class TlbRefillFault : public TlbFault FaultVect offset(ThreadContext *tc) const { - StatusReg status = tc->readMiscReg(MISCREG_STATUS); + StatusReg status = tc->readMiscReg(misc_reg::Status); return status.exl ? 
0x180 : 0x000; } }; diff --git a/src/arch/mips/idle_event.cc b/src/arch/mips/idle_event.cc index dd5a4a3f60..6f08c4c7ed 100644 --- a/src/arch/mips/idle_event.cc +++ b/src/arch/mips/idle_event.cc @@ -28,13 +28,9 @@ #include "arch/mips/idle_event.hh" -#include "cpu/thread_context.hh" - namespace gem5 { -using namespace MipsISA; - void IdleStartEvent::process(ThreadContext *tc) { diff --git a/src/arch/mips/interrupts.cc b/src/arch/mips/interrupts.cc index 5ccc09f6db..647dbefc27 100644 --- a/src/arch/mips/interrupts.cc +++ b/src/arch/mips/interrupts.cc @@ -63,16 +63,16 @@ enum InterruptLevels static inline uint8_t getCauseIP(ThreadContext *tc) { - CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE); + CauseReg cause = tc->readMiscRegNoEffect(misc_reg::Cause); return cause.ip; } static inline void setCauseIP(ThreadContext *tc, uint8_t val) { - CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE); + CauseReg cause = tc->readMiscRegNoEffect(misc_reg::Cause); cause.ip = val; - tc->setMiscRegNoEffect(MISCREG_CAUSE, cause); + tc->setMiscRegNoEffect(misc_reg::Cause, cause); } void @@ -127,14 +127,14 @@ Interrupts::checkInterrupts() const return false; //Check if there are any outstanding interrupts - StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS); + StatusReg status = tc->readMiscRegNoEffect(misc_reg::Status); // Interrupts must be enabled, error level must be 0 or interrupts // inhibited, and exception level must be 0 or interrupts inhibited if ((status.ie == 1) && (status.erl == 0) && (status.exl == 0)) { // Software interrupts & hardware interrupts are handled in software. 
// So if any interrupt that isn't masked is detected, jump to interrupt // handler - CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE); + CauseReg cause = tc->readMiscRegNoEffect(misc_reg::Cause); if (status.im && cause.ip) return true; @@ -149,8 +149,8 @@ Interrupts::getInterrupt() assert(checkInterrupts()); [[maybe_unused]] StatusReg status = - tc->readMiscRegNoEffect(MISCREG_STATUS); - [[maybe_unused]] CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE); + tc->readMiscRegNoEffect(misc_reg::Status); + [[maybe_unused]] CauseReg cause = tc->readMiscRegNoEffect(misc_reg::Cause); DPRINTF(Interrupt, "Interrupt! IM[7:0]=%d IP[7:0]=%d \n", (unsigned)status.im, (unsigned)cause.ip); @@ -160,8 +160,8 @@ Interrupts::getInterrupt() bool Interrupts::onCpuTimerInterrupt() const { - RegVal compare = tc->readMiscRegNoEffect(MISCREG_COMPARE); - RegVal count = tc->readMiscRegNoEffect(MISCREG_COUNT); + RegVal compare = tc->readMiscRegNoEffect(misc_reg::Compare); + RegVal count = tc->readMiscRegNoEffect(misc_reg::Count); if (compare == count && count != 0) return true; return false; @@ -177,7 +177,7 @@ Interrupts::interruptsPending() const if (onCpuTimerInterrupt()) { DPRINTF(Interrupt, "Interrupts OnCpuTimerInterrupt() == true\n"); //determine timer interrupt IP # - IntCtlReg intCtl = tc->readMiscRegNoEffect(MISCREG_INTCTL); + IntCtlReg intCtl = tc->readMiscRegNoEffect(misc_reg::Intctl); uint8_t intStatus = getCauseIP(tc); intStatus |= 1 << intCtl.ipti; setCauseIP(tc, intStatus); diff --git a/src/arch/mips/isa.cc b/src/arch/mips/isa.cc index 683ed728d6..6f39a81244 100644 --- a/src/arch/mips/isa.cc +++ b/src/arch/mips/isa.cc @@ -38,10 +38,7 @@ #include "cpu/base.hh" #include "cpu/reg_class.hh" #include "cpu/thread_context.hh" -#include "debug/FloatRegs.hh" -#include "debug/IntRegs.hh" #include "debug/MipsPRA.hh" -#include "debug/MiscRegs.hh" #include "params/MipsISA.hh" namespace gem5 @@ -51,7 +48,7 @@ namespace MipsISA { std::string -ISA::miscRegNames[MISCREG_NUMREGS] = 
+ISA::miscRegNames[misc_reg::NumRegs] = { "Index", "MVPControl", "MVPConf0", "MVPConf1", "", "", "", "", "Random", "VPEControl", "VPEConf0", "VPEConf1", @@ -97,40 +94,54 @@ ISA::miscRegNames[MISCREG_NUMREGS] = "LLFlag" }; +namespace +{ + +/* Not applicable to MIPS. */ +constexpr RegClass vecRegClass(VecRegClass, VecRegClassName, 1, + debug::IntRegs); +constexpr RegClass vecElemClass(VecElemClass, VecElemClassName, 2, + debug::IntRegs); +constexpr RegClass vecPredRegClass(VecPredRegClass, VecPredRegClassName, 1, + debug::IntRegs); +constexpr RegClass ccRegClass(CCRegClass, CCRegClassName, 0, debug::IntRegs); + +} // anonymous namespace + ISA::ISA(const Params &p) : BaseISA(p), numThreads(p.num_threads), numVpes(p.num_vpes) { - _regClasses.emplace_back(NumIntRegs, debug::IntRegs); - _regClasses.emplace_back(NumFloatRegs, debug::FloatRegs); - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable to MIPS. - _regClasses.emplace_back(2, debug::IntRegs); // Not applicable to MIPS. - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable to MIPS. - _regClasses.emplace_back(0, debug::IntRegs); // Not applicable to MIPS. 
- _regClasses.emplace_back(MISCREG_NUMREGS, debug::MiscRegs); + _regClasses.push_back(&intRegClass); + _regClasses.push_back(&floatRegClass); + _regClasses.push_back(&vecRegClass); + _regClasses.push_back(&vecElemClass); + _regClasses.push_back(&vecPredRegClass); + _regClasses.push_back(&ccRegClass); + _regClasses.push_back(&miscRegClass); - miscRegFile.resize(MISCREG_NUMREGS); - bankType.resize(MISCREG_NUMREGS); + miscRegFile.resize(misc_reg::NumRegs); + bankType.resize(misc_reg::NumRegs); - for (int i = 0; i < MISCREG_NUMREGS; i++) { + for (int i = 0; i < misc_reg::NumRegs; i++) { miscRegFile[i].resize(1); bankType[i] = perProcessor; } - miscRegFile_WriteMask.resize(MISCREG_NUMREGS); + miscRegFile_WriteMask.resize(misc_reg::NumRegs); - for (int i = 0; i < MISCREG_NUMREGS; i++) { + for (int i = 0; i < misc_reg::NumRegs; i++) { miscRegFile_WriteMask[i].push_back(0); } // Initialize all Per-VPE regs - uint32_t per_vpe_regs[] = { MISCREG_VPE_CONTROL, - MISCREG_VPE_CONF0, MISCREG_VPE_CONF1, - MISCREG_YQMASK, - MISCREG_VPE_SCHEDULE, MISCREG_VPE_SCHEFBACK, - MISCREG_VPE_OPT, MISCREG_SRS_CONF0, - MISCREG_SRS_CONF1, MISCREG_SRS_CONF2, - MISCREG_SRS_CONF3, MISCREG_SRS_CONF4, - MISCREG_EBASE + uint32_t per_vpe_regs[] = { misc_reg::VpeControl, + misc_reg::VpeConf0, misc_reg::VpeConf1, + misc_reg::Yqmask, + misc_reg::VpeSchedule, misc_reg::VpeSchefback, + misc_reg::VpeOpt, misc_reg::SrsConf0, + misc_reg::SrsConf1, misc_reg::SrsConf2, + misc_reg::SrsConf3, misc_reg::SrsConf4, + misc_reg::Ebase }; uint32_t num_vpe_regs = sizeof(per_vpe_regs) / 4; for (int i = 0; i < num_vpe_regs; i++) { @@ -141,12 +152,12 @@ ISA::ISA(const Params &p) : BaseISA(p), numThreads(p.num_threads), } // Initialize all Per-TC regs - uint32_t per_tc_regs[] = { MISCREG_STATUS, - MISCREG_TC_STATUS, MISCREG_TC_BIND, - MISCREG_TC_RESTART, MISCREG_TC_HALT, - MISCREG_TC_CONTEXT, MISCREG_TC_SCHEDULE, - MISCREG_TC_SCHEFBACK, - MISCREG_DEBUG, MISCREG_LLADDR + uint32_t per_tc_regs[] = { misc_reg::Status, + 
misc_reg::TcStatus, misc_reg::TcBind, + misc_reg::TcRestart, misc_reg::TcHalt, + misc_reg::TcContext, misc_reg::TcSchedule, + misc_reg::TcSchefback, + misc_reg::Debug, misc_reg::Lladdr }; uint32_t num_tc_regs = sizeof(per_tc_regs) / 4; @@ -161,7 +172,7 @@ ISA::ISA(const Params &p) : BaseISA(p), numThreads(p.num_threads), void ISA::clear() { - for (int i = 0; i < MISCREG_NUMREGS; i++) { + for (int i = 0; i < misc_reg::NumRegs; i++) { for (int j = 0; j < miscRegFile[i].size(); j++) miscRegFile[i][j] = 0; @@ -174,15 +185,15 @@ void ISA::copyRegsFrom(ThreadContext *src) { // First loop through the integer registers. - for (int i = 0; i < NumIntRegs; i++) - tc->setIntRegFlat(i, src->readIntRegFlat(i)); + for (auto &id: intRegClass) + tc->setReg(id, src->getReg(id)); // Then loop through the floating point registers. - for (int i = 0; i < NumFloatRegs; i++) - tc->setFloatRegFlat(i, src->readFloatRegFlat(i)); + for (auto &id: floatRegClass) + tc->setReg(id, src->getReg(id)); // Copy misc. registers - for (int i = 0; i < MISCREG_NUMREGS; i++) + for (int i = 0; i < misc_reg::NumRegs; i++) tc->setMiscRegNoEffect(i, src->readMiscRegNoEffect(i)); // Copy over the PC State @@ -206,34 +217,34 @@ ISA::configCP() // =================================================== DPRINTF(MipsPRA, "Initializing CP0 State.... 
"); - PRIdReg procId = readMiscRegNoEffect(MISCREG_PRID); + PRIdReg procId = readMiscRegNoEffect(misc_reg::Prid); procId.coOp = cp.CP0_PRId_CompanyOptions; procId.coId = cp.CP0_PRId_CompanyID; procId.procId = cp.CP0_PRId_ProcessorID; procId.rev = cp.CP0_PRId_Revision; - setMiscRegNoEffect(MISCREG_PRID, procId); + setMiscRegNoEffect(misc_reg::Prid, procId); // Now, create Write Mask for ProcID register RegVal procIDMask = 0; // Read-Only register replaceBits(procIDMask, 32, 0, 0); - setRegMask(MISCREG_PRID, procIDMask); + setRegMask(misc_reg::Prid, procIDMask); // Config - ConfigReg cfg = readMiscRegNoEffect(MISCREG_CONFIG); + ConfigReg cfg = readMiscRegNoEffect(misc_reg::Config); cfg.be = cp.CP0_Config_BE; cfg.at = cp.CP0_Config_AT; cfg.ar = cp.CP0_Config_AR; cfg.mt = cp.CP0_Config_MT; cfg.vi = cp.CP0_Config_VI; cfg.m = 1; - setMiscRegNoEffect(MISCREG_CONFIG, cfg); + setMiscRegNoEffect(misc_reg::Config, cfg); // Now, create Write Mask for Config register RegVal cfg_Mask = 0x7FFF0007; replaceBits(cfg_Mask, 32, 0, 0); - setRegMask(MISCREG_CONFIG, cfg_Mask); + setRegMask(misc_reg::Config, cfg_Mask); // Config1 - Config1Reg cfg1 = readMiscRegNoEffect(MISCREG_CONFIG1); + Config1Reg cfg1 = readMiscRegNoEffect(misc_reg::Config1); cfg1.mmuSize = cp.CP0_Config1_MMU; cfg1.is = cp.CP0_Config1_IS; cfg1.il = cp.CP0_Config1_IL; @@ -248,14 +259,14 @@ ISA::configCP() cfg1.c2 = cp.CP0_Config1_C2; cfg1.pc = cp.CP0_Config1_PC; cfg1.m = cp.CP0_Config1_M; - setMiscRegNoEffect(MISCREG_CONFIG1, cfg1); + setMiscRegNoEffect(misc_reg::Config1, cfg1); // Now, create Write Mask for Config register RegVal cfg1_Mask = 0; // Read Only Register replaceBits(cfg1_Mask, 32,0 , 0); - setRegMask(MISCREG_CONFIG1, cfg1_Mask); + setRegMask(misc_reg::Config1, cfg1_Mask); // Config2 - Config2Reg cfg2 = readMiscRegNoEffect(MISCREG_CONFIG2); + Config2Reg cfg2 = readMiscRegNoEffect(misc_reg::Config2); cfg2.tu = cp.CP0_Config2_TU; cfg2.ts = cp.CP0_Config2_TS; cfg2.tl = cp.CP0_Config2_TL; @@ -265,14 +276,14 @@ 
ISA::configCP() cfg2.sl = cp.CP0_Config2_SL; cfg2.sa = cp.CP0_Config2_SA; cfg2.m = cp.CP0_Config2_M; - setMiscRegNoEffect(MISCREG_CONFIG2, cfg2); + setMiscRegNoEffect(misc_reg::Config2, cfg2); // Now, create Write Mask for Config register RegVal cfg2_Mask = 0x7000F000; // Read Only Register replaceBits(cfg2_Mask, 32, 0, 0); - setRegMask(MISCREG_CONFIG2, cfg2_Mask); + setRegMask(misc_reg::Config2, cfg2_Mask); // Config3 - Config3Reg cfg3 = readMiscRegNoEffect(MISCREG_CONFIG3); + Config3Reg cfg3 = readMiscRegNoEffect(misc_reg::Config3); cfg3.dspp = cp.CP0_Config3_DSPP; cfg3.lpa = cp.CP0_Config3_LPA; cfg3.veic = cp.CP0_Config3_VEIC; @@ -281,79 +292,79 @@ ISA::configCP() cfg3.mt = cp.CP0_Config3_MT; cfg3.sm = cp.CP0_Config3_SM; cfg3.tl = cp.CP0_Config3_TL; - setMiscRegNoEffect(MISCREG_CONFIG3, cfg3); + setMiscRegNoEffect(misc_reg::Config3, cfg3); // Now, create Write Mask for Config register RegVal cfg3_Mask = 0; // Read Only Register replaceBits(cfg3_Mask, 32,0 , 0); - setRegMask(MISCREG_CONFIG3, cfg3_Mask); + setRegMask(misc_reg::Config3, cfg3_Mask); // EBase - CPUNum - EBaseReg eBase = readMiscRegNoEffect(MISCREG_EBASE); + EBaseReg eBase = readMiscRegNoEffect(misc_reg::Ebase); eBase.cpuNum = cp.CP0_EBase_CPUNum; replaceBits(eBase, 31, 31, 1); - setMiscRegNoEffect(MISCREG_EBASE, eBase); + setMiscRegNoEffect(misc_reg::Ebase, eBase); // Now, create Write Mask for Config register RegVal EB_Mask = 0x3FFFF000;// Except Exception Base, the // entire register is read only replaceBits(EB_Mask, 32, 0, 0); - setRegMask(MISCREG_EBASE, EB_Mask); + setRegMask(misc_reg::Ebase, EB_Mask); // SRS Control - HSS (Highest Shadow Set) - SRSCtlReg scsCtl = readMiscRegNoEffect(MISCREG_SRSCTL); + SRSCtlReg scsCtl = readMiscRegNoEffect(misc_reg::Srsctl); scsCtl.hss = cp.CP0_SrsCtl_HSS; - setMiscRegNoEffect(MISCREG_SRSCTL, scsCtl); + setMiscRegNoEffect(misc_reg::Srsctl, scsCtl); // Now, create Write Mask for the SRS Ctl register RegVal SC_Mask = 0x0000F3C0; replaceBits(SC_Mask, 32, 0, 0); - 
setRegMask(MISCREG_SRSCTL, SC_Mask); + setRegMask(misc_reg::Srsctl, SC_Mask); // IntCtl - IPTI, IPPCI - IntCtlReg intCtl = readMiscRegNoEffect(MISCREG_INTCTL); + IntCtlReg intCtl = readMiscRegNoEffect(misc_reg::Intctl); intCtl.ipti = cp.CP0_IntCtl_IPTI; intCtl.ippci = cp.CP0_IntCtl_IPPCI; - setMiscRegNoEffect(MISCREG_INTCTL, intCtl); + setMiscRegNoEffect(misc_reg::Intctl, intCtl); // Now, create Write Mask for the IntCtl register RegVal IC_Mask = 0x000003E0; replaceBits(IC_Mask, 32, 0, 0); - setRegMask(MISCREG_INTCTL, IC_Mask); + setRegMask(misc_reg::Intctl, IC_Mask); // Watch Hi - M - FIXME (More than 1 Watch register) - WatchHiReg watchHi = readMiscRegNoEffect(MISCREG_WATCHHI0); + WatchHiReg watchHi = readMiscRegNoEffect(misc_reg::Watchhi0); watchHi.m = cp.CP0_WatchHi_M; - setMiscRegNoEffect(MISCREG_WATCHHI0, watchHi); + setMiscRegNoEffect(misc_reg::Watchhi0, watchHi); // Now, create Write Mask for the IntCtl register RegVal wh_Mask = 0x7FFF0FFF; replaceBits(wh_Mask, 32, 0, 0); - setRegMask(MISCREG_WATCHHI0, wh_Mask); + setRegMask(misc_reg::Watchhi0, wh_Mask); // Perf Ctr - M - FIXME (More than 1 PerfCnt Pair) - PerfCntCtlReg perfCntCtl = readMiscRegNoEffect(MISCREG_PERFCNT0); + PerfCntCtlReg perfCntCtl = readMiscRegNoEffect(misc_reg::Perfcnt0); perfCntCtl.m = cp.CP0_PerfCtr_M; perfCntCtl.w = cp.CP0_PerfCtr_W; - setMiscRegNoEffect(MISCREG_PERFCNT0, perfCntCtl); + setMiscRegNoEffect(misc_reg::Perfcnt0, perfCntCtl); // Now, create Write Mask for the IntCtl register RegVal pc_Mask = 0x00007FF; replaceBits(pc_Mask, 32, 0, 0); - setRegMask(MISCREG_PERFCNT0, pc_Mask); + setRegMask(misc_reg::Perfcnt0, pc_Mask); // Random - setMiscRegNoEffect(MISCREG_CP0_RANDOM, 63); + setMiscRegNoEffect(misc_reg::Cp0Random, 63); // Now, create Write Mask for the IntCtl register RegVal random_Mask = 0; replaceBits(random_Mask, 32, 0, 0); - setRegMask(MISCREG_CP0_RANDOM, random_Mask); + setRegMask(misc_reg::Cp0Random, random_Mask); // PageGrain - PageGrainReg pageGrain = 
readMiscRegNoEffect(MISCREG_PAGEGRAIN); + PageGrainReg pageGrain = readMiscRegNoEffect(misc_reg::Pagegrain); pageGrain.esp = cp.CP0_Config3_SP; - setMiscRegNoEffect(MISCREG_PAGEGRAIN, pageGrain); + setMiscRegNoEffect(misc_reg::Pagegrain, pageGrain); // Now, create Write Mask for the IntCtl register RegVal pg_Mask = 0x10000000; replaceBits(pg_Mask, 32, 0, 0); - setRegMask(MISCREG_PAGEGRAIN, pg_Mask); + setRegMask(misc_reg::Pagegrain, pg_Mask); // Status - StatusReg status = readMiscRegNoEffect(MISCREG_STATUS); + StatusReg status = readMiscRegNoEffect(misc_reg::Status); // Only CU0 and IE are modified on a reset - everything else needs // to be controlled on a per CPU model basis @@ -365,47 +376,47 @@ ISA::configCP() // Enable BEV bit on a reset status.bev = 1; - setMiscRegNoEffect(MISCREG_STATUS, status); + setMiscRegNoEffect(misc_reg::Status, status); // Now, create Write Mask for the Status register RegVal stat_Mask = 0xFF78FF17; replaceBits(stat_Mask, 32, 0, 0); - setRegMask(MISCREG_STATUS, stat_Mask); + setRegMask(misc_reg::Status, stat_Mask); // MVPConf0 - MVPConf0Reg mvpConf0 = readMiscRegNoEffect(MISCREG_MVP_CONF0); + MVPConf0Reg mvpConf0 = readMiscRegNoEffect(misc_reg::MvpConf0); mvpConf0.tca = 1; mvpConf0.pvpe = numVpes - 1; mvpConf0.ptc = numThreads - 1; - setMiscRegNoEffect(MISCREG_MVP_CONF0, mvpConf0); + setMiscRegNoEffect(misc_reg::MvpConf0, mvpConf0); // VPEConf0 - VPEConf0Reg vpeConf0 = readMiscRegNoEffect(MISCREG_VPE_CONF0); + VPEConf0Reg vpeConf0 = readMiscRegNoEffect(misc_reg::VpeConf0); vpeConf0.mvp = 1; - setMiscRegNoEffect(MISCREG_VPE_CONF0, vpeConf0); + setMiscRegNoEffect(misc_reg::VpeConf0, vpeConf0); // TCBind for (ThreadID tid = 0; tid < numThreads; tid++) { - TCBindReg tcBind = readMiscRegNoEffect(MISCREG_TC_BIND, tid); + TCBindReg tcBind = readMiscRegNoEffect(misc_reg::TcBind, tid); tcBind.curTC = tid; - setMiscRegNoEffect(MISCREG_TC_BIND, tcBind, tid); + setMiscRegNoEffect(misc_reg::TcBind, tcBind, tid); } // TCHalt - TCHaltReg tcHalt = 
readMiscRegNoEffect(MISCREG_TC_HALT); + TCHaltReg tcHalt = readMiscRegNoEffect(misc_reg::TcHalt); tcHalt.h = 0; - setMiscRegNoEffect(MISCREG_TC_HALT, tcHalt); + setMiscRegNoEffect(misc_reg::TcHalt, tcHalt); // TCStatus // Set TCStatus Activated to 1 for the initial thread that is running - TCStatusReg tcStatus = readMiscRegNoEffect(MISCREG_TC_STATUS); + TCStatusReg tcStatus = readMiscRegNoEffect(misc_reg::TcStatus); tcStatus.a = 1; - setMiscRegNoEffect(MISCREG_TC_STATUS, tcStatus); + setMiscRegNoEffect(misc_reg::TcStatus, tcStatus); // Set Dynamically Allocatable bit to 1 for all other threads for (ThreadID tid = 1; tid < numThreads; tid++) { - tcStatus = readMiscRegNoEffect(MISCREG_TC_STATUS, tid); + tcStatus = readMiscRegNoEffect(misc_reg::TcStatus, tid); tcStatus.da = 1; - setMiscRegNoEffect(MISCREG_TC_STATUS, tcStatus, tid); + setMiscRegNoEffect(misc_reg::TcStatus, tcStatus, tid); } @@ -413,88 +424,86 @@ ISA::configCP() // Now, create Write Mask for the Index register replaceBits(mask, 32, 0, 0); - setRegMask(MISCREG_INDEX, mask); + setRegMask(misc_reg::Index, mask); mask = 0x3FFFFFFF; replaceBits(mask, 32, 0, 0); - setRegMask(MISCREG_ENTRYLO0, mask); - setRegMask(MISCREG_ENTRYLO1, mask); + setRegMask(misc_reg::Entrylo0, mask); + setRegMask(misc_reg::Entrylo1, mask); mask = 0xFF800000; replaceBits(mask, 32, 0, 0); - setRegMask(MISCREG_CONTEXT, mask); + setRegMask(misc_reg::Context, mask); mask = 0x1FFFF800; replaceBits(mask, 32, 0, 0); - setRegMask(MISCREG_PAGEMASK, mask); + setRegMask(misc_reg::Pagemask, mask); mask = 0x0; replaceBits(mask, 32, 0, 0); - setRegMask(MISCREG_BADVADDR, mask); - setRegMask(MISCREG_LLADDR, mask); + setRegMask(misc_reg::Badvaddr, mask); + setRegMask(misc_reg::Lladdr, mask); mask = 0x08C00300; replaceBits(mask, 32, 0, 0); - setRegMask(MISCREG_CAUSE, mask); + setRegMask(misc_reg::Cause, mask); } inline unsigned ISA::getVPENum(ThreadID tid) const { - TCBindReg tcBind = miscRegFile[MISCREG_TC_BIND][tid]; + TCBindReg tcBind = 
miscRegFile[misc_reg::TcBind][tid]; return tcBind.curVPE; } RegVal -ISA::readMiscRegNoEffect(int misc_reg, ThreadID tid) const +ISA::readMiscRegNoEffect(RegIndex idx, ThreadID tid) const { - unsigned reg_sel = (bankType[misc_reg] == perThreadContext) + unsigned reg_sel = (bankType[idx] == perThreadContext) ? tid : getVPENum(tid); DPRINTF(MipsPRA, "Reading CP0 Register:%u Select:%u (%s) (%lx).\n", - misc_reg / 8, misc_reg % 8, miscRegNames[misc_reg], - miscRegFile[misc_reg][reg_sel]); - return miscRegFile[misc_reg][reg_sel]; + idx / 8, idx % 8, miscRegNames[idx], miscRegFile[idx][reg_sel]); + return miscRegFile[idx][reg_sel]; } //@TODO: MIPS MT's register view automatically connects // Status to TCStatus depending on current thread //template RegVal -ISA::readMiscReg(int misc_reg, ThreadID tid) +ISA::readMiscReg(RegIndex idx, ThreadID tid) { - unsigned reg_sel = (bankType[misc_reg] == perThreadContext) + unsigned reg_sel = (bankType[idx] == perThreadContext) ? tid : getVPENum(tid); DPRINTF(MipsPRA, "Reading CP0 Register:%u Select:%u (%s) with effect (%lx).\n", - misc_reg / 8, misc_reg % 8, miscRegNames[misc_reg], - miscRegFile[misc_reg][reg_sel]); + idx / 8, idx % 8, miscRegNames[idx], miscRegFile[idx][reg_sel]); - return miscRegFile[misc_reg][reg_sel]; + return miscRegFile[idx][reg_sel]; } void -ISA::setMiscRegNoEffect(int misc_reg, RegVal val, ThreadID tid) +ISA::setMiscRegNoEffect(RegIndex idx, RegVal val, ThreadID tid) { - unsigned reg_sel = (bankType[misc_reg] == perThreadContext) + unsigned reg_sel = (bankType[idx] == perThreadContext) ? 
tid : getVPENum(tid); DPRINTF(MipsPRA, "[tid:%i] Setting (direct set) CP0 Register:%u " "Select:%u (%s) to %#x.\n", - tid, misc_reg / 8, misc_reg % 8, miscRegNames[misc_reg], val); + tid, idx / 8, idx % 8, miscRegNames[idx], val); - miscRegFile[misc_reg][reg_sel] = val; + miscRegFile[idx][reg_sel] = val; } void -ISA::setRegMask(int misc_reg, RegVal val, ThreadID tid) +ISA::setRegMask(RegIndex idx, RegVal val, ThreadID tid) { - unsigned reg_sel = (bankType[misc_reg] == perThreadContext) + unsigned reg_sel = (bankType[idx] == perThreadContext) ? tid : getVPENum(tid); DPRINTF(MipsPRA, "[tid:%i] Setting CP0 Register: %u Select: %u (%s) to %#x\n", - tid, misc_reg / 8, misc_reg % 8, miscRegNames[misc_reg], val); - miscRegFile_WriteMask[misc_reg][reg_sel] = val; + tid, idx / 8, idx % 8, miscRegNames[idx], val); + miscRegFile_WriteMask[idx][reg_sel] = val; } // PROGRAMMER'S NOTES: @@ -502,19 +511,19 @@ ISA::setRegMask(int misc_reg, RegVal val, ThreadID tid) // be overwritten. Make sure to handle those particular registers // with care! void -ISA::setMiscReg(int misc_reg, RegVal val, ThreadID tid) +ISA::setMiscReg(RegIndex idx, RegVal val, ThreadID tid) { - int reg_sel = (bankType[misc_reg] == perThreadContext) + int reg_sel = (bankType[idx] == perThreadContext) ? 
tid : getVPENum(tid); DPRINTF(MipsPRA, "[tid:%i] Setting CP0 Register:%u " "Select:%u (%s) to %#x, with effect.\n", - tid, misc_reg / 8, misc_reg % 8, miscRegNames[misc_reg], val); + tid, idx / 8, idx % 8, miscRegNames[idx], val); - RegVal cp0_val = filterCP0Write(misc_reg, reg_sel, val); + RegVal cp0_val = filterCP0Write(idx, reg_sel, val); - miscRegFile[misc_reg][reg_sel] = cp0_val; + miscRegFile[idx][reg_sel] = cp0_val; scheduleCP0Update(tc->getCpuPtr(), Cycles(1)); } @@ -525,22 +534,22 @@ ISA::setMiscReg(int misc_reg, RegVal val, ThreadID tid) * (setRegWithEffect) */ RegVal -ISA::filterCP0Write(int misc_reg, int reg_sel, RegVal val) +ISA::filterCP0Write(RegIndex idx, int reg_sel, RegVal val) { RegVal retVal = val; // Mask off read-only regions - retVal &= miscRegFile_WriteMask[misc_reg][reg_sel]; - RegVal curVal = miscRegFile[misc_reg][reg_sel]; + retVal &= miscRegFile_WriteMask[idx][reg_sel]; + RegVal curVal = miscRegFile[idx][reg_sel]; // Mask off current alue with inverse mask (clear writeable bits) - curVal &= (~miscRegFile_WriteMask[misc_reg][reg_sel]); + curVal &= (~miscRegFile_WriteMask[idx][reg_sel]); retVal |= curVal; // Combine the two DPRINTF(MipsPRA, "filterCP0Write: Mask: %lx, Inverse Mask: %lx, write Val: %x, " "current val: %lx, written val: %x\n", - miscRegFile_WriteMask[misc_reg][reg_sel], - ~miscRegFile_WriteMask[misc_reg][reg_sel], - val, miscRegFile[misc_reg][reg_sel], retVal); + miscRegFile_WriteMask[idx][reg_sel], + ~miscRegFile_WriteMask[idx][reg_sel], + val, miscRegFile[idx][reg_sel], retVal); return retVal; } @@ -566,12 +575,12 @@ ISA::updateCPU(BaseCPU *cpu) // EVALUATE CP0 STATE FOR MIPS MT // /////////////////////////////////////////////////////////////////// - MVPConf0Reg mvpConf0 = readMiscRegNoEffect(MISCREG_MVP_CONF0); + MVPConf0Reg mvpConf0 = readMiscRegNoEffect(misc_reg::MvpConf0); ThreadID num_threads = mvpConf0.ptc + 1; for (ThreadID tid = 0; tid < num_threads; tid++) { - TCStatusReg tcStatus = 
readMiscRegNoEffect(MISCREG_TC_STATUS, tid); - TCHaltReg tcHalt = readMiscRegNoEffect(MISCREG_TC_HALT, tid); + TCStatusReg tcStatus = readMiscRegNoEffect(misc_reg::TcStatus, tid); + TCHaltReg tcHalt = readMiscRegNoEffect(misc_reg::TcHalt, tid); //@todo: add vpe/mt check here thru mvpcontrol & vpecontrol regs if (tcHalt.h == 1 || tcStatus.a == 0) { diff --git a/src/arch/mips/isa.hh b/src/arch/mips/isa.hh index c7fbac703a..9ca4fdae01 100644 --- a/src/arch/mips/isa.hh +++ b/src/arch/mips/isa.hh @@ -77,7 +77,7 @@ namespace MipsISA std::vector bankType; public: - void clear(); + void clear() override; PCStateBase * newPCState(Addr new_inst_addr=0) const override @@ -98,18 +98,37 @@ namespace MipsISA ////////////////////////////////////////////////////////// //@TODO: MIPS MT's register view automatically connects // Status to TCStatus depending on current thread - void updateCP0ReadView(int misc_reg, ThreadID tid) { } - RegVal readMiscRegNoEffect(int misc_reg, ThreadID tid = 0) const; + void updateCP0ReadView(RegIndex idx, ThreadID tid) { } + RegVal readMiscRegNoEffect(RegIndex idx, ThreadID tid) const; + RegVal + readMiscRegNoEffect(RegIndex idx) const override + { + return readMiscRegNoEffect(idx, 0); + } - //template - RegVal readMiscReg(int misc_reg, ThreadID tid = 0); + RegVal readMiscReg(RegIndex idx, ThreadID tid); + RegVal + readMiscReg(RegIndex idx) override + { + return readMiscReg(idx, 0); + } - RegVal filterCP0Write(int misc_reg, int reg_sel, RegVal val); - void setRegMask(int misc_reg, RegVal val, ThreadID tid = 0); - void setMiscRegNoEffect(int misc_reg, RegVal val, ThreadID tid=0); + RegVal filterCP0Write(RegIndex idx, int reg_sel, RegVal val); + void setRegMask(RegIndex idx, RegVal val, ThreadID tid = 0); - //template - void setMiscReg(int misc_reg, RegVal val, ThreadID tid=0); + void setMiscRegNoEffect(RegIndex idx, RegVal val, ThreadID tid); + void + setMiscRegNoEffect(RegIndex idx, RegVal val) override + { + setMiscRegNoEffect(idx, val, 0); + } + + 
void setMiscReg(RegIndex idx, RegVal val, ThreadID tid); + void + setMiscReg(RegIndex idx, RegVal val) override + { + setMiscReg(idx, val, 0); + } ////////////////////////////////////////////////////////// // @@ -137,27 +156,16 @@ namespace MipsISA // and if necessary alert the CPU void updateCPU(BaseCPU *cpu); - static std::string miscRegNames[MISCREG_NUMREGS]; + static std::string miscRegNames[misc_reg::NumRegs]; public: ISA(const Params &p); - RegId flattenRegId(const RegId& regId) const { return regId; } - - int flattenIntIndex(int reg) const { return reg; } - int flattenFloatIndex(int reg) const { return reg; } - int flattenVecIndex(int reg) const { return reg; } - int flattenVecElemIndex(int reg) const { return reg; } - int flattenVecPredIndex(int reg) const { return reg; } - // dummy - int flattenCCIndex(int reg) const { return reg; } - int flattenMiscIndex(int reg) const { return reg; } - bool inUserMode() const override { - RegVal Stat = readMiscRegNoEffect(MISCREG_STATUS); - RegVal Dbg = readMiscRegNoEffect(MISCREG_DEBUG); + RegVal Stat = readMiscRegNoEffect(misc_reg::Status); + RegVal Dbg = readMiscRegNoEffect(misc_reg::Debug); if (// EXL, ERL or CU0 set, CP0 accessible (Stat & 0x10000006) == 0 && diff --git a/src/arch/mips/isa/base.isa b/src/arch/mips/isa/base.isa index 8e9b50be04..a06debcce8 100644 --- a/src/arch/mips/isa/base.isa +++ b/src/arch/mips/isa/base.isa @@ -145,4 +145,3 @@ output decoder {{ } }}; - diff --git a/src/arch/mips/isa/decoder.isa b/src/arch/mips/isa/decoder.isa index 2e56a04bef..db2bfa4a32 100644 --- a/src/arch/mips/isa/decoder.isa +++ b/src/arch/mips/isa/decoder.isa @@ -382,86 +382,85 @@ decode OPCODE_HI default Unknown::unknown() { // Decode MIPS MT MFTR instruction into sub-instructions 0x8: decode MT_U { 0x0: mftc0({{ - data = readRegOtherThread(xc, RegId(MiscRegClass, - (RT << 3 | SEL))); + data = readRegOtherThread(xc, + miscRegClass[RT << 3 | SEL]); }}); 0x1: decode SEL { 0x0: mftgpr({{ - data = readRegOtherThread(xc, - 
RegId(IntRegClass, RT)); + data = readRegOtherThread(xc, intRegClass[RT]); }}); 0x1: decode RT { 0x0: mftlo_dsp0({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO0)); + int_reg::DspLo0); }}); 0x1: mfthi_dsp0({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI0)); + int_reg::DspHi0); }}); 0x2: mftacx_dsp0({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX0)); + int_reg::DspAcx0); }}); 0x4: mftlo_dsp1({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO1)); + int_reg::DspLo1); }}); 0x5: mfthi_dsp1({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI1)); + int_reg::DspHi1); }}); 0x6: mftacx_dsp1({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX1)); + int_reg::DspAcx1); }}); 0x8: mftlo_dsp2({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO2)); + int_reg::DspLo2); }}); 0x9: mfthi_dsp2({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI2)); + int_reg::DspHi2); }}); 0x10: mftacx_dsp2({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX2)); + int_reg::DspAcx2); }}); 0x12: mftlo_dsp3({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO3)); + int_reg::DspLo3); }}); 0x13: mfthi_dsp3({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI3)); + int_reg::DspHi3); }}); 0x14: mftacx_dsp3({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX3)); + int_reg::DspAcx3); }}); 0x16: mftdsp({{ data = readRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_CONTROL)); + int_reg::DspControl); }}); default: CP0Unimpl::unknown(); } 0x2: decode MT_H { 0x0: mftc1({{ data = readRegOtherThread(xc, - RegId(FloatRegClass, RT)); + floatRegClass[RT]); }}); 0x1: mfthc1({{ data = readRegOtherThread(xc, - RegId(FloatRegClass, RT)); + floatRegClass[RT]); }}); } 0x3: cftc1({{ uint32_t fcsr_val = readRegOtherThread(xc, - RegId(FloatRegClass, FLOATREG_FCSR)); + float_reg::Fcsr); switch (RT) { case 0: data = 
readRegOtherThread(xc, - RegId(MiscRegClass, FLOATREG_FIR)); + float_reg::Fir); break; case 25: data = (fcsr_val & 0xFE000000 >> 24) | @@ -491,62 +490,61 @@ decode OPCODE_HI default Unknown::unknown() { // Decode MIPS MT MTTR instruction into sub-instructions 0xC: decode MT_U { 0x0: mttc0({{ setRegOtherThread(xc, - RegId(MiscRegClass, (RD << 3 | SEL)), Rt); + miscRegClass[RD << 3 | SEL], Rt); }}); 0x1: decode SEL { 0x0: mttgpr({{ setRegOtherThread(xc, - RegId(IntRegClass, RD), Rt); + intRegClass[RD], Rt); }}); 0x1: decode RT { 0x0: mttlo_dsp0({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO0), Rt); + int_reg::DspLo0, Rt); }}); 0x1: mtthi_dsp0({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI0), Rt); + int_reg::DspHi0, Rt); }}); 0x2: mttacx_dsp0({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX0), Rt); + int_reg::DspAcx0, Rt); }}); 0x4: mttlo_dsp1({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO1), Rt); + int_reg::DspLo1, Rt); }}); 0x5: mtthi_dsp1({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI1), Rt); + int_reg::DspHi1, Rt); }}); 0x6: mttacx_dsp1({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX1), Rt); + int_reg::DspAcx1, Rt); }}); 0x8: mttlo_dsp2({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO2), Rt); + int_reg::DspLo2, Rt); }}); 0x9: mtthi_dsp2({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI2), Rt); + int_reg::DspHi2, Rt); }}); 0x10: mttacx_dsp2({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX2), Rt); + int_reg::DspAcx2, Rt); }}); 0x12: mttlo_dsp3({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_LO3), Rt); + int_reg::DspLo3, Rt); }}); 0x13: mtthi_dsp3({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_HI3), Rt); + int_reg::DspHi3, Rt); }}); 0x14: mttacx_dsp3({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_ACX3), Rt); + int_reg::DspAcx3, Rt); }}); 0x16: mttdsp({{ setRegOtherThread(xc, - RegId(IntRegClass, INTREG_DSP_CONTROL), Rt); + 
int_reg::DspControl, Rt); }}); default: CP0Unimpl::unknown(); } 0x2: mttc1({{ uint64_t data = readRegOtherThread(xc, - RegId(FloatRegClass, RD)); + floatRegClass[RD]); data = insertBits(data, MT_H ? 63 : 31, MT_H ? 32 : 0, Rt); - setRegOtherThread(xc, RegId(FloatRegClass, RD), - data); + setRegOtherThread(xc, floatRegClass[RD], data); }}); 0x3: cttc1({{ uint32_t data; @@ -582,8 +580,7 @@ decode OPCODE_HI default Unknown::unknown() { "Access to Floating Control " "S""tatus Register", FS); } - setRegOtherThread(xc, - RegId(FloatRegClass, FLOATREG_FCSR), data); + setRegOtherThread(xc, float_reg::Fcsr, data); }}); default: CP0Unimpl::unknown(); } @@ -2548,5 +2545,3 @@ decode OPCODE_HI default Unknown::unknown() { 0x6: CP2Unimpl::sdc2(); } } - - diff --git a/src/arch/mips/isa/formats/basic.isa b/src/arch/mips/isa/formats/basic.isa index 765dae2755..8bb90d7215 100644 --- a/src/arch/mips/isa/formats/basic.isa +++ b/src/arch/mips/isa/formats/basic.isa @@ -41,7 +41,7 @@ def template BasicDeclare {{ /// Constructor. %(class_name)s(MachInst machInst); Fault execute(ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -59,7 +59,7 @@ def template BasicConstructor {{ // Basic instruction class execute method template. 
def template BasicExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; diff --git a/src/arch/mips/isa/formats/branch.isa b/src/arch/mips/isa/formats/branch.isa index 96841cfa4a..1b7c1057b6 100644 --- a/src/arch/mips/isa/formats/branch.isa +++ b/src/arch/mips/isa/formats/branch.isa @@ -326,7 +326,3 @@ def format Jump(code, *opt_flags) {{ decode_block = BasicDecode.subst(iop) exec_output = BasicExecute.subst(iop) }}; - - - - diff --git a/src/arch/mips/isa/formats/control.isa b/src/arch/mips/isa/formats/control.isa index 67efc864c7..ea321db89a 100644 --- a/src/arch/mips/isa/formats/control.isa +++ b/src/arch/mips/isa/formats/control.isa @@ -64,7 +64,7 @@ output header {{ // Basic instruction class execute method template. def template CP0Execute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -86,7 +86,7 @@ def template CP0Execute {{ def template CP1Execute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -108,7 +108,7 @@ def template CP1Execute {{ // Basic instruction class execute method template. 
def template ControlTLBExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -177,9 +177,9 @@ output exec {{ if (!FullSystem) return true; - RegVal Stat = xc->readMiscReg(MISCREG_STATUS); + RegVal Stat = xc->readMiscReg(misc_reg::Status); if (cop_num == 0) { - RegVal Dbg = xc->readMiscReg(MISCREG_DEBUG); + RegVal Dbg = xc->readMiscReg(misc_reg::Debug); // In Stat, EXL, ERL or CU0 set, CP0 accessible // In Dbg, DM bit set, CP0 accessible // In Stat, KSU = 0, kernel mode is base mode @@ -197,8 +197,8 @@ output exec {{ isCoprocessor0Enabled(ExecContext *xc) { if (FullSystem) { - RegVal Stat = xc->readMiscReg(MISCREG_STATUS); - RegVal Dbg = xc->readMiscReg(MISCREG_DEBUG); + RegVal Stat = xc->readMiscReg(misc_reg::Status); + RegVal Dbg = xc->readMiscReg(misc_reg::Debug); // In Stat, EXL, ERL or CU0 set, CP0 accessible // In Dbg, DM bit set, CP0 accessible // In Stat KSU = 0, kernel mode is base mode @@ -212,7 +212,7 @@ output exec {{ bool isMMUTLB(ExecContext *xc) { - RegVal Config = xc->readMiscReg(MISCREG_CONFIG); + RegVal Config = xc->readMiscReg(misc_reg::Config); return FullSystem && (Config & 0x380) == 0x80; } }}; @@ -241,5 +241,3 @@ def format CP1Control(code, *flags) {{ decode_block = BasicDecode.subst(iop) exec_output = CP1Execute.subst(iop) }}; - - diff --git a/src/arch/mips/isa/formats/dsp.isa b/src/arch/mips/isa/formats/dsp.isa index fc0895677a..847251f4a6 100644 --- a/src/arch/mips/isa/formats/dsp.isa +++ b/src/arch/mips/isa/formats/dsp.isa @@ -61,7 +61,7 @@ output header {{ // Dsp instruction class execute method template. def template DspExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -95,7 +95,7 @@ def template DspExecute {{ // DspHiLo instruction class execute method template. 
def template DspHiLoExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -147,7 +147,7 @@ output exec {{ bool isDspEnabled(ExecContext *xc) { - return !FullSystem || bits(xc->readMiscReg(MISCREG_STATUS), 24); + return !FullSystem || bits(xc->readMiscReg(misc_reg::Status), 24); } }}; @@ -155,7 +155,7 @@ output exec {{ bool isDspPresent(ExecContext *xc) { - return !FullSystem || bits(xc->readMiscReg(MISCREG_CONFIG3), 10); + return !FullSystem || bits(xc->readMiscReg(misc_reg::Config3), 10); } }}; @@ -208,6 +208,3 @@ def format DspHiLoOp(code, *opt_flags) {{ exec_output = DspHiLoExecute.subst(iop) }}; - - - diff --git a/src/arch/mips/isa/formats/fp.isa b/src/arch/mips/isa/formats/fp.isa index bfcdb8ca92..267972cc4d 100644 --- a/src/arch/mips/isa/formats/fp.isa +++ b/src/arch/mips/isa/formats/fp.isa @@ -98,7 +98,7 @@ output exec {{ template bool fpNanOperands(FPOp *inst, ExecContext *xc, const T &src_type, - Trace::InstRecord *traceData) + trace::InstRecord *traceData) { uint64_t mips_nan = 0; assert(sizeof(T) == 4); @@ -109,7 +109,7 @@ output exec {{ if (isNan(&src_bits, 32) ) { mips_nan = MIPS32_QNAN; xc->setRegOperand(inst, 0, mips_nan); - if (traceData) { traceData->setData(mips_nan); } + if (traceData) { traceData->setData(floatRegClass, mips_nan); } return true; } } @@ -119,7 +119,7 @@ output exec {{ template bool fpInvalidOp(FPOp *inst, ExecContext *cpu, const T dest_val, - Trace::InstRecord *traceData) + trace::InstRecord *traceData) { uint64_t mips_nan = 0; T src_op = dest_val; @@ -132,15 +132,14 @@ output exec {{ cpu->setRegOperand(inst, 0, mips_nan); //Read FCSR from FloatRegFile - uint32_t fcsr_bits = - cpu->tcBase()->readFloatReg(FLOATREG_FCSR); + uint32_t fcsr_bits = cpu->tcBase()->getReg(float_reg::Fcsr); uint32_t new_fcsr = genInvalidVector(fcsr_bits); //Write FCSR from FloatRegFile - cpu->tcBase()->setFloatReg(FLOATREG_FCSR, 
new_fcsr); + cpu->tcBase()->setReg(float_reg::Fcsr, new_fcsr); - if (traceData) { traceData->setData(mips_nan); } + if (traceData) { traceData->setData(floatRegClass, mips_nan); } return true; } @@ -151,19 +150,19 @@ output exec {{ fpResetCauseBits(ExecContext *cpu) { //Read FCSR from FloatRegFile - uint32_t fcsr = cpu->tcBase()->readFloatReg(FLOATREG_FCSR); + uint32_t fcsr = cpu->tcBase()->getReg(float_reg::Fcsr); // TODO: Use utility function here fcsr = bits(fcsr, 31, 18) << 18 | bits(fcsr, 11, 0); //Write FCSR from FloatRegFile - cpu->tcBase()->setFloatReg(FLOATREG_FCSR, fcsr); + cpu->tcBase()->setReg(float_reg::Fcsr, fcsr); } }}; def template FloatingPointExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -364,4 +363,3 @@ def format FloatPSCompareOp(cond_code1, cond_code2, *flags) {{ decode_block = BasicDecode.subst(iop) exec_output = BasicExecute.subst(iop) }}; - diff --git a/src/arch/mips/isa/formats/int.isa b/src/arch/mips/isa/formats/int.isa index f0306965d3..11d4adebf0 100644 --- a/src/arch/mips/isa/formats/int.isa +++ b/src/arch/mips/isa/formats/int.isa @@ -109,7 +109,7 @@ output header {{ def template HiLoExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -129,7 +129,7 @@ def template HiLoExecute {{ def template HiLoRsSelExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -153,7 +153,7 @@ def template HiLoRsSelExecute {{ def template HiLoRdSelExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; diff --git a/src/arch/mips/isa/formats/mem.isa 
b/src/arch/mips/isa/formats/mem.isa index 67973e15fb..1041500d9e 100644 --- a/src/arch/mips/isa/formats/mem.isa +++ b/src/arch/mips/isa/formats/mem.isa @@ -136,10 +136,10 @@ def template LoadStoreDeclare {{ /// Constructor. %(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(Packet *, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -155,7 +155,7 @@ def template LoadStoreConstructor {{ def template LoadExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -187,7 +187,7 @@ def template LoadExecute {{ def template LoadInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -213,7 +213,7 @@ def template LoadInitiateAcc {{ def template LoadCompleteAcc {{ Fault %(class_name)s::completeAcc(Packet *pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -243,7 +243,7 @@ def template LoadCompleteAcc {{ def template StoreExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -277,7 +277,7 @@ def template StoreExecute {{ def template StoreFPExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -312,7 +312,7 @@ def template StoreFPExecute {{ def template StoreCondExecute {{ Fault 
%(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -346,7 +346,7 @@ def template StoreCondExecute {{ def template StoreInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -373,7 +373,7 @@ def template StoreInitiateAcc {{ def template StoreCompleteAcc {{ Fault %(class_name)s::completeAcc(Packet *pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } @@ -382,7 +382,7 @@ def template StoreCompleteAcc {{ def template StoreCondCompleteAcc {{ Fault %(class_name)s::completeAcc(Packet *pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -405,7 +405,7 @@ def template StoreCondCompleteAcc {{ def template MiscExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { [[maybe_unused]] Addr EA = 0; Fault fault = NoFault; @@ -425,7 +425,7 @@ def template MiscExecute {{ def template MiscInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("Misc instruction does not support split access method!"); return NoFault; @@ -435,7 +435,7 @@ def template MiscInitiateAcc {{ def template MiscCompleteAcc {{ Fault %(class_name)s::completeAcc(Packet *pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("Misc instruction does not support split access method!"); diff --git a/src/arch/mips/isa/formats/mt.isa b/src/arch/mips/isa/formats/mt.isa index ff4aca91b3..1ded627a71 100644 --- a/src/arch/mips/isa/formats/mt.isa +++ b/src/arch/mips/isa/formats/mt.isa @@ -92,23 +92,22 @@ output exec {{ TCBindReg &tc_bind, 
VPEControlReg &vpe_control, MVPConf0Reg &mvp_conf0) { - vpe_conf0 = xc->readMiscReg(MISCREG_VPE_CONF0); - tc_bind_mt = readRegOtherThread(xc, RegId(MiscRegClass, - MISCREG_TC_BIND)); - tc_bind = xc->readMiscReg(MISCREG_TC_BIND); - vpe_control = xc->readMiscReg(MISCREG_VPE_CONTROL); - mvp_conf0 = xc->readMiscReg(MISCREG_MVP_CONF0); + vpe_conf0 = xc->readMiscReg(misc_reg::VpeConf0); + tc_bind_mt = readRegOtherThread(xc, miscRegClass[misc_reg::TcBind]); + tc_bind = xc->readMiscReg(misc_reg::TcBind); + vpe_control = xc->readMiscReg(misc_reg::VpeControl); + mvp_conf0 = xc->readMiscReg(misc_reg::MvpConf0); } void getMTExValues(ExecContext *xc, Config3Reg &config3) { - config3 = xc->readMiscReg(MISCREG_CONFIG3); + config3 = xc->readMiscReg(misc_reg::Config3); } }}; def template ThreadRegisterExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; [[maybe_unused]] int64_t data; @@ -147,7 +146,7 @@ def template ThreadRegisterExecute {{ def template MTExecute{{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; diff --git a/src/arch/mips/isa/formats/noop.isa b/src/arch/mips/isa/formats/noop.isa index 8ca30b9c05..93807c5224 100644 --- a/src/arch/mips/isa/formats/noop.isa +++ b/src/arch/mips/isa/formats/noop.isa @@ -54,7 +54,7 @@ output header {{ std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -80,7 +80,7 @@ output decoder {{ output exec {{ Fault - Nop::execute(ExecContext *, Trace::InstRecord *) const + Nop::execute(ExecContext *, trace::InstRecord *) const { return NoFault; } @@ -134,4 +134,3 @@ def format BasicOperateWithNopCheck(code, 
*opt_args) {{ def format Nop() {{ decode_block = 'return new Nop(\"\",machInst);\n' }}; - diff --git a/src/arch/mips/isa/formats/tlbop.isa b/src/arch/mips/isa/formats/tlbop.isa index a8d50765dc..ad6372accd 100644 --- a/src/arch/mips/isa/formats/tlbop.isa +++ b/src/arch/mips/isa/formats/tlbop.isa @@ -56,7 +56,7 @@ output decoder {{ def template TlbOpExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { //Write the resulting state to the execution context %(op_wb)s; diff --git a/src/arch/mips/isa/formats/trap.isa b/src/arch/mips/isa/formats/trap.isa index 8b353ac9c9..634b3c0393 100644 --- a/src/arch/mips/isa/formats/trap.isa +++ b/src/arch/mips/isa/formats/trap.isa @@ -75,7 +75,7 @@ output decoder {{ def template TrapExecute {{ // Edit This Template When Traps Are Implemented Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { //Write the resulting state to the execution context %(op_wb)s; diff --git a/src/arch/mips/isa/formats/unimp.isa b/src/arch/mips/isa/formats/unimp.isa index 5a40a6cd39..198e20b820 100644 --- a/src/arch/mips/isa/formats/unimp.isa +++ b/src/arch/mips/isa/formats/unimp.isa @@ -51,7 +51,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -68,7 +68,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -85,7 +85,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, 
Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -102,7 +102,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -133,7 +133,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -179,7 +179,7 @@ output decoder {{ output exec {{ Fault FailUnimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("attempt to execute unimplemented instruction '%s' " "(inst 0x%08x, opcode 0x%x, binary:%s)", mnemonic, machInst, OPCODE, @@ -189,7 +189,7 @@ output exec {{ Fault CP0Unimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { if (FullSystem) { if (!isCoprocessorEnabled(xc, 0)) @@ -206,7 +206,7 @@ output exec {{ Fault CP1Unimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { if (FullSystem) { if (!isCoprocessorEnabled(xc, 1)) @@ -223,7 +223,7 @@ output exec {{ Fault CP2Unimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { if (FullSystem) { if (!isCoprocessorEnabled(xc, 2)) @@ -240,7 +240,7 @@ output exec {{ Fault WarnUnimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { if (!warned) { warn("\tinstruction '%s' unimplemented\n", mnemonic); @@ -273,4 +273,3 
@@ def format WarnUnimpl() {{ iop = InstObjParams(name, 'WarnUnimplemented') decode_block = BasicDecodeWithMnemonic.subst(iop) }}; - diff --git a/src/arch/mips/isa/formats/unknown.isa b/src/arch/mips/isa/formats/unknown.isa index fead3c4494..8d3ccdfef1 100644 --- a/src/arch/mips/isa/formats/unknown.isa +++ b/src/arch/mips/isa/formats/unknown.isa @@ -49,7 +49,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -68,7 +68,7 @@ output decoder {{ output exec {{ Fault - Unknown::execute(ExecContext *xc, Trace::InstRecord *traceData) const + Unknown::execute(ExecContext *xc, trace::InstRecord *traceData) const { return std::make_shared(); } @@ -77,4 +77,3 @@ output exec {{ def format Unknown() {{ decode_block = 'return new Unknown(machInst);\n' }}; - diff --git a/src/arch/mips/isa/operands.isa b/src/arch/mips/isa/operands.isa index 16c34a7c1e..53df90e360 100644 --- a/src/arch/mips/isa/operands.isa +++ b/src/arch/mips/isa/operands.isa @@ -44,7 +44,7 @@ let {{ @overrideInOperand def regId(self): return f'(({self.reg_spec}) == 0) ? 
RegId() : ' \ - f'RegId({self.reg_class}, {self.reg_spec})' + f'{self.reg_class}[{self.reg_spec}]' }}; def operands {{ @@ -60,29 +60,29 @@ def operands {{ 'R31': IntReg('uw', '31', 'IsInteger', 4), #Special Integer Reg operands - 'LO0': IntReg('uw', 'INTREG_LO', 'IsInteger', 6), - 'HI0': IntReg('uw', 'INTREG_HI', 'IsInteger', 7), + 'LO0': IntReg('uw', 'int_reg::Lo', 'IsInteger', 6), + 'HI0': IntReg('uw', 'int_reg::Hi', 'IsInteger', 7), #Bitfield-dependent HI/LO Register Access - 'LO_RD_SEL': IntReg('uw', 'INTREG_DSP_LO0 + ACDST*3', None, 6), - 'HI_RD_SEL': IntReg('uw', 'INTREG_DSP_HI0 + ACDST*3', None, 7), - 'LO_RS_SEL': IntReg('uw', 'INTREG_DSP_LO0 + ACSRC*3', None, 6), - 'HI_RS_SEL': IntReg('uw', 'INTREG_DSP_HI0 + ACSRC*3', None, 7), + 'LO_RD_SEL': IntReg('uw', 'int_reg::DspLo0 + ACDST*3', None, 6), + 'HI_RD_SEL': IntReg('uw', 'int_reg::DspHi0 + ACDST*3', None, 7), + 'LO_RS_SEL': IntReg('uw', 'int_reg::DspLo0 + ACSRC*3', None, 6), + 'HI_RS_SEL': IntReg('uw', 'int_reg::DspHi0 + ACSRC*3', None, 7), #DSP Special Purpose Integer Operands - 'DSPControl': IntReg('uw', 'INTREG_DSP_CONTROL', None, 8), - 'DSPLo0': IntReg('uw', 'INTREG_LO', None, 1), - 'DSPHi0': IntReg('uw', 'INTREG_HI', None, 1), - 'DSPACX0': IntReg('uw', 'INTREG_DSP_ACX0', None, 1), - 'DSPLo1': IntReg('uw', 'INTREG_DSP_LO1', None, 1), - 'DSPHi1': IntReg('uw', 'INTREG_DSP_HI1', None, 1), - 'DSPACX1': IntReg('uw', 'INTREG_DSP_ACX1', None, 1), - 'DSPLo2': IntReg('uw', 'INTREG_DSP_LO2', None, 1), - 'DSPHi2': IntReg('uw', 'INTREG_DSP_HI2', None, 1), - 'DSPACX2': IntReg('uw', 'INTREG_DSP_ACX2', None, 1), - 'DSPLo3': IntReg('uw', 'INTREG_DSP_LO3', None, 1), - 'DSPHi3': IntReg('uw', 'INTREG_DSP_HI3', None, 1), - 'DSPACX3': IntReg('uw', 'INTREG_DSP_ACX3', None, 1), + 'DSPControl': IntReg('uw', 'int_reg::DspControl', None, 8), + 'DSPLo0': IntReg('uw', 'int_reg::Lo', None, 1), + 'DSPHi0': IntReg('uw', 'int_reg::Hi', None, 1), + 'DSPACX0': IntReg('uw', 'int_reg::DspAcx0', None, 1), + 'DSPLo1': IntReg('uw', 
'int_reg::DspLo1', None, 1), + 'DSPHi1': IntReg('uw', 'int_reg::DspHi1', None, 1), + 'DSPACX1': IntReg('uw', 'int_reg::DspAcx1', None, 1), + 'DSPLo2': IntReg('uw', 'int_reg::DspLo2', None, 1), + 'DSPHi2': IntReg('uw', 'int_reg::DspHi2', None, 1), + 'DSPACX2': IntReg('uw', 'int_reg::DspAcx2', None, 1), + 'DSPLo3': IntReg('uw', 'int_reg::DspLo3', None, 1), + 'DSPHi3': IntReg('uw', 'int_reg::DspHi3', None, 1), + 'DSPACX3': IntReg('uw', 'int_reg::DspAcx3', None, 1), #Floating Point Reg Operands 'Fd': FloatRegOp('sf', 'FD', 'IsFloating', 1), @@ -91,11 +91,11 @@ def operands {{ 'Fr': FloatRegOp('sf', 'FR', 'IsFloating', 3), #Special Purpose Floating Point Control Reg Operands - 'FIR': FloatRegOp('uw', 'FLOATREG_FIR', 'IsFloating', 1), - 'FCCR': FloatRegOp('uw', 'FLOATREG_FCCR', 'IsFloating', 2), - 'FEXR': FloatRegOp('uw', 'FLOATREG_FEXR', 'IsFloating', 3), - 'FENR': FloatRegOp('uw', 'FLOATREG_FENR', 'IsFloating', 3), - 'FCSR': FloatRegOp('uw', 'FLOATREG_FCSR', 'IsFloating', 3), + 'FIR': FloatRegOp('uw', 'float_reg::Fir', 'IsFloating', 1), + 'FCCR': FloatRegOp('uw', 'float_reg::Fccr', 'IsFloating', 2), + 'FEXR': FloatRegOp('uw', 'float_reg::Fexr', 'IsFloating', 3), + 'FENR': FloatRegOp('uw', 'float_reg::Fenr', 'IsFloating', 3), + 'FCSR': FloatRegOp('uw', 'float_reg::Fcsr', 'IsFloating', 3), #Operands For Paired Singles FP Operations 'Fd1': FloatRegOp('sf', 'FD', 'IsFloating', 4), @@ -108,48 +108,48 @@ def operands {{ 'Fr2': FloatRegOp('sf', 'FR+1', 'IsFloating', 7), #Status Control Reg - 'Status': ControlRegOp('uw', 'MISCREG_STATUS', None, 1), + 'Status': ControlRegOp('uw', 'misc_reg::Status', None, 1), #LL Flag - 'LLFlag': ControlRegOp('uw', 'MISCREG_LLFLAG', None, 1), + 'LLFlag': ControlRegOp('uw', 'misc_reg::Llflag', None, 1), #Thread pointer value for SE mode - 'TpValue': ControlRegOp('ud', 'MISCREG_TP_VALUE', None, 1), + 'TpValue': ControlRegOp('ud', 'misc_reg::TpValue', None, 1), # Index Register - 'Index': ControlRegOp('uw','MISCREG_INDEX',None,1), + 'Index': 
ControlRegOp('uw','misc_reg::Index',None,1), 'CP0_RD_SEL': ControlRegOp('uw', '(RD << 3 | SEL)', None, 1), #MT Control Regs - 'MVPConf0': ControlRegOp('uw', 'MISCREG_MVP_CONF0', None, 1), - 'MVPControl': ControlRegOp('uw', 'MISCREG_MVP_CONTROL', None, 1), - 'TCBind': ControlRegOp('uw', 'MISCREG_TC_BIND', None, 1), - 'TCStatus': ControlRegOp('uw', 'MISCREG_TC_STATUS', None, 1), - 'TCRestart': ControlRegOp('uw', 'MISCREG_TC_RESTART', None, 1), - 'VPEConf0': ControlRegOp('uw', 'MISCREG_VPE_CONF0', None, 1), - 'VPEControl': ControlRegOp('uw', 'MISCREG_VPE_CONTROL', None, 1), - 'YQMask': ControlRegOp('uw', 'MISCREG_YQMASK', None, 1), + 'MVPConf0': ControlRegOp('uw', 'misc_reg::MvpConf0', None, 1), + 'MVPControl': ControlRegOp('uw', 'misc_reg::MvpControl', None, 1), + 'TCBind': ControlRegOp('uw', 'misc_reg::TcBind', None, 1), + 'TCStatus': ControlRegOp('uw', 'misc_reg::TcStatus', None, 1), + 'TCRestart': ControlRegOp('uw', 'misc_reg::TcRestart', None, 1), + 'VPEConf0': ControlRegOp('uw', 'misc_reg::VpeConf0', None, 1), + 'VPEControl': ControlRegOp('uw', 'misc_reg::VpeControl', None, 1), + 'YQMask': ControlRegOp('uw', 'misc_reg::Yqmask', None, 1), #CP0 Control Regs - 'EntryHi': ControlRegOp('uw', 'MISCREG_ENTRYHI',None,1), - 'EntryLo0': ControlRegOp('uw', 'MISCREG_ENTRYLO0',None,1), - 'EntryLo1': ControlRegOp('uw', 'MISCREG_ENTRYLO1',None,1), - 'PageMask': ControlRegOp('uw', 'MISCREG_PAGEMASK',None,1), - 'Random': ControlRegOp('uw', 'MISCREG_CP0_RANDOM',None,1), - 'ErrorEPC': ControlRegOp('uw', 'MISCREG_ERROR_EPC',None,1), - 'EPC': ControlRegOp('uw', 'MISCREG_EPC',None,1), - 'DEPC': ControlRegOp('uw', 'MISCREG_DEPC',None,1), - 'IntCtl': ControlRegOp('uw', 'MISCREG_INTCTL',None,1), - 'SRSCtl': ControlRegOp('uw', 'MISCREG_SRSCTL',None,1), - 'Config': ControlRegOp('uw', 'MISCREG_CONFIG',None,1), - 'Config3': ControlRegOp('uw', 'MISCREG_CONFIG3',None,1), - 'Config1': ControlRegOp('uw', 'MISCREG_CONFIG1',None,1), - 'Config2': ControlRegOp('uw', 'MISCREG_CONFIG2',None,1), - 
'PageGrain': ControlRegOp('uw', 'MISCREG_PAGEGRAIN',None,1), - 'Debug': ControlRegOp('uw', 'MISCREG_DEBUG',None,1), - 'Cause': ControlRegOp('uw', 'MISCREG_CAUSE',None,1), + 'EntryHi': ControlRegOp('uw', 'misc_reg::Entryhi',None,1), + 'EntryLo0': ControlRegOp('uw', 'misc_reg::Entrylo0',None,1), + 'EntryLo1': ControlRegOp('uw', 'misc_reg::Entrylo1',None,1), + 'PageMask': ControlRegOp('uw', 'misc_reg::Pagemask',None,1), + 'Random': ControlRegOp('uw', 'misc_reg::Cp0Random',None,1), + 'ErrorEPC': ControlRegOp('uw', 'misc_reg::ErrorEpc',None,1), + 'EPC': ControlRegOp('uw', 'misc_reg::Epc',None,1), + 'DEPC': ControlRegOp('uw', 'misc_reg::Depc',None,1), + 'IntCtl': ControlRegOp('uw', 'misc_reg::Intctl',None,1), + 'SRSCtl': ControlRegOp('uw', 'misc_reg::Srsctl',None,1), + 'Config': ControlRegOp('uw', 'misc_reg::Config',None,1), + 'Config3': ControlRegOp('uw', 'misc_reg::Config3',None,1), + 'Config1': ControlRegOp('uw', 'misc_reg::Config1',None,1), + 'Config2': ControlRegOp('uw', 'misc_reg::Config2',None,1), + 'PageGrain': ControlRegOp('uw', 'misc_reg::Pagegrain',None,1), + 'Debug': ControlRegOp('uw', 'misc_reg::Debug',None,1), + 'Cause': ControlRegOp('uw', 'misc_reg::Cause',None,1), #Memory Operand 'Mem': MemOp('uw', None, (None, 'IsLoad', 'IsStore'), 4), diff --git a/src/arch/mips/linux/se_workload.cc b/src/arch/mips/linux/se_workload.cc index 6767bf7e87..0f5cd788e2 100644 --- a/src/arch/mips/linux/se_workload.cc +++ b/src/arch/mips/linux/se_workload.cc @@ -82,7 +82,7 @@ EmuLinux::syscall(ThreadContext *tc) // This will move into the base SEWorkload function at some point. process->Process::syscall(tc); - syscallDescs.get(tc->readIntReg(2))->doSyscall(tc); + syscallDescs.get(tc->getReg(int_reg::V0))->doSyscall(tc); } /// Target uname() handler. 
@@ -153,7 +153,7 @@ sys_setsysinfoFunc(SyscallDesc *desc, ThreadContext *tc, unsigned op, static SyscallReturn setThreadAreaFunc(SyscallDesc *desc, ThreadContext *tc, VPtr<> addr) { - tc->setMiscRegNoEffect(MISCREG_TP_VALUE, addr); + tc->setMiscRegNoEffect(misc_reg::TpValue, addr); return 0; } diff --git a/src/arch/mips/mt.hh b/src/arch/mips/mt.hh index adbbf526f2..a7b72d507f 100644 --- a/src/arch/mips/mt.hh +++ b/src/arch/mips/mt.hh @@ -41,6 +41,7 @@ #include "arch/mips/mt_constants.hh" #include "arch/mips/pcstate.hh" #include "arch/mips/pra_constants.hh" +#include "arch/mips/regs/int.hh" #include "arch/mips/regs/misc.hh" #include "base/bitfield.hh" #include "base/logging.hh" @@ -65,10 +66,8 @@ readRegOtherThread(ThreadContext *tc, const RegId ®, switch (reg.classValue()) { case IntRegClass: - return otc->readIntReg(reg.index()); - break; case FloatRegClass: - return otc->readFloatReg(reg.index()); + return otc->getReg(reg); break; case MiscRegClass: return otc->readMiscReg(reg.index()); @@ -89,10 +88,8 @@ setRegOtherThread(ThreadContext *tc, const RegId& reg, RegVal val, switch (reg.classValue()) { case IntRegClass: - return otc->setIntReg(reg.index(), val); - break; case FloatRegClass: - return otc->setFloatReg(reg.index(), val); + otc->setReg(reg, val); break; case MiscRegClass: return otc->setMiscReg(reg.index(), val); @@ -119,7 +116,7 @@ template inline unsigned getVirtProcNum(TC *tc) { - TCBindReg tcbind = tc->readMiscRegNoEffect(MISCREG_TC_BIND); + TCBindReg tcbind = tc->readMiscRegNoEffect(misc_reg::TcBind); return tcbind.curVPE; } @@ -127,7 +124,7 @@ template inline unsigned getTargetThread(TC *tc) { - VPEControlReg vpeCtrl = tc->readMiscRegNoEffect(MISCREG_VPE_CONTROL); + VPEControlReg vpeCtrl = tc->readMiscRegNoEffect(misc_reg::VpeControl); return vpeCtrl.targTC; } @@ -142,7 +139,7 @@ haltThread(TC *tc) // @TODO: Needs to check if this is a branch and if so, // take previous instruction auto &pc = tc->pcState().template as(); - 
tc->setMiscReg(MISCREG_TC_RESTART, pc.npc()); + tc->setMiscReg(misc_reg::TcRestart, pc.npc()); warn("%i: Halting thread %i in %s @ PC %x, setting restart PC to %x", curTick(), tc->threadId(), tc->getCpuPtr()->name(), @@ -156,7 +153,7 @@ restoreThread(TC *tc) { if (tc->status() != TC::Active) { // Restore PC from TCRestart - Addr restartPC = tc->readMiscRegNoEffect(MISCREG_TC_RESTART); + Addr restartPC = tc->readMiscRegNoEffect(misc_reg::TcRestart); // TODO: SET PC WITH AN EVENT INSTEAD OF INSTANTANEOUSLY tc->pcState(restartPC); @@ -171,34 +168,32 @@ template void forkThread(TC *tc, Fault &fault, int Rd_bits, int Rs, int Rt) { - MVPConf0Reg mvpConf = tc->readMiscRegNoEffect(MISCREG_MVP_CONF0); + MVPConf0Reg mvpConf = tc->readMiscRegNoEffect(misc_reg::MvpConf0); int num_threads = mvpConf.ptc + 1; int success = 0; for (ThreadID tid = 0; tid < num_threads && success == 0; tid++) { TCBindReg tidTCBind = - readRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_BIND), tid); - TCBindReg tcBind = tc->readMiscRegNoEffect(MISCREG_TC_BIND); + readRegOtherThread(tc, miscRegClass[misc_reg::TcBind], tid); + TCBindReg tcBind = tc->readMiscRegNoEffect(misc_reg::TcBind); if (tidTCBind.curVPE == tcBind.curVPE) { TCStatusReg tidTCStatus = - readRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_STATUS), - tid); + readRegOtherThread(tc, miscRegClass[misc_reg::TcStatus], tid); TCHaltReg tidTCHalt = - readRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_HALT), - tid); + readRegOtherThread(tc, miscRegClass[misc_reg::TcHalt], tid); if (tidTCStatus.da == 1 && tidTCHalt.h == 0 && tidTCStatus.a == 0 && success == 0) { - setRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_RESTART), - Rs, tid); - setRegOtherThread(tc, RegId(IntRegClass, Rd_bits), Rt, tid); + setRegOtherThread(tc, miscRegClass[misc_reg::TcRestart], Rs, + tid); + setRegOtherThread(tc, intRegClass[Rd_bits], Rt, tid); - StatusReg status = tc->readMiscReg(MISCREG_STATUS); - TCStatusReg tcStatus = tc->readMiscReg(MISCREG_TC_STATUS); 
+ StatusReg status = tc->readMiscReg(misc_reg::Status); + TCStatusReg tcStatus = tc->readMiscReg(misc_reg::TcStatus); // Set Run-State to Running tidTCStatus.rnst = 0; @@ -214,8 +209,8 @@ forkThread(TC *tc, Fault &fault, int Rd_bits, int Rs, int Rt) tidTCStatus.asid = tcStatus.asid; // Write Status Register - setRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_STATUS), - tidTCStatus, tid); + setRegOtherThread(tc, miscRegClass[misc_reg::TcStatus], + tidTCStatus, tid); // Mark As Successful Fork success = 1; @@ -227,9 +222,9 @@ forkThread(TC *tc, Fault &fault, int Rd_bits, int Rs, int Rt) if (success == 0) { VPEControlReg vpeControl = - tc->readMiscRegNoEffect(MISCREG_VPE_CONTROL); + tc->readMiscRegNoEffect(misc_reg::VpeControl); vpeControl.excpt = 1; - tc->setMiscReg(MISCREG_VPE_CONTROL, vpeControl); + tc->setMiscReg(misc_reg::VpeControl, vpeControl); fault = std::make_shared(); } } @@ -240,24 +235,21 @@ int yieldThread(TC *tc, Fault &fault, int src_reg, uint32_t yield_mask) { if (src_reg == 0) { - MVPConf0Reg mvpConf0 = tc->readMiscRegNoEffect(MISCREG_MVP_CONF0); + MVPConf0Reg mvpConf0 = tc->readMiscRegNoEffect(misc_reg::MvpConf0); ThreadID num_threads = mvpConf0.ptc + 1; int ok = 0; // Get Current VPE & TC numbers from calling thread - TCBindReg tcBind = tc->readMiscRegNoEffect(MISCREG_TC_BIND); + TCBindReg tcBind = tc->readMiscRegNoEffect(misc_reg::TcBind); for (ThreadID tid = 0; tid < num_threads; tid++) { TCStatusReg tidTCStatus = - readRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_STATUS), - tid); + readRegOtherThread(tc, miscRegClass[misc_reg::TcStatus], tid); TCHaltReg tidTCHalt = - readRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_HALT), - tid); + readRegOtherThread(tc, miscRegClass[misc_reg::TcHalt], tid); TCBindReg tidTCBind = - readRegOtherThread(tc, RegId(MiscRegClass, MISCREG_TC_BIND), - tid); + readRegOtherThread(tc, miscRegClass[misc_reg::TcBind], tid); if (tidTCBind.curVPE == tcBind.curVPE && tidTCBind.curTC == tcBind.curTC && @@ -269,24 
+261,24 @@ yieldThread(TC *tc, Fault &fault, int src_reg, uint32_t yield_mask) } if (ok == 1) { - TCStatusReg tcStatus = tc->readMiscRegNoEffect(MISCREG_TC_STATUS); + TCStatusReg tcStatus = tc->readMiscRegNoEffect(misc_reg::TcStatus); tcStatus.a = 0; - tc->setMiscReg(MISCREG_TC_STATUS, tcStatus); + tc->setMiscReg(misc_reg::TcStatus, tcStatus); warn("%i: Deactivating Hardware Thread Context #%i", curTick(), tc->threadId()); } } else if (src_reg > 0) { if ((src_reg & ~yield_mask) != 0) { - VPEControlReg vpeControl = tc->readMiscReg(MISCREG_VPE_CONTROL); + VPEControlReg vpeControl = tc->readMiscReg(misc_reg::VpeControl); vpeControl.excpt = 2; - tc->setMiscReg(MISCREG_VPE_CONTROL, vpeControl); + tc->setMiscReg(misc_reg::VpeControl, vpeControl); fault = std::make_shared(); } else { } } else if (src_reg != -2) { - TCStatusReg tcStatus = tc->readMiscRegNoEffect(MISCREG_TC_STATUS); + TCStatusReg tcStatus = tc->readMiscRegNoEffect(misc_reg::TcStatus); VPEControlReg vpeControl = - tc->readMiscRegNoEffect(MISCREG_VPE_CONTROL); + tc->readMiscRegNoEffect(misc_reg::VpeControl); if (vpeControl.ysi == 1 && tcStatus.dt == 1 ) { vpeControl.excpt = 4; @@ -307,14 +299,14 @@ updateStatusView(TC *tc) { // TCStatus' register view must be the same as // Status register view for CU, MX, KSU bits - TCStatusReg tcStatus = tc->readMiscRegNoEffect(MISCREG_TC_STATUS); - StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS); + TCStatusReg tcStatus = tc->readMiscRegNoEffect(misc_reg::TcStatus); + StatusReg status = tc->readMiscRegNoEffect(misc_reg::Status); status.cu = tcStatus.tcu; status.mx = tcStatus.tmx; status.ksu = tcStatus.tksu; - tc->setMiscRegNoEffect(MISCREG_STATUS, status); + tc->setMiscRegNoEffect(misc_reg::Status, status); } // TC will usually be a object derived from ThreadContext @@ -325,14 +317,14 @@ updateTCStatusView(TC *tc) { // TCStatus' register view must be the same as // Status register view for CU, MX, KSU bits - TCStatusReg tcStatus = 
tc->readMiscRegNoEffect(MISCREG_TC_STATUS); - StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS); + TCStatusReg tcStatus = tc->readMiscRegNoEffect(misc_reg::TcStatus); + StatusReg status = tc->readMiscRegNoEffect(misc_reg::Status); tcStatus.tcu = status.cu; tcStatus.tmx = status.mx; tcStatus.tksu = status.ksu; - tc->setMiscRegNoEffect(MISCREG_TC_STATUS, tcStatus); + tc->setMiscRegNoEffect(misc_reg::TcStatus, tcStatus); } } // namespace MipsISA diff --git a/src/arch/mips/process.cc b/src/arch/mips/process.cc index b0fe339fb2..37ece00ada 100644 --- a/src/arch/mips/process.cc +++ b/src/arch/mips/process.cc @@ -200,9 +200,9 @@ MipsProcess::argsInit(int pageSize) ThreadContext *tc = system->threads[contextIds[0]]; - tc->setIntReg(FirstArgumentReg, argc); - tc->setIntReg(FirstArgumentReg + 1, argv_array_base); - tc->setIntReg(StackPointerReg, memState->getStackMin()); + tc->setReg(int_reg::A0, argc); + tc->setReg(int_reg::A1, argv_array_base); + tc->setReg(int_reg::Sp, memState->getStackMin()); tc->pcState(getStartPC()); } diff --git a/src/arch/mips/regs/float.hh b/src/arch/mips/regs/float.hh index 52472f3ede..7c24e0704e 100644 --- a/src/arch/mips/regs/float.hh +++ b/src/arch/mips/regs/float.hh @@ -32,30 +32,111 @@ #include +#include "cpu/reg_class.hh" +#include "debug/FloatRegs.hh" + namespace gem5 { - namespace MipsISA { - -// Constants Related to the number of registers -const int NumFloatArchRegs = 32; -const int NumFloatSpecialRegs = 5; - -const int NumFloatRegs = NumFloatArchRegs + NumFloatSpecialRegs;// - -const uint32_t MIPS32_QNAN = 0x7fbfffff; -const uint64_t MIPS64_QNAN = 0x7ff7ffffffffffffULL; - -enum FPControlRegNums +namespace float_reg { - FLOATREG_FIR = NumFloatArchRegs, - FLOATREG_FCCR, - FLOATREG_FEXR, - FLOATREG_FENR, - FLOATREG_FCSR + +enum : RegIndex +{ + _F0Idx, + _F1Idx, + _F2Idx, + _F3Idx, + _F4Idx, + _F5Idx, + _F6Idx, + _F7Idx, + _F8Idx, + _F9Idx, + _F10Idx, + _F11Idx, + _F12Idx, + _F13Idx, + _F14Idx, + _F15Idx, + _F16Idx, + _F17Idx, + 
_F18Idx, + _F19Idx, + _F20Idx, + _F21Idx, + _F22Idx, + _F23Idx, + _F24Idx, + _F25Idx, + _F26Idx, + _F27Idx, + _F28Idx, + _F29Idx, + _F30Idx, + _F31Idx, + NumArchRegs, + + _FirIdx = NumArchRegs, + _FccrIdx, + _FexrIdx, + _FenrIdx, + _FcsrIdx, + + NumRegs, }; +} // namespace float_reg + +inline constexpr RegClass floatRegClass(FloatRegClass, FloatRegClassName, + float_reg::NumRegs, debug::FloatRegs); + +namespace float_reg +{ + +inline constexpr RegId + F0 = floatRegClass[_F0Idx], + F1 = floatRegClass[_F1Idx], + F2 = floatRegClass[_F2Idx], + F3 = floatRegClass[_F3Idx], + F4 = floatRegClass[_F4Idx], + F5 = floatRegClass[_F5Idx], + F6 = floatRegClass[_F6Idx], + F7 = floatRegClass[_F7Idx], + F8 = floatRegClass[_F8Idx], + F9 = floatRegClass[_F9Idx], + F10 = floatRegClass[_F10Idx], + F11 = floatRegClass[_F11Idx], + F12 = floatRegClass[_F12Idx], + F13 = floatRegClass[_F13Idx], + F14 = floatRegClass[_F14Idx], + F15 = floatRegClass[_F15Idx], + F16 = floatRegClass[_F16Idx], + F17 = floatRegClass[_F17Idx], + F18 = floatRegClass[_F18Idx], + F19 = floatRegClass[_F19Idx], + F20 = floatRegClass[_F20Idx], + F21 = floatRegClass[_F21Idx], + F22 = floatRegClass[_F22Idx], + F23 = floatRegClass[_F23Idx], + F24 = floatRegClass[_F24Idx], + F25 = floatRegClass[_F25Idx], + F26 = floatRegClass[_F26Idx], + F27 = floatRegClass[_F27Idx], + F28 = floatRegClass[_F28Idx], + F29 = floatRegClass[_F29Idx], + F30 = floatRegClass[_F30Idx], + F31 = floatRegClass[_F31Idx], + + Fir = floatRegClass[_FirIdx], + Fccr = floatRegClass[_FccrIdx], + Fexr = floatRegClass[_FexrIdx], + Fenr = floatRegClass[_FenrIdx], + Fcsr = floatRegClass[_FcsrIdx]; + +} // namespace float_reg + enum FCSRBits { Inexact = 1, @@ -73,6 +154,9 @@ enum FCSRFields Cause_Field = 11 }; +const uint32_t MIPS32_QNAN = 0x7fbfffff; +const uint64_t MIPS64_QNAN = 0x7ff7ffffffffffffULL; + } // namespace MipsISA } // namespace gem5 diff --git a/src/arch/mips/regs/int.hh b/src/arch/mips/regs/int.hh index 65f277960f..f891096d78 100644 --- 
a/src/arch/mips/regs/int.hh +++ b/src/arch/mips/regs/int.hh @@ -30,47 +30,184 @@ #ifndef __ARCH_MIPS_REGS_INT_HH__ #define __ARCH_MIPS_REGS_INT_HH__ +#include "cpu/reg_class.hh" +#include "debug/IntRegs.hh" + namespace gem5 { - namespace MipsISA { // Constants Related to the number of registers -const int NumIntArchRegs = 32; -const int NumIntSpecialRegs = 9; const int MaxShadowRegSets = 16; // Maximum number of shadow register sets -const int NumIntRegs = NumIntArchRegs + NumIntSpecialRegs; //HI & LO Regs -enum MiscIntRegNums +namespace int_reg { - INTREG_LO = NumIntArchRegs, - INTREG_DSP_LO0 = INTREG_LO, - INTREG_HI, - INTREG_DSP_HI0 = INTREG_HI, - INTREG_DSP_ACX0, - INTREG_DSP_LO1, - INTREG_DSP_HI1, - INTREG_DSP_ACX1, - INTREG_DSP_LO2, - INTREG_DSP_HI2, - INTREG_DSP_ACX2, - INTREG_DSP_LO3, - INTREG_DSP_HI3, - INTREG_DSP_ACX3, - INTREG_DSP_CONTROL + +enum : RegIndex +{ + _ZeroIdx = 0, + + _AtIdx = 1, + + _V0Idx = 2, + _V1Idx = 3, + + _A0Idx = 4, + _A1Idx = 5, + _A2Idx = 6, + _A3Idx = 7, + + _T0Idx = 8, + _T1Idx = 9, + _T2Idx = 10, + _T3Idx = 11, + _T4Idx = 12, + _T5Idx = 13, + _T6Idx = 14, + _T7Idx = 15, + + _S0Idx = 16, + _S1Idx = 17, + _S2Idx = 18, + _S3Idx = 19, + _S4Idx = 20, + _S5Idx = 21, + _S6Idx = 22, + _S7Idx = 23, + + _T8Idx = 24, + _T9Idx = 25, + + _K0Idx = 26, + _K1Idx = 27, + + _GpIdx = 28, + + _SpIdx = 29, + + _S8Idx = 30, + _FpIdx = _S8Idx, + + _RaIdx = 31, + + NumArchRegs, + + _LoIdx = NumArchRegs, + _DspLo0Idx = _LoIdx, + _HiIdx, + _DspHi0Idx = _HiIdx, + _DspAcx0Idx, + + _DspLo1Idx, + _DspHi1Idx, + _DspAcx1Idx, + + _DspLo2Idx, + _DspHi2Idx, + _DspAcx2Idx, + + _DspLo3Idx, + _DspHi3Idx, + _DspAcx3Idx, + + _DspControlIdx, + + NumRegs }; -// semantically meaningful register indices -const int SyscallSuccessReg = 7; -const int FirstArgumentReg = 4; -const int ReturnValueReg = 2; +} // namespace int_reg -const int StackPointerReg = 29; +inline constexpr RegClass intRegClass(IntRegClass, IntRegClassName, + int_reg::NumRegs, debug::IntRegs); -const int 
SyscallPseudoReturnReg = 3; +namespace int_reg +{ +inline constexpr RegId + // Zero register. + Zero = intRegClass[_ZeroIdx], + + // Assembly temporary. + At = intRegClass[_AtIdx], + + // Value returned by subroutine. + V0 = intRegClass[_V0Idx], + V1 = intRegClass[_V1Idx], + + // Arguments for subroutine. + A0 = intRegClass[_A0Idx], + A1 = intRegClass[_A1Idx], + A2 = intRegClass[_A2Idx], + A3 = intRegClass[_A3Idx], + + // Temporaries. + T0 = intRegClass[_T0Idx], + T1 = intRegClass[_T1Idx], + T2 = intRegClass[_T2Idx], + T3 = intRegClass[_T3Idx], + T4 = intRegClass[_T4Idx], + T5 = intRegClass[_T5Idx], + T6 = intRegClass[_T6Idx], + T7 = intRegClass[_T7Idx], + T8 = intRegClass[_T8Idx], + T9 = intRegClass[_T9Idx], + + // Subroutine registers. + S0 = intRegClass[_S0Idx], + S1 = intRegClass[_S1Idx], + S2 = intRegClass[_S2Idx], + S3 = intRegClass[_S3Idx], + S4 = intRegClass[_S4Idx], + S5 = intRegClass[_S5Idx], + S6 = intRegClass[_S6Idx], + S7 = intRegClass[_S7Idx], + + // For use in an interrupt/trap handler. + K0 = intRegClass[_K0Idx], + K1 = intRegClass[_K1Idx], + + // Global pointer. + Gp = intRegClass[_GpIdx], + + // Stack pointer. + Sp = intRegClass[_SpIdx], + + // Frame pointer. + Fp = intRegClass[_FpIdx], + + // Return address. + Ra = intRegClass[_RaIdx], + + DspLo0 = intRegClass[_DspLo0Idx], + DspHi0 = intRegClass[_DspHi0Idx], + DspAcx0 = intRegClass[_DspAcx0Idx], + + DspLo1 = intRegClass[_DspLo1Idx], + DspHi1 = intRegClass[_DspHi1Idx], + DspAcx1 = intRegClass[_DspAcx1Idx], + + DspLo2 = intRegClass[_DspLo2Idx], + DspHi2 = intRegClass[_DspHi2Idx], + DspAcx2 = intRegClass[_DspAcx2Idx], + + DspLo3 = intRegClass[_DspLo3Idx], + DspHi3 = intRegClass[_DspHi3Idx], + DspAcx3 = intRegClass[_DspAcx3Idx], + + DspControl = intRegClass[_DspControlIdx]; + +// Register aliases. 
+inline constexpr auto + &S8 = Fp, + + &Lo = DspLo0, + &Hi = DspHi0, + + &SyscallSuccess = A3; + +} // namespace int_reg } // namespace MipsISA } // namespace gem5 diff --git a/src/arch/mips/regs/misc.hh b/src/arch/mips/regs/misc.hh index 65635e82fa..0521d85487 100644 --- a/src/arch/mips/regs/misc.hh +++ b/src/arch/mips/regs/misc.hh @@ -30,11 +30,15 @@ #ifndef __ARCH_MIPS_REGS_MISC_HH__ #define __ARCH_MIPS_REGS_MISC_HH__ +#include "cpu/reg_class.hh" +#include "debug/MiscRegs.hh" + namespace gem5 { - namespace MipsISA { +namespace misc_reg +{ // Enumerate names for 'Control' Registers in the CPU // Reference MIPS32 Arch. for Programmers, Vol. III, Ch.8 @@ -43,157 +47,162 @@ namespace MipsISA // The first set of names classify the CP0 names as Register Banks // for easy indexing when using the 'RD + SEL' index combination // in CP0 instructions. -enum MiscRegIndex +enum : RegIndex { - MISCREG_INDEX = 0, //Bank 0: 0 - 3 - MISCREG_MVP_CONTROL, - MISCREG_MVP_CONF0, - MISCREG_MVP_CONF1, + Index = 0, //Bank 0: 0 - 3 + MvpControl, + MvpConf0, + MvpConf1, - MISCREG_CP0_RANDOM = 8, //Bank 1: 8 - 15 - MISCREG_VPE_CONTROL, - MISCREG_VPE_CONF0, - MISCREG_VPE_CONF1, - MISCREG_YQMASK, - MISCREG_VPE_SCHEDULE, - MISCREG_VPE_SCHEFBACK, - MISCREG_VPE_OPT, + Cp0Random = 8, //Bank 1: 8 - 15 + VpeControl, + VpeConf0, + VpeConf1, + Yqmask, + VpeSchedule, + VpeSchefback, + VpeOpt, - MISCREG_ENTRYLO0 = 16, //Bank 2: 16 - 23 - MISCREG_TC_STATUS, - MISCREG_TC_BIND, - MISCREG_TC_RESTART, - MISCREG_TC_HALT, - MISCREG_TC_CONTEXT, - MISCREG_TC_SCHEDULE, - MISCREG_TC_SCHEFBACK, + Entrylo0 = 16, //Bank 2: 16 - 23 + TcStatus, + TcBind, + TcRestart, + TcHalt, + TcContext, + TcSchedule, + TcSchefback, - MISCREG_ENTRYLO1 = 24, // Bank 3: 24 + Entrylo1 = 24, // Bank 3: 24 - MISCREG_CONTEXT = 32, // Bank 4: 32 - 33 - MISCREG_CONTEXT_CONFIG, + Context = 32, // Bank 4: 32 - 33 + ContextConfig, - MISCREG_PAGEMASK = 40, //Bank 5: 40 - 41 - MISCREG_PAGEGRAIN = 41, + Pagemask = 40, //Bank 5: 40 - 41 + 
Pagegrain = 41, - MISCREG_WIRED = 48, //Bank 6:48-55 - MISCREG_SRS_CONF0, - MISCREG_SRS_CONF1, - MISCREG_SRS_CONF2, - MISCREG_SRS_CONF3, - MISCREG_SRS_CONF4, + Wired = 48, //Bank 6:48-55 + SrsConf0, + SrsConf1, + SrsConf2, + SrsConf3, + SrsConf4, - MISCREG_HWRENA = 56, //Bank 7: 56-63 + Hwrena = 56, //Bank 7: 56-63 - MISCREG_BADVADDR = 64, //Bank 8: 64-71 + Badvaddr = 64, //Bank 8: 64-71 - MISCREG_COUNT = 72, //Bank 9: 72-79 + Count = 72, //Bank 9: 72-79 - MISCREG_ENTRYHI = 80, //Bank 10: 80-87 + Entryhi = 80, //Bank 10: 80-87 - MISCREG_COMPARE = 88, //Bank 11: 88-95 + Compare = 88, //Bank 11: 88-95 - MISCREG_STATUS = 96, //Bank 12: 96-103 - MISCREG_INTCTL, - MISCREG_SRSCTL, - MISCREG_SRSMAP, + Status = 96, //Bank 12: 96-103 + Intctl, + Srsctl, + Srsmap, - MISCREG_CAUSE = 104, //Bank 13: 104-111 + Cause = 104, //Bank 13: 104-111 - MISCREG_EPC = 112, //Bank 14: 112-119 + Epc = 112, //Bank 14: 112-119 - MISCREG_PRID = 120, //Bank 15: 120-127, - MISCREG_EBASE, + Prid = 120, //Bank 15: 120-127, + Ebase, - MISCREG_CONFIG = 128, //Bank 16: 128-135 - MISCREG_CONFIG1, - MISCREG_CONFIG2, - MISCREG_CONFIG3, - MISCREG_CONFIG4, - MISCREG_CONFIG5, - MISCREG_CONFIG6, - MISCREG_CONFIG7, + Config = 128, //Bank 16: 128-135 + Config1, + Config2, + Config3, + Config4, + Config5, + Config6, + Config7, - MISCREG_LLADDR = 136, //Bank 17: 136-143 + Lladdr = 136, //Bank 17: 136-143 - MISCREG_WATCHLO0 = 144, //Bank 18: 144-151 - MISCREG_WATCHLO1, - MISCREG_WATCHLO2, - MISCREG_WATCHLO3, - MISCREG_WATCHLO4, - MISCREG_WATCHLO5, - MISCREG_WATCHLO6, - MISCREG_WATCHLO7, + Watchlo0 = 144, //Bank 18: 144-151 + Watchlo1, + Watchlo2, + Watchlo3, + Watchlo4, + Watchlo5, + Watchlo6, + Watchlo7, - MISCREG_WATCHHI0 = 152, //Bank 19: 152-159 - MISCREG_WATCHHI1, - MISCREG_WATCHHI2, - MISCREG_WATCHHI3, - MISCREG_WATCHHI4, - MISCREG_WATCHHI5, - MISCREG_WATCHHI6, - MISCREG_WATCHHI7, + Watchhi0 = 152, //Bank 19: 152-159 + Watchhi1, + Watchhi2, + Watchhi3, + Watchhi4, + Watchhi5, + Watchhi6, + Watchhi7, - 
MISCREG_XCCONTEXT64 = 160, //Bank 20: 160-167 + Xccontext64 = 160, //Bank 20: 160-167 //Bank 21: 168-175 //Bank 22: 176-183 - MISCREG_DEBUG = 184, //Bank 23: 184-191 - MISCREG_TRACE_CONTROL1, - MISCREG_TRACE_CONTROL2, - MISCREG_USER_TRACE_DATA, - MISCREG_TRACE_BPC, + Debug = 184, //Bank 23: 184-191 + TraceControl1, + TraceControl2, + UserTraceData, + TraceBpc, - MISCREG_DEPC = 192, //Bank 24: 192-199 + Depc = 192, //Bank 24: 192-199 - MISCREG_PERFCNT0 = 200, //Bank 25: 200-207 - MISCREG_PERFCNT1, - MISCREG_PERFCNT2, - MISCREG_PERFCNT3, - MISCREG_PERFCNT4, - MISCREG_PERFCNT5, - MISCREG_PERFCNT6, - MISCREG_PERFCNT7, + Perfcnt0 = 200, //Bank 25: 200-207 + Perfcnt1, + Perfcnt2, + Perfcnt3, + Perfcnt4, + Perfcnt5, + Perfcnt6, + Perfcnt7, - MISCREG_ERRCTL = 208, //Bank 26: 208-215 + Errctl = 208, //Bank 26: 208-215 - MISCREG_CACHEERR0 = 216, //Bank 27: 216-223 - MISCREG_CACHEERR1, - MISCREG_CACHEERR2, - MISCREG_CACHEERR3, + Cacheerr0 = 216, //Bank 27: 216-223 + Cacheerr1, + Cacheerr2, + Cacheerr3, - MISCREG_TAGLO0 = 224, //Bank 28: 224-231 - MISCREG_DATALO1, - MISCREG_TAGLO2, - MISCREG_DATALO3, - MISCREG_TAGLO4, - MISCREG_DATALO5, - MISCREG_TAGLO6, - MISCREG_DATALO7, + Taglo0 = 224, //Bank 28: 224-231 + Datalo1, + Taglo2, + Datalo3, + Taglo4, + Datalo5, + Taglo6, + Datalo7, - MISCREG_TAGHI0 = 232, //Bank 29: 232-239 - MISCREG_DATAHI1, - MISCREG_TAGHI2, - MISCREG_DATAHI3, - MISCREG_TAGHI4, - MISCREG_DATAHI5, - MISCREG_TAGHI6, - MISCREG_DATAHI7, + Taghi0 = 232, //Bank 29: 232-239 + Datahi1, + Taghi2, + Datahi3, + Taghi4, + Datahi5, + Taghi6, + Datahi7, - MISCREG_ERROR_EPC = 240, //Bank 30: 240-247 + ErrorEpc = 240, //Bank 30: 240-247 - MISCREG_DESAVE = 248, //Bank 31: 248-256 + Desave = 248, //Bank 31: 248-256 - MISCREG_LLFLAG = 257, - MISCREG_TP_VALUE, + Llflag = 257, + TpValue, - MISCREG_NUMREGS + NumRegs }; +} // namespace misc_reg + +inline constexpr RegClass miscRegClass(MiscRegClass, MiscRegClassName, + misc_reg::NumRegs, debug::MiscRegs); + } // namespace MipsISA } 
// namespace gem5 diff --git a/src/arch/mips/remote_gdb.cc b/src/arch/mips/remote_gdb.cc index ad393002f1..fcf31e109d 100644 --- a/src/arch/mips/remote_gdb.cc +++ b/src/arch/mips/remote_gdb.cc @@ -173,16 +173,18 @@ RemoteGDB::MipsGdbRegCache::getRegs(ThreadContext *context) { DPRINTF(GDBAcc, "getregs in remotegdb \n"); - for (int i = 0; i < 32; i++) r.gpr[i] = context->readIntReg(i); - r.sr = context->readMiscRegNoEffect(MISCREG_STATUS); - r.lo = context->readIntReg(INTREG_LO); - r.hi = context->readIntReg(INTREG_HI); - r.badvaddr = context->readMiscRegNoEffect(MISCREG_BADVADDR); - r.cause = context->readMiscRegNoEffect(MISCREG_CAUSE); + for (int i = 0; i < 32; i++) + r.gpr[i] = context->getReg(intRegClass[i]); + r.sr = context->readMiscRegNoEffect(misc_reg::Status); + r.lo = context->getReg(int_reg::Lo); + r.hi = context->getReg(int_reg::Hi); + r.badvaddr = context->readMiscRegNoEffect(misc_reg::Badvaddr); + r.cause = context->readMiscRegNoEffect(misc_reg::Cause); r.pc = context->pcState().instAddr(); - for (int i = 0; i < 32; i++) r.fpr[i] = context->readFloatReg(i); - r.fsr = context->readFloatReg(FLOATREG_FCCR); - r.fir = context->readFloatReg(FLOATREG_FIR); + for (int i = 0; i < 32; i++) + r.fpr[i] = context->getReg(floatRegClass[i]); + r.fsr = context->getReg(float_reg::Fccr); + r.fir = context->getReg(float_reg::Fir); } void @@ -190,16 +192,18 @@ RemoteGDB::MipsGdbRegCache::setRegs(ThreadContext *context) const { DPRINTF(GDBAcc, "setregs in remotegdb \n"); - for (int i = 1; i < 32; i++) context->setIntReg(i, r.gpr[i]); - context->setMiscRegNoEffect(MISCREG_STATUS, r.sr); - context->setIntReg(INTREG_LO, r.lo); - context->setIntReg(INTREG_HI, r.hi); - context->setMiscRegNoEffect(MISCREG_BADVADDR, r.badvaddr); - context->setMiscRegNoEffect(MISCREG_CAUSE, r.cause); + for (int i = 1; i < 32; i++) + context->setReg(intRegClass[i], r.gpr[i]); + context->setMiscRegNoEffect(misc_reg::Status, r.sr); + context->setReg(int_reg::Lo, r.lo); + context->setReg(int_reg::Hi, 
r.hi); + context->setMiscRegNoEffect(misc_reg::Badvaddr, r.badvaddr); + context->setMiscRegNoEffect(misc_reg::Cause, r.cause); context->pcState(r.pc); - for (int i = 0; i < 32; i++) context->setFloatReg(i, r.fpr[i]); - context->setFloatReg(FLOATREG_FCCR, r.fsr); - context->setFloatReg(FLOATREG_FIR, r.fir); + for (int i = 0; i < 32; i++) + context->setReg(floatRegClass[i], r.fpr[i]); + context->setReg(float_reg::Fccr, r.fsr); + context->setReg(float_reg::Fir, r.fir); } BaseGdbRegCache* diff --git a/src/arch/mips/se_workload.cc b/src/arch/mips/se_workload.cc index 75e1ac1806..8bd5146751 100644 --- a/src/arch/mips/se_workload.cc +++ b/src/arch/mips/se_workload.cc @@ -33,8 +33,9 @@ namespace gem5 namespace MipsISA { -const std::vector SEWorkload::SyscallABI::ArgumentRegs = { - 4, 5, 6, 7, 8, 9 +const std::vector SEWorkload::SyscallABI::ArgumentRegs = { + int_reg::A0, int_reg::A1, int_reg::A2, + int_reg::A3, int_reg::T0, int_reg::T1 }; } // namespace MipsISA diff --git a/src/arch/mips/se_workload.hh b/src/arch/mips/se_workload.hh index c10ceb031a..dc6f1dd5e3 100644 --- a/src/arch/mips/se_workload.hh +++ b/src/arch/mips/se_workload.hh @@ -44,7 +44,7 @@ namespace MipsISA class SEWorkload : public gem5::SEWorkload { public: - using Params = MipsSEWorkloadParams; + PARAMS(MipsSEWorkload); SEWorkload(const Params &p, Addr page_shift) : gem5::SEWorkload(p, page_shift) @@ -54,14 +54,15 @@ class SEWorkload : public gem5::SEWorkload setSystem(System *sys) override { gem5::SEWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } loader::Arch getArch() const override { return loader::Mips; } struct SyscallABI : public GenericSyscallABI64 { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; }; @@ -79,15 +80,15 @@ struct Result { if (ret.successful()) { // no error - tc->setIntReg(MipsISA::SyscallSuccessReg, 0); - tc->setIntReg(MipsISA::ReturnValueReg, ret.returnValue()); 
+ tc->setReg(MipsISA::int_reg::SyscallSuccess, (RegVal)0); + tc->setReg(MipsISA::int_reg::V0, ret.returnValue()); } else { // got an error, return details - tc->setIntReg(MipsISA::SyscallSuccessReg, (uint32_t)(-1)); - tc->setIntReg(MipsISA::ReturnValueReg, ret.errnoValue()); + tc->setReg(MipsISA::int_reg::SyscallSuccess, (uint32_t)(-1)); + tc->setReg(MipsISA::int_reg::V0, ret.errnoValue()); } if (ret.count() > 1) - tc->setIntReg(MipsISA::SyscallPseudoReturnReg, ret.value2()); + tc->setReg(MipsISA::int_reg::V1, ret.value2()); } }; diff --git a/src/arch/null/SConsopts b/src/arch/null/SConsopts index 6355ce314b..2d552a1dc8 100644 --- a/src/arch/null/SConsopts +++ b/src/arch/null/SConsopts @@ -1,16 +1,4 @@ -# -*- mode:python -*- - -# Copyright (c) 2013 ARM Limited -# All rights reserved -# -# The license below extends only to copyright in the software and shall -# not be construed as granting a license to any other intellectual -# property including but not limited to intellectual property relating -# to a hardware implementation of the functionality of the software -# licensed hereunder. You may use the software subject to the license -# terms below provided that you ensure that this notice is replicated -# unmodified and in its entirety in all distributions of the software, -# modified or unmodified, in source code or in binary form. +# Copyright 2021 Google, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -36,5 +24,4 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Import('*') - -main.Append(ALL_ISAS=['null']) +sticky_vars.Add(BoolVariable('USE_NULL_ISA', 'Enable NULL ISA support', False)) diff --git a/src/arch/power/AtomicSimpleCPU.py b/src/arch/power/AtomicSimpleCPU.py deleted file mode 100644 index 55b6b960b1..0000000000 --- a/src/arch/power/AtomicSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.PowerCPU import PowerAtomicSimpleCPU - -AtomicSimpleCPU = PowerAtomicSimpleCPU diff --git a/src/arch/power/NonCachingSimpleCPU.py b/src/arch/power/NonCachingSimpleCPU.py deleted file mode 100644 index 171a90d9a1..0000000000 --- a/src/arch/power/NonCachingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from m5.objects.PowerCPU import PowerNonCachingSimpleCPU - -NonCachingSimpleCPU = PowerNonCachingSimpleCPU diff --git a/src/arch/power/PowerCPU.py b/src/arch/power/PowerCPU.py index bf7dc91d2e..f19c6f6a2f 100644 --- a/src/arch/power/PowerCPU.py +++ b/src/arch/power/PowerCPU.py @@ -27,25 +27,35 @@ from m5.objects.BaseAtomicSimpleCPU import BaseAtomicSimpleCPU from m5.objects.BaseNonCachingSimpleCPU import BaseNonCachingSimpleCPU from m5.objects.BaseTimingSimpleCPU import BaseTimingSimpleCPU from m5.objects.BaseO3CPU import BaseO3CPU +from m5.objects.BaseMinorCPU import BaseMinorCPU from m5.objects.PowerDecoder import PowerDecoder from m5.objects.PowerMMU import PowerMMU from m5.objects.PowerInterrupts import PowerInterrupts from m5.objects.PowerISA import PowerISA + class PowerCPU: ArchDecoder = PowerDecoder ArchMMU = PowerMMU ArchInterrupts = PowerInterrupts ArchISA = PowerISA + class PowerAtomicSimpleCPU(BaseAtomicSimpleCPU, PowerCPU): mmu = PowerMMU() + class PowerNonCachingSimpleCPU(BaseNonCachingSimpleCPU, PowerCPU): mmu = PowerMMU() + class PowerTimingSimpleCPU(BaseTimingSimpleCPU, PowerCPU): mmu = PowerMMU() + class PowerO3CPU(BaseO3CPU, PowerCPU): mmu = PowerMMU() + + +class PowerMinorCPU(BaseMinorCPU, PowerCPU): + mmu = PowerMMU() diff --git a/src/arch/power/PowerDecoder.py b/src/arch/power/PowerDecoder.py index 7298c40249..bf0973a3f9 100644 --- a/src/arch/power/PowerDecoder.py +++ b/src/arch/power/PowerDecoder.py @@ -25,7 +25,8 @@ from m5.objects.InstDecoder import InstDecoder + class PowerDecoder(InstDecoder): - type = 'PowerDecoder' - cxx_class = 'gem5::PowerISA::Decoder' + type = "PowerDecoder" + cxx_class = "gem5::PowerISA::Decoder" cxx_header = "arch/power/decoder.hh" diff --git a/src/arch/power/PowerISA.py b/src/arch/power/PowerISA.py index d6146ca3e0..c4334ed62b 100644 --- a/src/arch/power/PowerISA.py +++ b/src/arch/power/PowerISA.py @@ -35,7 +35,8 @@ from m5.objects.BaseISA import BaseISA + class PowerISA(BaseISA): - type = 'PowerISA' - 
cxx_class = 'gem5::PowerISA::ISA' + type = "PowerISA" + cxx_class = "gem5::PowerISA::ISA" cxx_header = "arch/power/isa.hh" diff --git a/src/arch/power/PowerInterrupts.py b/src/arch/power/PowerInterrupts.py index 2ee91e3ab4..1b7f5d348f 100644 --- a/src/arch/power/PowerInterrupts.py +++ b/src/arch/power/PowerInterrupts.py @@ -26,7 +26,8 @@ from m5.objects.BaseInterrupts import BaseInterrupts + class PowerInterrupts(BaseInterrupts): - type = 'PowerInterrupts' - cxx_class = 'gem5::PowerISA::Interrupts' - cxx_header = 'arch/power/interrupts.hh' + type = "PowerInterrupts" + cxx_class = "gem5::PowerISA::Interrupts" + cxx_header = "arch/power/interrupts.hh" diff --git a/src/arch/power/PowerMMU.py b/src/arch/power/PowerMMU.py index aaf288a78a..932eb5c864 100644 --- a/src/arch/power/PowerMMU.py +++ b/src/arch/power/PowerMMU.py @@ -38,9 +38,10 @@ from m5.objects.BaseMMU import BaseMMU from m5.objects.PowerTLB import PowerTLB + class PowerMMU(BaseMMU): - type = 'PowerMMU' - cxx_class = 'gem5::PowerISA::MMU' - cxx_header = 'arch/power/mmu.hh' + type = "PowerMMU" + cxx_class = "gem5::PowerISA::MMU" + cxx_header = "arch/power/mmu.hh" itb = PowerTLB(entry_type="instruction") dtb = PowerTLB(entry_type="data") diff --git a/src/arch/power/PowerSeWorkload.py b/src/arch/power/PowerSeWorkload.py index 2b081f292d..162104d0dd 100644 --- a/src/arch/power/PowerSeWorkload.py +++ b/src/arch/power/PowerSeWorkload.py @@ -27,18 +27,22 @@ from m5.params import * from m5.objects.Workload import SEWorkload + class PowerSEWorkload(SEWorkload): - type = 'PowerSEWorkload' + type = "PowerSEWorkload" cxx_header = "arch/power/se_workload.hh" - cxx_class = 'gem5::PowerISA::SEWorkload' + cxx_class = "gem5::PowerISA::SEWorkload" abstract = True + class PowerEmuLinux(PowerSEWorkload): - type = 'PowerEmuLinux' + type = "PowerEmuLinux" cxx_header = "arch/power/linux/se_workload.hh" - cxx_class = 'gem5::PowerISA::EmuLinux' + cxx_class = "gem5::PowerISA::EmuLinux" @classmethod def _is_compatible_with(cls, obj): 
- return obj.get_arch() in ('power', 'power64') and \ - obj.get_op_sys() in ('linux', 'unknown') + return obj.get_arch() in ("power", "power64") and obj.get_op_sys() in ( + "linux", + "unknown", + ) diff --git a/src/arch/power/PowerTLB.py b/src/arch/power/PowerTLB.py index 39f0a440a3..32c4a68940 100644 --- a/src/arch/power/PowerTLB.py +++ b/src/arch/power/PowerTLB.py @@ -31,8 +31,9 @@ from m5.params import * from m5.objects.BaseTLB import BaseTLB + class PowerTLB(BaseTLB): - type = 'PowerTLB' - cxx_class = 'gem5::PowerISA::TLB' - cxx_header = 'arch/power/tlb.hh' + type = "PowerTLB" + cxx_class = "gem5::PowerISA::TLB" + cxx_header = "arch/power/tlb.hh" size = Param.Int(64, "TLB size") diff --git a/src/arch/power/SConscript b/src/arch/power/SConscript index ab96d494b6..8ddb96694a 100644 --- a/src/arch/power/SConscript +++ b/src/arch/power/SConscript @@ -30,6 +30,9 @@ Import('*') +if env['USE_POWER_ISA']: + env.TagImplies('power isa', 'gem5 lib') + Source('decoder.cc', tags='power isa') Source('faults.cc', tags='power isa') Source('insts/branch.cc', tags='power isa') @@ -56,10 +59,6 @@ SimObject('PowerSeWorkload.py', sim_objects=[ SimObject('PowerTLB.py', sim_objects=['PowerTLB'], tags='power isa') SimObject('PowerCPU.py', sim_objects=[], tags='power isa') -SimObject('AtomicSimpleCPU.py', sim_objects=[], tags='power isa') -SimObject('TimingSimpleCPU.py', sim_objects=[], tags='power isa') -SimObject('NonCachingSimpleCPU.py', sim_objects=[], tags='power isa') -SimObject('O3CPU.py', sim_objects=[], tags='power isa') DebugFlag('Power', tags='power isa') diff --git a/src/arch/power/SConsopts b/src/arch/power/SConsopts index cb136fe777..099f37553a 100644 --- a/src/arch/power/SConsopts +++ b/src/arch/power/SConsopts @@ -1,7 +1,4 @@ -# -*- mode:python -*- - -# Copyright (c) 2009 The University of Edinburgh -# All rights reserved. +# Copyright 2021 Google, Inc. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,5 +24,5 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Import('*') - -main.Append(ALL_ISAS=['power']) +sticky_vars.Add(BoolVariable('USE_POWER_ISA', 'Enable POWER ISA support', + False)) diff --git a/src/arch/power/TimingSimpleCPU.py b/src/arch/power/TimingSimpleCPU.py deleted file mode 100644 index 5a9cfa7052..0000000000 --- a/src/arch/power/TimingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.PowerCPU import PowerTimingSimpleCPU - -TimingSimpleCPU = PowerTimingSimpleCPU diff --git a/src/arch/power/decoder.hh b/src/arch/power/decoder.hh index 8668095ebb..5c2bd073e9 100644 --- a/src/arch/power/decoder.hh +++ b/src/arch/power/decoder.hh @@ -40,10 +40,11 @@ namespace gem5 { +class BaseISA; + namespace PowerISA { -class ISA; class Decoder : public InstDecoder { protected: diff --git a/src/arch/power/isa.cc b/src/arch/power/isa.cc index ff9f9b66d8..80c984cfc4 100644 --- a/src/arch/power/isa.cc +++ b/src/arch/power/isa.cc @@ -41,9 +41,6 @@ #include "arch/power/regs/int.hh" #include "arch/power/regs/misc.hh" #include "cpu/thread_context.hh" -#include "debug/FloatRegs.hh" -#include "debug/IntRegs.hh" -#include "debug/MiscRegs.hh" #include "params/PowerISA.hh" namespace gem5 @@ -52,15 +49,26 @@ namespace gem5 namespace PowerISA { +namespace +{ + +RegClass vecRegClass(VecRegClass, VecRegClassName, 1, debug::IntRegs); +RegClass vecElemClass(VecElemClass, VecElemClassName, 2, debug::IntRegs); +RegClass vecPredRegClass(VecPredRegClass, VecPredRegClassName, 1, + debug::IntRegs); +RegClass ccRegClass(CCRegClass, CCRegClassName, 0, debug::IntRegs); + +} // anonymous namespace + ISA::ISA(const Params &p) : BaseISA(p) { - _regClasses.emplace_back(int_reg::NumRegs, debug::IntRegs); - _regClasses.emplace_back(float_reg::NumRegs, debug::FloatRegs); - _regClasses.emplace_back(1, debug::IntRegs); - _regClasses.emplace_back(2, 
debug::IntRegs); - _regClasses.emplace_back(1, debug::IntRegs); - _regClasses.emplace_back(0, debug::IntRegs); - _regClasses.emplace_back(NUM_MISCREGS, debug::MiscRegs); + _regClasses.push_back(&intRegClass); + _regClasses.push_back(&floatRegClass); + _regClasses.push_back(&vecRegClass); + _regClasses.push_back(&vecElemClass); + _regClasses.push_back(&vecPredRegClass); + _regClasses.push_back(&ccRegClass); + _regClasses.push_back(&miscRegClass); clear(); } @@ -68,16 +76,12 @@ void ISA::copyRegsFrom(ThreadContext *src) { // First loop through the integer registers. - for (int i = 0; i < int_reg::NumRegs; ++i) { - RegId reg(IntRegClass, i); - tc->setReg(reg, src->getReg(reg)); - } + for (auto &id: intRegClass) + tc->setReg(id, src->getReg(id)); // Then loop through the floating point registers. - for (int i = 0; i < float_reg::NumRegs; ++i) { - RegId reg(FloatRegClass, i); - tc->setReg(reg, src->getReg(reg)); - } + for (auto &id: floatRegClass) + tc->setReg(id, src->getReg(id)); //TODO Copy misc. 
registers diff --git a/src/arch/power/isa.hh b/src/arch/power/isa.hh index 6c42f3271e..4af9a1c9a6 100644 --- a/src/arch/power/isa.hh +++ b/src/arch/power/isa.hh @@ -53,88 +53,37 @@ namespace PowerISA class ISA : public BaseISA { protected: - RegVal dummy; RegVal miscRegs[NUM_MISCREGS]; public: - void clear() {} - PCStateBase * newPCState(Addr new_inst_addr=0) const override { return new PCState(new_inst_addr); } - public: RegVal - readMiscRegNoEffect(int misc_reg) const + readMiscRegNoEffect(RegIndex idx) const override { fatal("Power does not currently have any misc regs defined\n"); - return dummy; } RegVal - readMiscReg(int misc_reg) - { - fatal("Power does not currently have any misc regs defined\n"); - return dummy; - } - - void - setMiscRegNoEffect(int misc_reg, RegVal val) + readMiscReg(RegIndex idx) override { fatal("Power does not currently have any misc regs defined\n"); } void - setMiscReg(int misc_reg, RegVal val) + setMiscRegNoEffect(RegIndex idx, RegVal val) override { fatal("Power does not currently have any misc regs defined\n"); } - RegId flattenRegId(const RegId& regId) const { return regId; } - - int - flattenIntIndex(int reg) const + void + setMiscReg(RegIndex idx, RegVal val) override { - return reg; - } - - int - flattenFloatIndex(int reg) const - { - return reg; - } - - int - flattenVecIndex(int reg) const - { - return reg; - } - - int - flattenVecElemIndex(int reg) const - { - return reg; - } - - int - flattenVecPredIndex(int reg) const - { - return reg; - } - - // dummy - int - flattenCCIndex(int reg) const - { - return reg; - } - - int - flattenMiscIndex(int reg) const - { - return reg; + fatal("Power does not currently have any misc regs defined\n"); } bool diff --git a/src/arch/power/isa/formats/basic.isa b/src/arch/power/isa/formats/basic.isa index 20d380f096..056c77e594 100644 --- a/src/arch/power/isa/formats/basic.isa +++ b/src/arch/power/isa/formats/basic.isa @@ -39,7 +39,7 @@ def template BasicDeclare {{ public: /// Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -57,7 +57,7 @@ def template BasicConstructor {{ // Basic instruction class execute method template. def template BasicExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; diff --git a/src/arch/power/isa/formats/mem.isa b/src/arch/power/isa/formats/mem.isa index 97b4f81886..a58f0e4b25 100644 --- a/src/arch/power/isa/formats/mem.isa +++ b/src/arch/power/isa/formats/mem.isa @@ -45,10 +45,10 @@ def template LoadStoreDeclare {{ /// Constructor. %(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -65,7 +65,7 @@ def template LoadStoreConstructor {{ def template LoadExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -94,7 +94,7 @@ def template LoadExecute {{ def template LoadInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -115,7 +115,7 @@ def template LoadInitiateAcc {{ def template LoadCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { [[maybe_unused]] Addr EA; Fault fault = NoFault; @@ -146,7 +146,7 @@ def template 
LoadCompleteAcc {{ def template StoreExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -180,7 +180,7 @@ def template StoreExecute {{ def template StoreInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; Fault fault = NoFault; @@ -209,7 +209,7 @@ def template StoreInitiateAcc {{ def template StoreCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { [[maybe_unused]] Addr EA; Fault fault = NoFault; diff --git a/src/arch/power/isa/formats/misc.isa b/src/arch/power/isa/formats/misc.isa index 877ac634de..cca1252860 100644 --- a/src/arch/power/isa/formats/misc.isa +++ b/src/arch/power/isa/formats/misc.isa @@ -33,7 +33,7 @@ def template MiscOpExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; diff --git a/src/arch/power/isa/formats/unimp.isa b/src/arch/power/isa/formats/unimp.isa index 3f709e501e..9e1e46051b 100644 --- a/src/arch/power/isa/formats/unimp.isa +++ b/src/arch/power/isa/formats/unimp.isa @@ -53,7 +53,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -84,7 +84,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -110,7 +110,7 @@ output 
decoder {{ output exec {{ Fault FailUnimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("attempt to execute unimplemented instruction '%s' " "(inst 0x%08x, opcode 0x%x, binary:%s)", mnemonic, machInst, PO, @@ -120,7 +120,7 @@ output exec {{ Fault WarnUnimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { if (!warned) { warn("\tinstruction '%s' unimplemented\n", mnemonic); @@ -141,4 +141,3 @@ def format WarnUnimpl() {{ iop = InstObjParams(name, 'WarnUnimplemented') decode_block = BasicDecodeWithMnemonic.subst(iop) }}; - diff --git a/src/arch/power/isa/formats/unknown.isa b/src/arch/power/isa/formats/unknown.isa index f68aff8efb..85dacc5796 100644 --- a/src/arch/power/isa/formats/unknown.isa +++ b/src/arch/power/isa/formats/unknown.isa @@ -51,7 +51,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; @@ -70,7 +70,7 @@ output decoder {{ output exec {{ Fault - Unknown::execute(ExecContext *xc, Trace::InstRecord *traceData) const + Unknown::execute(ExecContext *xc, trace::InstRecord *traceData) const { inform("attempt to execute unknown instruction at %s" "(inst 0x%08x, opcode 0x%x, binary: %s)", @@ -82,4 +82,3 @@ output exec {{ def format Unknown() {{ decode_block = 'return new Unknown(machInst);\n' }}; - diff --git a/src/arch/power/isa/formats/util.isa b/src/arch/power/isa/formats/util.isa index 1a8d34e631..8c8fa9ce03 100644 --- a/src/arch/power/isa/formats/util.isa +++ b/src/arch/power/isa/formats/util.isa @@ -224,5 +224,3 @@ output decoder {{ } }}; - - diff --git a/src/arch/power/isa/includes.isa b/src/arch/power/isa/includes.isa index 5ab9e891b2..32e6bd7661 100644 --- a/src/arch/power/isa/includes.isa +++ 
b/src/arch/power/isa/includes.isa @@ -57,6 +57,7 @@ output decoder {{ #include "arch/power/decoder.hh" #include "arch/power/faults.hh" +#include "arch/power/regs/float.hh" #include "arch/power/regs/int.hh" #include "base/loader/symtab.hh" #include "base/cprintf.hh" @@ -73,6 +74,7 @@ output exec {{ #include "arch/generic/memhelpers.hh" #include "arch/power/faults.hh" +#include "arch/power/regs/float.hh" #include "arch/power/regs/int.hh" #include "arch/power/regs/misc.hh" #include "base/condcodes.hh" diff --git a/src/arch/power/process.cc b/src/arch/power/process.cc index 6b147efcdb..d31aca2faf 100644 --- a/src/arch/power/process.cc +++ b/src/arch/power/process.cc @@ -341,7 +341,7 @@ PowerProcess::argsInit(int pageSize) //Reset the special-purpose registers for (int i = int_reg::NumArchRegs; i < int_reg::NumRegs; i++) - tc->setReg(RegId(IntRegClass, i), (RegVal)0); + tc->setReg(intRegClass[i], (RegVal)0); //Set the machine status for a typical userspace Msr msr = 0; diff --git a/src/arch/power/regs/float.hh b/src/arch/power/regs/float.hh index ce31c3a7fe..654460eac9 100644 --- a/src/arch/power/regs/float.hh +++ b/src/arch/power/regs/float.hh @@ -29,6 +29,9 @@ #ifndef __ARCH_POWER_REGS_FLOAT_HH__ #define __ARCH_POWER_REGS_FLOAT_HH__ +#include "cpu/reg_class.hh" +#include "debug/FloatRegs.hh" + namespace gem5 { @@ -42,6 +45,10 @@ const int NumArchRegs = 32; const int NumRegs = NumArchRegs; } // namespace float_reg + +inline constexpr RegClass floatRegClass(FloatRegClass, FloatRegClassName, + float_reg::NumRegs, debug::FloatRegs); + } // namespace PowerISA } // namespace gem5 diff --git a/src/arch/power/regs/int.hh b/src/arch/power/regs/int.hh index 324062d15e..6bb95cdc7f 100644 --- a/src/arch/power/regs/int.hh +++ b/src/arch/power/regs/int.hh @@ -31,6 +31,7 @@ #define __ARCH_POWER_REGS_INT_HH__ #include "cpu/reg_class.hh" +#include "debug/IntRegs.hh" namespace gem5 { @@ -92,50 +93,58 @@ enum : RegIndex NumRegs }; -inline constexpr RegId - R0(IntRegClass, _R0Idx), - 
R1(IntRegClass, _R1Idx), - R2(IntRegClass, _R2Idx), - R3(IntRegClass, _R3Idx), - R4(IntRegClass, _R4Idx), - R5(IntRegClass, _R5Idx), - R6(IntRegClass, _R6Idx), - R7(IntRegClass, _R7Idx), - R8(IntRegClass, _R8Idx), - R9(IntRegClass, _R9Idx), - R10(IntRegClass, _R10Idx), - R11(IntRegClass, _R11Idx), - R12(IntRegClass, _R12Idx), - R13(IntRegClass, _R13Idx), - R14(IntRegClass, _R14Idx), - R15(IntRegClass, _R15Idx), - R16(IntRegClass, _R16Idx), - R17(IntRegClass, _R17Idx), - R18(IntRegClass, _R18Idx), - R19(IntRegClass, _R19Idx), - R20(IntRegClass, _R20Idx), - R21(IntRegClass, _R21Idx), - R22(IntRegClass, _R22Idx), - R23(IntRegClass, _R23Idx), - R24(IntRegClass, _R24Idx), - R25(IntRegClass, _R25Idx), - R26(IntRegClass, _R26Idx), - R27(IntRegClass, _R27Idx), - R28(IntRegClass, _R28Idx), - R29(IntRegClass, _R29Idx), - R30(IntRegClass, _R30Idx), - R31(IntRegClass, _R31Idx), +} // namespace int_reg - Cr(IntRegClass, _CrIdx), - Xer(IntRegClass, _XerIdx), - Lr(IntRegClass, _LrIdx), - Ctr(IntRegClass, _CtrIdx), - Tar(IntRegClass, _TarIdx), - Fpscr(IntRegClass, _FpscrIdx), - Msr(IntRegClass, _MsrIdx), - Rsv(IntRegClass, _RsvIdx), - RsvLen(IntRegClass, _RsvLenIdx), - RsvAddr(IntRegClass, _RsvAddrIdx); +inline constexpr RegClass intRegClass(IntRegClass, IntRegClassName, + int_reg::NumRegs, debug::IntRegs); + +namespace int_reg +{ + +inline constexpr RegId + R0 = intRegClass[_R0Idx], + R1 = intRegClass[_R1Idx], + R2 = intRegClass[_R2Idx], + R3 = intRegClass[_R3Idx], + R4 = intRegClass[_R4Idx], + R5 = intRegClass[_R5Idx], + R6 = intRegClass[_R6Idx], + R7 = intRegClass[_R7Idx], + R8 = intRegClass[_R8Idx], + R9 = intRegClass[_R9Idx], + R10 = intRegClass[_R10Idx], + R11 = intRegClass[_R11Idx], + R12 = intRegClass[_R12Idx], + R13 = intRegClass[_R13Idx], + R14 = intRegClass[_R14Idx], + R15 = intRegClass[_R15Idx], + R16 = intRegClass[_R16Idx], + R17 = intRegClass[_R17Idx], + R18 = intRegClass[_R18Idx], + R19 = intRegClass[_R19Idx], + R20 = intRegClass[_R20Idx], + R21 = 
intRegClass[_R21Idx], + R22 = intRegClass[_R22Idx], + R23 = intRegClass[_R23Idx], + R24 = intRegClass[_R24Idx], + R25 = intRegClass[_R25Idx], + R26 = intRegClass[_R26Idx], + R27 = intRegClass[_R27Idx], + R28 = intRegClass[_R28Idx], + R29 = intRegClass[_R29Idx], + R30 = intRegClass[_R30Idx], + R31 = intRegClass[_R31Idx], + + Cr = intRegClass[_CrIdx], + Xer = intRegClass[_XerIdx], + Lr = intRegClass[_LrIdx], + Ctr = intRegClass[_CtrIdx], + Tar = intRegClass[_TarIdx], + Fpscr = intRegClass[_FpscrIdx], + Msr = intRegClass[_MsrIdx], + Rsv = intRegClass[_RsvIdx], + RsvLen = intRegClass[_RsvLenIdx], + RsvAddr = intRegClass[_RsvAddrIdx]; } // namespace int_reg diff --git a/src/arch/power/regs/misc.hh b/src/arch/power/regs/misc.hh index dd59ea8615..8601697135 100644 --- a/src/arch/power/regs/misc.hh +++ b/src/arch/power/regs/misc.hh @@ -31,6 +31,8 @@ #define __ARCH_POWER_MISCREGS_HH__ #include "base/bitunion.hh" +#include "cpu/reg_class.hh" +#include "debug/MiscRegs.hh" namespace gem5 { @@ -46,6 +48,9 @@ enum MiscRegIndex const char * const miscRegName[NUM_MISCREGS] = { }; +inline constexpr RegClass miscRegClass(MiscRegClass, MiscRegClassName, + NUM_MISCREGS, debug::MiscRegs); + BitUnion32(Cr) SubBitUnion(cr0, 31, 28) Bitfield<31> lt; diff --git a/src/arch/power/remote_gdb.cc b/src/arch/power/remote_gdb.cc index bd2e087285..c69c571979 100644 --- a/src/arch/power/remote_gdb.cc +++ b/src/arch/power/remote_gdb.cc @@ -188,12 +188,12 @@ RemoteGDB::PowerGdbRegCache::getRegs(ThreadContext *context) // PC, MSR, CR, LR, CTR, XER, FPSCR (32-bit each) for (int i = 0; i < int_reg::NumArchRegs; i++) { - RegId reg(IntRegClass, i); + RegId reg = intRegClass[i]; r.gpr[i] = htog((uint32_t)context->getReg(reg), order); } for (int i = 0; i < float_reg::NumArchRegs; i++) - r.fpr[i] = context->getReg(RegId(FloatRegClass, i)); + r.fpr[i] = context->getReg(floatRegClass[i]); r.pc = htog((uint32_t)context->pcState().instAddr(), order); r.msr = 0; // MSR is privileged, hence not exposed here @@ 
-213,10 +213,10 @@ RemoteGDB::PowerGdbRegCache::setRegs(ThreadContext *context) const ByteOrder order = (msr.le ? ByteOrder::little : ByteOrder::big); for (int i = 0; i < int_reg::NumArchRegs; i++) - context->setReg(RegId(IntRegClass, i), gtoh(r.gpr[i], order)); + context->setReg(intRegClass[i], gtoh(r.gpr[i], order)); for (int i = 0; i < float_reg::NumArchRegs; i++) - context->setReg(RegId(FloatRegClass, i), r.fpr[i]); + context->setReg(floatRegClass[i], r.fpr[i]); auto pc = context->pcState().as(); pc.byteOrder(order); @@ -244,10 +244,10 @@ RemoteGDB::Power64GdbRegCache::getRegs(ThreadContext *context) // each and the rest are 64-bit) for (int i = 0; i < int_reg::NumArchRegs; i++) - r.gpr[i] = htog(context->getReg(RegId(IntRegClass, i)), order); + r.gpr[i] = htog(context->getReg(intRegClass[i]), order); for (int i = 0; i < float_reg::NumArchRegs; i++) - r.fpr[i] = context->getReg(RegId(FloatRegClass, i)); + r.fpr[i] = context->getReg(floatRegClass[i]); r.pc = htog(context->pcState().instAddr(), order); r.msr = 0; // MSR is privileged, hence not exposed here @@ -267,10 +267,10 @@ RemoteGDB::Power64GdbRegCache::setRegs(ThreadContext *context) const ByteOrder order = (msr.le ? 
ByteOrder::little : ByteOrder::big); for (int i = 0; i < int_reg::NumArchRegs; i++) - context->setReg(RegId(IntRegClass, i), gtoh(r.gpr[i], order)); + context->setReg(intRegClass[i], gtoh(r.gpr[i], order)); for (int i = 0; i < float_reg::NumArchRegs; i++) - context->setReg(RegId(FloatRegClass, i), r.fpr[i]); + context->setReg(floatRegClass[i], r.fpr[i]); auto pc = context->pcState().as(); pc.byteOrder(order); diff --git a/src/arch/power/se_workload.cc b/src/arch/power/se_workload.cc index 4177fc0188..d9125c50b2 100644 --- a/src/arch/power/se_workload.cc +++ b/src/arch/power/se_workload.cc @@ -33,7 +33,7 @@ namespace gem5 namespace PowerISA { -const std::vector SEWorkload::SyscallABI::ArgumentRegs = { +const std::vector SEWorkload::SyscallABI::ArgumentRegs = { ArgumentReg0, ArgumentReg1, ArgumentReg2, diff --git a/src/arch/power/se_workload.hh b/src/arch/power/se_workload.hh index fdbc08e8ec..d041c45728 100644 --- a/src/arch/power/se_workload.hh +++ b/src/arch/power/se_workload.hh @@ -45,7 +45,7 @@ namespace PowerISA class SEWorkload : public gem5::SEWorkload { public: - using Params = PowerSEWorkloadParams; + PARAMS(PowerSEWorkload); SEWorkload(const Params &p, Addr page_shift) : gem5::SEWorkload(p, page_shift) {} @@ -54,14 +54,15 @@ class SEWorkload : public gem5::SEWorkload setSystem(System *sys) override { gem5::SEWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } loader::Arch getArch() const override { return loader::Power; } struct SyscallABI : public GenericSyscallABI64 { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; }; diff --git a/src/arch/riscv/AtomicSimpleCPU.py b/src/arch/riscv/AtomicSimpleCPU.py deleted file mode 100644 index f471b64420..0000000000 --- a/src/arch/riscv/AtomicSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.RiscvCPU import RiscvAtomicSimpleCPU - -AtomicSimpleCPU = RiscvAtomicSimpleCPU diff --git a/src/arch/riscv/MinorCPU.py b/src/arch/riscv/MinorCPU.py deleted file mode 100644 index 5254bada24..0000000000 --- a/src/arch/riscv/MinorCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.RiscvCPU import RiscvMinorCPU - -MinorCPU = RiscvMinorCPU diff --git a/src/arch/riscv/NonCachingSimpleCPU.py b/src/arch/riscv/NonCachingSimpleCPU.py deleted file mode 100644 index f7dcebf3c6..0000000000 --- a/src/arch/riscv/NonCachingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from m5.objects.RiscvCPU import RiscvNonCachingSimpleCPU - -NonCachingSimpleCPU = RiscvNonCachingSimpleCPU diff --git a/src/arch/riscv/PMAChecker.py b/src/arch/riscv/PMAChecker.py index 22424eb561..581560bd56 100644 --- a/src/arch/riscv/PMAChecker.py +++ b/src/arch/riscv/PMAChecker.py @@ -39,9 +39,10 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class PMAChecker(SimObject): - type = 'PMAChecker' - cxx_header = 'arch/riscv/pma_checker.hh' - cxx_class = 'gem5::PMAChecker' + type = "PMAChecker" + cxx_header = "arch/riscv/pma_checker.hh" + cxx_class = "gem5::PMAChecker" uncacheable = VectorParam.AddrRange([], "Uncacheable address ranges") diff --git a/src/arch/riscv/PMP.py b/src/arch/riscv/PMP.py index 9a86c2f306..a3844c99fd 100644 --- a/src/arch/riscv/PMP.py +++ b/src/arch/riscv/PMP.py @@ -28,10 +28,10 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class PMP(SimObject): - type = 'PMP' - cxx_header = 'arch/riscv/pmp.hh' - cxx_class = 'gem5::PMP' + type = "PMP" + cxx_header = "arch/riscv/pmp.hh" + cxx_class = "gem5::PMP" pmp_entries = Param.Int(16, "Maximum PMP Entries Supported") - diff --git a/src/arch/riscv/RiscvCPU.py b/src/arch/riscv/RiscvCPU.py index 36c2920713..1c77045c67 100644 --- a/src/arch/riscv/RiscvCPU.py +++ b/src/arch/riscv/RiscvCPU.py @@ -33,23 +33,29 @@ from m5.objects.RiscvMMU import RiscvMMU from m5.objects.RiscvInterrupts import RiscvInterrupts from m5.objects.RiscvISA import RiscvISA + class RiscvCPU: ArchDecoder = RiscvDecoder ArchMMU = RiscvMMU ArchInterrupts = RiscvInterrupts ArchISA = RiscvISA + class RiscvAtomicSimpleCPU(BaseAtomicSimpleCPU, RiscvCPU): mmu = RiscvMMU() + class RiscvNonCachingSimpleCPU(BaseNonCachingSimpleCPU, RiscvCPU): mmu = RiscvMMU() + class RiscvTimingSimpleCPU(BaseTimingSimpleCPU, RiscvCPU): mmu = RiscvMMU() + class RiscvO3CPU(BaseO3CPU, RiscvCPU): mmu = RiscvMMU() + class RiscvMinorCPU(BaseMinorCPU, RiscvCPU): mmu = RiscvMMU() diff --git 
a/src/arch/riscv/RiscvDecoder.py b/src/arch/riscv/RiscvDecoder.py index 1eebf2f806..30c1077662 100644 --- a/src/arch/riscv/RiscvDecoder.py +++ b/src/arch/riscv/RiscvDecoder.py @@ -25,7 +25,8 @@ from m5.objects.InstDecoder import InstDecoder + class RiscvDecoder(InstDecoder): - type = 'RiscvDecoder' - cxx_class = 'gem5::RiscvISA::Decoder' + type = "RiscvDecoder" + cxx_class = "gem5::RiscvISA::Decoder" cxx_header = "arch/riscv/decoder.hh" diff --git a/src/arch/riscv/RiscvFsWorkload.py b/src/arch/riscv/RiscvFsWorkload.py index f92945eb2b..9e158811da 100644 --- a/src/arch/riscv/RiscvFsWorkload.py +++ b/src/arch/riscv/RiscvFsWorkload.py @@ -32,20 +32,23 @@ from m5.params import * from m5.objects.System import System from m5.objects.Workload import Workload, KernelWorkload + class RiscvBareMetal(Workload): - type = 'RiscvBareMetal' - cxx_class = 'gem5::RiscvISA::BareMetal' - cxx_header = 'arch/riscv/bare_metal/fs_workload.hh' + type = "RiscvBareMetal" + cxx_class = "gem5::RiscvISA::BareMetal" + cxx_header = "arch/riscv/bare_metal/fs_workload.hh" bootloader = Param.String("File, that contains the bootloader code") bare_metal = Param.Bool(True, "Using Bare Metal Application?") - reset_vect = Param.Addr(0x0, 'Reset vector') + reset_vect = Param.Addr(0x0, "Reset vector") + class RiscvLinux(KernelWorkload): - type = 'RiscvLinux' - cxx_class = 'gem5::RiscvISA::FsLinux' - cxx_header = 'arch/riscv/linux/fs_workload.hh' + type = "RiscvLinux" + cxx_class = "gem5::RiscvISA::FsLinux" + cxx_header = "arch/riscv/linux/fs_workload.hh" - dtb_filename = Param.String("", - "File that contains the Device Tree Blob. Don't use DTB if empty.") - dtb_addr = Param.Addr(0x87e00000, "DTB address") + dtb_filename = Param.String( + "", "File that contains the Device Tree Blob. Don't use DTB if empty." 
+ ) + dtb_addr = Param.Addr(0x87E00000, "DTB address") diff --git a/src/arch/riscv/RiscvISA.py b/src/arch/riscv/RiscvISA.py index a54dcfd2a1..ee98a5b95d 100644 --- a/src/arch/riscv/RiscvISA.py +++ b/src/arch/riscv/RiscvISA.py @@ -38,9 +38,15 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from m5.params import Param from m5.objects.BaseISA import BaseISA + class RiscvISA(BaseISA): - type = 'RiscvISA' - cxx_class = 'gem5::RiscvISA::ISA' + type = "RiscvISA" + cxx_class = "gem5::RiscvISA::ISA" cxx_header = "arch/riscv/isa.hh" + + check_alignment = Param.Bool( + False, "whether to check memory access alignment" + ) diff --git a/src/arch/riscv/RiscvInterrupts.py b/src/arch/riscv/RiscvInterrupts.py index ce0ef01d05..ad64013a2e 100644 --- a/src/arch/riscv/RiscvInterrupts.py +++ b/src/arch/riscv/RiscvInterrupts.py @@ -29,7 +29,8 @@ from m5.objects.BaseInterrupts import BaseInterrupts + class RiscvInterrupts(BaseInterrupts): - type = 'RiscvInterrupts' - cxx_class = 'gem5::RiscvISA::Interrupts' - cxx_header = 'arch/riscv/interrupts.hh' + type = "RiscvInterrupts" + cxx_class = "gem5::RiscvISA::Interrupts" + cxx_header = "arch/riscv/interrupts.hh" diff --git a/src/arch/riscv/RiscvMMU.py b/src/arch/riscv/RiscvMMU.py index 193398cbe5..312244a85d 100644 --- a/src/arch/riscv/RiscvMMU.py +++ b/src/arch/riscv/RiscvMMU.py @@ -42,10 +42,11 @@ from m5.objects.RiscvTLB import RiscvTLB from m5.objects.PMAChecker import PMAChecker from m5.objects.PMP import PMP + class RiscvMMU(BaseMMU): - type = 'RiscvMMU' - cxx_class = 'gem5::RiscvISA::MMU' - cxx_header = 'arch/riscv/mmu.hh' + type = "RiscvMMU" + cxx_class = "gem5::RiscvISA::MMU" + cxx_header = "arch/riscv/mmu.hh" itb = RiscvTLB(entry_type="instruction") dtb = RiscvTLB(entry_type="data") diff --git a/src/arch/riscv/RiscvSeWorkload.py b/src/arch/riscv/RiscvSeWorkload.py index f14244fece..5df6b786c3 100644 --- 
a/src/arch/riscv/RiscvSeWorkload.py +++ b/src/arch/riscv/RiscvSeWorkload.py @@ -27,18 +27,22 @@ from m5.params import * from m5.objects.Workload import SEWorkload + class RiscvSEWorkload(SEWorkload): - type = 'RiscvSEWorkload' + type = "RiscvSEWorkload" cxx_header = "arch/riscv/se_workload.hh" - cxx_class = 'gem5::RiscvISA::SEWorkload' + cxx_class = "gem5::RiscvISA::SEWorkload" abstract = True + class RiscvEmuLinux(RiscvSEWorkload): - type = 'RiscvEmuLinux' + type = "RiscvEmuLinux" cxx_header = "arch/riscv/linux/se_workload.hh" - cxx_class = 'gem5::RiscvISA::EmuLinux' + cxx_class = "gem5::RiscvISA::EmuLinux" @classmethod def _is_compatible_with(cls, obj): - return obj.get_arch() in ('riscv64', 'riscv32') and \ - obj.get_op_sys() in ('linux', 'unknown') + return obj.get_arch() in ( + "riscv64", + "riscv32", + ) and obj.get_op_sys() in ("linux", "unknown") diff --git a/src/arch/riscv/RiscvTLB.py b/src/arch/riscv/RiscvTLB.py index 0cbce35825..e943d8ddab 100644 --- a/src/arch/riscv/RiscvTLB.py +++ b/src/arch/riscv/RiscvTLB.py @@ -34,27 +34,31 @@ from m5.proxy import * from m5.objects.BaseTLB import BaseTLB from m5.objects.ClockedObject import ClockedObject + class RiscvPagetableWalker(ClockedObject): - type = 'RiscvPagetableWalker' - cxx_class = 'gem5::RiscvISA::Walker' - cxx_header = 'arch/riscv/pagetable_walker.hh' + type = "RiscvPagetableWalker" + cxx_class = "gem5::RiscvISA::Walker" + cxx_header = "arch/riscv/pagetable_walker.hh" port = RequestPort("Port for the hardware table walker") system = Param.System(Parent.any, "system object") - num_squash_per_cycle = Param.Unsigned(4, - "Number of outstanding walks that can be squashed per cycle") + num_squash_per_cycle = Param.Unsigned( + 4, "Number of outstanding walks that can be squashed per cycle" + ) # Grab the pma_checker from the MMU pma_checker = Param.PMAChecker(Parent.any, "PMA Checker") pmp = Param.PMP(Parent.any, "PMP") + class RiscvTLB(BaseTLB): - type = 'RiscvTLB' - cxx_class = 'gem5::RiscvISA::TLB' - 
cxx_header = 'arch/riscv/tlb.hh' + type = "RiscvTLB" + cxx_class = "gem5::RiscvISA::TLB" + cxx_header = "arch/riscv/tlb.hh" size = Param.Int(64, "TLB size") - walker = Param.RiscvPagetableWalker(\ - RiscvPagetableWalker(), "page table walker") + walker = Param.RiscvPagetableWalker( + RiscvPagetableWalker(), "page table walker" + ) # Grab the pma_checker from the MMU pma_checker = Param.PMAChecker(Parent.any, "PMA Checker") - pmp = Param.PMP(Parent.any, "Physical Memory Protection Unit") + pmp = Param.PMP(Parent.any, "Physical Memory Protection Unit") diff --git a/src/arch/riscv/SConscript b/src/arch/riscv/SConscript index a9664f4b9f..dd4e9aed17 100644 --- a/src/arch/riscv/SConscript +++ b/src/arch/riscv/SConscript @@ -43,6 +43,9 @@ Import('*') +if env['USE_RISCV_ISA']: + env.TagImplies('riscv isa', 'gem5 lib') + Source('decoder.cc', tags='riscv isa') Source('faults.cc', tags='riscv isa') Source('isa.cc', tags='riscv isa') @@ -75,11 +78,6 @@ SimObject('RiscvTLB.py', sim_objects=['RiscvPagetableWalker', 'RiscvTLB'], tags='riscv isa') SimObject('RiscvCPU.py', sim_objects=[], tags='riscv isa') -SimObject('AtomicSimpleCPU.py', sim_objects=[], tags='riscv isa') -SimObject('TimingSimpleCPU.py', sim_objects=[], tags='riscv isa') -SimObject('NonCachingSimpleCPU.py', sim_objects=[], tags='riscv isa') -SimObject('O3CPU.py', sim_objects=[], tags='riscv isa') -SimObject('MinorCPU.py', sim_objects=[], tags='riscv isa') DebugFlag('RiscvMisc', tags='riscv isa') DebugFlag('PMP', tags='riscv isa') diff --git a/src/arch/riscv/SConsopts b/src/arch/riscv/SConsopts index 76713ee13d..751311de5c 100644 --- a/src/arch/riscv/SConsopts +++ b/src/arch/riscv/SConsopts @@ -1,7 +1,4 @@ -# -*- mode:python -*- - -# Copyright (c) 2004-2005 The Regents of The University of Michigan -# All rights reserved. +# Copyright 2021 Google, Inc. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,5 +24,5 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Import('*') - -main.Append(ALL_ISAS=['riscv']) +sticky_vars.Add(BoolVariable('USE_RISCV_ISA', 'Enable RISC-V ISA support', + False)) diff --git a/src/arch/riscv/TimingSimpleCPU.py b/src/arch/riscv/TimingSimpleCPU.py deleted file mode 100644 index 03d530f5ad..0000000000 --- a/src/arch/riscv/TimingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.RiscvCPU import RiscvTimingSimpleCPU - -TimingSimpleCPU = RiscvTimingSimpleCPU diff --git a/src/arch/riscv/bare_metal/fs_workload.hh b/src/arch/riscv/bare_metal/fs_workload.hh index e10c0a0433..35f42555df 100644 --- a/src/arch/riscv/bare_metal/fs_workload.hh +++ b/src/arch/riscv/bare_metal/fs_workload.hh @@ -60,7 +60,8 @@ class BareMetal : public Workload setSystem(System *sys) override { Workload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } loader::Arch getArch() const override { return bootloader->getArch(); } diff --git a/src/arch/riscv/decoder.hh b/src/arch/riscv/decoder.hh index d4abe688d9..15cbefe39c 100644 --- a/src/arch/riscv/decoder.hh +++ b/src/arch/riscv/decoder.hh @@ -42,10 +42,11 @@ namespace gem5 { +class BaseISA; + namespace RiscvISA { -class ISA; class Decoder : public InstDecoder { private: diff --git a/src/arch/riscv/faults.hh b/src/arch/riscv/faults.hh index 1bc671ef26..e66476727c 100644 --- a/src/arch/riscv/faults.hh +++ b/src/arch/riscv/faults.hh @@ -93,7 +93,11 @@ enum ExceptionCode : uint64_t INT_EXT_USER = 8, INT_EXT_SUPER = 9, INT_EXT_MACHINE = 11, - NumInterruptTypes + NumInterruptTypes, + // INT_NMI does not exist in the spec, it's a modeling artifact for NMI. We + // intentionally set it to be NumInterruptTypes so it can never conflict + // with any real INT_NUM in used. 
+ INT_NMI = NumInterruptTypes, }; enum class FaultType diff --git a/src/arch/riscv/gdb-xml/riscv-64bit-cpu.xml b/src/arch/riscv/gdb-xml/riscv-64bit-cpu.xml index ca59ac307d..7de083d25c 100644 --- a/src/arch/riscv/gdb-xml/riscv-64bit-cpu.xml +++ b/src/arch/riscv/gdb-xml/riscv-64bit-cpu.xml @@ -45,4 +45,4 @@ - \ No newline at end of file + diff --git a/src/arch/riscv/gdb-xml/riscv-64bit-csr.xml b/src/arch/riscv/gdb-xml/riscv-64bit-csr.xml index 6b2ae790fe..3c9d2e90f4 100644 --- a/src/arch/riscv/gdb-xml/riscv-64bit-csr.xml +++ b/src/arch/riscv/gdb-xml/riscv-64bit-csr.xml @@ -245,4 +245,4 @@ --> - \ No newline at end of file + diff --git a/src/arch/riscv/gdb-xml/riscv-64bit-fpu.xml b/src/arch/riscv/gdb-xml/riscv-64bit-fpu.xml index 7b68ba4d60..9661b0e004 100644 --- a/src/arch/riscv/gdb-xml/riscv-64bit-fpu.xml +++ b/src/arch/riscv/gdb-xml/riscv-64bit-fpu.xml @@ -55,4 +55,4 @@ - \ No newline at end of file + diff --git a/src/arch/riscv/gdb-xml/riscv.xml b/src/arch/riscv/gdb-xml/riscv.xml index cae8bf7abc..e39ae98e82 100644 --- a/src/arch/riscv/gdb-xml/riscv.xml +++ b/src/arch/riscv/gdb-xml/riscv.xml @@ -10,4 +10,4 @@ - \ No newline at end of file + diff --git a/src/arch/riscv/insts/amo.cc b/src/arch/riscv/insts/amo.cc index 45a703ac6f..d845c91bf3 100644 --- a/src/arch/riscv/insts/amo.cc +++ b/src/arch/riscv/insts/amo.cc @@ -54,7 +54,7 @@ MemFenceMicro::generateDisassembly( } Fault MemFenceMicro::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } @@ -72,8 +72,8 @@ LoadReserved::generateDisassembly( ss << "aq"; if (RL) ss << "rl"; - ss << ' ' << registerName(RegId(IntRegClass, RD)) << ", (" - << registerName(RegId(IntRegClass, RS1)) << ')'; + ss << ' ' << registerName(intRegClass[RD]) << ", (" + << registerName(intRegClass[RS1]) << ')'; return ss.str(); } @@ -100,9 +100,9 @@ StoreCond::generateDisassembly( ss << "aq"; if (RL) ss << "rl"; - ss << ' ' << registerName(RegId(IntRegClass, RD)) << ", " - << 
registerName(RegId(IntRegClass, RS2)) << ", (" - << registerName(RegId(IntRegClass, RS1)) << ')'; + ss << ' ' << registerName(intRegClass[RD]) << ", " + << registerName(intRegClass[RS2]) << ", (" + << registerName(intRegClass[RS1]) << ')'; return ss.str(); } @@ -130,9 +130,9 @@ AtomicMemOp::generateDisassembly( ss << "aq"; if (RL) ss << "rl"; - ss << ' ' << registerName(RegId(IntRegClass, RD)) << ", " - << registerName(RegId(IntRegClass, RS2)) << ", (" - << registerName(RegId(IntRegClass, RS1)) << ')'; + ss << ' ' << registerName(intRegClass[RD]) << ", " + << registerName(intRegClass[RS2]) << ", (" + << registerName(intRegClass[RS1]) << ')'; return ss.str(); } diff --git a/src/arch/riscv/insts/amo.hh b/src/arch/riscv/insts/amo.hh index 9c73c1f420..0d64b076dc 100644 --- a/src/arch/riscv/insts/amo.hh +++ b/src/arch/riscv/insts/amo.hh @@ -52,7 +52,7 @@ class MemFenceMicro : public RiscvMicroInst protected: using RiscvMicroInst::RiscvMicroInst; - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; }; diff --git a/src/arch/riscv/insts/standard.hh b/src/arch/riscv/insts/standard.hh index 93e6a4f77e..be3470fda4 100644 --- a/src/arch/riscv/insts/standard.hh +++ b/src/arch/riscv/insts/standard.hh @@ -92,30 +92,11 @@ class CSROp : public RiscvStaticInst uint64_t csr; uint64_t uimm; - bool valid = false; - RegIndex midx = 0; - std::string csrName; - uint64_t maskVal = 0; - /// Constructor CSROp(const char *mnem, MachInst _machInst, OpClass __opClass) : RiscvStaticInst(mnem, _machInst, __opClass), csr(FUNCT12), uimm(CSRIMM) { - auto csr_data_it = CSRData.find(csr); - if (csr_data_it == CSRData.end()) { - valid = false; - } else { - valid = true; - midx = csr_data_it->second.physIndex; - csrName = csr_data_it->second.name; - auto mask_it = CSRMasks.find(csr); - if (mask_it == CSRMasks.end()) - maskVal = 
mask(64); - else - maskVal = mask_it->second; - } - if (csr == CSR_SATP) { flags[IsSquashAfter] = true; } diff --git a/src/arch/riscv/insts/static_inst.cc b/src/arch/riscv/insts/static_inst.cc index 6f6b3fe7bd..fc615c8d31 100644 --- a/src/arch/riscv/insts/static_inst.cc +++ b/src/arch/riscv/insts/static_inst.cc @@ -29,6 +29,7 @@ #include "arch/riscv/insts/static_inst.hh" +#include "arch/riscv/isa.hh" #include "arch/riscv/pcstate.hh" #include "arch/riscv/types.hh" #include "cpu/static_inst.hh" @@ -39,6 +40,19 @@ namespace gem5 namespace RiscvISA { +bool +RiscvStaticInst::alignmentOk(ExecContext* xc, Addr addr, Addr size) const +{ + if (addr % size == 0) { + return true; + } + // Even if it's not aligned, we're still fine if the check is not enabled. + // We perform the check first because detecting whether the check itself is + // enabled involves multiple indirect references and is quite slow. + auto *isa = static_cast(xc->tcBase()->getIsaPtr()); + return !isa->alignmentCheckEnabled(); +} + void RiscvMicroInst::advancePC(PCStateBase &pcState) const { diff --git a/src/arch/riscv/insts/static_inst.hh b/src/arch/riscv/insts/static_inst.hh index eec9c88287..bccecf2e2f 100644 --- a/src/arch/riscv/insts/static_inst.hh +++ b/src/arch/riscv/insts/static_inst.hh @@ -56,6 +56,8 @@ class RiscvStaticInst : public StaticInst StaticInst(_mnemonic, __opClass), machInst(_machInst) {} + bool alignmentOk(ExecContext* xc, Addr addr, Addr size) const; + public: ExtMachInst machInst; @@ -114,20 +116,20 @@ class RiscvMacroInst : public RiscvStaticInst } Fault - initiateAcc(ExecContext *xc, Trace::InstRecord *traceData) const override + initiateAcc(ExecContext *xc, trace::InstRecord *traceData) const override { panic("Tried to execute a macroop directly!\n"); } Fault completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const override + trace::InstRecord *traceData) const override { panic("Tried to execute a macroop directly!\n"); } Fault - execute(ExecContext *xc, 
Trace::InstRecord *traceData) const override + execute(ExecContext *xc, trace::InstRecord *traceData) const override { panic("Tried to execute a macroop directly!\n"); } diff --git a/src/arch/riscv/insts/unknown.hh b/src/arch/riscv/insts/unknown.hh index 30cec98843..a271eb98b0 100644 --- a/src/arch/riscv/insts/unknown.hh +++ b/src/arch/riscv/insts/unknown.hh @@ -58,7 +58,7 @@ class Unknown : public RiscvStaticInst {} Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { return std::make_shared(machInst); } diff --git a/src/arch/riscv/interrupts.hh b/src/arch/riscv/interrupts.hh index 88b7b2e5a4..f10c5f386a 100644 --- a/src/arch/riscv/interrupts.hh +++ b/src/arch/riscv/interrupts.hh @@ -141,14 +141,22 @@ class Interrupts : public BaseInterrupts post(int int_num, int index) { DPRINTF(Interrupt, "Interrupt %d:%d posted\n", int_num, index); - ip[int_num] = true; + if (int_num != INT_NMI) { + ip[int_num] = true; + } else { + postNMI(); + } } void clear(int int_num, int index) { DPRINTF(Interrupt, "Interrupt %d:%d cleared\n", int_num, index); - ip[int_num] = false; + if (int_num != INT_NMI) { + ip[int_num] = false; + } else { + clearNMI(); + } } void postNMI() { tc->setMiscReg(MISCREG_NMIP, 1); } diff --git a/src/arch/riscv/isa.cc b/src/arch/riscv/isa.cc index 6bf34af5bf..c76bb2bdf3 100644 --- a/src/arch/riscv/isa.cc +++ b/src/arch/riscv/isa.cc @@ -47,10 +47,7 @@ #include "base/trace.hh" #include "cpu/base.hh" #include "debug/Checkpoint.hh" -#include "debug/FloatRegs.hh" -#include "debug/IntRegs.hh" #include "debug/LLSC.hh" -#include "debug/MiscRegs.hh" #include "debug/RiscvMisc.hh" #include "mem/packet.hh" #include "mem/request.hh" @@ -194,15 +191,28 @@ namespace RiscvISA [MISCREG_NMIP] = "NMIP", }}; -ISA::ISA(const Params &p) : BaseISA(p) +namespace { - _regClasses.emplace_back(NumIntRegs, debug::IntRegs); - _regClasses.emplace_back(NumFloatRegs, debug::FloatRegs); - _regClasses.emplace_back(1, 
debug::IntRegs); // Not applicable to RISCV - _regClasses.emplace_back(2, debug::IntRegs); // Not applicable to RISCV - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable to RISCV - _regClasses.emplace_back(0, debug::IntRegs); // Not applicable to RISCV - _regClasses.emplace_back(NUM_MISCREGS, debug::MiscRegs); + +/* Not applicable to RISCV */ +RegClass vecRegClass(VecRegClass, VecRegClassName, 1, debug::IntRegs); +RegClass vecElemClass(VecElemClass, VecElemClassName, 2, debug::IntRegs); +RegClass vecPredRegClass(VecPredRegClass, VecPredRegClassName, 1, + debug::IntRegs); +RegClass ccRegClass(CCRegClass, CCRegClassName, 0, debug::IntRegs); + +} // anonymous namespace + +ISA::ISA(const Params &p) : + BaseISA(p), checkAlignment(p.check_alignment) +{ + _regClasses.push_back(&intRegClass); + _regClasses.push_back(&floatRegClass); + _regClasses.push_back(&vecRegClass); + _regClasses.push_back(&vecElemClass); + _regClasses.push_back(&vecPredRegClass); + _regClasses.push_back(&ccRegClass); + _regClasses.push_back(&miscRegClass); miscRegFile.resize(NUM_MISCREGS); clear(); @@ -217,12 +227,12 @@ void ISA::copyRegsFrom(ThreadContext *src) { // First loop through the integer registers. - for (int i = 0; i < NumIntRegs; ++i) - tc->setIntReg(i, src->readIntReg(i)); + for (auto &id: intRegClass) + tc->setReg(id, src->getReg(id)); // Second loop through the float registers. 
- for (int i = 0; i < NumFloatRegs; ++i) - tc->setFloatReg(i, src->readFloatReg(i)); + for (auto &id: floatRegClass) + tc->setReg(id, src->getReg(id)); // Lastly copy PC/NPC tc->pcState(src->pcState()); @@ -272,22 +282,19 @@ ISA::hpmCounterEnabled(int misc_reg) const } RegVal -ISA::readMiscRegNoEffect(int misc_reg) const +ISA::readMiscRegNoEffect(RegIndex idx) const { - if (misc_reg > NUM_MISCREGS || misc_reg < 0) { - // Illegal CSR - panic("Illegal CSR index %#x\n", misc_reg); - return -1; - } + // Illegal CSR + panic_if(idx > NUM_MISCREGS, "Illegal CSR index %#x\n", idx); DPRINTF(RiscvMisc, "Reading MiscReg %s (%d): %#x.\n", - MiscRegNames[misc_reg], misc_reg, miscRegFile[misc_reg]); - return miscRegFile[misc_reg]; + MiscRegNames[idx], idx, miscRegFile[idx]); + return miscRegFile[idx]; } RegVal -ISA::readMiscReg(int misc_reg) +ISA::readMiscReg(RegIndex idx) { - switch (misc_reg) { + switch (idx) { case MISCREG_HARTID: return tc->contextId(); case MISCREG_CYCLE: @@ -333,7 +340,7 @@ ISA::readMiscReg(int misc_reg) case MISCREG_MEPC: { auto misa = readMiscRegNoEffect(MISCREG_ISA); - auto val = readMiscRegNoEffect(misc_reg); + auto val = readMiscRegNoEffect(idx); // if compressed instructions are disabled, epc[1] is set to 0 if ((misa & ISA_EXT_C_MASK) == 0) return mbits(val, 63, 2); @@ -341,44 +348,65 @@ ISA::readMiscReg(int misc_reg) else return mbits(val, 63, 1); } + case MISCREG_STATUS: + { + // Updating the SD bit. + // . Per RISC-V ISA Manual, vol II, section 3.1.6.6, page 26, + // the SD bit is a read-only bit indicating whether any of + // FS, VS, and XS fields being in the respective dirty state. + // . Per section 3.1.6, page 20, the SD bit is the most + // significant bit of the MSTATUS CSR for both RV32 and RV64. + // . Per section 3.1.6.6, page 29, the explicit formula for + // updating the SD is, + // SD = ((FS==DIRTY) | (XS==DIRTY) | (VS==DIRTY)) + // . 
Ideally, we want to update the SD after every relevant + // instruction, however, lazily updating the Status register + // upon its read produces the same effect as well. + STATUS status = readMiscRegNoEffect(idx); + uint64_t sd_bit = \ + (status.xs == 3) || (status.fs == 3) || (status.vs == 3); + // We assume RV64 here, updating the SD bit at index 63. + status.sd = sd_bit; + setMiscRegNoEffect(idx, status); + + return readMiscRegNoEffect(idx); + } default: // Try reading HPM counters // As a placeholder, all HPM counters are just cycle counters - if (misc_reg >= MISCREG_HPMCOUNTER03 && - misc_reg <= MISCREG_HPMCOUNTER31) { - if (hpmCounterEnabled(misc_reg)) { + if (idx >= MISCREG_HPMCOUNTER03 && + idx <= MISCREG_HPMCOUNTER31) { + if (hpmCounterEnabled(idx)) { DPRINTF(RiscvMisc, "HPM counter %d: %llu.\n", - misc_reg - MISCREG_CYCLE, tc->getCpuPtr()->curCycle()); + idx - MISCREG_CYCLE, tc->getCpuPtr()->curCycle()); return tc->getCpuPtr()->curCycle(); } else { - warn("HPM counter %d disabled.\n", misc_reg - MISCREG_CYCLE); + warn("HPM counter %d disabled.\n", idx - MISCREG_CYCLE); return 0; } } - return readMiscRegNoEffect(misc_reg); + return readMiscRegNoEffect(idx); } } void -ISA::setMiscRegNoEffect(int misc_reg, RegVal val) +ISA::setMiscRegNoEffect(RegIndex idx, RegVal val) { - if (misc_reg > NUM_MISCREGS || misc_reg < 0) { - // Illegal CSR - panic("Illegal CSR index %#x\n", misc_reg); - } + // Illegal CSR + panic_if(idx > NUM_MISCREGS, "Illegal CSR index %#x\n", idx); DPRINTF(RiscvMisc, "Setting MiscReg %s (%d) to %#x.\n", - MiscRegNames[misc_reg], misc_reg, val); - miscRegFile[misc_reg] = val; + MiscRegNames[idx], idx, val); + miscRegFile[idx] = val; } void -ISA::setMiscReg(int misc_reg, RegVal val) +ISA::setMiscReg(RegIndex idx, RegVal val) { - if (misc_reg >= MISCREG_CYCLE && misc_reg <= MISCREG_HPMCOUNTER31) { + if (idx >= MISCREG_CYCLE && idx <= MISCREG_HPMCOUNTER31) { // Ignore writes to HPM counters for now - warn("Ignoring write to %s.\n", 
CSRData.at(misc_reg).name); + warn("Ignoring write to %s.\n", CSRData.at(idx).name); } else { - switch (misc_reg) { + switch (idx) { // From section 3.7.1 of RISCV priv. specs // V1.12, the odd-numbered configuration @@ -407,13 +435,13 @@ ISA::setMiscReg(int misc_reg, RegVal val) // Form pmp_index using the index i and // PMPCFG register number // Note: MISCREG_PMPCFG2 - MISCREG_PMPCFG0 = 1 - // 8*(misc_reg-MISCREG_PMPCFG0) will be useful + // 8*(idx-MISCREG_PMPCFG0) will be useful // if a system contains more than 16 PMP entries - uint32_t pmp_index = i+(8*(misc_reg-MISCREG_PMPCFG0)); + uint32_t pmp_index = i+(8*(idx-MISCREG_PMPCFG0)); mmu->getPMP()->pmpUpdateCfg(pmp_index,cfg_val); } - setMiscRegNoEffect(misc_reg, val); + setMiscRegNoEffect(idx, val); } break; case MISCREG_PMPADDR00 ... MISCREG_PMPADDR15: @@ -423,10 +451,10 @@ ISA::setMiscReg(int misc_reg, RegVal val) auto mmu = dynamic_cast (tc->getMMUPtr()); - uint32_t pmp_index = misc_reg-MISCREG_PMPADDR00; + uint32_t pmp_index = idx-MISCREG_PMPADDR00; mmu->getPMP()->pmpUpdateAddr(pmp_index, val); - setMiscRegNoEffect(misc_reg, val); + setMiscRegNoEffect(idx, val); } break; @@ -448,24 +476,24 @@ ISA::setMiscReg(int misc_reg, RegVal val) { // we only support bare and Sv39 mode; setting a different mode // shall have no effect (see 4.1.12 in priv ISA manual) - SATP cur_val = readMiscRegNoEffect(misc_reg); + SATP cur_val = readMiscRegNoEffect(idx); SATP new_val = val; if (new_val.mode != AddrXlateMode::BARE && new_val.mode != AddrXlateMode::SV39) new_val.mode = cur_val.mode; - setMiscRegNoEffect(misc_reg, new_val); + setMiscRegNoEffect(idx, new_val); } break; case MISCREG_TSELECT: { // we don't support debugging, so always set a different value // than written - setMiscRegNoEffect(misc_reg, val + 1); + setMiscRegNoEffect(idx, val + 1); } break; case MISCREG_ISA: { - auto cur_val = readMiscRegNoEffect(misc_reg); + auto cur_val = readMiscRegNoEffect(idx); // only allow to disable compressed instructions // if the 
following instruction is 4-byte aligned if ((val & ISA_EXT_C_MASK) == 0 && @@ -473,20 +501,20 @@ ISA::setMiscReg(int misc_reg, RegVal val) 2, 0) != 0) { val |= cur_val & ISA_EXT_C_MASK; } - setMiscRegNoEffect(misc_reg, val); + setMiscRegNoEffect(idx, val); } break; case MISCREG_STATUS: { // SXL and UXL are hard-wired to 64 bit - auto cur = readMiscRegNoEffect(misc_reg); + auto cur = readMiscRegNoEffect(idx); val &= ~(STATUS_SXL_MASK | STATUS_UXL_MASK); val |= cur & (STATUS_SXL_MASK | STATUS_UXL_MASK); - setMiscRegNoEffect(misc_reg, val); + setMiscRegNoEffect(idx, val); } break; default: - setMiscRegNoEffect(misc_reg, val); + setMiscRegNoEffect(idx, val); } } } diff --git a/src/arch/riscv/isa.hh b/src/arch/riscv/isa.hh index 81923b5aef..97a05814fe 100644 --- a/src/arch/riscv/isa.hh +++ b/src/arch/riscv/isa.hh @@ -34,10 +34,12 @@ #ifndef __ARCH_RISCV_ISA_HH__ #define __ARCH_RISCV_ISA_HH__ +#include #include #include "arch/generic/isa.hh" #include "arch/riscv/pcstate.hh" +#include "arch/riscv/regs/misc.hh" #include "arch/riscv/types.hh" #include "base/types.hh" @@ -69,13 +71,14 @@ class ISA : public BaseISA { protected: std::vector miscRegFile; + bool checkAlignment; bool hpmCounterEnabled(int counter) const; public: using Params = RiscvISAParams; - void clear(); + void clear() override; PCStateBase * newPCState(Addr new_inst_addr=0) const override @@ -84,19 +87,27 @@ class ISA : public BaseISA } public: - RegVal readMiscRegNoEffect(int misc_reg) const; - RegVal readMiscReg(int misc_reg); - void setMiscRegNoEffect(int misc_reg, RegVal val); - void setMiscReg(int misc_reg, RegVal val); + RegVal readMiscRegNoEffect(RegIndex idx) const override; + RegVal readMiscReg(RegIndex idx) override; + void setMiscRegNoEffect(RegIndex idx, RegVal val) override; + void setMiscReg(RegIndex idx, RegVal val) override; - RegId flattenRegId(const RegId ®Id) const { return regId; } - int flattenIntIndex(int reg) const { return reg; } - int flattenFloatIndex(int reg) const { return reg; } 
- int flattenVecIndex(int reg) const { return reg; } - int flattenVecElemIndex(int reg) const { return reg; } - int flattenVecPredIndex(int reg) const { return reg; } - int flattenCCIndex(int reg) const { return reg; } - int flattenMiscIndex(int reg) const { return reg; } + // Derived class could provide knowledge of non-standard CSRs to other + // components by overriding the two getCSRxxxMap here and properly + // implementing the corresponding read/set function. However, customized + // maps should always be compatible with the standard maps. + virtual const std::unordered_map& + getCSRDataMap() const + { + return CSRData; + } + virtual const std::unordered_map& + getCSRMaskMap() const + { + return CSRMasks; + } + + bool alignmentCheckEnabled() const { return checkAlignment; } bool inUserMode() const override; void copyRegsFrom(ThreadContext *src) override; diff --git a/src/arch/riscv/isa/bitfields.isa b/src/arch/riscv/isa/bitfields.isa index e32c82de53..60636c68f8 100644 --- a/src/arch/riscv/isa/bitfields.isa +++ b/src/arch/riscv/isa/bitfields.isa @@ -123,3 +123,9 @@ def bitfield CIMM1 <12>; // Pseudo instructions def bitfield M5FUNC <31:25>; + +// Cryptography instructions +def bitfield BIT24 <24>; +def bitfield RNUM <23:20>; +def bitfield KFUNCT5 <29:25>; +def bitfield BS <31:30>; diff --git a/src/arch/riscv/isa/decoder.isa b/src/arch/riscv/isa/decoder.isa index 6cd7d952e1..c6b74ff44f 100644 --- a/src/arch/riscv/isa/decoder.isa +++ b/src/arch/riscv/isa/decoder.isa @@ -445,10 +445,50 @@ decode QUADRANT default Unknown::unknown() { 0x00: slli({{ Rd = Rs1 << imm; }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); + 0x02: decode FS2 { + 0x0: sha256sum0({{ + Rd_sw = _rvk_emu_sha256sum0(Rs1_sw); + }}); + 0x1: sha256sum1({{ + Rd_sw = _rvk_emu_sha256sum1(Rs1_sw); + }}); + 0x2: sha256sig0({{ + Rd_sw = _rvk_emu_sha256sig0(Rs1_sw); + }}); + 0x3: sha256sig1({{ + Rd_sw = _rvk_emu_sha256sig1(Rs1_sw); + }}); + 0x4: sha512sum0({{ + Rd_sd = _rvk_emu_sha512sum0(Rs1_sd); 
+ }}); + 0x5: sha512sum1({{ + Rd_sd = _rvk_emu_sha512sum1(Rs1_sd); + }}); + 0x6: sha512sig0({{ + Rd_sd = _rvk_emu_sha512sig0(Rs1_sd); + }}); + 0x7: sha512sig1({{ + Rd_sd = _rvk_emu_sha512sig1(Rs1_sd); + }}); + 0x8: sm3p0({{ + Rd_sw = _rvk_emu_sm3p0(Rs1_sw); + }}); + 0x9: sm3p1({{ + Rd_sw = _rvk_emu_sm3p1(Rs1_sw); + }}); + } 0x05: bseti({{ uint64_t index = imm & (64 - 1); Rd = Rs1 | (UINT64_C(1) << index); }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); + 0x06: decode BIT24 { + 0x0: aes64im({{ + Rd_sd = _rvk_emu_aes64im(Rs1_sd); + }}); + 0x1: aes64ks1i({{ + Rd_sd = _rvk_emu_aes64ks1i(Rs1_sd, imm); + }}, imm_type = int32_t, imm_code={{ imm = RNUM; }}); + } 0x09: bclri({{ uint64_t index = imm & (64 - 1); Rd = Rs1 & (~(UINT64_C(1) << index)); @@ -469,10 +509,10 @@ decode QUADRANT default Unknown::unknown() { 0x02: cpop({{ Rd = popCount(Rs1); }}); - 0x04: sextb({{ + 0x04: sext_b({{ Rd = sext<8>(Rs1_ub); }}); - 0x05: sexth({{ + 0x05: sext_h({{ Rd = sext<16>(Rs1_uh); }}); } @@ -496,16 +536,17 @@ decode QUADRANT default Unknown::unknown() { 0x0: srli({{ Rd = Rs1 >> imm; }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); - 0x5: orcb({{ - Rd = 0; - Rd |= (Rs1<7:0> ? UINT64_C(0xff) : 0x0); - Rd |= (Rs1<15:8> ? UINT64_C(0xff) : 0x0) << 8; - Rd |= (Rs1<23:16> ? UINT64_C(0xff) : 0x0) << 16; - Rd |= (Rs1<31:24> ? UINT64_C(0xff) : 0x0) << 24; - Rd |= (Rs1<39:32> ? UINT64_C(0xff) : 0x0) << 32; - Rd |= (Rs1<47:40> ? UINT64_C(0xff) : 0x0) << 40; - Rd |= (Rs1<55:48> ? UINT64_C(0xff) : 0x0) << 48; - Rd |= (Rs1<63:56> ? UINT64_C(0xff) : 0x0) << 56; + 0x5: orc_b({{ + uint64_t result = 0; + result |= (Rs1<7:0> ? UINT64_C(0xff) : 0x0); + result |= (Rs1<15:8> ? UINT64_C(0xff) : 0x0) << 8; + result |= (Rs1<23:16> ? UINT64_C(0xff) : 0x0) << 16; + result |= (Rs1<31:24> ? UINT64_C(0xff) : 0x0) << 24; + result |= (Rs1<39:32> ? UINT64_C(0xff) : 0x0) << 32; + result |= (Rs1<47:40> ? UINT64_C(0xff) : 0x0) << 40; + result |= (Rs1<55:48> ? 
UINT64_C(0xff) : 0x0) << 48; + result |= (Rs1<63:56> ? UINT64_C(0xff) : 0x0) << 56; + Rd = result; }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); 0x8: srai({{ Rd_sd = Rs1_sd >> imm; @@ -519,12 +560,22 @@ decode QUADRANT default Unknown::unknown() { }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); 0xd: decode RS2 { 0x18: rev8({{ - Rd = 0; - Rd |= ((Rs1 & 0xffULL) << 56) | (((Rs1 >> 56) & 0xffULL)); - Rd |= (((Rs1 >> 8) & 0xffULL) << 48) | (((Rs1 >> 48) & 0xffULL) << 8); - Rd |= (((Rs1 >> 16) & 0xffULL) << 40) | (((Rs1 >> 40) & 0xffULL) << 16); - Rd |= (((Rs1 >> 24) & 0xffULL) << 32) | (((Rs1 >> 32) & 0xffULL) << 24); - }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); + uint64_t result = 0; + result |= + ((Rs1 & 0xffULL) << 56) + | (((Rs1 >> 56) & 0xffULL)); + result |= + (((Rs1 >> 8) & 0xffULL) << 48) + | (((Rs1 >> 48) & 0xffULL) << 8); + result |= + (((Rs1 >> 16) & 0xffULL) << 40) + | (((Rs1 >> 40) & 0xffULL) << 16); + result |= + (((Rs1 >> 24) & 0xffULL) << 32) + | (((Rs1 >> 32) & 0xffULL) << 24); + Rd = result; + }}, + imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }}); } } 0x6: ori({{ @@ -549,7 +600,7 @@ decode QUADRANT default Unknown::unknown() { 0x0: slliw({{ Rd_sd = Rs1_sw << imm; }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT5; }}); - 0x1: slliuw({{ + 0x1: slli_uw({{ Rd = ((uint64_t)(Rs1_uw)) << imm; }}, imm_type = uint64_t, imm_code = {{ imm = SHAMT5; }}); 0xc: decode FS2 { @@ -774,16 +825,49 @@ decode QUADRANT default Unknown::unknown() { } 0x0c: decode FUNCT3 { format ROp { - 0x0: decode FUNCT7 { - 0x0: add({{ - Rd = Rs1_sd + Rs2_sd; + 0x0: decode KFUNCT5 { + 0x00: decode BS { + 0x0: add({{ + Rd = Rs1_sd + Rs2_sd; + }}); + 0x1: sub({{ + Rd = Rs1_sd - Rs2_sd; + }}); + } + 0x01: decode BS { + 0x0: mul({{ + Rd = Rs1_sd * Rs2_sd; + }}, IntMultOp); + } + 0x18: sm4ed({{ + Rd_sd = _rvk_emu_sm4ed(Rs1_sd, Rs2_sd, (uint8_t)BS); }}); - 0x1: mul({{ - Rd = Rs1_sd*Rs2_sd; - }}, IntMultOp); - 0x20: sub({{ - Rd = Rs1_sd - Rs2_sd; + 
0x19: decode BS { + 0x0: aes64es({{ + Rd_sd = _rvk_emu_aes64es(Rs1_sd, Rs2_sd); + }}); + } + 0x1a: sm4ks({{ + Rd_sd = _rvk_emu_sm4ks(Rs1_sd, Rs2_sd, (uint8_t)BS); }}); + 0x1b: decode BS { + 0x0: aes64esm({{ + Rd_sd = _rvk_emu_aes64esm(Rs1_sd, Rs2_sd); + }}); + } + 0x1d: decode BS { + 0x0: aes64ds({{ + Rd_sd = _rvk_emu_aes64ds(Rs1_sd, Rs2_sd); + }}); + } + 0x1f: decode BS{ + 0x0: aes64dsm({{ + Rd_sd = _rvk_emu_aes64dsm(Rs1_sd, Rs2_sd); + }}); + 0x1: aes64ks2({{ + Rd_sd = _rvk_emu_aes64ks2(Rs1_sd, Rs2_sd); + }}); + } } 0x1: decode FUNCT7 { 0x0: sll({{ @@ -812,12 +896,13 @@ decode QUADRANT default Unknown::unknown() { : res; }}, IntMultOp); 0x5: clmul({{ - Rd = 0; + uint64_t result = 0; for (int i = 0; i < 64; i++) { if ((Rs2 >> i) & 1) { - Rd ^= Rs1 << i; + result ^= Rs1 << i; } } + Rd = result; }}); 0x14: bset({{ Rs2 &= (64 - 1); @@ -861,16 +946,20 @@ decode QUADRANT default Unknown::unknown() { Rd = negate ? ~res + (Rs1_sd*Rs2 == 0 ? 1 : 0) : res; }}, IntMultOp); 0x5: clmulr({{ - Rd = 0; + uint64_t result = 0; for (int i = 0; i < 64; i++) { if ((Rs2 >> i) & 1) { - Rd ^= Rs1 >> (64-i-1); + result ^= Rs1 >> (64-i-1); } } + Rd = result; }}); 0x10: sh1add({{ Rd = (Rs1 << 1) + Rs2; }}); + 0x14: xperm4({{ + Rd_sd = _rvk_emu_xperm4_64(Rs1_sd, Rs2_sd); + }}); } 0x3: decode FUNCT7 { 0x0: sltu({{ @@ -892,12 +981,13 @@ decode QUADRANT default Unknown::unknown() { Rd = hi + (mid1 >> 32) + (mid2 >> 32) + carry; }}, IntMultOp); 0x5: clmulh({{ - Rd = 0; + uint64_t result = 0; for (int i = 1; i < 64; i++) { if ((Rs2 >> i) & 1) { - Rd ^= (Rs1 >> (64-i)); + result ^= (Rs1 >> (64-i)); } } + Rd = result; }}); } 0x4: decode FUNCT7 { @@ -921,6 +1011,9 @@ decode QUADRANT default Unknown::unknown() { 0x10: sh2add({{ Rd = (Rs1 << 2) + Rs2; }}); + 0x14: xperm8({{ + Rd_sd = _rvk_emu_xperm8_64(Rs1_sd, Rs2_sd); + }}); 0x20: xnor({{ Rd = ~(Rs1 ^ Rs2); }}); @@ -1010,7 +1103,7 @@ decode QUADRANT default Unknown::unknown() { 0x1: mulw({{ Rd_sd = (int32_t)(Rs1_sw*Rs2_sw); }}, IntMultOp); - 0x4: 
adduw({{ + 0x4: add_uw({{ Rd = Rs1_uw + Rs2; }}); 0x20: subw({{ @@ -1027,7 +1120,7 @@ decode QUADRANT default Unknown::unknown() { }}); } 0x2: decode FUNCT7 { - 0x10: sh1adduw({{ + 0x10: sh1add_uw({{ Rd = (((uint64_t)Rs1_uw) << 1) + Rs2; }}); } @@ -1042,10 +1135,10 @@ decode QUADRANT default Unknown::unknown() { Rd_sd = Rs1_sw/Rs2_sw; } }}, IntDivOp); - 0x4: zexth ({{ + 0x4: zext_h({{ Rd = Rs1_uh; }}); - 0x10: sh2adduw({{ + 0x10: sh2add_uw({{ Rd = (((uint64_t)Rs1_uw) << 2) + Rs2; }}); } @@ -1079,7 +1172,7 @@ decode QUADRANT default Unknown::unknown() { Rd_sd = Rs1_sw%Rs2_sw; } }}, IntDivOp); - 0x10: sh3adduw({{ + 0x10: sh3add_uw({{ Rd = (((uint64_t)Rs1_uw) << 3) + Rs2; }}); } @@ -1671,10 +1764,11 @@ decode QUADRANT default Unknown::unknown() { } 0x70: decode ROUND_MODE { 0x0: fmv_x_w({{ - Rd = (uint32_t)Fs1_bits; - if ((Rd&0x80000000) != 0) { - Rd |= (0xFFFFFFFFULL << 32); + uint64_t result = (uint32_t)Fs1_bits; + if ((result&0x80000000) != 0) { + result |= (0xFFFFFFFFULL << 32); } + Rd = result; }}, FloatCvtOp); 0x1: fclass_s({{ Rd = f32_classify(f32(freg(Fs1_bits))); @@ -1690,10 +1784,11 @@ decode QUADRANT default Unknown::unknown() { } 0x72: decode ROUND_MODE { 0x0: fmv_x_h({{ - Rd = (uint16_t)Fs1_bits; - if ((Rd&0x8000) != 0) { - Rd |= (0xFFFFFFFFFFFFULL << 16); + uint64_t result = (uint16_t)Fs1_bits; + if ((result&0x8000) != 0) { + result |= (0xFFFFFFFFFFFFULL << 16); } + Rd = result; }}, FloatCvtOp); 0x1: fclass_h({{ Rd = f16_classify(f16(freg(Fs1_bits))); @@ -1827,7 +1922,17 @@ decode QUADRANT default Unknown::unknown() { "wfi in user mode or TW enabled", machInst); } - // don't do anything for now + // Go to sleep only if there's no pending interrupt + // at all, including masked interrupts. 
+ auto tc = xc->tcBase(); + auto cpu = tc->getCpuPtr(); + auto ic = dynamic_cast( + cpu->getInterruptController(tc->threadId())); + panic_if(!ic, "Invalid Interrupt Controller."); + if (ic->readIP() == 0 + && xc->readMiscReg(MISCREG_NMIP) == 0) { + tc->quiesce(); + } }}, No_OpClass); } 0x9: sfence_vma({{ diff --git a/src/arch/riscv/isa/formats/amo.isa b/src/arch/riscv/isa/formats/amo.isa index f7e9b5bcc6..6b22e8f439 100644 --- a/src/arch/riscv/isa/formats/amo.isa +++ b/src/arch/riscv/isa/formats/amo.isa @@ -59,10 +59,10 @@ def template AtomicMemOpRMWDeclare {{ // Constructor %(class_name)sRMW(ExtMachInst machInst, %(class_name)s *_p); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -91,10 +91,10 @@ def template LRSCMicroDeclare {{ // Constructor %(class_name)sMicro(ExtMachInst machInst, %(class_name)s *_p); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -233,7 +233,7 @@ def template AtomicMemOpRMWConstructor {{ def template LoadReservedExecute {{ Fault %(class_name)s::%(class_name)sMicro::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; @@ -241,6 +241,9 @@ def template LoadReservedExecute {{ %(op_rd)s; %(ea_code)s; + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return 
std::make_shared(EA, LOAD_ADDR_MISALIGNED); + } { Fault fault = readMemAtomicLE(xc, traceData, EA, Mem, memAccessFlags); @@ -257,7 +260,7 @@ def template LoadReservedExecute {{ def template StoreCondExecute {{ Fault %(class_name)s::%(class_name)sMicro::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; uint64_t result; @@ -268,6 +271,9 @@ def template StoreCondExecute {{ %(memacc_code)s; + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, STORE_ADDR_MISALIGNED); + } { Fault fault = writeMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags, @@ -288,7 +294,7 @@ def template StoreCondExecute {{ def template AtomicMemOpRMWExecute {{ Fault %(class_name)s::%(class_name)sRMW::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -299,6 +305,9 @@ def template AtomicMemOpRMWExecute {{ assert(amo_op); + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, AMO_ADDR_MISALIGNED); + } { Fault fault = amoMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags, amo_op); @@ -319,7 +328,7 @@ def template AtomicMemOpRMWExecute {{ def template LoadReservedInitiateAcc {{ Fault %(class_name)s::%(class_name)sMicro::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -327,6 +336,9 @@ def template LoadReservedInitiateAcc {{ %(op_rd)s; %(ea_code)s; + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, LOAD_ADDR_MISALIGNED); + } return initiateMemRead(xc, traceData, EA, Mem, memAccessFlags); } }}; @@ -334,7 +346,7 @@ def template LoadReservedInitiateAcc {{ def template StoreCondInitiateAcc {{ Fault %(class_name)s::%(class_name)sMicro::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -343,6 +355,9 @@ def template StoreCondInitiateAcc {{ %(ea_code)s; %(memacc_code)s; + if (!alignmentOk(xc, 
EA, sizeof(Mem))) { + return std::make_shared(EA, STORE_ADDR_MISALIGNED); + } { Fault fault = writeMemTimingLE(xc, traceData, Mem, EA, memAccessFlags, nullptr); @@ -359,7 +374,7 @@ def template StoreCondInitiateAcc {{ def template AtomicMemOpRMWInitiateAcc {{ Fault %(class_name)s::%(class_name)sRMW::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -370,6 +385,9 @@ def template AtomicMemOpRMWInitiateAcc {{ assert(amo_op); + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, AMO_ADDR_MISALIGNED); + } return initiateMemAMO(xc, traceData, EA, Mem, memAccessFlags, amo_op); } }}; @@ -379,7 +397,7 @@ def template AtomicMemOpRMWInitiateAcc {{ def template LoadReservedCompleteAcc {{ Fault %(class_name)s::%(class_name)sMicro::completeAcc(PacketPtr pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -395,7 +413,7 @@ def template LoadReservedCompleteAcc {{ def template StoreCondCompleteAcc {{ Fault %(class_name)s::%(class_name)sMicro::completeAcc(Packet *pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_dest_decl)s; @@ -412,7 +430,7 @@ def template StoreCondCompleteAcc {{ def template AtomicMemOpRMWCompleteAcc {{ Fault %(class_name)s::%(class_name)sRMW::completeAcc(Packet *pkt, - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -450,7 +468,6 @@ def format LoadReserved(memacc_code, postacc_code={{ }}, ea_code={{EA = Rs1;}}, header_output += LRSCMicroDeclare.subst(iop) decoder_output += LRSCMicroConstructor.subst(iop) - decode_block += BasicDecode.subst(iop) exec_output += LoadReservedExecute.subst(iop) \ + LoadReservedInitiateAcc.subst(iop) \ + LoadReservedCompleteAcc.subst(iop) @@ -478,7 +495,6 @@ def format StoreCond(memacc_code, 
postacc_code={{ }}, ea_code={{EA = Rs1;}}, header_output += LRSCMicroDeclare.subst(iop) decoder_output += LRSCMicroConstructor.subst(iop) - decode_block += BasicDecode.subst(iop) exec_output += StoreCondExecute.subst(iop) \ + StoreCondInitiateAcc.subst(iop) \ + StoreCondCompleteAcc.subst(iop) @@ -510,7 +526,6 @@ def format AtomicMemOp(memacc_code, amoop_code, postacc_code={{ }}, header_output += AtomicMemOpRMWDeclare.subst(rmw_iop) decoder_output += AtomicMemOpRMWConstructor.subst(rmw_iop) - decode_block += BasicDecode.subst(rmw_iop) exec_output += AtomicMemOpRMWExecute.subst(rmw_iop) \ + AtomicMemOpRMWInitiateAcc.subst(rmw_iop) \ + AtomicMemOpRMWCompleteAcc.subst(rmw_iop) diff --git a/src/arch/riscv/isa/formats/basic.isa b/src/arch/riscv/isa/formats/basic.isa index 416b458784..6dfeea851c 100644 --- a/src/arch/riscv/isa/formats/basic.isa +++ b/src/arch/riscv/isa/formats/basic.isa @@ -40,7 +40,7 @@ def template BasicDeclare {{ public: /// Constructor. %(class_name)s(MachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; using %(base_class)s::generateDisassembly; }; }}; @@ -60,7 +60,7 @@ def template BasicConstructor {{ def template BasicExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; diff --git a/src/arch/riscv/isa/formats/compressed.isa b/src/arch/riscv/isa/formats/compressed.isa index a2da59df11..6fe899b881 100644 --- a/src/arch/riscv/isa/formats/compressed.isa +++ b/src/arch/riscv/isa/formats/compressed.isa @@ -126,7 +126,7 @@ def template CBasicDeclare {{ public: /// Constructor. 
%(class_name)s(MachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, const loader::SymbolTable *symtab) const override; }; @@ -136,7 +136,7 @@ def template CBasicDeclare {{ def template CBasicExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; diff --git a/src/arch/riscv/isa/formats/fp.isa b/src/arch/riscv/isa/formats/fp.isa index d015239980..d0bd245ae4 100644 --- a/src/arch/riscv/isa/formats/fp.isa +++ b/src/arch/riscv/isa/formats/fp.isa @@ -34,12 +34,15 @@ // def template FloatExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { STATUS status = xc->readMiscReg(MISCREG_STATUS); if (status.fs == FPUStatus::OFF) return std::make_shared("FPU is off", machInst); + status.fs = FPUStatus::DIRTY; + xc->setMiscReg(MISCREG_STATUS, status); + %(op_decl)s; %(op_rd)s; diff --git a/src/arch/riscv/isa/formats/mem.isa b/src/arch/riscv/isa/formats/mem.isa index a1005b4a15..fa334585a7 100644 --- a/src/arch/riscv/isa/formats/mem.isa +++ b/src/arch/riscv/isa/formats/mem.isa @@ -44,10 +44,10 @@ def template LoadStoreDeclare {{ /// Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -98,7 +98,7 @@ def LoadStoreBase(name, Name, offset_code, ea_code, memacc_code, mem_flags, def template LoadExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { Addr EA; @@ -106,6 +106,9 @@ def template LoadExecute {{ %(op_rd)s; %(ea_code)s; + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, LOAD_ADDR_MISALIGNED); + } { Fault fault = readMemAtomicLE(xc, traceData, EA, Mem, memAccessFlags); @@ -124,7 +127,7 @@ def template LoadExecute {{ def template LoadInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -132,6 +135,9 @@ def template LoadInitiateAcc {{ %(op_rd)s; %(ea_code)s; + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, LOAD_ADDR_MISALIGNED); + } return initiateMemRead(xc, traceData, EA, Mem, memAccessFlags); } }}; @@ -139,7 +145,7 @@ def template LoadInitiateAcc {{ def template LoadCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -156,7 +162,7 @@ def template LoadCompleteAcc {{ def template StoreExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -166,6 +172,9 @@ def template StoreExecute {{ %(memacc_code)s; + if (!alignmentOk(xc, EA, 
sizeof(Mem))) { + return std::make_shared(EA, STORE_ADDR_MISALIGNED); + } { Fault fault = writeMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags, @@ -184,7 +193,7 @@ def template StoreExecute {{ def template StoreInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Addr EA; @@ -194,6 +203,9 @@ def template StoreInitiateAcc {{ %(memacc_code)s; + if (!alignmentOk(xc, EA, sizeof(Mem))) { + return std::make_shared(EA, STORE_ADDR_MISALIGNED); + } { Fault fault = writeMemTimingLE(xc, traceData, Mem, EA, memAccessFlags, nullptr); @@ -210,7 +222,7 @@ def template StoreInitiateAcc {{ def template StoreCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } diff --git a/src/arch/riscv/isa/formats/standard.isa b/src/arch/riscv/isa/formats/standard.isa index e40f097ad8..3cad5ed0c9 100644 --- a/src/arch/riscv/isa/formats/standard.isa +++ b/src/arch/riscv/isa/formats/standard.isa @@ -45,7 +45,7 @@ def template ImmDeclare {{ public: /// Constructor. 
%(class_name)s(MachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override; }; @@ -64,7 +64,7 @@ def template ImmConstructor {{ def template ImmExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -90,7 +90,7 @@ def template ImmExecute {{ def template CILuiExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -118,7 +118,7 @@ def template CILuiExecute {{ def template FenceExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -169,7 +169,7 @@ def template BranchDeclare {{ public: /// Constructor. %(class_name)s(MachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( @@ -185,7 +185,7 @@ def template BranchDeclare {{ def template BranchExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -227,7 +227,7 @@ def template JumpDeclare {{ public: /// Constructor. 
%(class_name)s(MachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( @@ -286,7 +286,7 @@ def template JumpConstructor {{ def template JumpExecute {{ Fault %(class_name)s::execute( - ExecContext *xc, Trace::InstRecord *traceData) const + ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -299,8 +299,7 @@ def template JumpExecute {{ %(class_name)s::branchTarget(ThreadContext *tc) const { PCStateBase *pc_ptr = tc->pcState().clone(); - pc_ptr->as().set( - (tc->readIntReg(srcRegIdx(0).index()) + imm) & ~0x1); + pc_ptr->as().set((tc->getReg(srcRegIdx(0)) + imm) & ~0x1); return std::unique_ptr{pc_ptr}; } @@ -322,13 +321,25 @@ def template JumpExecute {{ def template CSRExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { - if (!valid) { + // We assume a riscv instruction is always run with a riscv ISA. + auto isa = static_cast(xc->tcBase()->getIsaPtr()); + auto& csr_data = isa->getCSRDataMap(); + auto& csr_masks = isa->getCSRMaskMap(); + + auto csr_data_it = csr_data.find(csr); + if (csr_data_it == csr_data.end()) { return std::make_shared( csprintf("Illegal CSR index %#x\n", csr), machInst); } + RegIndex midx = csr_data_it->second.physIndex; + const std::string& csrName = csr_data_it->second.name; + auto mask_it = csr_masks.find(csr); + RegVal maskVal = (mask_it == csr_masks.end()) ? 
mask(64) + : mask_it->second; + %(op_decl)s; %(op_rd)s; diff --git a/src/arch/riscv/isa/includes.isa b/src/arch/riscv/isa/includes.isa index 88837e5855..a5cc5e85cc 100644 --- a/src/arch/riscv/isa/includes.isa +++ b/src/arch/riscv/isa/includes.isa @@ -52,6 +52,7 @@ output header {{ #include "arch/riscv/insts/standard.hh" #include "arch/riscv/insts/static_inst.hh" #include "arch/riscv/insts/unknown.hh" +#include "arch/riscv/interrupts.hh" #include "cpu/static_inst.hh" #include "mem/packet.hh" #include "mem/request.hh" diff --git a/src/arch/riscv/isa/operands.isa b/src/arch/riscv/isa/operands.isa index 4f0c3ed6f8..72d8f81bca 100644 --- a/src/arch/riscv/isa/operands.isa +++ b/src/arch/riscv/isa/operands.isa @@ -46,7 +46,7 @@ let {{ @overrideInOperand def regId(self): return f'(({self.reg_spec}) == 0) ? RegId() : ' \ - f'RegId({self.reg_class}, {self.reg_spec})' + f'{self.reg_class}[{self.reg_spec}]' }}; def operands {{ diff --git a/src/arch/riscv/linux/fs_workload.cc b/src/arch/riscv/linux/fs_workload.cc index 2e922451c1..4a4a3812ec 100644 --- a/src/arch/riscv/linux/fs_workload.cc +++ b/src/arch/riscv/linux/fs_workload.cc @@ -63,7 +63,7 @@ FsLinux::initState() delete dtb_file; for (auto *tc: system->threads) { - tc->setIntReg(11, params().dtb_addr); + tc->setReg(int_reg::A1, params().dtb_addr); } } else { warn("No DTB file specified\n"); diff --git a/src/arch/riscv/linux/fs_workload.hh b/src/arch/riscv/linux/fs_workload.hh index cb29beeafc..1dc704d906 100644 --- a/src/arch/riscv/linux/fs_workload.hh +++ b/src/arch/riscv/linux/fs_workload.hh @@ -51,7 +51,8 @@ class FsLinux : public KernelWorkload setSystem(System *sys) override { KernelWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } ByteOrder byteOrder() const override { return ByteOrder::little; } diff --git a/src/arch/riscv/linux/linux.hh b/src/arch/riscv/linux/linux.hh index 9c99d1b5d0..b0721836a5 100644 --- 
a/src/arch/riscv/linux/linux.hh +++ b/src/arch/riscv/linux/linux.hh @@ -94,20 +94,20 @@ class RiscvLinux64 : public RiscvLinux, public OpenFlagTable static constexpr int TGT_O_TRUNC = 0x000200; //!< O_TRUNC static constexpr int TGT_O_APPEND = 0x000400; //!< O_APPEND static constexpr int TGT_O_NONBLOCK = 0x000800; //!< O_NONBLOCK - static constexpr int TGT_O_SYNC = 0x001000; //!< O_SYNC + static constexpr int TGT_O_SYNC = 0x101000; //!< O_SYNC static constexpr int TGT_FSYNC = 0x001000; //!< FSYNC - static constexpr int TGT_FASYNC = 0x008000; //!< FASYNC + static constexpr int TGT_FASYNC = 0x002000; //!< FASYNC // The following are not present in riscv64-unknown-elf - static constexpr int TGT_O_DSYNC = 0x010000; //!< O_DSYNC - static constexpr int TGT_O_CLOEXEC = 0x040000; //!< O_CLOEXEC + static constexpr int TGT_O_DSYNC = 0x001000; //!< O_DSYNC + static constexpr int TGT_O_CLOEXEC = 0x080000; //!< O_CLOEXEC static constexpr int TGT_O_NOINHERIT = 0x040000; //!< O_NOINHERIT - static constexpr int TGT_O_DIRECT = 0x080000; //!< O_DIRECT - static constexpr int TGT_O_NOFOLLOW = 0x100000; //!< O_NOFOLLOW - static constexpr int TGT_O_DIRECTORY = 0x200000; //!< O_DIRECTORY + static constexpr int TGT_O_DIRECT = 0x004000; //!< O_DIRECT + static constexpr int TGT_O_NOFOLLOW = 0x020000; //!< O_NOFOLLOW + static constexpr int TGT_O_DIRECTORY = 0x010000; //!< O_DIRECTORY // The following are not defined by riscv64-unknown-elf - static constexpr int TGT_O_LARGEFILE = 0x020000; //!< O_LARGEFILE - static constexpr int TGT_O_NOATIME = 0x800000; //!< O_NOATIME - static constexpr int TGT_O_PATH = 0x400000; //!< O_PATH + static constexpr int TGT_O_LARGEFILE = 0x000000; //!< O_LARGEFILE + static constexpr int TGT_O_NOATIME = 0x040000; //!< O_NOATIME + static constexpr int TGT_O_PATH = 0x200000; //!< O_PATH //@} // Only defined in riscv-unknown-elf for proxy kernel and not linux kernel @@ -203,9 +203,9 @@ class RiscvLinux64 : public RiscvLinux, public OpenFlagTable { 
ctc->getIsaPtr()->copyRegsFrom(ptc); if (flags & TGT_CLONE_SETTLS) - ctc->setIntReg(RiscvISA::ThreadPointerReg, tls); + ctc->setReg(RiscvISA::ThreadPointerReg, tls); if (stack) - ctc->setIntReg(RiscvISA::StackPointerReg, stack); + ctc->setReg(RiscvISA::StackPointerReg, stack); } }; @@ -372,7 +372,7 @@ class RiscvLinux32 : public RiscvLinux, public OpenFlagTable { ctc->getIsaPtr()->copyRegsFrom(ptc); if (stack) - ctc->setIntReg(RiscvISA::StackPointerReg, stack); + ctc->setReg(RiscvISA::StackPointerReg, stack); } }; diff --git a/src/arch/riscv/linux/se_workload.cc b/src/arch/riscv/linux/se_workload.cc index dd9a8a2129..dac28071f4 100644 --- a/src/arch/riscv/linux/se_workload.cc +++ b/src/arch/riscv/linux/se_workload.cc @@ -86,7 +86,7 @@ EmuLinux::syscall(ThreadContext *tc) // This will move into the base SEWorkload function at some point. process->Process::syscall(tc); - RegVal num = tc->readIntReg(RiscvISA::SyscallNumReg); + RegVal num = tc->getReg(RiscvISA::SyscallNumReg); if (dynamic_cast(process)) syscallDescs64.get(num)->doSyscall(tc); else diff --git a/src/arch/riscv/process.cc b/src/arch/riscv/process.cc index cbd13b00e7..7c91b92217 100644 --- a/src/arch/riscv/process.cc +++ b/src/arch/riscv/process.cc @@ -244,7 +244,7 @@ RiscvProcess::argsInit(int pageSize) } ThreadContext *tc = system->threads[contextIds[0]]; - tc->setIntReg(StackPointerReg, memState->getStackMin()); + tc->setReg(StackPointerReg, memState->getStackMin()); tc->pcState(getStartPC()); memState->setStackMin(roundDown(memState->getStackMin(), pageSize)); diff --git a/src/arch/riscv/reg_abi.cc b/src/arch/riscv/reg_abi.cc index 977bdff8e7..b9827f74cf 100644 --- a/src/arch/riscv/reg_abi.cc +++ b/src/arch/riscv/reg_abi.cc @@ -26,6 +26,7 @@ */ #include "arch/riscv/reg_abi.hh" +#include "arch/riscv/regs/int.hh" namespace gem5 { @@ -33,7 +34,10 @@ namespace gem5 namespace RiscvISA { -const std::vector RegABI64::ArgumentRegs = {10, 11, 12, 13, 14, 15, 16}; +const std::vector RegABI64::ArgumentRegs = { + 
int_reg::A0, int_reg::A1, int_reg::A2, int_reg::A3, + int_reg::A4, int_reg::A5, int_reg::A6 +}; } // namespace RiscvISA } // namespace gem5 diff --git a/src/arch/riscv/reg_abi.hh b/src/arch/riscv/reg_abi.hh index 91881e9793..3419c31222 100644 --- a/src/arch/riscv/reg_abi.hh +++ b/src/arch/riscv/reg_abi.hh @@ -41,7 +41,7 @@ namespace RiscvISA //FIXME RISCV needs to handle 64 bit arguments in its 32 bit ISA. struct RegABI64 : public GenericSyscallABI64 { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; } // namespace RiscvISA diff --git a/src/arch/riscv/regs/float.hh b/src/arch/riscv/regs/float.hh index 78b4ca30d2..1654bdb627 100644 --- a/src/arch/riscv/regs/float.hh +++ b/src/arch/riscv/regs/float.hh @@ -54,6 +54,8 @@ #include #include "base/bitfield.hh" +#include "cpu/reg_class.hh" +#include "debug/FloatRegs.hh" namespace gem5 { @@ -105,9 +107,98 @@ static constexpr freg_t freg(float32_t f) { return {boxF32(f.v)}; } static constexpr freg_t freg(float64_t f) { return f; } static constexpr freg_t freg(uint_fast16_t f) { return {f}; } -const int NumFloatRegs = 32; +namespace float_reg +{ -const std::vector FloatRegNames = { +enum : RegIndex +{ + _Ft0Idx, + _Ft1Idx, + _Ft2Idx, + _Ft3Idx, + _Ft4Idx, + _Ft5Idx, + _Ft6Idx, + _Ft7Idx, + + _Fs0Idx, + _Fs1Idx, + + _Fa0Idx, + _Fa1Idx, + _Fa2Idx, + _Fa3Idx, + _Fa4Idx, + _Fa5Idx, + _Fa6Idx, + _Fa7Idx, + + _Fs2Idx, + _Fs3Idx, + _Fs4Idx, + _Fs5Idx, + _Fs6Idx, + _Fs7Idx, + _Fs8Idx, + _Fs9Idx, + _Fs10Idx, + _Fs11Idx, + + _Ft8Idx, + _Ft9Idx, + _Ft10Idx, + _Ft11Idx, + + NumRegs +}; + +} // namespace float_reg + +inline constexpr RegClass floatRegClass(FloatRegClass, FloatRegClassName, + float_reg::NumRegs, debug::FloatRegs); + +namespace float_reg +{ + +inline constexpr RegId + Ft0 = floatRegClass[_Ft0Idx], + Ft1 = floatRegClass[_Ft1Idx], + Ft2 = floatRegClass[_Ft2Idx], + Ft3 = floatRegClass[_Ft3Idx], + Ft4 = floatRegClass[_Ft4Idx], + Ft5 = floatRegClass[_Ft5Idx], + Ft6 = floatRegClass[_Ft6Idx], + 
Ft7 = floatRegClass[_Ft7Idx], + + Fs0 = floatRegClass[_Fs0Idx], + Fs1 = floatRegClass[_Fs1Idx], + + Fa0 = floatRegClass[_Fa0Idx], + Fa1 = floatRegClass[_Fa1Idx], + Fa2 = floatRegClass[_Fa2Idx], + Fa3 = floatRegClass[_Fa3Idx], + Fa4 = floatRegClass[_Fa4Idx], + Fa5 = floatRegClass[_Fa5Idx], + Fa6 = floatRegClass[_Fa6Idx], + Fa7 = floatRegClass[_Fa7Idx], + + Fs2 = floatRegClass[_Fs2Idx], + Fs3 = floatRegClass[_Fs3Idx], + Fs4 = floatRegClass[_Fs4Idx], + Fs5 = floatRegClass[_Fs5Idx], + Fs6 = floatRegClass[_Fs6Idx], + Fs7 = floatRegClass[_Fs7Idx], + Fs8 = floatRegClass[_Fs8Idx], + Fs9 = floatRegClass[_Fs9Idx], + Fs10 = floatRegClass[_Fs10Idx], + Fs11 = floatRegClass[_Fs11Idx], + + Ft8 = floatRegClass[_Ft8Idx], + Ft9 = floatRegClass[_Ft9Idx], + Ft10 = floatRegClass[_Ft10Idx], + Ft11 = floatRegClass[_Ft11Idx]; + +const std::vector RegNames = { "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", "fs0", "fs1", "fa0", "fa1", @@ -118,6 +209,8 @@ const std::vector FloatRegNames = { "ft8", "ft9", "ft10", "ft11" }; +} // namespace float_reg + } // namespace RiscvISA } // namespace gem5 diff --git a/src/arch/riscv/regs/int.hh b/src/arch/riscv/regs/int.hh index ef8f1418cd..4ac01c60c1 100644 --- a/src/arch/riscv/regs/int.hh +++ b/src/arch/riscv/regs/int.hh @@ -49,27 +49,80 @@ #include #include +#include "cpu/reg_class.hh" +#include "debug/IntRegs.hh" + namespace gem5 { namespace RiscvISA { -const int NumIntArchRegs = 32; -const int NumMicroIntRegs = 1; -const int NumIntRegs = NumIntArchRegs + NumMicroIntRegs; +namespace int_reg +{ -// Semantically meaningful register indices -const int ReturnAddrReg = 1; -const int StackPointerReg = 2; -const int ThreadPointerReg = 4; -const int ReturnValueReg = 10; -const std::vector ArgumentRegs = {10, 11, 12, 13, 14, 15, 16, 17}; -const int AMOTempReg = 32; +enum : RegIndex +{ + _ZeroIdx, _RaIdx, _SpIdx, _GpIdx, + _TpIdx, _T0Idx, _T1Idx, _T2Idx, + _S0Idx, _S1Idx, _A0Idx, _A1Idx, + _A2Idx, _A3Idx, _A4Idx, _A5Idx, + _A6Idx, _A7Idx, _S2Idx, 
_S3Idx, + _S4Idx, _S5Idx, _S6Idx, _S7Idx, + _S8Idx, _S9Idx, _S10Idx, _S11Idx, + _T3Idx, _T4Idx, _T5Idx, _T6Idx, -const int SyscallNumReg = 17; + NumArchRegs, -const std::vector IntRegNames = { + _Ureg0Idx = NumArchRegs, + + NumRegs +}; + +} // namespace int_reg + +inline constexpr RegClass intRegClass(IntRegClass, IntRegClassName, + int_reg::NumRegs, debug::IntRegs); + +namespace int_reg +{ + +inline constexpr RegId + Zero = intRegClass[_ZeroIdx], + Ra = intRegClass[_RaIdx], + Sp = intRegClass[_SpIdx], + Gp = intRegClass[_GpIdx], + Tp = intRegClass[_TpIdx], + T0 = intRegClass[_T0Idx], + T1 = intRegClass[_T1Idx], + T2 = intRegClass[_T2Idx], + S0 = intRegClass[_S0Idx], + S1 = intRegClass[_S1Idx], + A0 = intRegClass[_A0Idx], + A1 = intRegClass[_A1Idx], + A2 = intRegClass[_A2Idx], + A3 = intRegClass[_A3Idx], + A4 = intRegClass[_A4Idx], + A5 = intRegClass[_A5Idx], + A6 = intRegClass[_A6Idx], + A7 = intRegClass[_A7Idx], + S2 = intRegClass[_S2Idx], + S3 = intRegClass[_S3Idx], + S4 = intRegClass[_S4Idx], + S5 = intRegClass[_S5Idx], + S6 = intRegClass[_S6Idx], + S7 = intRegClass[_S7Idx], + S8 = intRegClass[_S8Idx], + S9 = intRegClass[_S9Idx], + S10 = intRegClass[_S10Idx], + S11 = intRegClass[_S11Idx], + T3 = intRegClass[_T3Idx], + T4 = intRegClass[_T4Idx], + T5 = intRegClass[_T5Idx], + T6 = intRegClass[_T6Idx], + Ureg0 = intRegClass[_Ureg0Idx]; + +const std::vector RegNames = { "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "s0", "s1", "a0", "a1", @@ -80,6 +133,22 @@ const std::vector IntRegNames = { "t3", "t4", "t5", "t6" }; +} // namespace int_reg + +// Semantically meaningful register indices +inline constexpr auto + &ReturnAddrReg = int_reg::Ra, + &StackPointerReg = int_reg::Sp, + &ThreadPointerReg = int_reg::Tp, + &ReturnValueReg = int_reg::A0, + &AMOTempReg = int_reg::Ureg0, + &SyscallNumReg = int_reg::A7; + +inline constexpr RegId ArgumentRegs[] = { + int_reg::A0, int_reg::A1, int_reg::A2, int_reg::A3, + int_reg::A4, int_reg::A5, int_reg::A6, int_reg::A7 +}; + } 
// namespace RiscvISA } // namespace gem5 diff --git a/src/arch/riscv/regs/misc.hh b/src/arch/riscv/regs/misc.hh index 4eaedfebf8..5f074475c9 100644 --- a/src/arch/riscv/regs/misc.hh +++ b/src/arch/riscv/regs/misc.hh @@ -46,13 +46,15 @@ #ifndef __ARCH_RISCV_REGS_MISC_HH__ #define __ARCH_RISCV_REGS_MISC_HH__ -#include #include +#include #include "arch/generic/vec_pred_reg.hh" #include "arch/generic/vec_reg.hh" #include "base/bitunion.hh" #include "base/types.hh" +#include "cpu/reg_class.hh" +#include "debug/MiscRegs.hh" namespace gem5 { @@ -200,6 +202,9 @@ enum MiscRegIndex NUM_MISCREGS }; +inline constexpr RegClass miscRegClass(MiscRegClass, MiscRegClassName, + NUM_MISCREGS, debug::MiscRegs); + enum CSRIndex { CSR_USTATUS = 0x000, @@ -373,7 +378,7 @@ struct CSRMetadata const int physIndex; }; -const std::map CSRData = { +const std::unordered_map CSRData = { {CSR_USTATUS, {"ustatus", MISCREG_STATUS}}, {CSR_UIE, {"uie", MISCREG_IE}}, {CSR_UTVEC, {"utvec", MISCREG_UTVEC}}, @@ -557,6 +562,7 @@ BitUnion64(STATUS) Bitfield<16, 15> xs; Bitfield<14, 13> fs; Bitfield<12, 11> mpp; + Bitfield<10, 9> vs; Bitfield<8> spp; Bitfield<7> mpie; Bitfield<5> spie; @@ -607,6 +613,7 @@ const RegVal STATUS_MPRV_MASK = 1ULL << 17; const RegVal STATUS_XS_MASK = 3ULL << 15; const RegVal STATUS_FS_MASK = 3ULL << FS_OFFSET; const RegVal STATUS_MPP_MASK = 3ULL << 11; +const RegVal STATUS_VS_MASK = 3ULL << 9; const RegVal STATUS_SPP_MASK = 1ULL << 8; const RegVal STATUS_MPIE_MASK = 1ULL << 7; const RegVal STATUS_SPIE_MASK = 1ULL << 5; @@ -619,21 +626,21 @@ const RegVal MSTATUS_MASK = STATUS_SD_MASK | STATUS_SXL_MASK | STATUS_TW_MASK | STATUS_TVM_MASK | STATUS_MXR_MASK | STATUS_SUM_MASK | STATUS_MPRV_MASK | STATUS_XS_MASK | - STATUS_FS_MASK | STATUS_MPP_MASK | - STATUS_SPP_MASK | STATUS_MPIE_MASK | - STATUS_SPIE_MASK | STATUS_UPIE_MASK | - STATUS_MIE_MASK | STATUS_SIE_MASK | - STATUS_UIE_MASK; + STATUS_FS_MASK | STATUS_VS_MASK | + STATUS_MPP_MASK | STATUS_SPP_MASK | + STATUS_MPIE_MASK | 
STATUS_SPIE_MASK | + STATUS_UPIE_MASK | STATUS_MIE_MASK | + STATUS_SIE_MASK | STATUS_UIE_MASK; const RegVal SSTATUS_MASK = STATUS_SD_MASK | STATUS_UXL_MASK | STATUS_MXR_MASK | STATUS_SUM_MASK | STATUS_XS_MASK | STATUS_FS_MASK | - STATUS_SPP_MASK | STATUS_SPIE_MASK | - STATUS_UPIE_MASK | STATUS_SIE_MASK | - STATUS_UIE_MASK; + STATUS_VS_MASK | STATUS_SPP_MASK | + STATUS_SPIE_MASK | STATUS_UPIE_MASK | + STATUS_SIE_MASK | STATUS_UIE_MASK; const RegVal USTATUS_MASK = STATUS_SD_MASK | STATUS_MXR_MASK | STATUS_SUM_MASK | STATUS_XS_MASK | - STATUS_FS_MASK | STATUS_UPIE_MASK | - STATUS_UIE_MASK; + STATUS_FS_MASK | STATUS_VS_MASK | + STATUS_UPIE_MASK | STATUS_UIE_MASK; const RegVal MEI_MASK = 1ULL << 11; const RegVal SEI_MASK = 1ULL << 9; @@ -654,7 +661,7 @@ const RegVal UI_MASK = UEI_MASK | UTI_MASK | USI_MASK; const RegVal FFLAGS_MASK = (1 << FRM_OFFSET) - 1; const RegVal FRM_MASK = 0x7; -const std::map CSRMasks = { +const std::unordered_map CSRMasks = { {CSR_USTATUS, USTATUS_MASK}, {CSR_UIE, UI_MASK}, {CSR_UIP, UI_MASK}, diff --git a/src/arch/riscv/remote_gdb.cc b/src/arch/riscv/remote_gdb.cc index 50b0ba7d9c..ed700bbf8d 100644 --- a/src/arch/riscv/remote_gdb.cc +++ b/src/arch/riscv/remote_gdb.cc @@ -191,15 +191,14 @@ RemoteGDB::RiscvGdbRegCache::getRegs(ThreadContext *context) DPRINTF(GDBAcc, "getregs in remotegdb, size %lu\n", size()); // General registers - for (int i = 0; i < NumIntArchRegs; i++) - { - r.gpr[i] = context->readIntReg(i); + for (int i = 0; i < int_reg::NumArchRegs; i++) { + r.gpr[i] = context->getReg(intRegClass[i]); } r.pc = context->pcState().instAddr(); // Floating point registers - for (int i = 0; i < NumFloatRegs; i++) - r.fpu[i] = context->readFloatReg(i); + for (int i = 0; i < float_reg::NumRegs; i++) + r.fpu[i] = context->getReg(floatRegClass[i]); r.fflags = context->readMiscRegNoEffect( CSRData.at(CSR_FFLAGS).physIndex) & CSRMasks.at(CSR_FFLAGS); r.frm = context->readMiscRegNoEffect( @@ -303,13 +302,13 @@ 
RemoteGDB::RiscvGdbRegCache::setRegs(ThreadContext *context) const RegVal newVal; DPRINTF(GDBAcc, "setregs in remotegdb \n"); - for (int i = 0; i < NumIntArchRegs; i++) - context->setIntReg(i, r.gpr[i]); + for (int i = 0; i < int_reg::NumArchRegs; i++) + context->setReg(intRegClass[i], r.gpr[i]); context->pcState(r.pc); // Floating point registers - for (int i = 0; i < NumFloatRegs; i++) - context->setFloatReg(i, r.fpu[i]); + for (int i = 0; i < float_reg::NumRegs; i++) + context->setReg(floatRegClass[i], r.fpu[i]); oldVal = context->readMiscRegNoEffect( CSRData.at(CSR_FFLAGS).physIndex); diff --git a/src/arch/riscv/remote_gdb.hh b/src/arch/riscv/remote_gdb.hh index 753859fe77..f87481ece0 100644 --- a/src/arch/riscv/remote_gdb.hh +++ b/src/arch/riscv/remote_gdb.hh @@ -72,9 +72,9 @@ class RemoteGDB : public BaseRemoteGDB */ struct { - uint64_t gpr[NumIntArchRegs]; + uint64_t gpr[int_reg::NumArchRegs]; uint64_t pc; - uint64_t fpu[NumFloatRegs]; + uint64_t fpu[float_reg::NumRegs]; uint32_t fflags; uint32_t frm; uint32_t fcsr; diff --git a/src/arch/riscv/rvk.hh b/src/arch/riscv/rvk.hh new file mode 100644 index 0000000000..d4af3cda23 --- /dev/null +++ b/src/arch/riscv/rvk.hh @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2021, Markku-Juhani O. Saarinen + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ARCH_RISCV_RVK_HH__ +#define __ARCH_RISCV_RVK_HH__ + +#include + +// Standard scalar cryptography extension + +namespace gem5 +{ + +namespace RiscvISA +{ + +/** + * Ref: + * https://github.com/rvkrypto/rvkrypto-fips + */ + +const uint8_t _rvk_emu_aes_fwd_sbox[256] = { + 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, + 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, + 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, + 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, + 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, + 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, + 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, + 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, + 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, + 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, + 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, + 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, + 0x60, 0x81, 0x4F, 
0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, + 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, + 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, + 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, + 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, + 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, + 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, + 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, + 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, + 0xB0, 0x54, 0xBB, 0x16 +}; + +// AES Inverse S-Box +const uint8_t _rvk_emu_aes_inv_sbox[256] = { + 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, + 0x81, 0xF3, 0xD7, 0xFB, 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, + 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, 0x54, 0x7B, 0x94, 0x32, + 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, + 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, + 0x6D, 0x8B, 0xD1, 0x25, 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, 0x6C, 0x70, 0x48, 0x50, + 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, + 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, + 0xB8, 0xB3, 0x45, 0x06, 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, + 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, 0x3A, 0x91, 0x11, 0x41, + 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, + 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, + 0x1C, 0x75, 0xDF, 0x6E, 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, + 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, 0xFC, 0x56, 0x3E, 0x4B, + 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, + 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 
0x59, + 0x27, 0x80, 0xEC, 0x5F, 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, + 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, 0xA0, 0xE0, 0x3B, 0x4D, + 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, + 0x55, 0x21, 0x0C, 0x7D +}; + +// SM4 Forward S-Box (there is no need for an inverse S-Box) +const uint8_t _rvk_emu_sm4_sbox[256] = { + 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, + 0x28, 0xFB, 0x2C, 0x05, 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, + 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, 0x9C, 0x42, 0x50, 0xF4, + 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62, + 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, + 0x75, 0x8F, 0x3F, 0xA6, 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, + 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8, 0x68, 0x6B, 0x81, 0xB2, + 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35, + 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, + 0x01, 0x21, 0x78, 0x87, 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, + 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E, 0xEA, 0xBF, 0x8A, 0xD2, + 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1, + 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, + 0xF5, 0x8C, 0xB1, 0xE3, 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, + 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F, 0xD5, 0xDB, 0x37, 0x45, + 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51, + 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, + 0x1F, 0x10, 0x5A, 0xD8, 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, + 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, 0x89, 0x69, 0x97, 0x4A, + 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84, + 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, + 0xD7, 
0xCB, 0x39, 0x48 +}; + +inline int32_t _rvk_emu_sll_32(int32_t rs1, int32_t rs2) + { return rs1 << (rs2 & 31); } +inline int32_t _rvk_emu_srl_32(int32_t rs1, int32_t rs2) + { return (uint32_t)rs1 >> (rs2 & 31); } +inline int64_t _rvk_emu_sll_64(int64_t rs1, int64_t rs2) + { return rs1 << (rs2 & 63); } +inline int64_t _rvk_emu_srl_64(int64_t rs1, int64_t rs2) + { return (uint64_t)rs1 >> (rs2 & 63); } + +// rotate (a part of the extension). no separate intrinsic for rori +inline int32_t _rvk_emu_rol_32(int32_t rs1, int32_t rs2) + { return _rvk_emu_sll_32(rs1, rs2) | _rvk_emu_srl_32(rs1, -rs2); } +inline int32_t _rvk_emu_ror_32(int32_t rs1, int32_t rs2) + { return _rvk_emu_srl_32(rs1, rs2) | _rvk_emu_sll_32(rs1, -rs2); } + +inline int64_t _rvk_emu_rol_64(int64_t rs1, int64_t rs2) + { return _rvk_emu_sll_64(rs1, rs2) | _rvk_emu_srl_64(rs1, -rs2); } +inline int64_t _rvk_emu_ror_64(int64_t rs1, int64_t rs2) + { return _rvk_emu_srl_64(rs1, rs2) | _rvk_emu_sll_64(rs1, -rs2); } + +// brev8, rev8 +inline int32_t _rvk_emu_grev_32(int32_t rs1, int32_t rs2) +{ + uint32_t x = rs1; + int shamt = rs2 & 31; + if (shamt & 1) x = ((x & 0x55555555) << 1) | ((x & 0xAAAAAAAA) >> 1); + if (shamt & 2) x = ((x & 0x33333333) << 2) | ((x & 0xCCCCCCCC) >> 2); + if (shamt & 4) x = ((x & 0x0F0F0F0F) << 4) | ((x & 0xF0F0F0F0) >> 4); + if (shamt & 8) x = ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8); + if (shamt & 16) x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16); + return x; +} + +inline int64_t _rvk_emu_grev_64(int64_t rs1, int64_t rs2) +{ + uint64_t x = rs1; + int shamt = rs2 & 63; + if (shamt & 1) + x = ((x & 0x5555555555555555LL) << 1) | + ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); + if (shamt & 2) + x = ((x & 0x3333333333333333LL) << 2) | + ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); + if (shamt & 4) + x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | + ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); + if (shamt & 8) + x = ((x & 0x00FF00FF00FF00FFLL) << 8) | + ((x & 0xFF00FF00FF00FF00LL) >> 8); + if (shamt & 16) + x 
= ((x & 0x0000FFFF0000FFFFLL) << 16) | + ((x & 0xFFFF0000FFFF0000LL) >> 16); + if (shamt & 32) + x = ((x & 0x00000000FFFFFFFFLL) << 32) | + ((x & 0xFFFFFFFF00000000LL) >> 32); + return x; +} + +inline int32_t _rvk_emu_brev8_32(int32_t rs1) + { return _rvk_emu_grev_32(rs1, 7); } + +inline int64_t _rvk_emu_brev8_64(int64_t rs1) + { return _rvk_emu_grev_64(rs1, 7); } + +inline uint32_t _rvk_emu_shuffle32_stage(uint32_t src, + uint32_t maskL, uint32_t maskR, int N) +{ + uint32_t x = src & ~(maskL | maskR); + x |= ((src << N) & maskL) | ((src >> N) & maskR); + return x; +} + +inline int32_t _rvk_emu_shfl_32(int32_t rs1, int32_t rs2) +{ + uint32_t x = rs1; + int shamt = rs2 & 15; + + if (shamt & 8) x = _rvk_emu_shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8); + if (shamt & 4) x = _rvk_emu_shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4); + if (shamt & 2) x = _rvk_emu_shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2); + if (shamt & 1) x = _rvk_emu_shuffle32_stage(x, 0x44444444, 0x22222222, 1); + + return x; +} + +inline int32_t _rvk_emu_unshfl_32(int32_t rs1, int32_t rs2) +{ + uint32_t x = rs1; + int shamt = rs2 & 15; + + if (shamt & 1) x = _rvk_emu_shuffle32_stage(x, 0x44444444, 0x22222222, 1); + if (shamt & 2) x = _rvk_emu_shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2); + if (shamt & 4) x = _rvk_emu_shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4); + if (shamt & 8) x = _rvk_emu_shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8); + + return x; +} + +inline int32_t _rvk_emu_zip_32(int32_t rs1) + { return _rvk_emu_shfl_32(rs1, 15); } + +inline int32_t _rvk_emu_unzip_32(int32_t rs1) + { return _rvk_emu_unshfl_32(rs1, 15); } + +// Zbkc: Carry-less multiply instructions +inline int32_t _rvk_emu_clmul_32(int32_t rs1, int32_t rs2) +{ + uint32_t a = rs1, b = rs2, x = 0; + for (int i = 0; i < 32; i++) { + if ((b >> i) & 1) + x ^= a << i; + } + return x; +} + +inline int32_t _rvk_emu_clmulh_32(int32_t rs1, int32_t rs2) +{ + uint32_t a = rs1, b = rs2, x = 0; + for (int i = 1; i < 32; i++) { + if ((b 
>> i) & 1) + x ^= a >> (32-i); + } + return x; +} + +inline int64_t _rvk_emu_clmul_64(int64_t rs1, int64_t rs2) +{ + uint64_t a = rs1, b = rs2, x = 0; + + for (int i = 0; i < 64; i++) { + if ((b >> i) & 1) + x ^= a << i; + } + return x; +} + +inline int64_t _rvk_emu_clmulh_64(int64_t rs1, int64_t rs2) +{ + uint64_t a = rs1, b = rs2, x = 0; + + for (int i = 1; i < 64; i++) { + if ((b >> i) & 1) + x ^= a >> (64-i); + } + return x; +} + +// Zbkx: Crossbar permutation instructions +inline uint32_t _rvk_emu_xperm32(uint32_t rs1, uint32_t rs2, int sz_log2) +{ + uint32_t r = 0; + uint32_t sz = 1LL << sz_log2; + uint32_t mask = (1LL << sz) - 1; + for (int i = 0; i < 32; i += sz) { + uint32_t pos = ((rs2 >> i) & mask) << sz_log2; + if (pos < 32) + r |= ((rs1 >> pos) & mask) << i; + } + return r; +} + +inline int32_t _rvk_emu_xperm4_32(int32_t rs1, int32_t rs2) + { return _rvk_emu_xperm32(rs1, rs2, 2); } + +inline int32_t _rvk_emu_xperm8_32(int32_t rs1, int32_t rs2) + { return _rvk_emu_xperm32(rs1, rs2, 3); } + +inline uint64_t _rvk_emu_xperm64(uint64_t rs1, uint64_t rs2, int sz_log2) +{ + uint64_t r = 0; + uint64_t sz = 1LL << sz_log2; + uint64_t mask = (1LL << sz) - 1; + for (int i = 0; i < 64; i += sz) { + uint64_t pos = ((rs2 >> i) & mask) << sz_log2; + if (pos < 64) + r |= ((rs1 >> pos) & mask) << i; + } + return r; +} + +inline int64_t _rvk_emu_xperm4_64(int64_t rs1, int64_t rs2) + { return _rvk_emu_xperm64(rs1, rs2, 2); } + +inline int64_t _rvk_emu_xperm8_64(int64_t rs1, int64_t rs2) + { return _rvk_emu_xperm64(rs1, rs2, 3); } + +// rvk_emu internal: multiply by 0x02 in AES's GF(256) - LFSR style. +inline uint8_t _rvk_emu_aes_xtime(uint8_t x) +{ + return (x << 1) ^ ((x & 0x80) ? 
0x11B : 0x00); +} + +// rvk_emu internal: AES forward MixColumns 8->32 bits +inline uint32_t _rvk_emu_aes_fwd_mc_8(uint32_t x) +{ + uint32_t x2; + x2 = _rvk_emu_aes_xtime(x); + x = ((x ^ x2) << 24) | (x << 16) |(x << 8) | x2; + return x; +} + +// rvk_emu internal: AES forward MixColumns 32->32 bits +inline uint32_t _rvk_emu_aes_fwd_mc_32(uint32_t x) +{ + return _rvk_emu_aes_fwd_mc_8(x & 0xFF) ^ + _rvk_emu_rol_32(_rvk_emu_aes_fwd_mc_8((x >> 8) & 0xFF), 8) ^ + _rvk_emu_rol_32(_rvk_emu_aes_fwd_mc_8((x >> 16) & 0xFF), 16) ^ + _rvk_emu_rol_32(_rvk_emu_aes_fwd_mc_8((x >> 24) & 0xFF), 24); +} + +// rvk_emu internal: AES inverse MixColumns 8->32 bits +inline uint32_t _rvk_emu_aes_inv_mc_8(uint32_t x) +{ + uint32_t x2, x4, x8; + + x2 = _rvk_emu_aes_xtime(x); + x4 = _rvk_emu_aes_xtime(x2); + x8 = _rvk_emu_aes_xtime(x4); + + x = ((x ^ x2 ^ x8) << 24) | + ((x ^ x4 ^ x8) << 16) | + ((x ^ x8) << 8) | + (x2 ^ x4 ^ x8); + + return x; +} + +// rvk_emu internal: AES inverse MixColumns 32->32 bits +inline uint32_t _rvk_emu_aes_inv_mc_32(uint32_t x) +{ + return _rvk_emu_aes_inv_mc_8(x & 0xFF) ^ + _rvk_emu_rol_32(_rvk_emu_aes_inv_mc_8((x >> 8) & 0xFF), 8) ^ + _rvk_emu_rol_32(_rvk_emu_aes_inv_mc_8((x >> 16) & 0xFF), 16) ^ + _rvk_emu_rol_32(_rvk_emu_aes_inv_mc_8((x >> 24) & 0xFF), 24); +} + +// Zknd: NIST Suite: AES Decryption +inline int32_t _rvk_emu_aes32dsi(int32_t rs1, int32_t rs2, uint8_t bs) +{ + int32_t x; + + bs = (bs & 3) << 3; + x = (rs2 >> bs) & 0xFF; + x = _rvk_emu_aes_inv_sbox[x]; + + return rs1 ^ _rvk_emu_rol_32(x, bs); +} + +inline int32_t _rvk_emu_aes32dsmi(int32_t rs1, int32_t rs2, uint8_t bs) +{ + int32_t x; + + bs = (bs & 3) << 3; + x = (rs2 >> bs) & 0xFF; + x = _rvk_emu_aes_inv_sbox[x]; + x = _rvk_emu_aes_inv_mc_8(x); + + return rs1 ^ _rvk_emu_rol_32(x, bs); +} + +inline int64_t _rvk_emu_aes64ds(int64_t rs1, int64_t rs2) +{ + return ((int64_t) _rvk_emu_aes_inv_sbox[rs1 & 0xFF]) | + (((int64_t) _rvk_emu_aes_inv_sbox[(rs2 >> 40) & 0xFF]) << 8) | + (((int64_t) 
_rvk_emu_aes_inv_sbox[(rs2 >> 16) & 0xFF]) << 16) | + (((int64_t) _rvk_emu_aes_inv_sbox[(rs1 >> 56) & 0xFF]) << 24) | + (((int64_t) _rvk_emu_aes_inv_sbox[(rs1 >> 32) & 0xFF]) << 32) | + (((int64_t) _rvk_emu_aes_inv_sbox[(rs1 >> 8) & 0xFF]) << 40) | + (((int64_t) _rvk_emu_aes_inv_sbox[(rs2 >> 48) & 0xFF]) << 48) | + (((int64_t) _rvk_emu_aes_inv_sbox[(rs2 >> 24) & 0xFF]) << 56); +} + +inline int64_t _rvk_emu_aes64im(int64_t rs1) +{ + return ((int64_t) _rvk_emu_aes_inv_mc_32(rs1)) | + (((int64_t) _rvk_emu_aes_inv_mc_32(rs1 >> 32)) << 32); +} + +inline int64_t _rvk_emu_aes64dsm(int64_t rs1, int64_t rs2) +{ + int64_t x; + + x = _rvk_emu_aes64ds(rs1, rs2); + x = _rvk_emu_aes64im(x); + return x; +} + +inline int64_t _rvk_emu_aes64ks1i(int64_t rs1, int rnum) +{ + const uint8_t aes_rcon[] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 + }; + + uint32_t t, rc; + + t = rs1 >> 32; + rc = 0; + + if (rnum < 10) { + t = _rvk_emu_ror_32(t, 8); + rc = aes_rcon[rnum]; + } + + t = ((uint32_t) _rvk_emu_aes_fwd_sbox[t & 0xFF]) | + (((uint32_t) _rvk_emu_aes_fwd_sbox[(t >> 8) & 0xFF]) << 8) | + (((uint32_t) _rvk_emu_aes_fwd_sbox[(t >> 16) & 0xFF]) << 16) | + (((uint32_t) _rvk_emu_aes_fwd_sbox[(t >> 24) & 0xFF]) << 24); + + t ^= rc; + + return ((int64_t) t) | (((int64_t) t) << 32); +} + +inline int64_t _rvk_emu_aes64ks2(int64_t rs1, int64_t rs2) +{ + uint32_t t; + + t = (rs1 >> 32) ^ (rs2 & 0xFFFFFFFF); + + return ((int64_t) t) ^ + (((int64_t) t) << 32) ^ (rs2 & 0xFFFFFFFF00000000ULL); +} + +inline int32_t _rvk_emu_aes32esi(int32_t rs1, int32_t rs2, uint8_t bs) +{ + int32_t x; + + bs = (bs & 3) << 3; + x = (rs2 >> bs) & 0xFF; + x = _rvk_emu_aes_fwd_sbox[x]; + + return rs1 ^ _rvk_emu_rol_32(x, bs); +} + +inline int32_t _rvk_emu_aes32esmi(int32_t rs1, int32_t rs2, uint8_t bs) +{ + uint32_t x; + + bs = (bs & 3) << 3; + x = (rs2 >> bs) & 0xFF; + x = _rvk_emu_aes_fwd_sbox[x]; + x = _rvk_emu_aes_fwd_mc_8(x); + + return rs1 ^ _rvk_emu_rol_32(x, bs); +} + +inline int64_t 
_rvk_emu_aes64es(int64_t rs1, int64_t rs2) +{ + return ((int64_t) _rvk_emu_aes_fwd_sbox[rs1 & 0xFF]) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs1 >> 40) & 0xFF]) << 8) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs2 >> 16) & 0xFF]) << 16) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs2 >> 56) & 0xFF]) << 24) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs1 >> 32) & 0xFF]) << 32) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs2 >> 8) & 0xFF]) << 40) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs2 >> 48) & 0xFF]) << 48) | + (((int64_t) _rvk_emu_aes_fwd_sbox[(rs1 >> 24) & 0xFF]) << 56); +} + +inline int64_t _rvk_emu_aes64esm(int64_t rs1, int64_t rs2) +{ + int64_t x; + + x = _rvk_emu_aes64es(rs1, rs2); + x = ((int64_t) _rvk_emu_aes_fwd_mc_32(x)) | + (((int64_t) _rvk_emu_aes_fwd_mc_32(x >> 32)) << 32); + return x; +} + +inline int32_t _rvk_emu_sha256sig0(int32_t rs1) +{ + int32_t x; + + x = _rvk_emu_ror_32(rs1, 7) ^ _rvk_emu_ror_32(rs1, 18) ^ + _rvk_emu_srl_32(rs1, 3); + return (int32_t) x; +} + +inline int32_t _rvk_emu_sha256sig1(int32_t rs1) +{ + int32_t x; + + x = _rvk_emu_ror_32(rs1, 17) ^ _rvk_emu_ror_32(rs1, 19) ^ + _rvk_emu_srl_32(rs1, 10); + return (int32_t) x; +} + +inline int32_t _rvk_emu_sha256sum0(int32_t rs1) +{ + int32_t x; + + x = _rvk_emu_ror_32(rs1, 2) ^ _rvk_emu_ror_32(rs1, 13) ^ + _rvk_emu_ror_32(rs1, 22); + return (int32_t) x; +} + +inline int32_t _rvk_emu_sha256sum1(int32_t rs1) +{ + int32_t x; + + x = _rvk_emu_ror_32(rs1, 6) ^ _rvk_emu_ror_32(rs1, 11) ^ + _rvk_emu_ror_32(rs1, 25); + return (int32_t) x; +} + +inline int64_t _rvk_emu_sha512sig0(int64_t rs1) +{ + return _rvk_emu_ror_64(rs1, 1) ^ _rvk_emu_ror_64(rs1, 8) ^ + _rvk_emu_srl_64(rs1,7); +} + +inline int64_t _rvk_emu_sha512sig1(int64_t rs1) +{ + return _rvk_emu_ror_64(rs1, 19) ^ _rvk_emu_ror_64(rs1, 61) ^ + _rvk_emu_srl_64(rs1, 6); +} + +inline int64_t _rvk_emu_sha512sum0(int64_t rs1) +{ + return _rvk_emu_ror_64(rs1, 28) ^ _rvk_emu_ror_64(rs1, 34) ^ + _rvk_emu_ror_64(rs1, 39); +} + +inline int64_t 
_rvk_emu_sha512sum1(int64_t rs1) +{ + return _rvk_emu_ror_64(rs1, 14) ^ _rvk_emu_ror_64(rs1, 18) ^ + _rvk_emu_ror_64(rs1, 41); +} + +// Zksed: ShangMi Suite: SM4 Block Cipher Instructions +inline int32_t _rvk_emu_sm4ed(int32_t rs1, int32_t rs2, uint8_t bs) +{ + int32_t x; + + bs = (bs & 3) << 3; + x = (rs2 >> bs) & 0xFF; + x = _rvk_emu_sm4_sbox[x]; + + x = x ^ (x << 8) ^ (x << 2) ^ (x << 18) ^ + ((x & 0x3F) << 26) ^ ((x & 0xC0) << 10); + x = rs1 ^ _rvk_emu_rol_32(x, bs); + return (int32_t) x; +} + +inline int32_t _rvk_emu_sm4ks(int32_t rs1, int32_t rs2, uint8_t bs) +{ + int32_t x; + + bs = (bs & 3) << 3; + x = (rs2 >> bs) & 0xFF; + x = _rvk_emu_sm4_sbox[x]; + + x = x ^ ((x & 0x07) << 29) ^ ((x & 0xFE) << 7) ^ + ((x & 1) << 23) ^ ((x & 0xF8) << 13); + x = rs1 ^ _rvk_emu_rol_32(x, bs); + return (int32_t) x; +} + +// Zksh: ShangMi Suite: SM3 Hash Function Instructions +inline int32_t _rvk_emu_sm3p0(int32_t rs1) +{ + int32_t x; + + x = rs1 ^ _rvk_emu_rol_32(rs1, 9) ^ _rvk_emu_rol_32(rs1, 17); + return (int32_t) x; +} + +inline int32_t _rvk_emu_sm3p1(int32_t rs1) +{ + int32_t x; + + x = rs1 ^ _rvk_emu_rol_32(rs1, 15) ^ _rvk_emu_rol_32(rs1, 23); + return (int32_t) x; +} + +} // namespace RiscvISA +} // namespace gem5 + +#endif // __ARCH_RISCV_UTILITY_HH__ diff --git a/src/arch/riscv/se_workload.hh b/src/arch/riscv/se_workload.hh index 484803e3c7..6f7c2edb70 100644 --- a/src/arch/riscv/se_workload.hh +++ b/src/arch/riscv/se_workload.hh @@ -44,7 +44,7 @@ namespace RiscvISA class SEWorkload : public gem5::SEWorkload { public: - using Params = RiscvSEWorkloadParams; + PARAMS(RiscvSEWorkload); SEWorkload(const Params &p, Addr page_shift) : gem5::SEWorkload(p, page_shift) @@ -54,7 +54,8 @@ class SEWorkload : public gem5::SEWorkload setSystem(System *sys) override { gem5::SEWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } loader::Arch getArch() const override { return loader::Riscv64; } @@ 
-77,10 +78,10 @@ struct Result { if (ret.successful()) { // no error - tc->setIntReg(RiscvISA::ReturnValueReg, ret.returnValue()); + tc->setReg(RiscvISA::ReturnValueReg, ret.returnValue()); } else { // got an error, return details - tc->setIntReg(RiscvISA::ReturnValueReg, ret.encodedValue()); + tc->setReg(RiscvISA::ReturnValueReg, ret.encodedValue()); } } }; diff --git a/src/arch/riscv/utility.hh b/src/arch/riscv/utility.hh index d1355fcb9b..3bd34c4801 100644 --- a/src/arch/riscv/utility.hh +++ b/src/arch/riscv/utility.hh @@ -55,6 +55,7 @@ #include "cpu/reg_class.hh" #include "cpu/static_inst.hh" #include "cpu/thread_context.hh" +#include "rvk.hh" namespace gem5 { @@ -106,7 +107,7 @@ inline std::string registerName(RegId reg) { if (reg.is(IntRegClass)) { - if (reg.index() >= NumIntArchRegs) { + if (reg.index() >= int_reg::NumArchRegs) { /* * This should only happen if a instruction is being speculatively * executed along a not-taken branch, and if that instruction's @@ -120,14 +121,19 @@ registerName(RegId reg) str << "?? (x" << reg.index() << ')'; return str.str(); } - return IntRegNames[reg.index()]; - } else { - if (reg.index() >= NumFloatRegs) { + return int_reg::RegNames[reg.index()]; + } else if (reg.is(FloatRegClass)) { + if (reg.index() >= float_reg::NumRegs) { std::stringstream str; str << "?? (f" << reg.index() << ')'; return str.str(); } - return FloatRegNames[reg.index()]; + return float_reg::RegNames[reg.index()]; + } else { + /* It must be an InvalidRegClass, in RISC-V we should treat it as a + * zero register for the disassembler to work correctly. + */ + return int_reg::RegNames[reg.index()]; } } diff --git a/src/arch/sparc/AtomicSimpleCPU.py b/src/arch/sparc/AtomicSimpleCPU.py deleted file mode 100644 index 6f57f88064..0000000000 --- a/src/arch/sparc/AtomicSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.SparcCPU import SparcAtomicSimpleCPU - -AtomicSimpleCPU = SparcAtomicSimpleCPU diff --git a/src/arch/sparc/NonCachingSimpleCPU.py b/src/arch/sparc/NonCachingSimpleCPU.py deleted file mode 100644 index 5d8b5ffcdb..0000000000 --- a/src/arch/sparc/NonCachingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.SparcCPU import SparcNonCachingSimpleCPU - -NonCachingSimpleCPU = SparcNonCachingSimpleCPU diff --git a/src/arch/sparc/O3CPU.py b/src/arch/sparc/O3CPU.py deleted file mode 100644 index 486c6c8bc7..0000000000 --- a/src/arch/sparc/O3CPU.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2021 Google, Inc. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from m5.objects.SparcCPU import SparcO3CPU - -O3CPU = SparcO3CPU - -# Deprecated -DerivO3CPU = O3CPU diff --git a/src/arch/sparc/SConscript b/src/arch/sparc/SConscript index 71770b3f7c..a721c4ae56 100644 --- a/src/arch/sparc/SConscript +++ b/src/arch/sparc/SConscript @@ -28,6 +28,9 @@ Import('*') +if env['USE_SPARC_ISA']: + env.TagImplies('sparc isa', 'gem5 lib') + Source('asi.cc', tags='sparc isa') Source('decoder.cc', tags='sparc isa') Source('faults.cc', tags='sparc isa') @@ -38,6 +41,7 @@ Source('linux/syscalls.cc', tags='sparc isa') Source('nativetrace.cc', tags='sparc isa') Source('pagetable.cc', tags='sparc isa') Source('process.cc', tags='sparc isa') +Source('regs/int.cc', tags='sparc isa') Source('remote_gdb.cc', tags='sparc isa') Source('se_workload.cc', tags='sparc isa') Source('tlb.cc', tags='sparc isa') @@ -57,10 +61,6 @@ SimObject('SparcSeWorkload.py', sim_objects=[ SimObject('SparcTLB.py', sim_objects=['SparcTLB'], tags='sparc isa') SimObject('SparcCPU.py', sim_objects=[], tags='sparc isa') -SimObject('AtomicSimpleCPU.py', sim_objects=[], tags='sparc isa') -SimObject('TimingSimpleCPU.py', sim_objects=[], tags='sparc isa') -SimObject('NonCachingSimpleCPU.py', sim_objects=[], tags='sparc isa') -SimObject('O3CPU.py', sim_objects=[], tags='sparc isa') DebugFlag('Sparc', "Generic SPARC ISA stuff", tags='sparc isa') DebugFlag('RegisterWindows', "Register window manipulation", tags='sparc isa') diff --git a/src/arch/sparc/SConsopts b/src/arch/sparc/SConsopts index 48fb4a6e62..917485af9c 100644 --- a/src/arch/sparc/SConsopts +++ b/src/arch/sparc/SConsopts @@ -1,7 +1,4 @@ -# -*- mode:python -*- - -# Copyright (c) 2006 The Regents of The University of Michigan -# All rights reserved. +# Copyright 2021 Google, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,5 +24,5 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Import('*') - -main.Append(ALL_ISAS=['sparc']) +sticky_vars.Add(BoolVariable('USE_SPARC_ISA', 'Enable SPARC ISA support', + False)) diff --git a/src/arch/sparc/SparcCPU.py b/src/arch/sparc/SparcCPU.py index b6c33059df..44d9ceed08 100644 --- a/src/arch/sparc/SparcCPU.py +++ b/src/arch/sparc/SparcCPU.py @@ -27,25 +27,35 @@ from m5.objects.BaseAtomicSimpleCPU import BaseAtomicSimpleCPU from m5.objects.BaseNonCachingSimpleCPU import BaseNonCachingSimpleCPU from m5.objects.BaseTimingSimpleCPU import BaseTimingSimpleCPU from m5.objects.BaseO3CPU import BaseO3CPU +from m5.objects.BaseMinorCPU import BaseMinorCPU from m5.objects.SparcDecoder import SparcDecoder from m5.objects.SparcMMU import SparcMMU from m5.objects.SparcInterrupts import SparcInterrupts from m5.objects.SparcISA import SparcISA + class SparcCPU: ArchDecoder = SparcDecoder ArchMMU = SparcMMU ArchInterrupts = SparcInterrupts ArchISA = SparcISA + class SparcAtomicSimpleCPU(BaseAtomicSimpleCPU, SparcCPU): mmu = SparcMMU() + class SparcNonCachingSimpleCPU(BaseNonCachingSimpleCPU, SparcCPU): mmu = SparcMMU() + class SparcTimingSimpleCPU(BaseTimingSimpleCPU, SparcCPU): mmu = SparcMMU() + class SparcO3CPU(BaseO3CPU, SparcCPU): mmu = SparcMMU() + + +class SparcMinorCPU(BaseMinorCPU, SparcCPU): + mmu = SparcMMU() diff --git a/src/arch/sparc/SparcDecoder.py b/src/arch/sparc/SparcDecoder.py index cc91948372..794d4df476 100644 --- a/src/arch/sparc/SparcDecoder.py +++ b/src/arch/sparc/SparcDecoder.py @@ -25,7 +25,8 @@ from m5.objects.InstDecoder import InstDecoder + class SparcDecoder(InstDecoder): - type = 'SparcDecoder' - cxx_class = 'gem5::SparcISA::Decoder' + type = "SparcDecoder" + cxx_class = "gem5::SparcISA::Decoder" cxx_header = "arch/sparc/decoder.hh" diff --git a/src/arch/sparc/SparcFsWorkload.py b/src/arch/sparc/SparcFsWorkload.py index 0d6bb543e0..ba70dcfa59 100644 --- a/src/arch/sparc/SparcFsWorkload.py +++ b/src/arch/sparc/SparcFsWorkload.py @@ -28,7 +28,8 @@ from m5.params import * from 
m5.objects.Workload import Workload + class SparcFsWorkload(Workload): - type = 'SparcFsWorkload' - cxx_header = 'arch/sparc/fs_workload.hh' - cxx_class = 'gem5::SparcISA::FsWorkload' + type = "SparcFsWorkload" + cxx_header = "arch/sparc/fs_workload.hh" + cxx_class = "gem5::SparcISA::FsWorkload" diff --git a/src/arch/sparc/SparcISA.py b/src/arch/sparc/SparcISA.py index 88ea301ec5..a2746b1461 100644 --- a/src/arch/sparc/SparcISA.py +++ b/src/arch/sparc/SparcISA.py @@ -35,7 +35,8 @@ from m5.objects.BaseISA import BaseISA + class SparcISA(BaseISA): - type = 'SparcISA' - cxx_class = 'gem5::SparcISA::ISA' + type = "SparcISA" + cxx_class = "gem5::SparcISA::ISA" cxx_header = "arch/sparc/isa.hh" diff --git a/src/arch/sparc/SparcInterrupts.py b/src/arch/sparc/SparcInterrupts.py index 00d6f84390..d4fa437ca5 100644 --- a/src/arch/sparc/SparcInterrupts.py +++ b/src/arch/sparc/SparcInterrupts.py @@ -26,7 +26,8 @@ from m5.objects.BaseInterrupts import BaseInterrupts + class SparcInterrupts(BaseInterrupts): - type = 'SparcInterrupts' - cxx_class = 'gem5::SparcISA::Interrupts' - cxx_header = 'arch/sparc/interrupts.hh' + type = "SparcInterrupts" + cxx_class = "gem5::SparcISA::Interrupts" + cxx_header = "arch/sparc/interrupts.hh" diff --git a/src/arch/sparc/SparcMMU.py b/src/arch/sparc/SparcMMU.py index 671ece64ad..1202594fc6 100644 --- a/src/arch/sparc/SparcMMU.py +++ b/src/arch/sparc/SparcMMU.py @@ -40,9 +40,10 @@ from m5.params import * from m5.objects.BaseMMU import BaseMMU from m5.objects.SparcTLB import SparcTLB + class SparcMMU(BaseMMU): - type = 'SparcMMU' - cxx_class = 'gem5::SparcISA::MMU' - cxx_header = 'arch/sparc/mmu.hh' + type = "SparcMMU" + cxx_class = "gem5::SparcISA::MMU" + cxx_header = "arch/sparc/mmu.hh" itb = SparcTLB(entry_type="instruction") dtb = SparcTLB(entry_type="data") diff --git a/src/arch/sparc/SparcNativeTrace.py b/src/arch/sparc/SparcNativeTrace.py index 6437763fa3..0a93126f2c 100644 --- a/src/arch/sparc/SparcNativeTrace.py +++ 
b/src/arch/sparc/SparcNativeTrace.py @@ -29,7 +29,8 @@ from m5.params import * from m5.objects.CPUTracers import NativeTrace + class SparcNativeTrace(NativeTrace): - type = 'SparcNativeTrace' - cxx_class = 'gem5::Trace::SparcNativeTrace' - cxx_header = 'arch/sparc/nativetrace.hh' + type = "SparcNativeTrace" + cxx_class = "gem5::trace::SparcNativeTrace" + cxx_header = "arch/sparc/nativetrace.hh" diff --git a/src/arch/sparc/SparcSeWorkload.py b/src/arch/sparc/SparcSeWorkload.py index 700d0b8a8c..3dbd341801 100644 --- a/src/arch/sparc/SparcSeWorkload.py +++ b/src/arch/sparc/SparcSeWorkload.py @@ -27,18 +27,22 @@ from m5.params import * from m5.objects.Workload import SEWorkload + class SparcSEWorkload(SEWorkload): - type = 'SparcSEWorkload' + type = "SparcSEWorkload" cxx_header = "arch/sparc/se_workload.hh" - cxx_class = 'gem5::SparcISA::SEWorkload' + cxx_class = "gem5::SparcISA::SEWorkload" abstract = True + class SparcEmuLinux(SparcSEWorkload): - type = 'SparcEmuLinux' + type = "SparcEmuLinux" cxx_header = "arch/sparc/linux/se_workload.hh" - cxx_class = 'gem5::SparcISA::EmuLinux' + cxx_class = "gem5::SparcISA::EmuLinux" @classmethod def _is_compatible_with(cls, obj): - return obj.get_arch() in ('sparc64', 'sparc32') and \ - obj.get_op_sys() in ('linux', 'unknown') + return obj.get_arch() in ( + "sparc64", + "sparc32", + ) and obj.get_op_sys() in ("linux", "unknown") diff --git a/src/arch/sparc/SparcTLB.py b/src/arch/sparc/SparcTLB.py index 87a74f0aaa..e834d62000 100644 --- a/src/arch/sparc/SparcTLB.py +++ b/src/arch/sparc/SparcTLB.py @@ -29,8 +29,9 @@ from m5.params import * from m5.objects.BaseTLB import BaseTLB + class SparcTLB(BaseTLB): - type = 'SparcTLB' - cxx_class = 'gem5::SparcISA::TLB' - cxx_header = 'arch/sparc/tlb.hh' + type = "SparcTLB" + cxx_class = "gem5::SparcISA::TLB" + cxx_header = "arch/sparc/tlb.hh" size = Param.Int(64, "TLB size") diff --git a/src/arch/sparc/TimingSimpleCPU.py b/src/arch/sparc/TimingSimpleCPU.py deleted file mode 100644 index 
0471c1882b..0000000000 --- a/src/arch/sparc/TimingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from m5.objects.SparcCPU import SparcTimingSimpleCPU - -TimingSimpleCPU = SparcTimingSimpleCPU diff --git a/src/arch/sparc/decoder.hh b/src/arch/sparc/decoder.hh index 56eb177400..13384cdc45 100644 --- a/src/arch/sparc/decoder.hh +++ b/src/arch/sparc/decoder.hh @@ -39,21 +39,21 @@ namespace gem5 { +class BaseISA; + namespace SparcISA { -class ISA; class Decoder : public InstDecoder { protected: // The extended machine instruction being generated ExtMachInst emi; uint32_t machInst; - RegVal asi; + RegVal asi = 0; public: - Decoder(const SparcDecoderParams &p) : InstDecoder(p, &machInst), asi(0) - {} + Decoder(const SparcDecoderParams &p) : InstDecoder(p, &machInst) {} // Use this to give data to the predecoder. This should be used // when there is control flow. diff --git a/src/arch/sparc/faults.cc b/src/arch/sparc/faults.cc index 421befab9a..ff80ec1baf 100644 --- a/src/arch/sparc/faults.cc +++ b/src/arch/sparc/faults.cc @@ -306,10 +306,10 @@ doREDFault(ThreadContext *tc, TrapType tt) RegVal TSTATE = tc->readMiscRegNoEffect(MISCREG_TSTATE); PSTATE pstate = tc->readMiscRegNoEffect(MISCREG_PSTATE); HPSTATE hpstate = tc->readMiscRegNoEffect(MISCREG_HPSTATE); - CCR ccr = tc->readIntReg(INTREG_CCR); + CCR ccr = tc->getReg(int_reg::Ccr); RegVal ASI = tc->readMiscRegNoEffect(MISCREG_ASI); RegVal CWP = tc->readMiscRegNoEffect(MISCREG_CWP); - RegVal CANSAVE = tc->readMiscRegNoEffect(INTREG_CANSAVE); + RegVal CANSAVE = tc->getReg(int_reg::Cansave); RegVal GL = tc->readMiscRegNoEffect(MISCREG_GL); auto &pc = tc->pcState().as(); @@ -385,10 +385,10 @@ doNormalFault(ThreadContext *tc, TrapType tt, bool gotoHpriv) RegVal TSTATE = tc->readMiscRegNoEffect(MISCREG_TSTATE); PSTATE pstate = tc->readMiscRegNoEffect(MISCREG_PSTATE); HPSTATE hpstate = tc->readMiscRegNoEffect(MISCREG_HPSTATE); - CCR ccr = tc->readIntReg(INTREG_CCR); + CCR ccr = tc->getReg(int_reg::Ccr); RegVal ASI = tc->readMiscRegNoEffect(MISCREG_ASI); RegVal CWP = tc->readMiscRegNoEffect(MISCREG_CWP); - RegVal CANSAVE 
= tc->readIntReg(INTREG_CANSAVE); + RegVal CANSAVE = tc->getReg(int_reg::Cansave); RegVal GL = tc->readMiscRegNoEffect(MISCREG_GL); auto &pc = tc->pcState().as(); diff --git a/src/arch/sparc/fs_workload.hh b/src/arch/sparc/fs_workload.hh index 90d5131a21..3e901a4465 100644 --- a/src/arch/sparc/fs_workload.hh +++ b/src/arch/sparc/fs_workload.hh @@ -46,6 +46,7 @@ class FsWorkload : public Workload loader::SymbolTable defaultSymtab; public: + PARAMS(SparcFsWorkload); FsWorkload(const SparcFsWorkloadParams ¶ms) : Workload(params) {} void initState() override; @@ -53,7 +54,8 @@ class FsWorkload : public Workload setSystem(System *sys) override { Workload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } Addr diff --git a/src/arch/sparc/insts/micro.hh b/src/arch/sparc/insts/micro.hh index 8526cae75a..fa3ab2061c 100644 --- a/src/arch/sparc/insts/micro.hh +++ b/src/arch/sparc/insts/micro.hh @@ -71,19 +71,19 @@ class SparcMacroInst : public SparcStaticInst } Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Tried to execute a macroop directly!\n"); } Fault - initiateAcc(ExecContext *, Trace::InstRecord *) const override + initiateAcc(ExecContext *, trace::InstRecord *) const override { panic("Tried to execute a macroop directly!\n"); } Fault - completeAcc(PacketPtr, ExecContext *, Trace::InstRecord *) const override + completeAcc(PacketPtr, ExecContext *, trace::InstRecord *) const override { panic("Tried to execute a macroop directly!\n"); } diff --git a/src/arch/sparc/insts/nop.cc b/src/arch/sparc/insts/nop.cc index 0d2aa2169d..f95056c623 100644 --- a/src/arch/sparc/insts/nop.cc +++ b/src/arch/sparc/insts/nop.cc @@ -48,7 +48,7 @@ output header {{ } Fault - execute(ExecContext *xc, Trace::InstRecord *traceData) const + execute(ExecContext *xc, trace::InstRecord *traceData) const { return NoFault; } @@ -71,7 +71,7 
@@ output decoder {{ def template NopExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { // Nothing to see here, move along return NoFault; diff --git a/src/arch/sparc/insts/nop.hh b/src/arch/sparc/insts/nop.hh index 0aad703888..bcba61d8d8 100644 --- a/src/arch/sparc/insts/nop.hh +++ b/src/arch/sparc/insts/nop.hh @@ -56,7 +56,7 @@ class Nop : public SparcStaticInst } Fault - execute(ExecContext *xc, Trace::InstRecord *traceData) const override + execute(ExecContext *xc, trace::InstRecord *traceData) const override { return NoFault; } diff --git a/src/arch/sparc/insts/unimp.hh b/src/arch/sparc/insts/unimp.hh index e8694d9969..9eda012182 100644 --- a/src/arch/sparc/insts/unimp.hh +++ b/src/arch/sparc/insts/unimp.hh @@ -62,7 +62,7 @@ class FailUnimplemented : public SparcStaticInst {} Fault - execute(ExecContext *xc, Trace::InstRecord *traceData) const override + execute(ExecContext *xc, trace::InstRecord *traceData) const override { return std::make_shared( "attempt to execute unimplemented instruction '%s' (inst %#08x)", @@ -99,7 +99,7 @@ class WarnUnimplemented : public SparcStaticInst {} Fault - execute(ExecContext *xc, Trace::InstRecord *traceData) const override + execute(ExecContext *xc, trace::InstRecord *traceData) const override { if (!warned) { return std::make_shared( diff --git a/src/arch/sparc/insts/unknown.hh b/src/arch/sparc/insts/unknown.hh index 813acf1b74..f4bb143198 100644 --- a/src/arch/sparc/insts/unknown.hh +++ b/src/arch/sparc/insts/unknown.hh @@ -50,7 +50,7 @@ class Unknown : public SparcStaticInst {} Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { return std::make_shared(); } diff --git a/src/arch/sparc/isa.cc b/src/arch/sparc/isa.cc index cec483ff6c..255dbb0b09 100644 --- a/src/arch/sparc/isa.cc +++ b/src/arch/sparc/isa.cc @@ -39,9 +39,6 @@ #include "base/trace.hh" #include 
"cpu/base.hh" #include "cpu/thread_context.hh" -#include "debug/FloatRegs.hh" -#include "debug/IntRegs.hh" -#include "debug/MiscRegs.hh" #include "debug/Timer.hh" #include "params/SparcISA.hh" @@ -68,15 +65,28 @@ buildPstateMask() static const PSTATE PstateMask = buildPstateMask(); +namespace +{ + +/* Not applicable for SPARC */ +RegClass vecRegClass(VecRegClass, VecRegClassName, 1, debug::IntRegs); +RegClass vecElemClass(VecElemClass, VecElemClassName, 2, debug::IntRegs); +RegClass vecPredRegClass(VecPredRegClass, VecPredRegClassName, 1, + debug::IntRegs); +RegClass ccRegClass(CCRegClass, CCRegClassName, 0, debug::IntRegs); + +} // anonymous namespace + ISA::ISA(const Params &p) : BaseISA(p) { - _regClasses.emplace_back(NumIntRegs, debug::IntRegs); - _regClasses.emplace_back(NumFloatRegs, debug::FloatRegs); - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable for SPARC - _regClasses.emplace_back(2, debug::IntRegs); // Not applicable for SPARC - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable for SPARC - _regClasses.emplace_back(0, debug::IntRegs); // Not applicable for SPARC - _regClasses.emplace_back(NumMiscRegs, debug::MiscRegs); + _regClasses.push_back(&flatIntRegClass); + _regClasses.push_back(&floatRegClass); + _regClasses.push_back(&vecRegClass); + _regClasses.push_back(&vecElemClass); + _regClasses.push_back(&vecPredRegClass); + _regClasses.push_back(&ccRegClass); + _regClasses.push_back(&miscRegClass); + clear(); } @@ -226,19 +236,26 @@ ISA::copyRegsFrom(ThreadContext *src) src->setMiscReg(MISCREG_GL, x); tc->setMiscReg(MISCREG_GL, x); // Skip %g0 which is always zero. - for (int y = 1; y < 8; y++) - tc->setIntReg(y, src->readIntReg(y)); + for (int y = 1; y < 8; y++) { + RegId reg = intRegClass[y]; + tc->setReg(reg, src->getReg(reg)); + } } // Locals and ins. Outs are all also ins. 
for (int x = 0; x < NWindows; ++x) { src->setMiscReg(MISCREG_CWP, x); tc->setMiscReg(MISCREG_CWP, x); - for (int y = 16; y < 32; y++) - tc->setIntReg(y, src->readIntReg(y)); + for (int y = 16; y < 32; y++) { + RegId reg = intRegClass[y]; + tc->setReg(reg, src->getReg(reg)); + } } // Microcode reg and pseudo int regs (misc regs in the integer regfile). - for (int y = NumIntArchRegs; y < NumIntArchRegs + NumMicroIntRegs; ++y) - tc->setIntReg(y, src->readIntReg(y)); + for (int y = int_reg::NumArchRegs; + y < int_reg::NumArchRegs + int_reg::NumMicroRegs; ++y) { + RegId reg = intRegClass[y]; + tc->setReg(reg, src->getReg(reg)); + } // Restore src's GL, CWP src->setMiscReg(MISCREG_GL, old_gl); @@ -246,8 +263,9 @@ ISA::copyRegsFrom(ThreadContext *src) // Then loop through the floating point registers. - for (int i = 0; i < SparcISA::NumFloatArchRegs; ++i) { - tc->setFloatReg(i, src->readFloatReg(i)); + for (int i = 0; i < SparcISA::float_reg::NumArchRegs; ++i) { + RegId reg = floatRegClass[i]; + tc->setReg(reg, src->getReg(reg)); } // Copy misc. registers @@ -263,7 +281,7 @@ ISA::reloadRegMap() installGlobals(gl, CurrentGlobalsOffset); installWindow(cwp, CurrentWindowOffset); // Microcode registers. 
- for (int i = 0; i < NumMicroIntRegs; i++) + for (int i = 0; i < int_reg::NumMicroRegs; i++) intRegMap[MicroIntOffset + i] = i + TotalGlobals + NWindows * 16; installGlobals(gl, NextGlobalsOffset); installWindow(cwp - 1, NextWindowOffset); @@ -274,7 +292,7 @@ ISA::reloadRegMap() void ISA::installWindow(int cwp, int offset) { - assert(offset >= 0 && offset + NumWindowedRegs <= NumIntRegs); + assert(offset >= 0 && offset + NumWindowedRegs <= int_reg::NumRegs); RegIndex *mapChunk = intRegMap + offset; for (int i = 0; i < NumWindowedRegs; i++) mapChunk[i] = TotalGlobals + @@ -284,7 +302,7 @@ ISA::installWindow(int cwp, int offset) void ISA::installGlobals(int gl, int offset) { - assert(offset >= 0 && offset + NumGlobalRegs <= NumIntRegs); + assert(offset >= 0 && offset + NumGlobalRegs <= int_reg::NumRegs); RegIndex *mapChunk = intRegMap + offset; mapChunk[0] = 0; for (int i = 1; i < NumGlobalRegs; i++) @@ -355,17 +373,17 @@ ISA::clear() } RegVal -ISA::readMiscRegNoEffect(int miscReg) const +ISA::readMiscRegNoEffect(RegIndex idx) const { - // The three miscRegs are moved up from the switch statement + // The three idxs are moved up from the switch statement // due to more frequent calls. 
- if (miscReg == MISCREG_GL) + if (idx == MISCREG_GL) return gl; - if (miscReg == MISCREG_CWP) + if (idx == MISCREG_CWP) return cwp; - if (miscReg == MISCREG_TLB_DATA) { + if (idx == MISCREG_TLB_DATA) { /* Package up all the data for the tlb: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 @@ -387,7 +405,7 @@ ISA::readMiscRegNoEffect(int miscReg) const (uint64_t)secContext << 48; } - switch (miscReg) { + switch (idx) { // case MISCREG_TLB_DATA: // [original contents see above] // case MISCREG_Y: @@ -511,14 +529,14 @@ ISA::readMiscRegNoEffect(int miscReg) const case MISCREG_QUEUE_NRES_ERROR_TAIL: return nres_error_tail; default: - panic("Miscellaneous register %d not implemented\n", miscReg); + panic("Miscellaneous register %d not implemented\n", idx); } } RegVal -ISA::readMiscReg(int miscReg) +ISA::readMiscReg(RegIndex idx) { - switch (miscReg) { + switch (idx) { // tick and stick are aliased to each other in niagra // well store the tick data in stick and the interrupt bit in tick case MISCREG_STICK: @@ -558,15 +576,15 @@ ISA::readMiscReg(int miscReg) case MISCREG_QUEUE_NRES_ERROR_HEAD: case MISCREG_QUEUE_NRES_ERROR_TAIL: case MISCREG_HPSTATE: - return readFSReg(miscReg); + return readFSReg(idx); } - return readMiscRegNoEffect(miscReg); + return readMiscRegNoEffect(idx); } void -ISA::setMiscRegNoEffect(int miscReg, RegVal val) +ISA::setMiscRegNoEffect(RegIndex idx, RegVal val) { - switch (miscReg) { + switch (idx) { // case MISCREG_Y: // y = val; // break; @@ -740,16 +758,16 @@ ISA::setMiscRegNoEffect(int miscReg, RegVal val) nres_error_tail = val; break; default: - panic("Miscellaneous register %d not implemented\n", miscReg); + panic("Miscellaneous register %d not implemented\n", idx); } } void -ISA::setMiscReg(int miscReg, RegVal val) +ISA::setMiscReg(RegIndex idx, RegVal val) { RegVal new_val = val; - switch (miscReg) { + switch (idx) { case MISCREG_ASI: 
tc->getDecoderPtr()->as().setContext(val); break; @@ -814,10 +832,10 @@ ISA::setMiscReg(int miscReg, RegVal val) case MISCREG_QUEUE_NRES_ERROR_HEAD: case MISCREG_QUEUE_NRES_ERROR_TAIL: case MISCREG_HPSTATE: - setFSReg(miscReg, val); + setFSReg(idx, val); return; } - setMiscRegNoEffect(miscReg, new_val); + setMiscRegNoEffect(idx, new_val); } void diff --git a/src/arch/sparc/isa.hh b/src/arch/sparc/isa.hh index f26de40318..22bfba4f06 100644 --- a/src/arch/sparc/isa.hh +++ b/src/arch/sparc/isa.hh @@ -34,6 +34,7 @@ #include "arch/generic/isa.hh" #include "arch/sparc/pcstate.hh" +#include "arch/sparc/regs/float.hh" #include "arch/sparc/regs/int.hh" #include "arch/sparc/regs/misc.hh" #include "arch/sparc/sparc_traits.hh" @@ -154,7 +155,7 @@ class ISA : public BaseISA CurrentGlobalsOffset = 0, CurrentWindowOffset = CurrentGlobalsOffset + NumGlobalRegs, MicroIntOffset = CurrentWindowOffset + NumWindowedRegs, - NextGlobalsOffset = MicroIntOffset + NumMicroIntRegs, + NextGlobalsOffset = MicroIntOffset + int_reg::NumMicroRegs, NextWindowOffset = NextGlobalsOffset + NumGlobalRegs, PreviousGlobalsOffset = NextWindowOffset + NumWindowedRegs, PreviousWindowOffset = PreviousGlobalsOffset + NumGlobalRegs, @@ -167,7 +168,9 @@ class ISA : public BaseISA void reloadRegMap(); public: - void clear(); + const RegIndex &mapIntRegId(RegIndex idx) const { return intRegMap[idx]; } + + void clear() override; PCStateBase * newPCState(Addr new_inst_addr=0) const override @@ -185,47 +188,11 @@ class ISA : public BaseISA public: - RegVal readMiscRegNoEffect(int miscReg) const; - RegVal readMiscReg(int miscReg); + RegVal readMiscRegNoEffect(RegIndex idx) const override; + RegVal readMiscReg(RegIndex idx) override; - void setMiscRegNoEffect(int miscReg, RegVal val); - void setMiscReg(int miscReg, RegVal val); - - RegId - flattenRegId(const RegId& regId) const - { - switch (regId.classValue()) { - case IntRegClass: - return RegId(IntRegClass, flattenIntIndex(regId.index())); - case FloatRegClass: - 
return RegId(FloatRegClass, flattenFloatIndex(regId.index())); - case CCRegClass: - return RegId(CCRegClass, flattenCCIndex(regId.index())); - case MiscRegClass: - return RegId(MiscRegClass, flattenMiscIndex(regId.index())); - default: - break; - } - return regId; - } - - int - flattenIntIndex(int reg) const - { - assert(reg < TotalInstIntRegs); - RegIndex flatIndex = intRegMap[reg]; - assert(flatIndex < NumIntRegs); - return flatIndex; - } - - int flattenFloatIndex(int reg) const { return reg; } - int flattenVecIndex(int reg) const { return reg; } - int flattenVecElemIndex(int reg) const { return reg; } - int flattenVecPredIndex(int reg) const { return reg; } - - // dummy - int flattenCCIndex(int reg) const { return reg; } - int flattenMiscIndex(int reg) const { return reg; } + void setMiscRegNoEffect(RegIndex idx, RegVal val) override; + void setMiscReg(RegIndex idx, RegVal val) override; uint64_t getExecutingAsid() const override diff --git a/src/arch/sparc/isa/base.isa b/src/arch/sparc/isa/base.isa index 8b118f4f0c..d250266dec 100644 --- a/src/arch/sparc/isa/base.isa +++ b/src/arch/sparc/isa/base.isa @@ -132,5 +132,3 @@ output exec {{ } } }}; - - diff --git a/src/arch/sparc/isa/formats/basic.isa b/src/arch/sparc/isa/formats/basic.isa index 0d2346dfd0..1386e6e3a7 100644 --- a/src/arch/sparc/isa/formats/basic.isa +++ b/src/arch/sparc/isa/formats/basic.isa @@ -37,7 +37,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor. %(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -54,8 +54,8 @@ class %(class_name)s : public %(base_class)s public: // Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - GEM5_NO_INLINE Fault doFpOp(ExecContext *, Trace::InstRecord *) const; + Fault execute(ExecContext *, trace::InstRecord *) const override; + GEM5_NO_INLINE Fault doFpOp(ExecContext *, trace::InstRecord *) const; }; }}; @@ -72,7 +72,7 @@ class %(class_name)s : public %(base_class)s public: // Constructor. %(class_name)s(const char *mnemonic, ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -100,7 +100,7 @@ def template BasicConstructorWithMnemonic {{ def template BasicExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -119,7 +119,7 @@ Fault def template FpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -137,7 +137,7 @@ Fault def template DoFpOpExecute {{ Fault -%(class_name)s::doFpOp(ExecContext *xc, Trace::InstRecord *traceData) const +%(class_name)s::doFpOp(ExecContext *xc, trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; diff --git a/src/arch/sparc/isa/formats/branch.isa b/src/arch/sparc/isa/formats/branch.isa index d1107e64bd..bf7991bac9 100644 --- a/src/arch/sparc/isa/formats/branch.isa +++ b/src/arch/sparc/isa/formats/branch.isa @@ -31,7 +31,7 @@ def template JumpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { // Attempt to execute the instruction Fault fault = NoFault; @@ -53,7 +53,7 @@ def template JumpExecute {{ def template BranchExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { // Attempt to execute the instruction 
Fault fault = NoFault; @@ -187,4 +187,3 @@ def format BranchSplit(code=default_branch_code, decode_block) = doUncondBranch(name, Name, "BranchSplit", code, annul_code, opt_flags) }}; - diff --git a/src/arch/sparc/isa/formats/formats.isa b/src/arch/sparc/isa/formats/formats.isa index 6d2b0d7494..c7aa6fabee 100644 --- a/src/arch/sparc/isa/formats/formats.isa +++ b/src/arch/sparc/isa/formats/formats.isa @@ -51,4 +51,3 @@ // Include the branch format ##include "branch.isa" - diff --git a/src/arch/sparc/isa/formats/integerop.isa b/src/arch/sparc/isa/formats/integerop.isa index aa67b7c637..36c4803b9f 100644 --- a/src/arch/sparc/isa/formats/integerop.isa +++ b/src/arch/sparc/isa/formats/integerop.isa @@ -40,7 +40,7 @@ def template SetHiDecode {{ def template IntOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -162,4 +162,3 @@ def format SetHi(code, *opt_flags) {{ exec_output = IntOpExecute.subst(iop) decode_block = SetHiDecode.subst(iop) }}; - diff --git a/src/arch/sparc/isa/formats/mem/basicmem.isa b/src/arch/sparc/isa/formats/mem/basicmem.isa index 83377ae8fe..e2cefbce97 100644 --- a/src/arch/sparc/isa/formats/mem/basicmem.isa +++ b/src/arch/sparc/isa/formats/mem/basicmem.isa @@ -45,11 +45,11 @@ def template MemDeclare {{ /// Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; Fault initiateAcc(ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -166,4 +166,3 @@ def format TwinLoad(code, *opt_flags) {{ AlternateASIPrivFaultCheck + TwinAlignmentFaultCheck, name, Name, "EXT_ASI", opt_flags) }}; - diff --git a/src/arch/sparc/isa/formats/mem/blockmem.isa b/src/arch/sparc/isa/formats/mem/blockmem.isa index 3ae08204a4..9f5c427370 100644 --- a/src/arch/sparc/isa/formats/mem/blockmem.isa +++ b/src/arch/sparc/isa/formats/mem/blockmem.isa @@ -61,10 +61,10 @@ def template BlockMemMicroDeclare {{ public: // Constructor %(class_name)s_%(micro_pc)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; diff --git a/src/arch/sparc/isa/formats/mem/swap.isa b/src/arch/sparc/isa/formats/mem/swap.isa index 62348b4f67..6d83a2feb6 100644 --- a/src/arch/sparc/isa/formats/mem/swap.isa +++ b/src/arch/sparc/isa/formats/mem/swap.isa @@ -27,7 +27,7 @@ // This template provides the execute functions for a swap def template SwapExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; // This is to support the conditional store in cas instructions. 
@@ -65,7 +65,7 @@ def template SwapExecute {{ def template SwapInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { Fault fault = NoFault; Addr EA; @@ -93,7 +93,7 @@ def template SwapInitiateAcc {{ def template SwapCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -174,5 +174,3 @@ def format CasAlt(code, postacc_code, mem_flags, *opt_flags) {{ decode_block) = doCasFormat(code, SwapFuncs, AlternateASIPrivFaultCheck, name, Name, flags, ["IsStoreConditional"], postacc_code) }}; - - diff --git a/src/arch/sparc/isa/formats/mem/util.isa b/src/arch/sparc/isa/formats/mem/util.isa index 82387e2eef..1e5a55b55b 100644 --- a/src/arch/sparc/isa/formats/mem/util.isa +++ b/src/arch/sparc/isa/formats/mem/util.isa @@ -32,7 +32,7 @@ // This template provides the execute functions for a load def template LoadExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; Addr EA; @@ -59,7 +59,7 @@ def template LoadExecute {{ def template LoadInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { Fault fault = NoFault; Addr EA; @@ -78,7 +78,7 @@ def template LoadInitiateAcc {{ def template LoadCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -95,7 +95,7 @@ def template LoadCompleteAcc {{ // This template provides the execute functions for a store def template StoreExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = 
NoFault; // This is to support the conditional store in cas instructions. @@ -126,7 +126,7 @@ def template StoreExecute {{ def template StoreInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { Fault fault = NoFault; bool storeCond = true; @@ -151,7 +151,7 @@ def template StoreInitiateAcc {{ def template StoreCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr, ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { return NoFault; } diff --git a/src/arch/sparc/isa/formats/priv.isa b/src/arch/sparc/isa/formats/priv.isa index a0c3a18d45..403a86ee17 100644 --- a/src/arch/sparc/isa/formats/priv.isa +++ b/src/arch/sparc/isa/formats/priv.isa @@ -40,7 +40,7 @@ def template ControlRegConstructor {{ def template PrivExecute {{ Fault -%(class_name)s::execute(ExecContext *xc, Trace::InstRecord *traceData) const +%(class_name)s::execute(ExecContext *xc, trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -133,4 +133,3 @@ def format HPriv(code, check_tl=false, *opt_flags) {{ doPrivFormat(code, check_code, name, Name, opt_flags, check_tl=(check_tl != 'false')) }}; - diff --git a/src/arch/sparc/isa/formats/trap.isa b/src/arch/sparc/isa/formats/trap.isa index 9d9a08b94a..5c0f21e091 100644 --- a/src/arch/sparc/isa/formats/trap.isa +++ b/src/arch/sparc/isa/formats/trap.isa @@ -32,7 +32,7 @@ def template TrapExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -45,7 +45,7 @@ def template TrapExecute {{ def template FpUnimplExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; diff --git a/src/arch/sparc/isa/formats/unimp.isa b/src/arch/sparc/isa/formats/unimp.isa index 2e7787e1ed..37cbaabd04 
100644 --- a/src/arch/sparc/isa/formats/unimp.isa +++ b/src/arch/sparc/isa/formats/unimp.isa @@ -40,4 +40,3 @@ def format WarnUnimpl() {{ iop = InstObjParams(name, 'WarnUnimplemented') decode_block = BasicDecodeWithMnemonic.subst(iop) }}; - diff --git a/src/arch/sparc/isa/includes.isa b/src/arch/sparc/isa/includes.isa index d35619d7d9..b55dcae8a1 100644 --- a/src/arch/sparc/isa/includes.isa +++ b/src/arch/sparc/isa/includes.isa @@ -60,6 +60,7 @@ output decoder {{ #include #include "arch/sparc/decoder.hh" +#include "arch/sparc/regs/float.hh" #include "base/cprintf.hh" #include "base/fenv.hh" #include "base/loader/symtab.hh" @@ -79,6 +80,7 @@ output exec {{ #include "arch/generic/memhelpers.hh" #include "arch/sparc/asi.hh" #include "arch/sparc/pseudo_inst_abi.hh" +#include "arch/sparc/regs/float.hh" #include "base/fenv.hh" #include "cpu/base.hh" #include "cpu/exetrace.hh" diff --git a/src/arch/sparc/isa/operands.isa b/src/arch/sparc/isa/operands.isa index 5e10017a5f..12048ca029 100644 --- a/src/arch/sparc/isa/operands.isa +++ b/src/arch/sparc/isa/operands.isa @@ -70,7 +70,7 @@ let {{ @overrideInOperand def regId(self): return f'(({self.reg_spec}) == 0) ? RegId() : ' \ - f'RegId({self.reg_class}, {self.reg_spec})' + f'{self.reg_class}[{self.reg_spec}]' }}; def operands {{ @@ -81,10 +81,12 @@ def operands {{ 'Rd': IntReg('udw', 'RD', 'IsInteger', 1), # The Rd from the previous window 'Rd_prev': IntReg('udw', - 'RD + NumIntArchRegs + NumMicroIntRegs', 'IsInteger', 2), + 'RD + int_reg::NumArchRegs + int_reg::NumMicroRegs', + 'IsInteger', 2), # The Rd from the next window 'Rd_next': IntReg('udw', - 'RD + 2 * NumIntArchRegs + NumMicroIntRegs', 'IsInteger', 3), + 'RD + 2 * int_reg::NumArchRegs + int_reg::NumMicroRegs', + 'IsInteger', 3), # For microcoded twin load instructions, RdTwin appears in the "code" # for the instruction is replaced by RdLow or RdHigh by the format # before it's processed by the iop. 
@@ -95,7 +97,7 @@ def operands {{ 'Rs1': IntReg('udw', 'RS1', 'IsInteger', 6), 'Rs2': IntReg('udw', 'RS2', 'IsInteger', 7), # A microcode register. Right now, this is the only one. - 'uReg0': IntReg('udw', 'INTREG_UREG0', 'IsInteger', 8), + 'uReg0': IntReg('udw', 'int_reg::Ureg0', 'IsInteger', 8), # Because double and quad precision register numbers are decoded # differently, they get different operands. The single precision versions # have an s post pended to their name. @@ -142,25 +144,16 @@ def operands {{ 'NNPC': PCStateOp('udw', 'nnpc', (None, None, 'IsControl'), 30), # Registers which are used explicitly in instructions - 'R0': IntReg('udw', '0', None, 6), - 'R1': IntReg('udw', '1', None, 7), 'R15': IntReg('udw', '15', 'IsInteger', 8), - 'R16': IntReg('udw', '16', None, 9), - 'O0': IntReg('udw', 'INTREG_O0', 'IsInteger', 10), - 'O1': IntReg('udw', 'INTREG_O1', 'IsInteger', 11), - 'O2': IntReg('udw', 'INTREG_O2', 'IsInteger', 12), - 'O3': IntReg('udw', 'INTREG_O3', 'IsInteger', 13), - 'O4': IntReg('udw', 'INTREG_O4', 'IsInteger', 14), - 'O5': IntReg('udw', 'INTREG_O5', 'IsInteger', 15), # Control registers - 'Y': IntReg('udw', 'INTREG_Y', None, 40), - 'Ccr': IntReg('udw', 'INTREG_CCR', None, 41), + 'Y': IntReg('udw', 'int_reg::Y', None, 40), + 'Ccr': IntReg('udw', 'int_reg::Ccr', None, 41), 'Asi': ControlRegOp('udw', 'MISCREG_ASI', None, 42), 'Fprs': ControlRegOp('udw', 'MISCREG_FPRS', None, 43), 'Pcr': ControlRegOp('udw', 'MISCREG_PCR', None, 44), 'Pic': ControlRegOp('udw', 'MISCREG_PIC', None, 45), - 'Gsr': IntReg('udw', 'INTREG_GSR', None, 46), + 'Gsr': IntReg('udw', 'int_reg::Gsr', None, 46), 'Softint': ControlRegOp('udw', 'MISCREG_SOFTINT', None, 47), 'SoftintSet': ControlRegOp('udw', 'MISCREG_SOFTINT_SET', None, 48), 'SoftintClr': ControlRegOp('udw', 'MISCREG_SOFTINT_CLR', None, 49), @@ -181,11 +174,11 @@ def operands {{ (None, None, ['IsSerializeAfter', 'IsSerializing', 'IsNonSpeculative']), 62), - 'Cansave': IntReg('udw', 'INTREG_CANSAVE', None, 63), - 
'Canrestore': IntReg('udw', 'INTREG_CANRESTORE', None, 64), - 'Cleanwin': IntReg('udw', 'INTREG_CLEANWIN', None, 65), - 'Otherwin': IntReg('udw', 'INTREG_OTHERWIN', None, 66), - 'Wstate': IntReg('udw', 'INTREG_WSTATE', None, 67), + 'Cansave': IntReg('udw', 'int_reg::Cansave', None, 63), + 'Canrestore': IntReg('udw', 'int_reg::Canrestore', None, 64), + 'Cleanwin': IntReg('udw', 'int_reg::Cleanwin', None, 65), + 'Otherwin': IntReg('udw', 'int_reg::Otherwin', None, 66), + 'Wstate': IntReg('udw', 'int_reg::Wstate', None, 67), 'Gl': ControlRegOp('udw', 'MISCREG_GL', None, 68), 'Hpstate': ControlRegOp('hpstate', 'MISCREG_HPSTATE', None, 69), diff --git a/src/arch/sparc/linux/linux.hh b/src/arch/sparc/linux/linux.hh index 0912de23c4..a5d88abe6a 100644 --- a/src/arch/sparc/linux/linux.hh +++ b/src/arch/sparc/linux/linux.hh @@ -221,24 +221,26 @@ class SparcLinux : public Linux, public OpenFlagTable uint64_t stack, uint64_t tls) { ctc->getIsaPtr()->copyRegsFrom(ptc); - ctc->setIntReg(SparcISA::INTREG_OTHERWIN, 0); - ctc->setIntReg(SparcISA::INTREG_CANRESTORE, 0); - ctc->setIntReg(SparcISA::INTREG_CANSAVE, SparcISA::NWindows - 2); - ctc->setIntReg(SparcISA::INTREG_CLEANWIN, SparcISA::NWindows); - ctc->setMiscReg(SparcISA::MISCREG_CWP, 0); - ctc->setIntReg(SparcISA::INTREG_WSTATE, 0); - ctc->setMiscRegNoEffect(SparcISA::MISCREG_TL, 0); + ctc->setReg(SparcISA::int_reg::Otherwin, (RegVal)0); + ctc->setReg(SparcISA::int_reg::Canrestore, (RegVal)0); + ctc->setReg(SparcISA::int_reg::Cansave, SparcISA::NWindows - 2); + ctc->setReg(SparcISA::int_reg::Cleanwin, SparcISA::NWindows); + ctc->setMiscReg(SparcISA::MISCREG_CWP, (RegVal)0); + ctc->setReg(SparcISA::int_reg::Wstate, (RegVal)0); + ctc->setMiscRegNoEffect(SparcISA::MISCREG_TL, (RegVal)0); ctc->setMiscReg(SparcISA::MISCREG_ASI, SparcISA::ASI_PRIMARY); - for (int y = 8; y < 32; y++) - ctc->setIntReg(y, ptc->readIntReg(y)); + for (int y = 8; y < 32; y++) { + RegId reg = SparcISA::intRegClass[y]; + ctc->setReg(reg, 
ptc->getReg(reg)); + } if (stack) - ctc->setIntReg(SparcISA::StackPointerReg, stack); + ctc->setReg(SparcISA::StackPointerReg, stack); // Set these extra values. Since "clone" doesn't return two values, // we can set these and they won't be clobbered by the syscall ABI. - ptc->setIntReg(SparcISA::SyscallPseudoReturnReg, 0); - ctc->setIntReg(SparcISA::SyscallPseudoReturnReg, 1); + ptc->setReg(SparcISA::SyscallPseudoReturnReg, (RegVal)0); + ctc->setReg(SparcISA::SyscallPseudoReturnReg, 1); } }; diff --git a/src/arch/sparc/linux/se_workload.cc b/src/arch/sparc/linux/se_workload.cc index c502b532cc..a2752c2462 100644 --- a/src/arch/sparc/linux/se_workload.cc +++ b/src/arch/sparc/linux/se_workload.cc @@ -32,6 +32,7 @@ #include "arch/sparc/page_size.hh" #include "arch/sparc/process.hh" +#include "arch/sparc/regs/int.hh" #include "base/loader/object_file.hh" #include "base/trace.hh" #include "cpu/thread_context.hh" @@ -117,7 +118,7 @@ EmuLinux::syscall32(ThreadContext *tc) // This will move into the base SEWorkload function at some point. process->Process::syscall(tc); - syscall32Descs.get(tc->readIntReg(1))->doSyscall(tc); + syscall32Descs.get(tc->getReg(int_reg::G1))->doSyscall(tc); } void @@ -128,7 +129,7 @@ EmuLinux::syscall64(ThreadContext *tc) // This will move into the base SEWorkload function at some point. 
process->Process::syscall(tc); - syscallDescs.get(tc->readIntReg(1))->doSyscall(tc); + syscallDescs.get(tc->getReg(int_reg::G1))->doSyscall(tc); } void diff --git a/src/arch/sparc/nativetrace.cc b/src/arch/sparc/nativetrace.cc index f10d0ad64f..3eee494388 100644 --- a/src/arch/sparc/nativetrace.cc +++ b/src/arch/sparc/nativetrace.cc @@ -37,9 +37,9 @@ namespace gem5 { -namespace Trace { +namespace trace { -static const char *intRegNames[SparcISA::NumIntArchRegs] = { +static const char *intRegNames[SparcISA::int_reg::NumArchRegs] = { // Global registers "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7", // Output registers @@ -51,7 +51,7 @@ static const char *intRegNames[SparcISA::NumIntArchRegs] = { }; void -Trace::SparcNativeTrace::check(NativeTraceRecord *record) +SparcNativeTrace::check(NativeTraceRecord *record) { ThreadContext *tc = record->getThread(); @@ -60,10 +60,10 @@ Trace::SparcNativeTrace::check(NativeTraceRecord *record) // Integer registers // I doubt a real SPARC will describe more integer registers than this. 
- assert(SparcISA::NumIntArchRegs == 32); + assert(SparcISA::int_reg::NumArchRegs == 32); const char **regName = intRegNames; - for (int i = 0; i < SparcISA::NumIntArchRegs; i++) { - regVal = tc->readIntReg(i); + for (int i = 0; i < SparcISA::int_reg::NumArchRegs; i++) { + regVal = tc->getReg(SparcISA::intRegClass[i]); read(&realRegVal, sizeof(realRegVal)); realRegVal = betoh(realRegVal); checkReg(*(regName++), regVal, realRegVal); @@ -85,9 +85,9 @@ Trace::SparcNativeTrace::check(NativeTraceRecord *record) // CCR read(&realRegVal, sizeof(realRegVal)); realRegVal = betoh(realRegVal); - regVal = tc->readIntReg(SparcISA::INTREG_CCR); + regVal = tc->getReg(SparcISA::int_reg::Ccr); checkReg("ccr", regVal, realRegVal); } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/sparc/nativetrace.hh b/src/arch/sparc/nativetrace.hh index 3a9d178f9a..a45b7421f9 100644 --- a/src/arch/sparc/nativetrace.hh +++ b/src/arch/sparc/nativetrace.hh @@ -37,7 +37,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { class SparcNativeTrace : public NativeTrace { @@ -48,7 +48,7 @@ class SparcNativeTrace : public NativeTrace void check(NativeTraceRecord *record); }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __CPU_NATIVETRACE_HH__ diff --git a/src/arch/sparc/process.cc b/src/arch/sparc/process.cc index 2ae9d4aa1b..32109850d2 100644 --- a/src/arch/sparc/process.cc +++ b/src/arch/sparc/process.cc @@ -83,17 +83,17 @@ SparcProcess::initState() */ // No windows contain info from other programs - tc->setIntReg(INTREG_OTHERWIN, 0); + tc->setReg(int_reg::Otherwin, (RegVal)0); // There are no windows to pop - tc->setIntReg(INTREG_CANRESTORE, 0); + tc->setReg(int_reg::Canrestore, (RegVal)0); // All windows are available to save into - tc->setIntReg(INTREG_CANSAVE, NWindows - 2); + tc->setReg(int_reg::Cansave, NWindows - 2); // All windows are "clean" - tc->setIntReg(INTREG_CLEANWIN, NWindows); + 
tc->setReg(int_reg::Cleanwin, NWindows); // Start with register window 0 tc->setMiscReg(MISCREG_CWP, 0); // Always use spill and fill traps 0 - tc->setIntReg(INTREG_WSTATE, 0); + tc->setReg(int_reg::Wstate, (RegVal)0); // Set the trap level to 0 tc->setMiscRegNoEffect(MISCREG_TL, 0); // Set the ASI register to something fixed @@ -358,13 +358,13 @@ SparcProcess::argsInit(int pageSize) ThreadContext *tc = system->threads[contextIds[0]]; // Set up the thread context to start running the process // assert(NumArgumentRegs >= 2); - // tc->setIntReg(ArgumentReg[0], argc); - // tc->setIntReg(ArgumentReg[1], argv_array_base); - tc->setIntReg(StackPointerReg, memState->getStackMin() - StackBias); + // tc->setReg(ArgumentReg[0], argc); + // tc->setReg(ArgumentReg[1], argv_array_base); + tc->setReg(StackPointerReg, memState->getStackMin() - StackBias); // %g1 is a pointer to a function that should be run at exit. Since we // don't have anything like that, it should be set to 0. - tc->setIntReg(1, 0); + tc->setReg(int_reg::G1, (RegVal)0); tc->pcState(getStartPC()); diff --git a/src/arch/sparc/pseudo_inst_abi.hh b/src/arch/sparc/pseudo_inst_abi.hh index 12e4ca1498..993e11bff3 100644 --- a/src/arch/sparc/pseudo_inst_abi.hh +++ b/src/arch/sparc/pseudo_inst_abi.hh @@ -53,7 +53,7 @@ struct Result // This assumes that all pseudo ops have their return value set // by the pseudo op instruction. 
This may need to be revisited if we // modify the pseudo op ABI in util/m5/m5op_x86.S - tc->setIntReg(SparcISA::INTREG_O0, ret); + tc->setReg(SparcISA::int_reg::O0, ret); } }; @@ -64,7 +64,7 @@ struct Argument get(ThreadContext *tc, SparcPseudoInstABI::State &state) { panic_if(state >= 6, "Too many psuedo inst arguments."); - return tc->readIntReg(SparcISA::INTREG_O0 + state++); + return tc->getReg(SparcISA::int_reg::o(state++)); } }; diff --git a/src/arch/sparc/regs/float.hh b/src/arch/sparc/regs/float.hh index 3588090029..64a9de0065 100644 --- a/src/arch/sparc/regs/float.hh +++ b/src/arch/sparc/regs/float.hh @@ -29,14 +29,25 @@ #ifndef __ARCH_SPARC_REGS_FLOAT_HH__ #define __ARCH_SPARC_REGS_FLOAT_HH__ +#include "cpu/reg_class.hh" +#include "debug/FloatRegs.hh" + namespace gem5 { namespace SparcISA { -const int NumFloatRegs = 64; -const int NumFloatArchRegs = NumFloatRegs; +namespace float_reg +{ + +const int NumRegs = 64; +const int NumArchRegs = NumRegs; + +} // namespace float_reg + +inline constexpr RegClass floatRegClass(FloatRegClass, FloatRegClassName, + float_reg::NumRegs, debug::FloatRegs); } // namespace SparcISA } // namespace gem5 diff --git a/src/arch/sparc/vecregs.hh b/src/arch/sparc/regs/int.cc similarity index 85% rename from src/arch/sparc/vecregs.hh rename to src/arch/sparc/regs/int.cc index d1d9dfd1d2..47f05077e5 100644 --- a/src/arch/sparc/vecregs.hh +++ b/src/arch/sparc/regs/int.cc @@ -26,11 +26,9 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __ARCH_SPARC_VECREGS_HH__ -#define __ARCH_SPARC_VECREGS_HH__ +#include "arch/sparc/regs/int.hh" -#include "arch/generic/vec_pred_reg.hh" -#include "arch/generic/vec_reg.hh" +#include "arch/sparc/isa.hh" namespace gem5 { @@ -38,11 +36,12 @@ namespace gem5 namespace SparcISA { -// Not applicable to SPARC -using VecRegContainer = ::gem5::DummyVecRegContainer; -using VecPredRegContainer = ::gem5::DummyVecPredRegContainer; +RegId +IntRegClassOps::flatten(const BaseISA &isa, const RegId &id) const +{ + auto &sparc_isa = static_cast(isa); + return {flatIntRegClass, sparc_isa.mapIntRegId(id.index())}; +} } // namespace SparcISA } // namespace gem5 - -#endif diff --git a/src/arch/sparc/regs/int.hh b/src/arch/sparc/regs/int.hh index 06048fc90c..75db224bbc 100644 --- a/src/arch/sparc/regs/int.hh +++ b/src/arch/sparc/regs/int.hh @@ -30,6 +30,8 @@ #define __ARCH_SPARC_REGS_INT_HH__ #include "arch/sparc/sparc_traits.hh" +#include "cpu/reg_class.hh" +#include "debug/IntRegs.hh" namespace gem5 { @@ -37,46 +39,139 @@ namespace gem5 namespace SparcISA { +namespace int_reg +{ + // semantically meaningful register indices enum { - // Globals - INTREG_G0, INTREG_G1, INTREG_G2, INTREG_G3, - INTREG_G4, INTREG_G5, INTREG_G6, INTREG_G7, - // Outputs - INTREG_O0, INTREG_O1, INTREG_O2, INTREG_O3, - INTREG_O4, INTREG_O5, INTREG_O6, INTREG_O7, - // Locals - INTREG_L0, INTREG_L1, INTREG_L2, INTREG_L3, - INTREG_L4, INTREG_L5, INTREG_L6, INTREG_L7, - // Inputs - INTREG_I0, INTREG_I1, INTREG_I2, INTREG_I3, - INTREG_I4, INTREG_I5, INTREG_I6, INTREG_I7, + _G0Idx, _G1Idx, _G2Idx, _G3Idx, _G4Idx, _G5Idx, _G6Idx, _G7Idx, + _O0Idx, _O1Idx, _O2Idx, _O3Idx, _O4Idx, _O5Idx, _O6Idx, _O7Idx, + _L0Idx, _L1Idx, _L2Idx, _L3Idx, _L4Idx, _L5Idx, _L6Idx, _L7Idx, + _I0Idx, _I1Idx, _I2Idx, _I3Idx, _I4Idx, _I5Idx, _I6Idx, _I7Idx, - NumIntArchRegs, + NumArchRegs, - INTREG_UREG0 = NumIntArchRegs, - INTREG_Y, - INTREG_CCR, - INTREG_CANSAVE, - INTREG_CANRESTORE, - INTREG_CLEANWIN, - INTREG_OTHERWIN, - 
INTREG_WSTATE, - INTREG_GSR, + _Ureg0Idx = NumArchRegs, + _YIdx, + _CcrIdx, + _CansaveIdx, + _CanrestoreIdx, + _CleanwinIdx, + _OtherwinIdx, + _WstateIdx, + _GsrIdx, - NumMicroIntRegs = INTREG_GSR - INTREG_UREG0 + 1 + NumMicroRegs = _GsrIdx - _Ureg0Idx + 1 }; +const int NumRegs = (MaxGL + 1) * 8 + NWindows * 16 + NumMicroRegs; + +} // namespace int_reg + +class IntRegClassOps : public RegClassOps +{ + RegId flatten(const BaseISA &isa, const RegId &id) const override; +}; + +inline constexpr IntRegClassOps intRegClassOps; + +inline constexpr RegClass intRegClass = + RegClass(IntRegClass, IntRegClassName, int_reg::NumRegs, debug::IntRegs). + ops(intRegClassOps). + needsFlattening(); + +inline constexpr RegClass flatIntRegClass = + RegClass(IntRegClass, IntRegClassName, int_reg::NumRegs, debug::IntRegs); + +namespace int_reg +{ + +inline constexpr RegId + // Globals + G0 = intRegClass[_G0Idx], + G1 = intRegClass[_G1Idx], + G2 = intRegClass[_G2Idx], + G3 = intRegClass[_G3Idx], + G4 = intRegClass[_G4Idx], + G5 = intRegClass[_G5Idx], + G6 = intRegClass[_G6Idx], + G7 = intRegClass[_G7Idx], + + // Outputs + O0 = intRegClass[_O0Idx], + O1 = intRegClass[_O1Idx], + O2 = intRegClass[_O2Idx], + O3 = intRegClass[_O3Idx], + O4 = intRegClass[_O4Idx], + O5 = intRegClass[_O5Idx], + O6 = intRegClass[_O6Idx], + O7 = intRegClass[_O7Idx], + + // Locals + L0 = intRegClass[_L0Idx], + L1 = intRegClass[_L1Idx], + L2 = intRegClass[_L2Idx], + L3 = intRegClass[_L3Idx], + L4 = intRegClass[_L4Idx], + L5 = intRegClass[_L5Idx], + L6 = intRegClass[_L6Idx], + L7 = intRegClass[_L7Idx], + + // Inputs + I0 = intRegClass[_I0Idx], + I1 = intRegClass[_I1Idx], + I2 = intRegClass[_I2Idx], + I3 = intRegClass[_I3Idx], + I4 = intRegClass[_I4Idx], + I5 = intRegClass[_I5Idx], + I6 = intRegClass[_I6Idx], + I7 = intRegClass[_I7Idx], + + Ureg0 = intRegClass[_Ureg0Idx], + Y = intRegClass[_YIdx], + Ccr = intRegClass[_CcrIdx], + Cansave = intRegClass[_CansaveIdx], + Canrestore = intRegClass[_CanrestoreIdx], + Cleanwin 
= intRegClass[_CleanwinIdx], + Otherwin = intRegClass[_OtherwinIdx], + Wstate = intRegClass[_WstateIdx], + Gsr = intRegClass[_GsrIdx]; + +inline constexpr RegId +g(int index) +{ + return intRegClass[G0 + index]; +} + +inline constexpr RegId +o(int index) +{ + return intRegClass[O0 + index]; +} + +inline constexpr RegId +l(int index) +{ + return intRegClass[L0 + index]; +} + +inline constexpr RegId +i(int index) +{ + return intRegClass[I0 + index]; +} + +} // namespace int_reg + // the rest of these depend on the ABI -const int ReturnAddressReg = INTREG_I7; // post call, precall is 15 -const int ReturnValueReg = INTREG_O0; // Post return, 24 is pre-return. -const int StackPointerReg = INTREG_O6; -const int FramePointerReg = INTREG_I6; +inline constexpr auto + &ReturnAddressReg = int_reg::I7, // post call, precall is 15 + &ReturnValueReg = int_reg::O0, // Post return, 24 is pre-return. + &StackPointerReg = int_reg::O6, + &FramePointerReg = int_reg::I6, -// Some OS syscall use a second register to return a second value -const int SyscallPseudoReturnReg = INTREG_O1; - -const int NumIntRegs = (MaxGL + 1) * 8 + NWindows * 16 + NumMicroIntRegs; + // Some OS syscall use a second register to return a second value + &SyscallPseudoReturnReg = int_reg::O1; } // namespace SparcISA } // namespace gem5 diff --git a/src/arch/sparc/regs/misc.hh b/src/arch/sparc/regs/misc.hh index 1620c600a1..0c2fa18e5a 100644 --- a/src/arch/sparc/regs/misc.hh +++ b/src/arch/sparc/regs/misc.hh @@ -31,6 +31,8 @@ #include "base/bitunion.hh" #include "base/types.hh" +#include "cpu/reg_class.hh" +#include "debug/MiscRegs.hh" namespace gem5 { @@ -172,6 +174,9 @@ struct STS const int NumMiscRegs = MISCREG_NUMMISCREGS; +inline constexpr RegClass miscRegClass(MiscRegClass, MiscRegClassName, + NumMiscRegs, debug::MiscRegs); + } // namespace SparcISA } // namespace gem5 diff --git a/src/arch/sparc/remote_gdb.cc b/src/arch/sparc/remote_gdb.cc index 9e1e06e5b0..481332d311 100644 --- 
a/src/arch/sparc/remote_gdb.cc +++ b/src/arch/sparc/remote_gdb.cc @@ -177,15 +177,15 @@ RemoteGDB::SPARCGdbRegCache::getRegs(ThreadContext *context) { DPRINTF(GDBAcc, "getRegs in remotegdb \n"); for (int i = 0; i < 32; i++) - r.gpr[i] = htobe((uint32_t)context->readIntReg(i)); + r.gpr[i] = htobe((uint32_t)context->getReg(intRegClass[i])); auto &pc = context->pcState().as(); r.pc = htobe((uint32_t)pc.pc()); r.npc = htobe((uint32_t)pc.npc()); - r.y = htobe((uint32_t)context->readIntReg(INTREG_Y)); + r.y = htobe((uint32_t)context->getReg(int_reg::Y)); PSTATE pstate = context->readMiscReg(MISCREG_PSTATE); r.psr = htobe((uint32_t)pstate); r.fsr = htobe((uint32_t)context->readMiscReg(MISCREG_FSR)); - r.csr = htobe((uint32_t)context->readIntReg(INTREG_CCR)); + r.csr = htobe((uint32_t)context->getReg(int_reg::Ccr)); } void @@ -193,7 +193,7 @@ RemoteGDB::SPARC64GdbRegCache::getRegs(ThreadContext *context) { DPRINTF(GDBAcc, "getRegs in remotegdb \n"); for (int i = 0; i < 32; i++) - r.gpr[i] = htobe(context->readIntReg(i)); + r.gpr[i] = htobe(context->getReg(intRegClass[i])); for (int i = 0; i < 32; i++) r.fpr[i] = 0; auto &pc = context->pcState().as(); @@ -201,20 +201,20 @@ RemoteGDB::SPARC64GdbRegCache::getRegs(ThreadContext *context) r.npc = htobe(pc.npc()); r.fsr = htobe(context->readMiscReg(MISCREG_FSR)); r.fprs = htobe(context->readMiscReg(MISCREG_FPRS)); - r.y = htobe(context->readIntReg(INTREG_Y)); + r.y = htobe(context->getReg(int_reg::Y)); PSTATE pstate = context->readMiscReg(MISCREG_PSTATE); r.state = htobe( context->readMiscReg(MISCREG_CWP) | pstate << 8 | context->readMiscReg(MISCREG_ASI) << 24 | - context->readIntReg(INTREG_CCR) << 32); + context->getReg(int_reg::Ccr) << 32); } void RemoteGDB::SPARCGdbRegCache::setRegs(ThreadContext *context) const { for (int i = 0; i < 32; i++) - context->setIntReg(i, r.gpr[i]); + context->setReg(intRegClass[i], r.gpr[i]); PCState pc; pc.pc(r.pc); pc.npc(r.npc); @@ -231,7 +231,7 @@ void 
RemoteGDB::SPARC64GdbRegCache::setRegs(ThreadContext *context) const { for (int i = 0; i < 32; i++) - context->setIntReg(i, r.gpr[i]); + context->setReg(intRegClass[i], r.gpr[i]); PCState pc; pc.pc(r.pc); pc.npc(r.npc); diff --git a/src/arch/sparc/se_workload.cc b/src/arch/sparc/se_workload.cc index c87244fa93..953c0837a1 100644 --- a/src/arch/sparc/se_workload.cc +++ b/src/arch/sparc/se_workload.cc @@ -41,8 +41,9 @@ namespace gem5 namespace SparcISA { -const std::vector SEWorkload::BaseSyscallABI::ArgumentRegs = { - INTREG_O0, INTREG_O1, INTREG_O2, INTREG_O3, INTREG_O4, INTREG_O5 +const std::vector SEWorkload::BaseSyscallABI::ArgumentRegs = { + int_reg::O0, int_reg::O1, int_reg::O2, + int_reg::O3, int_reg::O4, int_reg::O5 }; bool @@ -96,9 +97,9 @@ SEWorkload::handleTrap(ThreadContext *tc, int trapNum) void SEWorkload::flushWindows(ThreadContext *tc) { - RegVal Cansave = tc->readIntReg(INTREG_CANSAVE); - RegVal Canrestore = tc->readIntReg(INTREG_CANRESTORE); - RegVal Otherwin = tc->readIntReg(INTREG_OTHERWIN); + RegVal Cansave = tc->getReg(int_reg::Cansave); + RegVal Canrestore = tc->getReg(int_reg::Canrestore); + RegVal Otherwin = tc->getReg(int_reg::Otherwin); RegVal CWP = tc->readMiscReg(MISCREG_CWP); RegVal origCWP = CWP; @@ -114,15 +115,16 @@ SEWorkload::flushWindows(ThreadContext *tc) tc->setMiscReg(MISCREG_CWP, CWP); // Do the stores - RegVal sp = tc->readIntReg(StackPointerReg); + RegVal sp = tc->getReg(StackPointerReg); Addr addr = is_64 ? 
sp + 2047 : sp; for (int index = 16; index < 32; index++) { + RegId reg = intRegClass[index]; if (is_64) { - uint64_t regVal = htobe(tc->readIntReg(index)); + uint64_t regVal = htobe(tc->getReg(reg)); memcpy(bytes, ®Val, reg_bytes); } else { - uint32_t regVal = htobe(tc->readIntReg(index)); + uint32_t regVal = htobe(tc->getReg(reg)); memcpy(bytes, ®Val, reg_bytes); } if (!proxy.tryWriteBlob(addr, bytes, reg_bytes)) { @@ -136,8 +138,8 @@ SEWorkload::flushWindows(ThreadContext *tc) CWP = (CWP + 1) % NWindows; } - tc->setIntReg(INTREG_CANSAVE, Cansave); - tc->setIntReg(INTREG_CANRESTORE, Canrestore); + tc->setReg(int_reg::Cansave, Cansave); + tc->setReg(int_reg::Canrestore, Canrestore); tc->setMiscReg(MISCREG_CWP, origCWP); } diff --git a/src/arch/sparc/se_workload.hh b/src/arch/sparc/se_workload.hh index 18988fe66d..8cb373ac90 100644 --- a/src/arch/sparc/se_workload.hh +++ b/src/arch/sparc/se_workload.hh @@ -35,6 +35,7 @@ #include "arch/sparc/remote_gdb.hh" #include "base/loader/object_file.hh" #include "cpu/thread_context.hh" +#include "params/SparcSEWorkload.hh" #include "sim/se_workload.hh" #include "sim/syscall_abi.hh" @@ -47,13 +48,15 @@ namespace SparcISA class SEWorkload : public gem5::SEWorkload { public: + PARAMS(SparcSEWorkload); using gem5::SEWorkload::SEWorkload; void setSystem(System *sys) override { gem5::SEWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } virtual void handleTrap(ThreadContext *tc, int trapNum); @@ -63,7 +66,7 @@ class SEWorkload : public gem5::SEWorkload struct BaseSyscallABI { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; struct SyscallABI32 : public GenericSyscallABI32, @@ -94,7 +97,7 @@ struct ResultreadMiscRegNoEffect(SparcISA::MISCREG_PSTATE); - SparcISA::CCR ccr = tc->readIntReg(SparcISA::INTREG_CCR); + SparcISA::CCR ccr = tc->getReg(SparcISA::int_reg::Ccr); RegVal val; if (ret.successful()) { ccr.xcc.c = 
ccr.icc.c = 0; @@ -103,12 +106,12 @@ struct ResultsetIntReg(SparcISA::INTREG_CCR, ccr); + tc->setReg(SparcISA::int_reg::Ccr, ccr); if (pstate.am) val = bits(val, 31, 0); - tc->setIntReg(SparcISA::ReturnValueReg, val); + tc->setReg(SparcISA::ReturnValueReg, val); if (ret.count() == 2) - tc->setIntReg(SparcISA::SyscallPseudoReturnReg, ret.value2()); + tc->setReg(SparcISA::SyscallPseudoReturnReg, ret.value2()); } }; diff --git a/src/arch/x86/AtomicSimpleCPU.py b/src/arch/x86/AtomicSimpleCPU.py deleted file mode 100644 index 432346102c..0000000000 --- a/src/arch/x86/AtomicSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.X86CPU import X86AtomicSimpleCPU - -AtomicSimpleCPU = X86AtomicSimpleCPU diff --git a/src/arch/x86/NonCachingSimpleCPU.py b/src/arch/x86/NonCachingSimpleCPU.py deleted file mode 100644 index 0559bf6082..0000000000 --- a/src/arch/x86/NonCachingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.X86CPU import X86NonCachingSimpleCPU - -NonCachingSimpleCPU = X86NonCachingSimpleCPU diff --git a/src/arch/x86/O3CPU.py b/src/arch/x86/O3CPU.py deleted file mode 100644 index a81acf1234..0000000000 --- a/src/arch/x86/O3CPU.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.X86CPU import X86O3CPU - -O3CPU = X86O3CPU - -# Deprecated -DerivO3CPU = O3CPU diff --git a/src/arch/x86/SConscript b/src/arch/x86/SConscript index 9eeefd76f4..97c34f51c3 100644 --- a/src/arch/x86/SConscript +++ b/src/arch/x86/SConscript @@ -40,6 +40,9 @@ Import('*') +if env['USE_X86_ISA']: + env.TagImplies('x86 isa', 'gem5 lib') + Source('cpuid.cc', tags='x86 isa') Source('decoder.cc', tags='x86 isa') Source('decoder_tables.cc', tags='x86 isa') @@ -74,10 +77,6 @@ SimObject('X86TLB.py', sim_objects=['X86PagetableWalker', 'X86TLB'], tags='x86 isa') SimObject('X86CPU.py', sim_objects=[], tags='x86 isa') -SimObject('AtomicSimpleCPU.py', sim_objects=[], tags='x86 isa') -SimObject('TimingSimpleCPU.py', sim_objects=[], tags='x86 isa') -SimObject('NonCachingSimpleCPU.py', sim_objects=[], tags='x86 isa') -SimObject('O3CPU.py', sim_objects=[], tags='x86 isa') DebugFlag('LocalApic', "Local APIC debugging", tags='x86 isa') DebugFlag('X86', "Generic X86 ISA debugging", tags='x86 isa') diff --git a/src/arch/x86/SConsopts b/src/arch/x86/SConsopts index 93dff8ca5a..425c92145f 100644 --- a/src/arch/x86/SConsopts +++ b/src/arch/x86/SConsopts @@ -1,7 +1,4 @@ -# -*- mode:python -*- - -# Copyright (c) 2007 The Hewlett-Packard Development Company -# All rights reserved. +# Copyright 2021 Google, Inc. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,5 +24,4 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Import('*') - -main.Append(ALL_ISAS=['x86']) +sticky_vars.Add(BoolVariable('USE_X86_ISA', 'Enable X86 ISA support', False)) diff --git a/src/arch/x86/TimingSimpleCPU.py b/src/arch/x86/TimingSimpleCPU.py deleted file mode 100644 index cf6c529b13..0000000000 --- a/src/arch/x86/TimingSimpleCPU.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google, Inc. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from m5.objects.X86CPU import X86TimingSimpleCPU - -TimingSimpleCPU = X86TimingSimpleCPU diff --git a/src/arch/x86/X86CPU.py b/src/arch/x86/X86CPU.py index 0b46c94c6e..bd39f6d0f5 100644 --- a/src/arch/x86/X86CPU.py +++ b/src/arch/x86/X86CPU.py @@ -29,26 +29,62 @@ from m5.objects.BaseAtomicSimpleCPU import BaseAtomicSimpleCPU from m5.objects.BaseNonCachingSimpleCPU import BaseNonCachingSimpleCPU from m5.objects.BaseTimingSimpleCPU import BaseTimingSimpleCPU from m5.objects.BaseO3CPU import BaseO3CPU +from m5.objects.BaseMinorCPU import BaseMinorCPU +from m5.objects.FuncUnit import * +from m5.objects.FUPool import * from m5.objects.X86Decoder import X86Decoder from m5.objects.X86MMU import X86MMU from m5.objects.X86LocalApic import X86LocalApic from m5.objects.X86ISA import X86ISA + class X86CPU: ArchDecoder = X86Decoder ArchMMU = X86MMU ArchInterrupts = X86LocalApic ArchISA = X86ISA + class X86AtomicSimpleCPU(BaseAtomicSimpleCPU, X86CPU): mmu = X86MMU() + class X86NonCachingSimpleCPU(BaseNonCachingSimpleCPU, X86CPU): mmu = X86MMU() + class X86TimingSimpleCPU(BaseTimingSimpleCPU, X86CPU): mmu = X86MMU() + +class X86IntMultDiv(IntMultDiv): + # DIV and IDIV instructions in x86 are implemented using a loop which + # issues division microops. The latency of these microops should really be + # one (or a small number) cycle each since each of these computes one bit + # of the quotient. 
+ opList = [ + OpDesc(opClass="IntMult", opLat=3), + OpDesc(opClass="IntDiv", opLat=1, pipelined=False), + ] + + count = 2 + + +class DefaultX86FUPool(FUPool): + FUList = [ + IntALU(), + X86IntMultDiv(), + FP_ALU(), + FP_MultDiv(), + ReadPort(), + SIMD_Unit(), + PredALU(), + WritePort(), + RdWrPort(), + IprPort(), + ] + + class X86O3CPU(BaseO3CPU, X86CPU): mmu = X86MMU() needsTSO = True @@ -60,3 +96,13 @@ class X86O3CPU(BaseO3CPU, X86CPU): # (it's a side effect of int reg renaming), so they should # never be the bottleneck here. numPhysCCRegs = Self.numPhysIntRegs * 5 + + # DIV and IDIV instructions in x86 are implemented using a loop which + # issues division microops. The latency of these microops should really be + # one (or a small number) cycle each since each of these computes one bit + # of the quotient. + fuPool = DefaultX86FUPool() + + +class X86MinorCPU(BaseMinorCPU, X86CPU): + mmu = X86MMU() diff --git a/src/arch/x86/X86Decoder.py b/src/arch/x86/X86Decoder.py index e73367b61f..e36e16c3e4 100644 --- a/src/arch/x86/X86Decoder.py +++ b/src/arch/x86/X86Decoder.py @@ -25,7 +25,8 @@ from m5.objects.InstDecoder import InstDecoder + class X86Decoder(InstDecoder): - type = 'X86Decoder' - cxx_class = 'gem5::X86ISA::Decoder' + type = "X86Decoder" + cxx_class = "gem5::X86ISA::Decoder" cxx_header = "arch/x86/decoder.hh" diff --git a/src/arch/x86/X86FsWorkload.py b/src/arch/x86/X86FsWorkload.py index 52dbadfe55..294241b51c 100644 --- a/src/arch/x86/X86FsWorkload.py +++ b/src/arch/x86/X86FsWorkload.py @@ -41,31 +41,37 @@ from m5.objects.IntelMP import X86IntelMPFloatingPointer, X86IntelMPConfigTable from m5.objects.ACPI import X86ACPIRSDP from m5.objects.Workload import KernelWorkload, Workload + class X86BareMetalWorkload(Workload): - type = 'X86BareMetalWorkload' - cxx_header = 'arch/x86/bare_metal/workload.hh' - cxx_class = 'gem5::X86ISA::BareMetalWorkload' + type = "X86BareMetalWorkload" + cxx_header = "arch/x86/bare_metal/workload.hh" + cxx_class = 
"gem5::X86ISA::BareMetalWorkload" + class X86FsWorkload(KernelWorkload): - type = 'X86FsWorkload' - cxx_header = 'arch/x86/fs_workload.hh' - cxx_class = 'gem5::X86ISA::FsWorkload' + type = "X86FsWorkload" + cxx_header = "arch/x86/fs_workload.hh" + cxx_class = "gem5::X86ISA::FsWorkload" smbios_table = Param.X86SMBiosSMBiosTable( - X86SMBiosSMBiosTable(), 'table of smbios/dmi information') + X86SMBiosSMBiosTable(), "table of smbios/dmi information" + ) intel_mp_pointer = Param.X86IntelMPFloatingPointer( - X86IntelMPFloatingPointer(), - 'intel mp spec floating pointer structure') + X86IntelMPFloatingPointer(), "intel mp spec floating pointer structure" + ) intel_mp_table = Param.X86IntelMPConfigTable( - X86IntelMPConfigTable(), - 'intel mp spec configuration table') + X86IntelMPConfigTable(), "intel mp spec configuration table" + ) acpi_description_table_pointer = Param.X86ACPIRSDP( - X86ACPIRSDP(), 'ACPI root description pointer structure') + X86ACPIRSDP(), "ACPI root description pointer structure" + ) + class X86FsLinux(X86FsWorkload): - type = 'X86FsLinux' - cxx_header = 'arch/x86/linux/fs_workload.hh' - cxx_class = 'gem5::X86ISA::FsLinux' + type = "X86FsLinux" + cxx_header = "arch/x86/linux/fs_workload.hh" + cxx_class = "gem5::X86ISA::FsLinux" e820_table = Param.X86E820Table( - X86E820Table(), 'E820 map of physical memory') + X86E820Table(), "E820 map of physical memory" + ) diff --git a/src/arch/x86/X86ISA.py b/src/arch/x86/X86ISA.py index ea27bf8e1f..bb72c415e9 100644 --- a/src/arch/x86/X86ISA.py +++ b/src/arch/x86/X86ISA.py @@ -36,10 +36,21 @@ from m5.objects.BaseISA import BaseISA from m5.params import * + class X86ISA(BaseISA): - type = 'X86ISA' - cxx_class = 'gem5::X86ISA::ISA' + type = "X86ISA" + cxx_class = "gem5::X86ISA::ISA" cxx_header = "arch/x86/isa.hh" - vendor_string = Param.String("M5 Simulator", - "Vendor string for CPUID instruction") + # Here we set the default vector string to "HygonGenuine". 
Previously this + # "M5 Simulator" but due to stricter checks in newer versions of GLIBC, + # the CPUID is checked for the required features. As "M5 Simulator" is not + # genuine CPUID, an error is returned. This change + # https://gem5-review.googlesource.com/c/public/gem5/+/64831 changed this + # to "GenuineAMD" but due to issues with booting the Linux Kernel using + # this vector string (highlighted here: + # https://gem5.atlassian.net/browse/GEM5-1300) we opted to use + # "HygonGenuine" instead. + vendor_string = Param.String( + "HygonGenuine", "Vendor string for CPUID instruction" + ) diff --git a/src/arch/x86/X86LocalApic.py b/src/arch/x86/X86LocalApic.py index e9a31aaf5d..d7defce7e5 100644 --- a/src/arch/x86/X86LocalApic.py +++ b/src/arch/x86/X86LocalApic.py @@ -44,33 +44,38 @@ from m5.objects.BaseInterrupts import BaseInterrupts from m5.objects.ClockDomain import DerivedClockDomain from m5.objects.IntPin import IntSinkPin + class X86LocalApic(BaseInterrupts): - type = 'X86LocalApic' - cxx_class = 'gem5::X86ISA::Interrupts' - cxx_header = 'arch/x86/interrupts.hh' + type = "X86LocalApic" + cxx_class = "gem5::X86ISA::Interrupts" + cxx_header = "arch/x86/interrupts.hh" int_requestor = RequestPort("Port for sending interrupt messages") - int_master = DeprecatedParam(int_requestor, - '`int_master` is now called `int_requestor`') + int_master = DeprecatedParam( + int_requestor, "`int_master` is now called `int_requestor`" + ) int_responder = ResponsePort("Port for receiving interrupt messages") - int_slave = DeprecatedParam(int_responder, - '`int_slave` is now called `int_responder`') + int_slave = DeprecatedParam( + int_responder, "`int_slave` is now called `int_responder`" + ) - lint0 = IntSinkPin('Local interrupt pin 0') - lint1 = IntSinkPin('Local interrupt pin 1') + lint0 = IntSinkPin("Local interrupt pin 0") + lint1 = IntSinkPin("Local interrupt pin 1") - int_latency = Param.Latency('1ns', \ - "Latency for an interrupt to propagate through this device.") + 
int_latency = Param.Latency( + "1ns", "Latency for an interrupt to propagate through this device." + ) pio = ResponsePort("Programmed I/O port") system = Param.System(Parent.any, "System this device is part of") - pio_latency = Param.Latency('100ns', 'Programmed IO latency') + pio_latency = Param.Latency("100ns", "Programmed IO latency") # The clock rate for the local APIC timer is supposed to be the "bus clock" # which we assume is 1/16th the rate of the CPU clock. I don't think this # is a hard rule, but seems to be true in practice. This can be overriden # in configs that use it. clk_domain = Param.DerivedClockDomain( - DerivedClockDomain(clk_domain=Parent.clk_domain, clk_divider=16), - "The clock for the local APIC. Should not be modified.") + DerivedClockDomain(clk_domain=Parent.clk_domain, clk_divider=16), + "The clock for the local APIC. Should not be modified.", + ) diff --git a/src/arch/x86/X86MMU.py b/src/arch/x86/X86MMU.py index cbee8a3f08..4cb91774b7 100644 --- a/src/arch/x86/X86MMU.py +++ b/src/arch/x86/X86MMU.py @@ -38,10 +38,11 @@ from m5.objects.BaseMMU import BaseMMU from m5.objects.X86TLB import X86TLB + class X86MMU(BaseMMU): - type = 'X86MMU' - cxx_class = 'gem5::X86ISA::MMU' - cxx_header = 'arch/x86/mmu.hh' + type = "X86MMU" + cxx_class = "gem5::X86ISA::MMU" + cxx_header = "arch/x86/mmu.hh" itb = X86TLB(entry_type="instruction") dtb = X86TLB(entry_type="data") diff --git a/src/arch/x86/X86NativeTrace.py b/src/arch/x86/X86NativeTrace.py index 1be9c941ae..d0b94ecc40 100644 --- a/src/arch/x86/X86NativeTrace.py +++ b/src/arch/x86/X86NativeTrace.py @@ -29,7 +29,8 @@ from m5.params import * from m5.objects.CPUTracers import NativeTrace + class X86NativeTrace(NativeTrace): - type = 'X86NativeTrace' - cxx_class = 'gem5::Trace::X86NativeTrace' - cxx_header = 'arch/x86/nativetrace.hh' + type = "X86NativeTrace" + cxx_class = "gem5::trace::X86NativeTrace" + cxx_header = "arch/x86/nativetrace.hh" diff --git a/src/arch/x86/X86SeWorkload.py 
b/src/arch/x86/X86SeWorkload.py index 4a70e01e72..6674bdb9a5 100644 --- a/src/arch/x86/X86SeWorkload.py +++ b/src/arch/x86/X86SeWorkload.py @@ -27,12 +27,15 @@ from m5.params import * from m5.objects.Workload import SEWorkload + class X86EmuLinux(SEWorkload): - type = 'X86EmuLinux' + type = "X86EmuLinux" cxx_header = "arch/x86/linux/se_workload.hh" - cxx_class = 'gem5::X86ISA::EmuLinux' + cxx_class = "gem5::X86ISA::EmuLinux" @classmethod def _is_compatible_with(cls, obj): - return obj.get_arch() in ('x86_64', 'i386') and \ - obj.get_op_sys() in ('linux', 'unknown') + return obj.get_arch() in ("x86_64", "i386") and obj.get_op_sys() in ( + "linux", + "unknown", + ) diff --git a/src/arch/x86/X86TLB.py b/src/arch/x86/X86TLB.py index 8abc93c19b..8532ddf8c6 100644 --- a/src/arch/x86/X86TLB.py +++ b/src/arch/x86/X86TLB.py @@ -39,22 +39,26 @@ from m5.proxy import * from m5.objects.BaseTLB import BaseTLB from m5.objects.ClockedObject import ClockedObject + class X86PagetableWalker(ClockedObject): - type = 'X86PagetableWalker' - cxx_class = 'gem5::X86ISA::Walker' - cxx_header = 'arch/x86/pagetable_walker.hh' + type = "X86PagetableWalker" + cxx_class = "gem5::X86ISA::Walker" + cxx_header = "arch/x86/pagetable_walker.hh" port = RequestPort("Port for the hardware table walker") system = Param.System(Parent.any, "system object") - num_squash_per_cycle = Param.Unsigned(4, - "Number of outstanding walks that can be squashed per cycle") + num_squash_per_cycle = Param.Unsigned( + 4, "Number of outstanding walks that can be squashed per cycle" + ) + class X86TLB(BaseTLB): - type = 'X86TLB' - cxx_class = 'gem5::X86ISA::TLB' - cxx_header = 'arch/x86/tlb.hh' + type = "X86TLB" + cxx_class = "gem5::X86ISA::TLB" + cxx_header = "arch/x86/tlb.hh" size = Param.Unsigned(64, "TLB size") system = Param.System(Parent.any, "system object") - walker = Param.X86PagetableWalker(\ - X86PagetableWalker(), "page table walker") + walker = Param.X86PagetableWalker( + X86PagetableWalker(), "page table 
walker" + ) diff --git a/src/arch/x86/bios/ACPI.py b/src/arch/x86/bios/ACPI.py index c20096f734..fbbeda015c 100644 --- a/src/arch/x86/bios/ACPI.py +++ b/src/arch/x86/bios/ACPI.py @@ -39,106 +39,115 @@ from m5.SimObject import SimObject # ACPI description table header. Subclasses contain and handle the actual # contents as appropriate for that type of table. class X86ACPISysDescTable(SimObject): - type = 'X86ACPISysDescTable' - cxx_class = 'gem5::X86ISA::ACPI::SysDescTable' - cxx_header = 'arch/x86/bios/acpi.hh' + type = "X86ACPISysDescTable" + cxx_class = "gem5::X86ISA::ACPI::SysDescTable" + cxx_header = "arch/x86/bios/acpi.hh" abstract = True - oem_id = Param.String('', 'string identifying the oem') - oem_table_id = Param.String('', 'oem table ID') - oem_revision = Param.UInt32(0, 'oem revision number for the table') + oem_id = Param.String("", "string identifying the oem") + oem_table_id = Param.String("", "oem table ID") + oem_revision = Param.UInt32(0, "oem revision number for the table") + + creator_id = Param.UInt32(0, "ID identifying the generator of the table") + creator_revision = Param.UInt32( + 0, "revision number for the creator of the table" + ) - creator_id = Param.UInt32(0, - 'ID identifying the generator of the table') - creator_revision = Param.UInt32(0, - 'revision number for the creator of the table') class X86ACPIRSDT(X86ACPISysDescTable): - type = 'X86ACPIRSDT' - cxx_class = 'gem5::X86ISA::ACPI::RSDT' - cxx_header = 'arch/x86/bios/acpi.hh' + type = "X86ACPIRSDT" + cxx_class = "gem5::X86ISA::ACPI::RSDT" + cxx_header = "arch/x86/bios/acpi.hh" + + entries = VectorParam.X86ACPISysDescTable([], "system description tables") - entries = VectorParam.X86ACPISysDescTable([], 'system description tables') class X86ACPIXSDT(X86ACPISysDescTable): - type = 'X86ACPIXSDT' - cxx_class = 'gem5::X86ISA::ACPI::XSDT' - cxx_header = 'arch/x86/bios/acpi.hh' + type = "X86ACPIXSDT" + cxx_class = "gem5::X86ISA::ACPI::XSDT" + cxx_header = "arch/x86/bios/acpi.hh" - entries 
= VectorParam.X86ACPISysDescTable([], 'system description tables') + entries = VectorParam.X86ACPISysDescTable([], "system description tables") class X86ACPIMadtRecord(SimObject): - type = 'X86ACPIMadtRecord' - cxx_class = 'gem5::X86ISA::ACPI::MADT::Record' - cxx_header = 'arch/x86/bios/acpi.hh' + type = "X86ACPIMadtRecord" + cxx_class = "gem5::X86ISA::ACPI::MADT::Record" + cxx_header = "arch/x86/bios/acpi.hh" abstract = True -class X86ACPIMadt(X86ACPISysDescTable): - type = 'X86ACPIMadt' - cxx_class = 'gem5::X86ISA::ACPI::MADT::MADT' - cxx_header = 'arch/x86/bios/acpi.hh' - local_apic_address = Param.UInt32(0, 'Address of the local apic') - flags = Param.UInt32(0, 'Flags') - records = VectorParam.X86ACPIMadtRecord([], 'Records in this MADT') +class X86ACPIMadt(X86ACPISysDescTable): + type = "X86ACPIMadt" + cxx_class = "gem5::X86ISA::ACPI::MADT::MADT" + cxx_header = "arch/x86/bios/acpi.hh" + + local_apic_address = Param.UInt32(0, "Address of the local apic") + flags = Param.UInt32(0, "Flags") + records = VectorParam.X86ACPIMadtRecord([], "Records in this MADT") + class X86ACPIMadtLAPIC(X86ACPIMadtRecord): - type = 'X86ACPIMadtLAPIC' - cxx_header = 'arch/x86/bios/acpi.hh' - cxx_class = 'gem5::X86ISA::ACPI::MADT::LAPIC' + type = "X86ACPIMadtLAPIC" + cxx_header = "arch/x86/bios/acpi.hh" + cxx_class = "gem5::X86ISA::ACPI::MADT::LAPIC" + + acpi_processor_id = Param.UInt8(0, "ACPI Processor ID") + apic_id = Param.UInt8(0, "APIC ID") + flags = Param.UInt32(0, "Flags") - acpi_processor_id = Param.UInt8(0, 'ACPI Processor ID') - apic_id = Param.UInt8(0, 'APIC ID') - flags = Param.UInt32(0, 'Flags') class X86ACPIMadtIOAPIC(X86ACPIMadtRecord): - type = 'X86ACPIMadtIOAPIC' - cxx_header = 'arch/x86/bios/acpi.hh' - cxx_class = 'gem5::X86ISA::ACPI::MADT::IOAPIC' + type = "X86ACPIMadtIOAPIC" + cxx_header = "arch/x86/bios/acpi.hh" + cxx_class = "gem5::X86ISA::ACPI::MADT::IOAPIC" + + id = Param.UInt8(0, "I/O APIC ID") + address = Param.Addr(0, "I/O APIC Address") + int_base = 
Param.UInt32(0, "Global Interrupt Base") - id = Param.UInt8(0, 'I/O APIC ID') - address = Param.Addr(0, 'I/O APIC Address') - int_base = Param.UInt32(0, 'Global Interrupt Base') class X86ACPIMadtIntSourceOverride(X86ACPIMadtRecord): - type = 'X86ACPIMadtIntSourceOverride' - cxx_header = 'arch/x86/bios/acpi.hh' - cxx_class = 'gem5::X86ISA::ACPI::MADT::IntSourceOverride' + type = "X86ACPIMadtIntSourceOverride" + cxx_header = "arch/x86/bios/acpi.hh" + cxx_class = "gem5::X86ISA::ACPI::MADT::IntSourceOverride" + + bus_source = Param.UInt8(0, "Bus Source") + irq_source = Param.UInt8(0, "IRQ Source") + sys_int = Param.UInt32(0, "Global System Interrupt") + flags = Param.UInt16(0, "Flags") - bus_source = Param.UInt8(0, 'Bus Source') - irq_source = Param.UInt8(0, 'IRQ Source') - sys_int = Param.UInt32(0, 'Global System Interrupt') - flags = Param.UInt16(0, 'Flags') class X86ACPIMadtNMI(X86ACPIMadtRecord): - type = 'X86ACPIMadtNMI' - cxx_header = 'arch/x86/bios/acpi.hh' - cxx_class = 'gem5::X86ISA::ACPI::MADT::NMI' + type = "X86ACPIMadtNMI" + cxx_header = "arch/x86/bios/acpi.hh" + cxx_class = "gem5::X86ISA::ACPI::MADT::NMI" + + acpi_processor_id = Param.UInt8(0, "ACPI Processor ID") + flags = Param.UInt16(0, "Flags") + lint_no = Param.UInt8(0, "LINT# (0 or 1)") - acpi_processor_id = Param.UInt8(0, 'ACPI Processor ID') - flags = Param.UInt16(0, 'Flags') - lint_no = Param.UInt8(0, 'LINT# (0 or 1)') class X86ACPIMadtLAPICOverride(X86ACPIMadtRecord): - type = 'X86ACPIMadtLAPICOverride' - cxx_header = 'arch/x86/bios/acpi.hh' - cxx_class = 'gem5::X86ISA::ACPI::MADT::LAPICOverride' + type = "X86ACPIMadtLAPICOverride" + cxx_header = "arch/x86/bios/acpi.hh" + cxx_class = "gem5::X86ISA::ACPI::MADT::LAPICOverride" + + address = Param.Addr(0, "64-bit Physical Address of Local APIC") - address = Param.Addr(0, '64-bit Physical Address of Local APIC') # Root System Description Pointer Structure class X86ACPIRSDP(SimObject): - type = 'X86ACPIRSDP' - cxx_class = 'gem5::X86ISA::ACPI::RSDP' - 
cxx_header = 'arch/x86/bios/acpi.hh' + type = "X86ACPIRSDP" + cxx_class = "gem5::X86ISA::ACPI::RSDP" + cxx_header = "arch/x86/bios/acpi.hh" - oem_id = Param.String('', 'string identifying the oem') + oem_id = Param.String("", "string identifying the oem") # Because 0 encodes ACPI 1.0, 2 encodes ACPI 3.0, the version implemented # here. - revision = Param.UInt8(2, 'revision of ACPI being used, zero indexed') + revision = Param.UInt8(2, "revision of ACPI being used, zero indexed") - rsdt = Param.X86ACPIRSDT(X86ACPIRSDT(), - 'root system description table') - xsdt = Param.X86ACPIXSDT(X86ACPIXSDT(), - 'extended system description table') + rsdt = Param.X86ACPIRSDT(X86ACPIRSDT(), "root system description table") + xsdt = Param.X86ACPIXSDT( + X86ACPIXSDT(), "extended system description table" + ) diff --git a/src/arch/x86/bios/E820.py b/src/arch/x86/bios/E820.py index 222e4a4454..613d4e7f85 100644 --- a/src/arch/x86/bios/E820.py +++ b/src/arch/x86/bios/E820.py @@ -36,18 +36,20 @@ from m5.params import * from m5.SimObject import SimObject -class X86E820Entry(SimObject): - type = 'X86E820Entry' - cxx_class = 'gem5::X86ISA::E820Entry' - cxx_header = 'arch/x86/bios/e820.hh' - addr = Param.Addr(0, 'address of the beginning of the region') - size = Param.MemorySize('0B', 'size of the region') - range_type = Param.UInt64('type of the region') +class X86E820Entry(SimObject): + type = "X86E820Entry" + cxx_class = "gem5::X86ISA::E820Entry" + cxx_header = "arch/x86/bios/e820.hh" + + addr = Param.Addr(0, "address of the beginning of the region") + size = Param.MemorySize("0B", "size of the region") + range_type = Param.UInt64("type of the region") + class X86E820Table(SimObject): - type = 'X86E820Table' - cxx_class = 'gem5::X86ISA::E820Table' - cxx_header = 'arch/x86/bios/e820.hh' + type = "X86E820Table" + cxx_class = "gem5::X86ISA::E820Table" + cxx_header = "arch/x86/bios/e820.hh" - entries = VectorParam.X86E820Entry('entries for the e820 table') + entries = 
VectorParam.X86E820Entry("entries for the e820 table") diff --git a/src/arch/x86/bios/IntelMP.py b/src/arch/x86/bios/IntelMP.py index 18bd487bc5..3471f50540 100644 --- a/src/arch/x86/bios/IntelMP.py +++ b/src/arch/x86/bios/IntelMP.py @@ -36,37 +36,43 @@ from m5.params import * from m5.SimObject import SimObject + class X86IntelMPFloatingPointer(SimObject): - type = 'X86IntelMPFloatingPointer' - cxx_class = 'gem5::X86ISA::intelmp::FloatingPointer' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPFloatingPointer" + cxx_class = "gem5::X86ISA::intelmp::FloatingPointer" + cxx_header = "arch/x86/bios/intelmp.hh" # The minor revision of the spec to support. The major version is assumed # to be 1 in accordance with the spec. - spec_rev = Param.UInt8(4, 'minor revision of the MP spec supported') + spec_rev = Param.UInt8(4, "minor revision of the MP spec supported") # If no default configuration is used, set this to 0. - default_config = Param.UInt8(0, 'which default configuration to use') - imcr_present = Param.Bool(True, - 'whether the IMCR register is present in the APIC') + default_config = Param.UInt8(0, "which default configuration to use") + imcr_present = Param.Bool( + True, "whether the IMCR register is present in the APIC" + ) + class X86IntelMPConfigTable(SimObject): - type = 'X86IntelMPConfigTable' - cxx_class = 'gem5::X86ISA::intelmp::ConfigTable' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPConfigTable" + cxx_class = "gem5::X86ISA::intelmp::ConfigTable" + cxx_header = "arch/x86/bios/intelmp.hh" - spec_rev = Param.UInt8(4, 'minor revision of the MP spec supported') - oem_id = Param.String("", 'system manufacturer') - product_id = Param.String("", 'product family') - oem_table_addr = Param.UInt32(0, - 'pointer to the optional oem configuration table') - oem_table_size = Param.UInt16(0, 'size of the oem configuration table') - local_apic = Param.UInt32(0xFEE00000, 'address of the local APIC') + spec_rev = Param.UInt8(4, "minor 
revision of the MP spec supported") + oem_id = Param.String("", "system manufacturer") + product_id = Param.String("", "product family") + oem_table_addr = Param.UInt32( + 0, "pointer to the optional oem configuration table" + ) + oem_table_size = Param.UInt16(0, "size of the oem configuration table") + local_apic = Param.UInt32(0xFEE00000, "address of the local APIC") - base_entries = VectorParam.X86IntelMPBaseConfigEntry([], - 'base configuration table entries') + base_entries = VectorParam.X86IntelMPBaseConfigEntry( + [], "base configuration table entries" + ) - ext_entries = VectorParam.X86IntelMPExtConfigEntry([], - 'extended configuration table entries') + ext_entries = VectorParam.X86IntelMPExtConfigEntry( + [], "extended configuration table entries" + ) def add_entry(self, entry): if isinstance(entry, X86IntelMPBaseConfigEntry): @@ -74,161 +80,177 @@ class X86IntelMPConfigTable(SimObject): elif isinstance(entry, X86IntelMPExtConfigEntry): self.ext_entries.append(entry) else: - panic("Don't know what type of Intel MP entry %s is." \ - % entry.__class__.__name__) + panic( + "Don't know what type of Intel MP entry %s is." 
+ % entry.__class__.__name__ + ) + class X86IntelMPBaseConfigEntry(SimObject): - type = 'X86IntelMPBaseConfigEntry' - cxx_class = 'gem5::X86ISA::intelmp::BaseConfigEntry' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPBaseConfigEntry" + cxx_class = "gem5::X86ISA::intelmp::BaseConfigEntry" + cxx_header = "arch/x86/bios/intelmp.hh" abstract = True + class X86IntelMPExtConfigEntry(SimObject): - type = 'X86IntelMPExtConfigEntry' - cxx_class = 'gem5::X86ISA::intelmp::ExtConfigEntry' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPExtConfigEntry" + cxx_class = "gem5::X86ISA::intelmp::ExtConfigEntry" + cxx_header = "arch/x86/bios/intelmp.hh" abstract = True + class X86IntelMPProcessor(X86IntelMPBaseConfigEntry): - type = 'X86IntelMPProcessor' - cxx_class = 'gem5::X86ISA::intelmp::Processor' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPProcessor" + cxx_class = "gem5::X86ISA::intelmp::Processor" + cxx_header = "arch/x86/bios/intelmp.hh" - local_apic_id = Param.UInt8(0, 'local APIC id') - local_apic_version = Param.UInt8(0, - 'bits 0-7 of the local APIC version register') - enable = Param.Bool(True, 'if this processor is usable') - bootstrap = Param.Bool(False, 'if this is the bootstrap processor') + local_apic_id = Param.UInt8(0, "local APIC id") + local_apic_version = Param.UInt8( + 0, "bits 0-7 of the local APIC version register" + ) + enable = Param.Bool(True, "if this processor is usable") + bootstrap = Param.Bool(False, "if this is the bootstrap processor") - stepping = Param.UInt8(0, 'Processor stepping') - model = Param.UInt8(0, 'Processor model') - family = Param.UInt8(0, 'Processor family') + stepping = Param.UInt8(0, "Processor stepping") + model = Param.UInt8(0, "Processor model") + family = Param.UInt8(0, "Processor family") + + feature_flags = Param.UInt32(0, "flags returned by the CPUID instruction") - feature_flags = Param.UInt32(0, 'flags returned by the CPUID instruction') class 
X86IntelMPBus(X86IntelMPBaseConfigEntry): - type = 'X86IntelMPBus' - cxx_class = 'gem5::X86ISA::intelmp::Bus' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPBus" + cxx_class = "gem5::X86ISA::intelmp::Bus" + cxx_header = "arch/x86/bios/intelmp.hh" - bus_id = Param.UInt8(0, 'bus id assigned by the bios') - bus_type = Param.String("", 'string that identify the bus type') + bus_id = Param.UInt8(0, "bus id assigned by the bios") + bus_type = Param.String("", "string that identify the bus type") # Legal values for bus_type are [space padded to 6 bytes]: # # "CBUS", "CBUSII", "EISA", "FUTURE", "INTERN", "ISA", "MBI", "MBII", # "MCA", "MPI", "MPSA", "NUBUS", "PCI", "PCMCIA", "TC", "VL", "VME", # "XPRESS" + class X86IntelMPIOAPIC(X86IntelMPBaseConfigEntry): - type = 'X86IntelMPIOAPIC' - cxx_class = 'gem5::X86ISA::intelmp::IOAPIC' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPIOAPIC" + cxx_class = "gem5::X86ISA::intelmp::IOAPIC" + cxx_header = "arch/x86/bios/intelmp.hh" - id = Param.UInt8(0, 'id of this APIC') - version = Param.UInt8(0, 'bits 0-7 of the version register') + id = Param.UInt8(0, "id of this APIC") + version = Param.UInt8(0, "bits 0-7 of the version register") - enable = Param.Bool(True, 'if this APIC is usable') + enable = Param.Bool(True, "if this APIC is usable") + + address = Param.UInt32(0xFEC00000, "address of this APIC") - address = Param.UInt32(0xfec00000, 'address of this APIC') class X86IntelMPInterruptType(Enum): - map = {'INT' : 0, - 'NMI' : 1, - 'SMI' : 2, - 'ExtInt' : 3 - } + map = {"INT": 0, "NMI": 1, "SMI": 2, "ExtInt": 3} + class X86IntelMPPolarity(Enum): - map = {'ConformPolarity' : 0, - 'ActiveHigh' : 1, - 'ActiveLow' : 3 - } + map = {"ConformPolarity": 0, "ActiveHigh": 1, "ActiveLow": 3} + class X86IntelMPTriggerMode(Enum): - map = {'ConformTrigger' : 0, - 'EdgeTrigger' : 1, - 'LevelTrigger' : 3 - } + map = {"ConformTrigger": 0, "EdgeTrigger": 1, "LevelTrigger": 3} + class 
X86IntelMPIOIntAssignment(X86IntelMPBaseConfigEntry): - type = 'X86IntelMPIOIntAssignment' - cxx_class = 'gem5::X86ISA::intelmp::IOIntAssignment' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPIOIntAssignment" + cxx_class = "gem5::X86ISA::intelmp::IOIntAssignment" + cxx_header = "arch/x86/bios/intelmp.hh" - interrupt_type = Param.X86IntelMPInterruptType('INT', 'type of interrupt') + interrupt_type = Param.X86IntelMPInterruptType("INT", "type of interrupt") - polarity = Param.X86IntelMPPolarity('ConformPolarity', 'polarity') - trigger = Param.X86IntelMPTriggerMode('ConformTrigger', 'trigger mode') + polarity = Param.X86IntelMPPolarity("ConformPolarity", "polarity") + trigger = Param.X86IntelMPTriggerMode("ConformTrigger", "trigger mode") - source_bus_id = Param.UInt8(0, - 'id of the bus from which the interrupt signal comes') - source_bus_irq = Param.UInt8(0, - 'which interrupt signal from the source bus') + source_bus_id = Param.UInt8( + 0, "id of the bus from which the interrupt signal comes" + ) + source_bus_irq = Param.UInt8( + 0, "which interrupt signal from the source bus" + ) + + dest_io_apic_id = Param.UInt8( + 0, "id of the IO APIC the interrupt is going to" + ) + dest_io_apic_intin = Param.UInt8( + 0, "the INTIN pin on the IO APIC the interrupt is connected to" + ) - dest_io_apic_id = Param.UInt8(0, - 'id of the IO APIC the interrupt is going to') - dest_io_apic_intin = Param.UInt8(0, - 'the INTIN pin on the IO APIC the interrupt is connected to') class X86IntelMPLocalIntAssignment(X86IntelMPBaseConfigEntry): - type = 'X86IntelMPLocalIntAssignment' - cxx_class = 'gem5::X86ISA::intelmp::LocalIntAssignment' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPLocalIntAssignment" + cxx_class = "gem5::X86ISA::intelmp::LocalIntAssignment" + cxx_header = "arch/x86/bios/intelmp.hh" - interrupt_type = Param.X86IntelMPInterruptType('INT', 'type of interrupt') + interrupt_type = Param.X86IntelMPInterruptType("INT", "type of interrupt") - 
polarity = Param.X86IntelMPPolarity('ConformPolarity', 'polarity') - trigger = Param.X86IntelMPTriggerMode('ConformTrigger', 'trigger mode') + polarity = Param.X86IntelMPPolarity("ConformPolarity", "polarity") + trigger = Param.X86IntelMPTriggerMode("ConformTrigger", "trigger mode") - source_bus_id = Param.UInt8(0, - 'id of the bus from which the interrupt signal comes') - source_bus_irq = Param.UInt8(0, - 'which interrupt signal from the source bus') + source_bus_id = Param.UInt8( + 0, "id of the bus from which the interrupt signal comes" + ) + source_bus_irq = Param.UInt8( + 0, "which interrupt signal from the source bus" + ) + + dest_local_apic_id = Param.UInt8( + 0, "id of the local APIC the interrupt is going to" + ) + dest_local_apic_intin = Param.UInt8( + 0, "the INTIN pin on the local APIC the interrupt is connected to" + ) - dest_local_apic_id = Param.UInt8(0, - 'id of the local APIC the interrupt is going to') - dest_local_apic_intin = Param.UInt8(0, - 'the INTIN pin on the local APIC the interrupt is connected to') class X86IntelMPAddressType(Enum): - map = {"IOAddress" : 0, - "MemoryAddress" : 1, - "PrefetchAddress" : 2 - } + map = {"IOAddress": 0, "MemoryAddress": 1, "PrefetchAddress": 2} + class X86IntelMPAddrSpaceMapping(X86IntelMPExtConfigEntry): - type = 'X86IntelMPAddrSpaceMapping' - cxx_class = 'gem5::X86ISA::intelmp::AddrSpaceMapping' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPAddrSpaceMapping" + cxx_class = "gem5::X86ISA::intelmp::AddrSpaceMapping" + cxx_header = "arch/x86/bios/intelmp.hh" + + bus_id = Param.UInt8(0, "id of the bus the address space is mapped to") + address_type = Param.X86IntelMPAddressType( + "IOAddress", "address type used to access bus" + ) + address = Param.Addr(0, "starting address of the mapping") + length = Param.UInt64(0, "length of mapping in bytes") - bus_id = Param.UInt8(0, 'id of the bus the address space is mapped to') - address_type = Param.X86IntelMPAddressType('IOAddress', - 'address type 
used to access bus') - address = Param.Addr(0, 'starting address of the mapping') - length = Param.UInt64(0, 'length of mapping in bytes') class X86IntelMPBusHierarchy(X86IntelMPExtConfigEntry): - type = 'X86IntelMPBusHierarchy' - cxx_class = 'gem5::X86ISA::intelmp::BusHierarchy' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPBusHierarchy" + cxx_class = "gem5::X86ISA::intelmp::BusHierarchy" + cxx_header = "arch/x86/bios/intelmp.hh" + + bus_id = Param.UInt8(0, "id of the bus being described") + subtractive_decode = Param.Bool( + False, + "whether this bus contains all addresses not used by its children", + ) + parent_bus = Param.UInt8(0, "bus id of this busses parent") - bus_id = Param.UInt8(0, 'id of the bus being described') - subtractive_decode = Param.Bool(False, - 'whether this bus contains all addresses not used by its children') - parent_bus = Param.UInt8(0, 'bus id of this busses parent') class X86IntelMPRangeList(Enum): - map = {"ISACompatible" : 0, - "VGACompatible" : 1 - } + map = {"ISACompatible": 0, "VGACompatible": 1} + class X86IntelMPCompatAddrSpaceMod(X86IntelMPExtConfigEntry): - type = 'X86IntelMPCompatAddrSpaceMod' - cxx_class = 'gem5::X86ISA::intelmp::CompatAddrSpaceMod' - cxx_header = 'arch/x86/bios/intelmp.hh' + type = "X86IntelMPCompatAddrSpaceMod" + cxx_class = "gem5::X86ISA::intelmp::CompatAddrSpaceMod" + cxx_header = "arch/x86/bios/intelmp.hh" - bus_id = Param.UInt8(0, 'id of the bus being described') - add = Param.Bool(False, - 'if the range should be added to the original mapping') - range_list = Param.X86IntelMPRangeList('ISACompatible', - 'which predefined range of addresses to use') + bus_id = Param.UInt8(0, "id of the bus being described") + add = Param.Bool( + False, "if the range should be added to the original mapping" + ) + range_list = Param.X86IntelMPRangeList( + "ISACompatible", "which predefined range of addresses to use" + ) diff --git a/src/arch/x86/bios/SMBios.py b/src/arch/x86/bios/SMBios.py index 
67abc3ee20..6832fa7449 100644 --- a/src/arch/x86/bios/SMBios.py +++ b/src/arch/x86/bios/SMBios.py @@ -36,86 +36,98 @@ from m5.params import * from m5.SimObject import SimObject + class X86SMBiosSMBiosStructure(SimObject): - type = 'X86SMBiosSMBiosStructure' - cxx_class = 'gem5::X86ISA::smbios::SMBiosStructure' - cxx_header = 'arch/x86/bios/smbios.hh' + type = "X86SMBiosSMBiosStructure" + cxx_class = "gem5::X86ISA::smbios::SMBiosStructure" + cxx_header = "arch/x86/bios/smbios.hh" abstract = True + class Characteristic(Enum): - map = {'Unknown' : 2, - 'Unsupported' : 3, - 'ISA' : 4, - 'MCA' : 5, - 'EISA' : 6, - 'PCI' : 7, - 'PCMCIA' : 8, - 'PnP' : 9, - 'APM' : 10, - 'Flash' : 11, - 'Shadow' : 12, - 'VL_Vesa' : 13, - 'ESCD' : 14, - 'CDBoot' : 15, - 'SelectBoot' : 16, - 'Socketed' : 17, - 'PCMCIABoot' : 18, - 'EDD' : 19, - 'NEC9800' : 20, - 'Toshiba' : 21, - 'Floppy_5_25_360KB' : 22, - 'Floppy_5_25_1_2MB' : 23, - 'Floppy_3_5_720KB' : 24, - 'Floppy_3_5_2_88MB' : 25, - 'PrintScreen' : 26, - 'Keyboard8024' : 27, - 'Serial' : 28, - 'Printer' : 29, - 'CGA_Mono' : 30, - 'NEC_PC_98' : 31 + map = { + "Unknown": 2, + "Unsupported": 3, + "ISA": 4, + "MCA": 5, + "EISA": 6, + "PCI": 7, + "PCMCIA": 8, + "PnP": 9, + "APM": 10, + "Flash": 11, + "Shadow": 12, + "VL_Vesa": 13, + "ESCD": 14, + "CDBoot": 15, + "SelectBoot": 16, + "Socketed": 17, + "PCMCIABoot": 18, + "EDD": 19, + "NEC9800": 20, + "Toshiba": 21, + "Floppy_5_25_360KB": 22, + "Floppy_5_25_1_2MB": 23, + "Floppy_3_5_720KB": 24, + "Floppy_3_5_2_88MB": 25, + "PrintScreen": 26, + "Keyboard8024": 27, + "Serial": 28, + "Printer": 29, + "CGA_Mono": 30, + "NEC_PC_98": 31, } + class ExtCharacteristic(Enum): - map = {'ACPI' : 0, - 'USBLegacy' : 1, - 'AGP' : 2, - 'I20Boot' : 3, - 'LS_120Boot' : 4, - 'ZIPBoot' : 5, - 'FirewireBoot' : 6, - 'SmartBattery' : 7, - 'BootSpec' : 8, - 'NetServiceBoot' : 9, - 'TargetContent' : 10 + map = { + "ACPI": 0, + "USBLegacy": 1, + "AGP": 2, + "I20Boot": 3, + "LS_120Boot": 4, + "ZIPBoot": 5, + 
"FirewireBoot": 6, + "SmartBattery": 7, + "BootSpec": 8, + "NetServiceBoot": 9, + "TargetContent": 10, } + class X86SMBiosBiosInformation(X86SMBiosSMBiosStructure): - type = 'X86SMBiosBiosInformation' - cxx_class = 'gem5::X86ISA::smbios::BiosInformation' - cxx_header = 'arch/x86/bios/smbios.hh' + type = "X86SMBiosBiosInformation" + cxx_class = "gem5::X86ISA::smbios::BiosInformation" + cxx_header = "arch/x86/bios/smbios.hh" vendor = Param.String("", "vendor name string") version = Param.String("", "version string") - starting_addr_segment = \ - Param.UInt16(0, "segment location of bios starting address") + starting_addr_segment = Param.UInt16( + 0, "segment location of bios starting address" + ) release_date = Param.String("06/08/2008", "release date") rom_size = Param.UInt8(0, "rom size") - characteristics = VectorParam.Characteristic([], - "bios characteristic bit vector") - characteristic_ext_bytes = VectorParam.ExtCharacteristic([], - "extended bios characteristic bit vector") + characteristics = VectorParam.Characteristic( + [], "bios characteristic bit vector" + ) + characteristic_ext_bytes = VectorParam.ExtCharacteristic( + [], "extended bios characteristic bit vector" + ) major = Param.UInt8(0, "major version number") minor = Param.UInt8(0, "minor version number") - emb_cont_firmware_major = Param.UInt8(0, - "embedded controller firmware major version number") + emb_cont_firmware_major = Param.UInt8( + 0, "embedded controller firmware major version number" + ) + + emb_cont_firmware_minor = Param.UInt8( + 0, "embedded controller firmware minor version number" + ) - emb_cont_firmware_minor = Param.UInt8(0, - "embedded controller firmware minor version number") class X86SMBiosSMBiosTable(SimObject): - type = 'X86SMBiosSMBiosTable' - cxx_class = 'gem5::X86ISA::smbios::SMBiosTable' - cxx_header = 'arch/x86/bios/smbios.hh' + type = "X86SMBiosSMBiosTable" + cxx_class = "gem5::X86ISA::smbios::SMBiosTable" + cxx_header = "arch/x86/bios/smbios.hh" major_version = 
Param.UInt8(2, "major version number") minor_version = Param.UInt8(5, "minor version number") diff --git a/src/arch/x86/cpuid.cc b/src/arch/x86/cpuid.cc index 9d1390d110..4ce66df777 100644 --- a/src/arch/x86/cpuid.cc +++ b/src/arch/x86/cpuid.cc @@ -109,7 +109,7 @@ namespace X86ISA { break; case FamilyModelSteppingBrandFeatures: result = CpuidResult(0x00020f51, 0x00000405, - 0xebd3fbff, 0x00000001); + 0xebd3fbff, 0x00020001); break; case NameString1: case NameString2: diff --git a/src/arch/x86/decoder.hh b/src/arch/x86/decoder.hh index 29415ef757..e4b1de96d7 100644 --- a/src/arch/x86/decoder.hh +++ b/src/arch/x86/decoder.hh @@ -49,10 +49,11 @@ namespace gem5 { +class BaseISA; + namespace X86ISA { -class ISA; class Decoder : public InstDecoder { private: diff --git a/src/arch/x86/emulenv.cc b/src/arch/x86/emulenv.cc index 1f9c5f65fd..6b5b1b1dd3 100644 --- a/src/arch/x86/emulenv.cc +++ b/src/arch/x86/emulenv.cc @@ -52,8 +52,8 @@ void EmulEnv::doModRM(const ExtMachInst & machInst) //Use the SIB byte for addressing if the modrm byte calls for it. if (machInst.modRM.rm == 4 && machInst.addrSize != 2) { scale = 1 << machInst.sib.scale; - index = RegId(IntRegClass, machInst.sib.index | (machInst.rex.x << 3)); - base = RegId(IntRegClass, machInst.sib.base | (machInst.rex.b << 3)); + index = intRegClass[machInst.sib.index | (machInst.rex.x << 3)]; + base = intRegClass[machInst.sib.base | (machInst.rex.b << 3)]; //In this special case, we don't use a base. The displacement also //changes, but that's managed by the decoder. if (machInst.sib.base == (RegIndex)int_reg::Rbp && @@ -72,8 +72,7 @@ void EmulEnv::doModRM(const ExtMachInst & machInst) } else { base = int_reg::Rbp; } - index = RegId(IntRegClass, - (rm % 2) ? int_reg::Rdi : int_reg::Rsi); + index = intRegClass[(rm % 2) ? 
int_reg::Rdi : int_reg::Rsi]; } else { scale = 0; switch (rm) { @@ -95,8 +94,7 @@ void EmulEnv::doModRM(const ExtMachInst & machInst) } } else { scale = 0; - base = RegId(IntRegClass, - machInst.modRM.rm | (machInst.rex.b << 3)); + base = intRegClass[machInst.modRM.rm | (machInst.rex.b << 3)]; if (machInst.modRM.mod == 0 && machInst.modRM.rm == 5) { //Since we need to use a different encoding of this //instruction anyway, just ignore the base in those cases diff --git a/src/arch/x86/faults.cc b/src/arch/x86/faults.cc index 3ef886eac2..fce92b1fb4 100644 --- a/src/arch/x86/faults.cc +++ b/src/arch/x86/faults.cc @@ -43,6 +43,7 @@ #include "arch/x86/generated/decoder.hh" #include "arch/x86/insts/static_inst.hh" #include "arch/x86/mmu.hh" +#include "arch/x86/regs/int.hh" #include "arch/x86/regs/misc.hh" #include "base/loader/symtab.hh" #include "base/trace.hh" @@ -184,7 +185,7 @@ InitInterrupt::invoke(ThreadContext *tc, const StaticInstPtr &inst) DPRINTF(Faults, "Init interrupt.\n"); // The otherwise unmodified integer registers should be set to 0. 
for (int index = 0; index < int_reg::NumArchRegs; index++) { - tc->setReg(RegId(IntRegClass, index), (RegVal)0); + tc->setReg(intRegClass[index], (RegVal)0); } CR0 cr0 = tc->readMiscReg(misc_reg::Cr0); diff --git a/src/arch/x86/fs_workload.hh b/src/arch/x86/fs_workload.hh index 5edadaed87..b40b69b3c4 100644 --- a/src/arch/x86/fs_workload.hh +++ b/src/arch/x86/fs_workload.hh @@ -78,7 +78,7 @@ void installSegDesc(ThreadContext *tc, int seg, SegDescriptor desc, class FsWorkload : public KernelWorkload { public: - using Params = X86FsWorkloadParams; + PARAMS(X86FsWorkload); FsWorkload(const Params &p); public: @@ -88,7 +88,8 @@ class FsWorkload : public KernelWorkload setSystem(System *sys) override { KernelWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } ByteOrder byteOrder() const override { return ByteOrder::little; } diff --git a/src/arch/x86/insts/decode_fault.hh b/src/arch/x86/insts/decode_fault.hh index ba70b3c7d1..16b0f6ba96 100644 --- a/src/arch/x86/insts/decode_fault.hh +++ b/src/arch/x86/insts/decode_fault.hh @@ -58,7 +58,7 @@ class DecodeFaultInst : public X86StaticInst {} Fault - execute(ExecContext *tc, Trace::InstRecord *traceData) const override + execute(ExecContext *tc, trace::InstRecord *traceData) const override { return fault; } diff --git a/src/arch/x86/insts/microdebug.hh b/src/arch/x86/insts/microdebug.hh index 488ed1473e..1951705de1 100644 --- a/src/arch/x86/insts/microdebug.hh +++ b/src/arch/x86/insts/microdebug.hh @@ -49,7 +49,7 @@ class MicroDebug : public X86ISA::X86MicroopBase {} Fault - execute(ExecContext *xc, Trace::InstRecord *traceData) const override + execute(ExecContext *xc, trace::InstRecord *traceData) const override { return fault; } diff --git a/src/arch/x86/insts/microop_args.hh b/src/arch/x86/insts/microop_args.hh index 81ed412160..9dd121b3b1 100644 --- a/src/arch/x86/insts/microop_args.hh +++ b/src/arch/x86/insts/microop_args.hh @@ -36,7 
+36,9 @@ #include #include "arch/x86/insts/static_inst.hh" +#include "arch/x86/regs/float.hh" #include "arch/x86/regs/int.hh" +#include "arch/x86/regs/misc.hh" #include "arch/x86/regs/segment.hh" #include "arch/x86/types.hh" #include "base/compiler.hh" @@ -144,8 +146,7 @@ struct IntOp : public Base void print(std::ostream &os) const { - X86StaticInst::printReg(os, RegId(IntRegClass, this->opIndex()), - this->size); + X86StaticInst::printReg(os, intRegClass[this->opIndex()], this->size); } }; @@ -162,8 +163,7 @@ struct FoldedOp : public Base void print(std::ostream &os) const { - X86StaticInst::printReg(os, RegId(IntRegClass, this->opIndex()), - this->size); + X86StaticInst::printReg(os, intRegClass[this->opIndex()], this->size); } }; @@ -224,8 +224,7 @@ struct MiscOp : public Base void print(std::ostream &os) const { - X86StaticInst::printReg(os, RegId(MiscRegClass, this->opIndex()), - this->size); + X86StaticInst::printReg(os, miscRegClass[this->opIndex()], this->size); } }; @@ -247,7 +246,7 @@ struct FloatOp : public Base void print(std::ostream &os) const { - X86StaticInst::printReg(os, RegId(FloatRegClass, this->opIndex()), + X86StaticInst::printReg(os, floatRegClass[this->opIndex()], this->size); } }; diff --git a/src/arch/x86/insts/microspecop.hh b/src/arch/x86/insts/microspecop.hh index 942bfd7ace..3e26dd3c61 100644 --- a/src/arch/x86/insts/microspecop.hh +++ b/src/arch/x86/insts/microspecop.hh @@ -49,7 +49,7 @@ class MicroHalt : public InstOperands {} Fault - execute(ExecContext *xc, Trace::InstRecord *) const override + execute(ExecContext *xc, trace::InstRecord *) const override { xc->tcBase()->suspend(); return NoFault; diff --git a/src/arch/x86/insts/static_inst.cc b/src/arch/x86/insts/static_inst.cc index 03d844bdc1..1f3df36a10 100644 --- a/src/arch/x86/insts/static_inst.cc +++ b/src/arch/x86/insts/static_inst.cc @@ -269,13 +269,13 @@ X86StaticInst::printMem(std::ostream &os, uint8_t segment, if (scale != 0 && index != int_reg::NumRegs) { if (scale != 
1) ccprintf(os, "%d*", scale); - printReg(os, RegId(IntRegClass, index), addressSize); + printReg(os, intRegClass[index], addressSize); someAddr = true; } if (base != int_reg::NumRegs) { if (someAddr) os << " + "; - printReg(os, RegId(IntRegClass, base), addressSize); + printReg(os, intRegClass[base], addressSize); someAddr = true; } } diff --git a/src/arch/x86/interrupts.cc b/src/arch/x86/interrupts.cc index 74df9ab150..bfea600535 100644 --- a/src/arch/x86/interrupts.cc +++ b/src/arch/x86/interrupts.cc @@ -317,7 +317,8 @@ X86ISA::Interrupts::setThreadContext(ThreadContext *_tc) BaseInterrupts::setThreadContext(_tc); - initialApicId = tc->cpuId(); + // Update APIC ID to consider SMT threads + initialApicId = tc->contextId(); regs[APIC_ID] = (initialApicId << 24); pioAddr = x86LocalAPICAddress(initialApicId, 0); } diff --git a/src/arch/x86/isa.cc b/src/arch/x86/isa.cc index 45962c8f64..6578b1c716 100644 --- a/src/arch/x86/isa.cc +++ b/src/arch/x86/isa.cc @@ -31,15 +31,12 @@ #include "arch/x86/decoder.hh" #include "arch/x86/mmu.hh" #include "arch/x86/regs/ccr.hh" +#include "arch/x86/regs/float.hh" #include "arch/x86/regs/int.hh" #include "arch/x86/regs/misc.hh" #include "base/compiler.hh" #include "cpu/base.hh" #include "cpu/thread_context.hh" -#include "debug/CCRegs.hh" -#include "debug/FloatRegs.hh" -#include "debug/IntRegs.hh" -#include "debug/MiscRegs.hh" #include "params/X86ISA.hh" #include "sim/serialize.hh" @@ -141,18 +138,29 @@ ISA::clear() regVal[misc_reg::ApicBase] = lApicBase; } +namespace +{ + +/* Not applicable to X86 */ +RegClass vecRegClass(VecRegClass, VecRegClassName, 1, debug::IntRegs); +RegClass vecElemClass(VecElemClass, VecElemClassName, 2, debug::IntRegs); +RegClass vecPredRegClass(VecPredRegClass, VecPredRegClassName, 1, + debug::IntRegs); + +} // anonymous namespace + ISA::ISA(const X86ISAParams &p) : BaseISA(p), vendorString(p.vendor_string) { fatal_if(vendorString.size() != 12, "CPUID vendor string must be 12 characters\n"); - 
_regClasses.emplace_back(int_reg::NumRegs, debug::IntRegs); - _regClasses.emplace_back(float_reg::NumRegs, debug::FloatRegs); - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable to X86 - _regClasses.emplace_back(2, debug::IntRegs); // Not applicable to X86 - _regClasses.emplace_back(1, debug::IntRegs); // Not applicable to X86 - _regClasses.emplace_back(cc_reg::NumRegs, debug::CCRegs); - _regClasses.emplace_back(misc_reg::NumRegs, debug::MiscRegs); + _regClasses.push_back(&flatIntRegClass); + _regClasses.push_back(&flatFloatRegClass); + _regClasses.push_back(&vecRegClass); + _regClasses.push_back(&vecElemClass); + _regClasses.push_back(&vecPredRegClass); + _regClasses.push_back(&ccRegClass); + _regClasses.push_back(&miscRegClass); clear(); } @@ -181,68 +189,62 @@ void ISA::copyRegsFrom(ThreadContext *src) { //copy int regs - for (int i = 0; i < int_reg::NumRegs; ++i) { - RegId reg(IntRegClass, i); - tc->setRegFlat(reg, src->getRegFlat(reg)); - } + for (auto &id: flatIntRegClass) + tc->setReg(id, src->getReg(id)); //copy float regs - for (int i = 0; i < float_reg::NumRegs; ++i) { - RegId reg(FloatRegClass, i); - tc->setRegFlat(reg, src->getRegFlat(reg)); - } + for (auto &id: flatFloatRegClass) + tc->setReg(id, src->getReg(id)); //copy condition-code regs - for (int i = 0; i < cc_reg::NumRegs; ++i) { - RegId reg(CCRegClass, i); - tc->setRegFlat(reg, src->getRegFlat(reg)); - } + for (auto &id: ccRegClass) + tc->setReg(id, src->getReg(id)); copyMiscRegs(src, tc); tc->pcState(src->pcState()); } RegVal -ISA::readMiscRegNoEffect(int miscReg) const +ISA::readMiscRegNoEffect(RegIndex idx) const { // Make sure we're not dealing with an illegal control register. // Instructions should filter out these indexes, and nothing else should // attempt to read them directly. 
- assert(misc_reg::isValid(miscReg)); + assert(misc_reg::isValid(idx)); - return regVal[miscReg]; + return regVal[idx]; } RegVal -ISA::readMiscReg(int miscReg) +ISA::readMiscReg(RegIndex idx) { - if (miscReg == misc_reg::Tsc) { + if (idx == misc_reg::Tsc) { return regVal[misc_reg::Tsc] + tc->getCpuPtr()->curCycle(); } - if (miscReg == misc_reg::Fsw) { + if (idx == misc_reg::Fsw) { RegVal fsw = regVal[misc_reg::Fsw]; RegVal top = regVal[misc_reg::X87Top]; return insertBits(fsw, 13, 11, top); } - if (miscReg == misc_reg::ApicBase) { + if (idx == misc_reg::ApicBase) { LocalApicBase base = regVal[misc_reg::ApicBase]; base.bsp = (tc->contextId() == 0); return base; } - return readMiscRegNoEffect(miscReg); + return readMiscRegNoEffect(idx); } void -ISA::setMiscRegNoEffect(int miscReg, RegVal val) +ISA::setMiscRegNoEffect(RegIndex idx, RegVal val) { // Make sure we're not dealing with an illegal control register. // Instructions should filter out these indexes, and nothing else should // attempt to write to them directly. 
- assert(misc_reg::isValid(miscReg)); + assert(misc_reg::isValid(idx)); HandyM5Reg m5Reg = regVal[misc_reg::M5Reg]; int reg_width = 64; - switch (miscReg) { + switch (idx) { case misc_reg::X87Top: reg_width = 3; break; @@ -271,18 +273,17 @@ ISA::setMiscRegNoEffect(int miscReg, RegVal val) break; } - regVal[miscReg] = val & mask(reg_width); + regVal[idx] = val & mask(reg_width); } void -ISA::setMiscReg(int miscReg, RegVal val) +ISA::setMiscReg(RegIndex idx, RegVal val) { RegVal newVal = val; - switch(miscReg) - { + switch (idx) { case misc_reg::Cr0: { - CR0 toggled = regVal[miscReg] ^ val; + CR0 toggled = regVal[idx] ^ val; CR0 newCR0 = val; Efer efer = regVal[misc_reg::Efer]; if (toggled.pg && efer.lme) { @@ -316,7 +317,7 @@ ISA::setMiscReg(int miscReg, RegVal val) break; case misc_reg::Cr4: { - CR4 toggled = regVal[miscReg] ^ val; + CR4 toggled = regVal[idx] ^ val; if (toggled.pae || toggled.pse || toggled.pge) { tc->getMMUPtr()->flushAll(); } @@ -332,7 +333,7 @@ ISA::setMiscReg(int miscReg, RegVal val) } case misc_reg::CsAttr: { - SegAttr toggled = regVal[miscReg] ^ val; + SegAttr toggled = regVal[idx] ^ val; SegAttr newCSAttr = val; if (toggled.longMode) { if (newCSAttr.longMode) { @@ -370,7 +371,7 @@ ISA::setMiscReg(int miscReg, RegVal val) case misc_reg::TsgBase: case misc_reg::TrBase: case misc_reg::IdtrBase: - regVal[misc_reg::segEffBase(miscReg - misc_reg::SegBaseBase)] = val; + regVal[misc_reg::segEffBase(idx - misc_reg::SegBaseBase)] = val; break; // These segments ignore their bases in 64 bit mode. // their effective bases must stay equal to their actual bases. @@ -382,7 +383,7 @@ ISA::setMiscReg(int miscReg, RegVal val) Efer efer = regVal[misc_reg::Efer]; SegAttr csAttr = regVal[misc_reg::CsAttr]; if (!efer.lma || !csAttr.longMode) // Check for non 64 bit mode. 
- regVal[misc_reg::segEffBase(miscReg - + regVal[misc_reg::segEffBase(idx - misc_reg::SegBaseBase)] = val; } break; @@ -396,7 +397,7 @@ ISA::setMiscReg(int miscReg, RegVal val) /* These should eventually set up breakpoints. */ break; case misc_reg::Dr4: - miscReg = misc_reg::Dr6; + idx = misc_reg::Dr6; [[fallthrough]]; case misc_reg::Dr6: { @@ -413,7 +414,7 @@ ISA::setMiscReg(int miscReg, RegVal val) } break; case misc_reg::Dr5: - miscReg = misc_reg::Dr7; + idx = misc_reg::Dr7; [[fallthrough]]; case misc_reg::Dr7: { @@ -471,7 +472,7 @@ ISA::setMiscReg(int miscReg, RegVal val) default: break; } - setMiscRegNoEffect(miscReg, newVal); + setMiscRegNoEffect(idx, newVal); } void diff --git a/src/arch/x86/isa.hh b/src/arch/x86/isa.hh index f19ed9f61d..f7ae210f96 100644 --- a/src/arch/x86/isa.hh +++ b/src/arch/x86/isa.hh @@ -34,6 +34,7 @@ #include "arch/generic/isa.hh" #include "arch/x86/pcstate.hh" +#include "arch/x86/regs/ccr.hh" #include "arch/x86/regs/float.hh" #include "arch/x86/regs/int.hh" #include "arch/x86/regs/misc.hh" @@ -59,7 +60,7 @@ class ISA : public BaseISA std::string vendorString; public: - void clear(); + void clear() override; PCStateBase * newPCState(Addr new_inst_addr=0) const override @@ -71,47 +72,11 @@ class ISA : public BaseISA ISA(const Params &p); - RegVal readMiscRegNoEffect(int miscReg) const; - RegVal readMiscReg(int miscReg); + RegVal readMiscRegNoEffect(RegIndex idx) const override; + RegVal readMiscReg(RegIndex idx) override; - void setMiscRegNoEffect(int miscReg, RegVal val); - void setMiscReg(int miscReg, RegVal val); - - RegId - flattenRegId(const RegId& regId) const - { - switch (regId.classValue()) { - case IntRegClass: - return RegId(IntRegClass, flattenIntIndex(regId.index())); - case FloatRegClass: - return RegId(FloatRegClass, flattenFloatIndex(regId.index())); - case CCRegClass: - return RegId(CCRegClass, flattenCCIndex(regId.index())); - case MiscRegClass: - return RegId(MiscRegClass, flattenMiscIndex(regId.index())); - default: 
- break; - } - return regId; - } - - int flattenIntIndex(int reg) const { return reg & ~IntFoldBit; } - - int - flattenFloatIndex(int reg) const - { - if (reg >= float_reg::NumRegs) { - reg = float_reg::stack(reg - float_reg::NumRegs, - regVal[misc_reg::X87Top]); - } - return reg; - } - - int flattenVecIndex(int reg) const { return reg; } - int flattenVecElemIndex(int reg) const { return reg; } - int flattenVecPredIndex(int reg) const { return reg; } - int flattenCCIndex(int reg) const { return reg; } - int flattenMiscIndex(int reg) const { return reg; } + void setMiscRegNoEffect(RegIndex idx, RegVal val) override; + void setMiscReg(RegIndex idx, RegVal val) override; bool inUserMode() const override diff --git a/src/arch/x86/isa/decoder/two_byte_opcodes.isa b/src/arch/x86/isa/decoder/two_byte_opcodes.isa index 22c20e59f2..38937cb3e2 100644 --- a/src/arch/x86/isa/decoder/two_byte_opcodes.isa +++ b/src/arch/x86/isa/decoder/two_byte_opcodes.isa @@ -133,7 +133,13 @@ 0x3: Inst::SMSW(Rv); default: Inst::SMSW(Mw); } - 0x6: Cpl0Inst::LMSW(Ew); + 0x5: decode MODRM_MOD { + 0x3: decode MODRM_RM { + 0x0: BasicOperate::SERIALIZE({{/*Nothing*/}}, + IsSerializeAfter); + } + } + 0x6: Inst::LMSW(Ew); 0x7: decode MODRM_MOD { 0x3: decode MODRM_RM { 0x0: Cpl0Inst::SWAPGS(); @@ -151,8 +157,10 @@ // instructions. //0x04: loadall_or_reset_or_hang(); 0x4: BasicOperate::gem5Op({{ + uint64_t result; bool recognized = pseudo_inst::pseudoInst( - xc->tcBase(), IMMEDIATE); + xc->tcBase(), IMMEDIATE, result); + Rax = result; if (!recognized) fault = std::make_shared(); }}, IsNonSpeculative); diff --git a/src/arch/x86/isa/formats/basic.isa b/src/arch/x86/isa/formats/basic.isa index 32e40870d8..8e2f8d6223 100644 --- a/src/arch/x86/isa/formats/basic.isa +++ b/src/arch/x86/isa/formats/basic.isa @@ -49,7 +49,7 @@ def template BasicDeclare {{ public: // Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -66,7 +66,7 @@ def template BasicConstructor {{ // Basic instruction class execute method template. def template BasicExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; diff --git a/src/arch/x86/isa/formats/cpuid.isa b/src/arch/x86/isa/formats/cpuid.isa index 3a69e4c506..aeffc1035f 100644 --- a/src/arch/x86/isa/formats/cpuid.isa +++ b/src/arch/x86/isa/formats/cpuid.isa @@ -66,7 +66,7 @@ output decoder {{ def template CPUIDExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { // If the CPUID instruction used a valid function number, this will // be set to true. Otherwise, the instruction does nothing. @@ -85,4 +85,3 @@ def format CPUIDInst(code, *opt_flags) {{ decode_block = BasicDecode.subst(iop) exec_output = CPUIDExecute.subst(iop) }}; - diff --git a/src/arch/x86/isa/formats/error.isa b/src/arch/x86/isa/formats/error.isa index 5ad0f229a0..25ba277477 100644 --- a/src/arch/x86/isa/formats/error.isa +++ b/src/arch/x86/isa/formats/error.isa @@ -54,4 +54,3 @@ def format M5InternalError(error_message) {{ iop.message = error_message decode_block = ErrorDecode.subst(iop) }}; - diff --git a/src/arch/x86/isa/formats/monitor_mwait.isa b/src/arch/x86/isa/formats/monitor_mwait.isa index 59baf3cac9..d28a690b01 100644 --- a/src/arch/x86/isa/formats/monitor_mwait.isa +++ b/src/arch/x86/isa/formats/monitor_mwait.isa @@ -51,16 +51,16 @@ def template MwaitDeclare {{ public: // Constructor. 
%(class_name)s(ExtMachInst machInst); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; def template MwaitInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext * xc, - Trace::InstRecord * traceData) const + trace::InstRecord * traceData) const { unsigned s = 0x8; //size unsigned f = 0; //flags @@ -71,7 +71,7 @@ def template MwaitInitiateAcc {{ def template MwaitCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { MicroHalt hltObj(machInst, mnemonic, 0x0); if(xc->mwait(pkt)) { @@ -122,4 +122,3 @@ def format MwaitInst(code, *opt_flags) {{ exec_output += MwaitInitiateAcc.subst(iop) exec_output += MwaitCompleteAcc.subst(iop) }}; - diff --git a/src/arch/x86/isa/formats/nop.isa b/src/arch/x86/isa/formats/nop.isa index c0f6e13082..86d3af21cf 100644 --- a/src/arch/x86/isa/formats/nop.isa +++ b/src/arch/x86/isa/formats/nop.isa @@ -71,7 +71,7 @@ output decoder {{ def template NopExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { return NoFault; } @@ -84,4 +84,3 @@ def format NopInst(*opt_flags) {{ decode_block = BasicDecode.subst(iop) exec_output = NopExecute.subst(iop) }}; - diff --git a/src/arch/x86/isa/formats/syscall.isa b/src/arch/x86/isa/formats/syscall.isa index 65fa9a25a5..c36904f162 100644 --- a/src/arch/x86/isa/formats/syscall.isa +++ b/src/arch/x86/isa/formats/syscall.isa @@ -71,7 +71,7 @@ output decoder {{ def template SyscallExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) 
const + trace::InstRecord *traceData) const { Fault fault = NoFault; %(op_decl)s; @@ -88,4 +88,3 @@ def format SyscallInst(code, *opt_flags) {{ decode_block = BasicDecode.subst(iop) exec_output = SyscallExecute.subst(iop) }}; - diff --git a/src/arch/x86/isa/formats/unimp.isa b/src/arch/x86/isa/formats/unimp.isa index ac7fad7c6e..2950f55891 100644 --- a/src/arch/x86/isa/formats/unimp.isa +++ b/src/arch/x86/isa/formats/unimp.isa @@ -60,7 +60,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, @@ -92,7 +92,7 @@ output header {{ flags[IsNonSpeculative] = true; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly(Addr pc, @@ -119,7 +119,7 @@ output decoder {{ output exec {{ Fault FailUnimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { panic("attempt to execute unimplemented instruction '%s' %s", mnemonic, machInst); @@ -128,7 +128,7 @@ output exec {{ Fault WarnUnimplemented::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { if (!warned) { warn("instruction '%s' unimplemented\n", mnemonic); @@ -149,4 +149,3 @@ def format WarnUnimpl() {{ iop = InstObjParams(name, 'WarnUnimplemented') decode_block = BasicDecodeWithMnemonic.subst(iop) }}; - diff --git a/src/arch/x86/isa/formats/unknown.isa b/src/arch/x86/isa/formats/unknown.isa index 3e0dc550da..eca297bab2 100644 --- a/src/arch/x86/isa/formats/unknown.isa +++ b/src/arch/x86/isa/formats/unknown.isa @@ -55,7 +55,7 @@ output header {{ { } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::string generateDisassembly( Addr pc, 
const loader::SymbolTable *symtab) const override; @@ -74,7 +74,7 @@ output decoder {{ output exec {{ Fault - Unknown::execute(ExecContext *xc, Trace::InstRecord *traceData) const + Unknown::execute(ExecContext *xc, trace::InstRecord *traceData) const { return std::make_shared(); } diff --git a/src/arch/x86/isa/insts/__init__.py b/src/arch/x86/isa/insts/__init__.py index 918951c95e..2d10d98647 100644 --- a/src/arch/x86/isa/insts/__init__.py +++ b/src/arch/x86/isa/insts/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["romutil", - "general_purpose", - "simd128", - "simd64", - "system", - "x87"] +categories = [ + "romutil", + "general_purpose", + "simd128", + "simd64", + "system", + "x87", +] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/general_purpose/__init__.py b/src/arch/x86/isa/insts/general_purpose/__init__.py index 95d55a782a..eef0150ae8 100644 --- a/src/arch/x86/isa/insts/general_purpose/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/__init__.py @@ -33,26 +33,28 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["arithmetic", - "cache_and_memory_management", - "compare_and_test", - "control_transfer", - "data_conversion", - "data_transfer", - "flags", - "input_output", - "load_effective_address", - "load_segment_registers", - "logical", - "no_operation", - "rotate_and_shift", - "semaphores", - "string", - "system_calls"] +categories = [ + "arithmetic", + "cache_and_memory_management", + "compare_and_test", + "control_transfer", + "data_conversion", + "data_transfer", + "flags", + "input_output", + "load_effective_address", + "load_segment_registers", + "logical", + "no_operation", + "rotate_and_shift", + "semaphores", + "string", + "system_calls", +] -microcode = ''' +microcode = """ # Microcode for general purpose instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/general_purpose/arithmetic/__init__.py b/src/arch/x86/isa/insts/general_purpose/arithmetic/__init__.py index 27d3807a27..287d1de9eb 100644 --- a/src/arch/x86/isa/insts/general_purpose/arithmetic/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/arithmetic/__init__.py @@ -33,12 +33,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["add_and_subtract", - "increment_and_decrement", - "multiply_and_divide"] +categories = [ + "add_and_subtract", + "increment_and_decrement", + "multiply_and_divide", +] microcode = "" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode - diff --git a/src/arch/x86/isa/insts/general_purpose/arithmetic/add_and_subtract.py b/src/arch/x86/isa/insts/general_purpose/arithmetic/add_and_subtract.py index c1262721e3..1e0ead9199 100644 --- a/src/arch/x86/isa/insts/general_purpose/arithmetic/add_and_subtract.py +++ b/src/arch/x86/isa/insts/general_purpose/arithmetic/add_and_subtract.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop ADD_R_R { add reg, reg, regm, flags=(OF,SF,ZF,AF,PF,CF) @@ -456,4 +456,4 @@ def macroop NEG_LOCKED_P stul t1, seg, riprel, disp mfence }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/arithmetic/increment_and_decrement.py b/src/arch/x86/isa/insts/general_purpose/arithmetic/increment_and_decrement.py index dd2c6c83f8..3fe883e1ad 100644 --- a/src/arch/x86/isa/insts/general_purpose/arithmetic/increment_and_decrement.py +++ b/src/arch/x86/isa/insts/general_purpose/arithmetic/increment_and_decrement.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop INC_R { addi reg, reg, 1, flags=(OF, SF, ZF, AF, PF) @@ -111,4 +111,4 @@ def macroop DEC_LOCKED_P stul t1, seg, riprel, disp mfence }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/arithmetic/multiply_and_divide.py b/src/arch/x86/isa/insts/general_purpose/arithmetic/multiply_and_divide.py index e5f30b8abe..cc6c03a9ce 100644 --- a/src/arch/x86/isa/insts/general_purpose/arithmetic/multiply_and_divide.py +++ b/src/arch/x86/isa/insts/general_purpose/arithmetic/multiply_and_divide.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # # Byte version of one operand unsigned multiply. @@ -201,21 +201,21 @@ def macroop IMUL_R_P_I mulel reg muleh t0 }; -''' +""" -pcRel = ''' +pcRel = """ rdip t7 ld %s, seg, riprel, disp -''' -sibRel = ''' +""" +sibRel = """ ld %s, seg, sib, disp -''' +""" # # One byte version of unsigned division # -divcode = ''' +divcode = """ def macroop DIV_B_%(suffix)s { %(readOp1)s @@ -237,13 +237,13 @@ divLoopTop: divq rax, dataSize=1 divr ah, dataSize=1 }; -''' +""" # # Unsigned division # -divcode += ''' +divcode += """ def macroop DIV_%(suffix)s { %(readOp1)s @@ -268,13 +268,13 @@ divLoopTop: divq rax divr rdx }; -''' +""" # # One byte version of signed division # -divcode += ''' +divcode += """ def macroop IDIV_B_%(suffix)s { # Negate dividend @@ -345,13 +345,13 @@ divLoopTop: # Otherwise put the one that wasn't negated (at least here) in rax. mov rax, rax, t5, (nCECF,), dataSize=1 }; -''' +""" # # Signed division # -divcode += ''' +divcode += """ def macroop IDIV_%(suffix)s { # Negate dividend @@ -424,11 +424,8 @@ divLoopTop: # Otherwise put the one that wasn't negated (at least here) in rax. 
mov rax, rax, t5, (nCECF,) }; -''' +""" -microcode += divcode % {"suffix": "R", - "readOp1": "", "op1": "reg"} -microcode += divcode % {"suffix": "M", - "readOp1": sibRel % "t2", "op1": "t2"} -microcode += divcode % {"suffix": "P", - "readOp1": pcRel % "t2", "op1": "t2"} +microcode += divcode % {"suffix": "R", "readOp1": "", "op1": "reg"} +microcode += divcode % {"suffix": "M", "readOp1": sibRel % "t2", "op1": "t2"} +microcode += divcode % {"suffix": "P", "readOp1": pcRel % "t2", "op1": "t2"} diff --git a/src/arch/x86/isa/insts/general_purpose/cache_and_memory_management.py b/src/arch/x86/isa/insts/general_purpose/cache_and_memory_management.py index 4dd4943edb..b971496133 100644 --- a/src/arch/x86/isa/insts/general_purpose/cache_and_memory_management.py +++ b/src/arch/x86/isa/insts/general_purpose/cache_and_memory_management.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PREFETCH_M { ld t0, seg, sib, disp, dataSize=1, prefetch=True @@ -91,9 +91,9 @@ def macroop CLWB_P clwb seg, riprel, disp, dataSize=1 }; -''' +""" -#let {{ +# let {{ # class LFENCE(Inst): # "GenFault ${new UnimpInstFault}" # class SFENCE(Inst): @@ -104,4 +104,4 @@ def macroop CLWB_P # "GenFault ${new UnimpInstFault}" # class PREFETCHW(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/__init__.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/__init__.py index 48cc1c2fb2..fc7b35f867 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/__init__.py @@ -33,12 +33,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["bit_scan", - "bit_test", - "bounds", - "compare", - "set_byte_on_condition", - "test"] +categories = [ + "bit_scan", + "bit_test", + "bounds", + "compare", + "set_byte_on_condition", + "test", +] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_scan.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_scan.py index 05bd3c4cf6..dcf8f2b6e8 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_scan.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_scan.py @@ -36,7 +36,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop BSR_R_R { # Determine if the input was zero, and also move it to a temp reg. mov t1, t1, t0, dataSize=8 @@ -365,4 +365,4 @@ def macroop POPCNT_R_P { ld t1, seg, riprel, disp popcnt reg, t1, reg, dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_test.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_test.py index 58c4b9a960..c192770799 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_test.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/bit_test.py @@ -36,7 +36,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop BT_R_I { sexti t0, reg, imm, flags=(CF,) }; @@ -417,4 +417,4 @@ def macroop BTS_LOCKED_P_R { stul t1, seg, [1, t3, t7], disp mfence }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/bounds.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/bounds.py index 1a8c8cfd62..01cd6bbd56 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/bounds.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/bounds.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop BOUND_R_M { ld t1, seg, sib, disp, dataSize="env.dataSize * 2" srli t2, t1, "env.dataSize * 8" @@ -46,4 +46,4 @@ def macroop BOUND_R_M { def macroop BOUND_R_P { fault "std::make_shared()" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/compare.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/compare.py index ba783edd1f..597ee127f3 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/compare.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/compare.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CMP_R_M { ld t1, seg, sib, disp @@ -85,4 +85,4 @@ def macroop CMP_R_I limm t1, imm sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF) }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/set_byte_on_condition.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/set_byte_on_condition.py index 9705731a73..99a79c9518 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/set_byte_on_condition.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/set_byte_on_condition.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop SALC_R { sbb reg, reg, reg, dataSize=1 @@ -374,4 +374,4 @@ def macroop SETNO_P movi t1, t1, 0, flags=(COF,) st t1, seg, riprel, disp }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/compare_and_test/test.py b/src/arch/x86/isa/insts/general_purpose/compare_and_test/test.py index 91cfeb8d52..87e8fd3f8f 100644 --- a/src/arch/x86/isa/insts/general_purpose/compare_and_test/test.py +++ b/src/arch/x86/isa/insts/general_purpose/compare_and_test/test.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop TEST_M_R { ld t1, seg, sib, disp @@ -72,4 +72,4 @@ def macroop TEST_R_I limm t1, imm and t0, reg, t1, flags=(OF, SF, ZF, PF, CF) }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/__init__.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/__init__.py index 38ca336288..b651278990 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/__init__.py @@ -33,12 +33,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["call", - "conditional_jump", - "interrupts_and_exceptions", - "jump", - "loop", - "xreturn"] +categories = [ + "call", + "conditional_jump", + "interrupts_and_exceptions", + "jump", + "loop", + "xreturn", +] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/call.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/call.py index 8d11256f51..ec96f682c1 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/call.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/call.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CALL_NEAR_I { # Make the default data size of calls 64 bits in 64 bit mode @@ -163,8 +163,8 @@ def macroop CALL_FAR_REAL_M { def macroop CALL_FAR_REAL_P { panic "Far call in real mode doesn't support RIP addressing." 
}; -''' -#let {{ +""" +# let {{ # class CALL(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/conditional_jump.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/conditional_jump.py index d0fa31a5a6..48d6751bb6 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/conditional_jump.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/conditional_jump.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop JZ_I { # Make the defualt data size of jumps 64 bits in 64 bit mode @@ -220,4 +220,4 @@ def macroop JRCXZ_I add t0, t0, rcx, flags=(EZF,), dataSize=asz wripi t1, imm, flags=(CEZF,) }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/interrupts_and_exceptions.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/interrupts_and_exceptions.py index 718484950a..278d432e51 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/interrupts_and_exceptions.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/interrupts_and_exceptions.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop IRET_REAL { .serialize_after @@ -341,10 +341,10 @@ def macroop INT_REAL_I { def macroop INT_VIRT_I { panic "Virtual mode int3 isn't implemented!" 
}; -''' -#let {{ +""" +# let {{ # class INT(Inst): # "GenFault ${new UnimpInstFault}" # class INTO(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/jump.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/jump.py index 77c696838d..30c1997fb3 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/jump.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/jump.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop JMP_I { # Make the default data size of jumps 64 bits in 64 bit mode @@ -187,4 +187,4 @@ def macroop JMP_FAR_REAL_I # Put t2 first so it isn't sign extended. wrip t2, t0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/loop.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/loop.py index 28a8308a98..a445885855 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/loop.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/loop.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop LOOP_I { .control_direct @@ -63,4 +63,4 @@ def macroop LOOPE_I { subi rcx, rcx, 1, flags=(EZF,), dataSize=asz wripi t1, imm, flags=(CSTRZnEZF,) }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/control_transfer/xreturn.py b/src/arch/x86/isa/insts/general_purpose/control_transfer/xreturn.py index e925f7d68e..1baef5b2b9 100644 --- a/src/arch/x86/isa/insts/general_purpose/control_transfer/xreturn.py +++ b/src/arch/x86/isa/insts/general_purpose/control_transfer/xreturn.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop RET_NEAR { # Make the default data size of rets 64 bits in 64 bit mode @@ -157,4 +157,4 @@ processDescriptor: #end: # fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/__init__.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/__init__.py index d90e302428..e6eca02d15 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/__init__.py @@ -33,12 +33,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["ascii_adjust", - "bcd_adjust", - "endian_conversion", - "extract_sign_mask", - "sign_extension", - "translate"] +categories = [ + "ascii_adjust", + "bcd_adjust", + "endian_conversion", + "extract_sign_mask", + "sign_extension", + "translate", +] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/ascii_adjust.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/ascii_adjust.py index 4a35765b8b..d8560d8fe4 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/ascii_adjust.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/ascii_adjust.py @@ -34,7 +34,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. microcode = "" -#let {{ +# let {{ # class AAA(Inst): # "GenFault ${new UnimpInstFault}" # class AAD(Inst): @@ -43,4 +43,4 @@ microcode = "" # "GenFault ${new UnimpInstFault}" # class AAS(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/bcd_adjust.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/bcd_adjust.py index 8d4d6e473e..1d12ee31e8 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/bcd_adjust.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/bcd_adjust.py @@ -34,9 +34,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
microcode = "" -#let {{ +# let {{ # class DAA(Inst): # "GenFault ${new UnimpInstFault}" # class DAS(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/endian_conversion.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/endian_conversion.py index bfb7fe073c..9d4d3b8b3b 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/endian_conversion.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/endian_conversion.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop BSWAP_D_R { roli reg, reg, 8, dataSize=2 @@ -55,4 +55,4 @@ def macroop BSWAP_Q_R roli t2, t2, 8, dataSize=2 or reg, t1, t2, dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/extract_sign_mask.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/extract_sign_mask.py index 239b88ba69..5ed1c05c1d 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/extract_sign_mask.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/extract_sign_mask.py @@ -34,9 +34,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
microcode = "" -#let {{ +# let {{ # class MOVMSKPS(Inst): # "GenFault ${new UnimpInstFault}" # class MOVMSKPD(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/sign_extension.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/sign_extension.py index 3684301608..607f7d9af5 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/sign_extension.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/sign_extension.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CDQE_R { sexti reg, reg, "env.dataSize * 4 - 1" }; @@ -44,4 +44,4 @@ def macroop CQO_R_R { mov regm, regm, reg srai regm, regm, "env.dataSize * 8 - 1" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/data_conversion/translate.py b/src/arch/x86/isa/insts/general_purpose/data_conversion/translate.py index a45d0527f2..10ad0d6048 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_conversion/translate.py +++ b/src/arch/x86/isa/insts/general_purpose/data_conversion/translate.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop XLAT { zexti t1, rax, 7, dataSize=8 # Here, t1 can be used directly. The value of al is supposed to be treated @@ -41,4 +41,4 @@ def macroop XLAT { # size has to be at least 16 bits, t1 will not be sign extended. 
ld rax, seg, [1, rbx, t1], dataSize=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/data_transfer/__init__.py b/src/arch/x86/isa/insts/general_purpose/data_transfer/__init__.py index 1cdccec48e..cef9e595b4 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_transfer/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/data_transfer/__init__.py @@ -33,10 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["conditional_move", - "move", - "stack_operations", - "xchg"] +categories = ["conditional_move", "move", "stack_operations", "xchg"] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/data_transfer/conditional_move.py b/src/arch/x86/isa/insts/general_purpose/data_transfer/conditional_move.py index 31093a6ce4..195a968f83 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_transfer/conditional_move.py +++ b/src/arch/x86/isa/insts/general_purpose/data_transfer/conditional_move.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CMOVZ_R_R { mov reg, reg, reg, flags=(nCZF,) @@ -369,4 +369,4 @@ def macroop CMOVNO_R_P mov reg, reg, reg, flags=(COF,) mov reg, reg, t1, flags=(nCOF,) }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/data_transfer/move.py b/src/arch/x86/isa/insts/general_purpose/data_transfer/move.py index 2a3fe99236..45ca0e8788 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_transfer/move.py +++ b/src/arch/x86/isa/insts/general_purpose/data_transfer/move.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # # Regular moves @@ -370,8 +370,8 @@ def macroop MOVD_P_XMM { stfp xmml, seg, riprel, disp, dataSize=dsz }; -''' -#let {{ +""" +# let {{ # class MOVD(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/data_transfer/stack_operations.py b/src/arch/x86/isa/insts/general_purpose/data_transfer/stack_operations.py index e97d17f1b4..72270fed2f 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_transfer/stack_operations.py +++ b/src/arch/x86/isa/insts/general_purpose/data_transfer/stack_operations.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop POP_R { # Make the default data size of pops 64 bits in 64 bit mode .adjust_env oszIn64Override @@ -202,4 +202,4 @@ skipLoop: sub rsp, rsp, t2, dataSize=ssz mov rbp, rbp, t6 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py b/src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py index 71255490cb..fffba61b61 100644 --- a/src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py +++ b/src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # All the memory versions need to use LOCK, regardless of if it was set @@ -102,4 +102,4 @@ def macroop XCHG_LOCKED_P_R mfence mov reg, reg, t1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/flags/__init__.py b/src/arch/x86/isa/insts/general_purpose/flags/__init__.py index 673f167bd6..ef1585ab26 100644 --- a/src/arch/x86/isa/insts/general_purpose/flags/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/flags/__init__.py @@ -33,9 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["load_and_store", - "push_and_pop", - "set_and_clear"] +categories = ["load_and_store", "push_and_pop", "set_and_clear"] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/flags/load_and_store.py b/src/arch/x86/isa/insts/general_purpose/flags/load_and_store.py index 31723b3442..d62c48f362 100644 --- a/src/arch/x86/isa/insts/general_purpose/flags/load_and_store.py +++ b/src/arch/x86/isa/insts/general_purpose/flags/load_and_store.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop SAHF { ruflags t1, dataSize=8 mov t1, t1, ah, dataSize=1 @@ -44,4 +44,4 @@ def macroop LAHF { rflags t1, dataSize=8 andi ah, t1, "CFBit | PFBit | AFBit | ZFBit | SFBit | (1 << 1)", dataSize=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py b/src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py index cebe65dac5..8b1f3b97dd 100644 --- a/src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py +++ b/src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PUSHF { .adjust_env oszIn64Override @@ -57,4 +57,4 @@ def macroop POPF { def macroop POPF_VIRT { panic "Virtual mode popf isn't implemented!" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/flags/set_and_clear.py b/src/arch/x86/isa/insts/general_purpose/flags/set_and_clear.py index 2f2b2cefa7..4c6ee6abdf 100644 --- a/src/arch/x86/isa/insts/general_purpose/flags/set_and_clear.py +++ b/src/arch/x86/isa/insts/general_purpose/flags/set_and_clear.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CLD { ruflags t1, dataSize=8 limm t2, "~((uint64_t)DFBit)", dataSize=8 @@ -175,4 +175,4 @@ def macroop CLI_REAL { def macroop CLI_VIRT { panic "Virtual mode cli isn't implemented!" 
}; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/input_output/__init__.py b/src/arch/x86/isa/insts/general_purpose/input_output/__init__.py index a5c5a57df6..08b88dd9bd 100644 --- a/src/arch/x86/isa/insts/general_purpose/input_output/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/input_output/__init__.py @@ -33,8 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["general_io", - "string_io"] +categories = ["general_io", "string_io"] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/input_output/general_io.py b/src/arch/x86/isa/insts/general_purpose/input_output/general_io.py index 94d026c88c..e7b99c4f41 100644 --- a/src/arch/x86/isa/insts/general_purpose/input_output/general_io.py +++ b/src/arch/x86/isa/insts/general_purpose/input_output/general_io.py @@ -36,7 +36,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop IN_R_I { .adjust_imm trimImm(8) limm t1, imm, dataSize=8 @@ -86,4 +86,4 @@ microcode = ''' def macroop OUT_VIRT_R_R { panic "Virtual mode out isn't implemented!" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/input_output/string_io.py b/src/arch/x86/isa/insts/general_purpose/input_output/string_io.py index ec386a0621..caad6db4d0 100644 --- a/src/arch/x86/isa/insts/general_purpose/input_output/string_io.py +++ b/src/arch/x86/isa/insts/general_purpose/input_output/string_io.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop INS_M_R { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 @@ -135,4 +135,4 @@ end: def macroop OUTS_VIRT_E_R_M { panic "Virtual mode outs isn't implemented!" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/load_effective_address.py b/src/arch/x86/isa/insts/general_purpose/load_effective_address.py index c1aed3d2d0..1d7d8fc969 100644 --- a/src/arch/x86/isa/insts/general_purpose/load_effective_address.py +++ b/src/arch/x86/isa/insts/general_purpose/load_effective_address.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop LEA_R_M { lea reg, seg, sib, disp }; @@ -42,4 +42,4 @@ def macroop LEA_R_P { rdip t7 lea reg, seg, riprel, disp }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/load_segment_registers.py b/src/arch/x86/isa/insts/general_purpose/load_segment_registers.py index 1967820114..923c7888c3 100644 --- a/src/arch/x86/isa/insts/general_purpose/load_segment_registers.py +++ b/src/arch/x86/isa/insts/general_purpose/load_segment_registers.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # # Real mode versions of the load far pointer instructions. @@ -147,8 +147,8 @@ def macroop LSS_REAL_R_P { panic "Real mode LSS doesn't support RIP relative addressing." 
}; -''' -#let {{ +""" +# let {{ # class LDS(Inst): # "GenFault ${new UnimpInstFault}" # class LES(Inst): @@ -163,4 +163,4 @@ def macroop LSS_REAL_R_P { # "GenFault ${new UnimpInstFault}" # class POP(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/logical.py b/src/arch/x86/isa/insts/general_purpose/logical.py index dc6e363b56..85c70ac707 100644 --- a/src/arch/x86/isa/insts/general_purpose/logical.py +++ b/src/arch/x86/isa/insts/general_purpose/logical.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop OR_R_R { or reg, reg, regm, flags=(OF,SF,ZF,PF,CF) @@ -365,4 +365,4 @@ def macroop NOT_LOCKED_P stul t2, seg, riprel, disp mfence }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/no_operation.py b/src/arch/x86/isa/insts/general_purpose/no_operation.py index fd97554d5b..0dabd28856 100644 --- a/src/arch/x86/isa/insts/general_purpose/no_operation.py +++ b/src/arch/x86/isa/insts/general_purpose/no_operation.py @@ -36,7 +36,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop NOP { fault "NoFault" @@ -46,4 +46,4 @@ def macroop HINT_NOP { fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/__init__.py b/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/__init__.py index b72512184b..2675ed2429 100644 --- a/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/__init__.py @@ -33,8 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["rotate", - "shift"] +categories = ["rotate", "shift"] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/rotate.py b/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/rotate.py index ddbee2daa6..31d07a2377 100644 --- a/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/rotate.py +++ b/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/rotate.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop ROL_R_I { roli reg, reg, imm, flags=(OF,CF) @@ -273,4 +273,4 @@ def macroop RCR_P_R rcr t1, t1, reg, flags=(OF,CF) st t1, seg, riprel, disp }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py b/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py index 888db1af25..ba4a7b68fb 100644 --- a/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py +++ b/src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop SAL_R_I { slli reg, reg, imm, flags=(CF,OF,SF,ZF,PF) @@ -305,4 +305,4 @@ def macroop SAR_P_R sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF) st t1, seg, riprel, disp }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/semaphores.py b/src/arch/x86/isa/insts/general_purpose/semaphores.py index aae67fac70..6eeb46ea08 100644 --- a/src/arch/x86/isa/insts/general_purpose/semaphores.py +++ b/src/arch/x86/isa/insts/general_purpose/semaphores.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CMPXCHG_R_R { sub t0, rax, reg, flags=(OF, SF, ZF, AF, PF, CF) mov reg, reg, regm, flags=(CZF,) @@ -123,12 +123,12 @@ def macroop XADD_R_R { mov reg, reg, t2 }; -''' +""" # Despite the name, this microcode sequence implements both # cmpxchg8b and cmpxchg16b, depending on the dynamic value # of dataSize. -cmpxchg8bCode = ''' +cmpxchg8bCode = """ def macroop CMPXCHG8B_%(suffix)s { .adjust_env clampOsz %(rdip)s @@ -153,26 +153,42 @@ doneComparing: stsplit%(ul)s (t2, t3), seg, [1, t0, t1], disp=0 %(mfence)s }; -''' +""" -microcode += cmpxchg8bCode % {"rdip": "", "sib": "sib", - "l": "", "ul": "", - "mfence": "", - "suffix": "M"} -microcode += cmpxchg8bCode % {"rdip": "rdip t7", "sib": "riprel", - "l": "", "ul": "", - "mfence": "", - "suffix": "P"} -microcode += cmpxchg8bCode % {"rdip": "", "sib": "sib", - "l": "l", "ul": "ul", - "mfence": "mfence", - "suffix": "LOCKED_M"} -microcode += cmpxchg8bCode % {"rdip": "rdip t7", "sib": "riprel", - "l": "l", "ul": "ul", - "mfence": "mfence", - "suffix": "LOCKED_P"} +microcode += cmpxchg8bCode % { + "rdip": "", + "sib": "sib", + "l": "", + "ul": "", + "mfence": "", + "suffix": "M", +} +microcode += cmpxchg8bCode % { + "rdip": "rdip t7", + "sib": "riprel", + "l": "", + "ul": "", + "mfence": "", + "suffix": "P", +} +microcode += cmpxchg8bCode % { + "rdip": "", + "sib": "sib", + "l": "l", + "ul": "ul", + "mfence": "mfence", + "suffix": "LOCKED_M", +} +microcode += cmpxchg8bCode % { + "rdip": "rdip t7", + "sib": "riprel", + "l": "l", + "ul": "ul", + "mfence": "mfence", + "suffix": "LOCKED_P", +} -#let {{ +# let {{ # class XCHG(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/general_purpose/string/__init__.py b/src/arch/x86/isa/insts/general_purpose/string/__init__.py index 2bf155270c..0f7e81a82c 100644 --- a/src/arch/x86/isa/insts/general_purpose/string/__init__.py +++ b/src/arch/x86/isa/insts/general_purpose/string/__init__.py @@ -33,11 +33,13 
@@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["compare_strings", - "load_string", - "move_string", - "scan_string", - "store_string"] +categories = [ + "compare_strings", + "load_string", + "move_string", + "scan_string", + "store_string", +] microcode = "" for category in categories: diff --git a/src/arch/x86/isa/insts/general_purpose/string/compare_strings.py b/src/arch/x86/isa/insts/general_purpose/string/compare_strings.py index 0bcf18ba64..f2016a9101 100644 --- a/src/arch/x86/isa/insts/general_purpose/string/compare_strings.py +++ b/src/arch/x86/isa/insts/general_purpose/string/compare_strings.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CMPS_M_M { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 @@ -99,4 +99,4 @@ topOfLoop: end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/string/load_string.py b/src/arch/x86/isa/insts/general_purpose/string/load_string.py index ef5a2f0dbd..1af7aea972 100644 --- a/src/arch/x86/isa/insts/general_purpose/string/load_string.py +++ b/src/arch/x86/isa/insts/general_purpose/string/load_string.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop LODS_M { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 @@ -64,4 +64,4 @@ topOfLoop: end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/string/move_string.py b/src/arch/x86/isa/insts/general_purpose/string/move_string.py index 97588cf273..0d575c3e8c 100644 --- a/src/arch/x86/isa/insts/general_purpose/string/move_string.py +++ b/src/arch/x86/isa/insts/general_purpose/string/move_string.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MOVS_M_M { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 @@ -68,4 +68,4 @@ topOfLoop: end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/string/scan_string.py b/src/arch/x86/isa/insts/general_purpose/string/scan_string.py index 50d76cf461..682316d1f1 100644 --- a/src/arch/x86/isa/insts/general_purpose/string/scan_string.py +++ b/src/arch/x86/isa/insts/general_purpose/string/scan_string.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop SCAS_M { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 @@ -94,4 +94,4 @@ end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/string/store_string.py b/src/arch/x86/isa/insts/general_purpose/string/store_string.py index cd2424bfd0..f997f323d1 100644 --- a/src/arch/x86/isa/insts/general_purpose/string/store_string.py +++ b/src/arch/x86/isa/insts/general_purpose/string/store_string.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop STOS_M { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 @@ -64,4 +64,4 @@ topOfLoop: end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/general_purpose/system_calls.py b/src/arch/x86/isa/insts/general_purpose/system_calls.py index 7eff742635..130d576a82 100644 --- a/src/arch/x86/isa/insts/general_purpose/system_calls.py +++ b/src/arch/x86/isa/insts/general_purpose/system_calls.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop SYSCALL_64 { # All 1s. @@ -218,10 +218,10 @@ def macroop SYSRET_NON_64 { panic "The sysret instruction isn't implemented in legacy mode." }; -''' -#let {{ +""" +# let {{ # class SYSENTER(Inst): # "GenFault ${new UnimpInstFault}" # class SYSEXIT(Inst): # "GenFault ${new UnimpInstFault}" -#}}; +# }}; diff --git a/src/arch/x86/isa/insts/romutil.py b/src/arch/x86/isa/insts/romutil.py index 847b9c4b10..7082b4d4a5 100644 --- a/src/arch/x86/isa/insts/romutil.py +++ b/src/arch/x86/isa/insts/romutil.py @@ -24,7 +24,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -intCodeTemplate = ''' +intCodeTemplate = """ def rom { # This vectors the CPU into an interrupt handler in long mode. 
@@ -174,30 +174,33 @@ def rom eret }; -''' +""" -microcode = \ -intCodeTemplate % {\ - "startLabel" : "longModeInterrupt", - "gateCheckType" : "IntGateCheck", - "errorCodeSize" : 0, - "errorCodeCode" : "" -} + \ -intCodeTemplate % {\ - "startLabel" : "longModeSoftInterrupt", - "gateCheckType" : "SoftIntGateCheck", - "errorCodeSize" : 0, - "errorCodeCode" : "" -} + \ -intCodeTemplate % {\ - "startLabel" : "longModeInterruptWithError", - "gateCheckType" : "IntGateCheck", - "errorCodeSize" : 8, - "errorCodeCode" : ''' +microcode = ( + intCodeTemplate + % { + "startLabel": "longModeInterrupt", + "gateCheckType": "IntGateCheck", + "errorCodeSize": 0, + "errorCodeCode": "", + } + + intCodeTemplate + % { + "startLabel": "longModeSoftInterrupt", + "gateCheckType": "SoftIntGateCheck", + "errorCodeSize": 0, + "errorCodeCode": "", + } + + intCodeTemplate + % { + "startLabel": "longModeInterruptWithError", + "gateCheckType": "IntGateCheck", + "errorCodeSize": 8, + "errorCodeCode": """ st t15, hs, [1, t0, t6], dataSize=8, addressSize=8 - ''' -} + \ -''' + """, + } + + """ def rom { # This vectors the CPU into an interrupt handler in legacy mode. @@ -266,4 +269,5 @@ def rom eret }; -''' +""" +) diff --git a/src/arch/x86/isa/insts/simd128/__init__.py b/src/arch/x86/isa/insts/simd128/__init__.py index 8a3fbafe7c..5f0b52c50a 100644 --- a/src/arch/x86/isa/insts/simd128/__init__.py +++ b/src/arch/x86/isa/insts/simd128/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["integer", - "floating_point"] +categories = ["integer", "floating_point"] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/__init__.py index bffb9b9aff..4becf25c29 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["data_transfer", - "data_conversion", - "data_reordering", - "arithmetic", - "compare", - "logical"] +categories = [ + "data_transfer", + "data_conversion", + "data_reordering", + "arithmetic", + "compare", + "logical", +] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/__init__.py index f3119f3856..08bfb09f03 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/__init__.py @@ -33,20 +33,22 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["addition", - "horizontal_addition", - "subtraction", - "horizontal_subtraction", - "simultaneous_addition_and_subtraction", - "multiplication", - "division", - "square_root", - "reciprocal_square_root", - "reciprocal_estimation"] +categories = [ + "addition", + "horizontal_addition", + "subtraction", + "horizontal_subtraction", + "simultaneous_addition_and_subtraction", + "multiplication", + "division", + "square_root", + "reciprocal_square_root", + "reciprocal_estimation", +] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/addition.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/addition.py index 2f6ec4017b..590272b9eb 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/addition.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/addition.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop ADDSS_XMM_XMM { maddf xmml, xmml, xmmlm, size=4, ext=Scalar }; @@ -103,4 +103,4 @@ def macroop ADDPD_XMM_P { maddf xmml, xmml, ufp1, size=8, ext=0 maddf xmmh, xmmh, ufp2, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/division.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/division.py index 9d6498b5b0..341a4151c7 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/division.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/division.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop DIVSS_XMM_XMM { mdivf xmml, xmml, xmmlm, size=4, ext=Scalar }; @@ -103,4 +103,4 @@ def macroop DIVPD_XMM_P { mdivf xmml, xmml, ufp1, size=8, ext=0 mdivf xmmh, xmmh, ufp2, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_addition.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_addition.py index 60a9a75275..bfaaf1cb47 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_addition.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_addition.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop HADDPS_XMM_XMM { shuffle ufp1, xmml, xmmh, ext=((0 << 0) | (2 << 2)), size=4 shuffle ufp2, xmml, xmmh, ext=((1 << 0) | (3 << 2)), size=4 @@ -91,4 +91,4 @@ def macroop HADDPD_XMM_P { maddf xmml, xmmh, xmml, size=8, ext=Scalar maddf xmmh, ufp1, ufp2, size=8, ext=Scalar }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_subtraction.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_subtraction.py index 43523c1be5..a7aea21bc4 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_subtraction.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop HSUBPS_XMM_XMM { shuffle ufp1, xmml, xmmh, ext=((0 << 0) | (2 << 2)), size=4 shuffle ufp2, xmml, xmmh, ext=((1 << 0) | (3 << 2)), size=4 @@ -81,4 +81,4 @@ def macroop HSUBPD_XMM_P { msubf xmml, xmml, xmmh, size=8, ext=Scalar msubf xmmh, ufp1, ufp2, size=8, ext=Scalar }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/multiplication.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/multiplication.py index d4d99381ec..a3127ce30c 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/multiplication.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/multiplication.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MULSS_XMM_XMM { mmulf xmml, xmml, xmmlm, size=4, ext=Scalar }; @@ -103,4 +103,4 @@ def macroop MULPD_XMM_P { mmulf xmml, xmml, ufp1, size=8, ext=0 mmulf xmmh, xmmh, ufp2, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_estimation.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_estimation.py index 83dfa0c09f..13c6f5cb16 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_estimation.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_estimation.py @@ -35,7 +35,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop RCPSS_XMM_XMM { mrcp xmml, xmmlm, size=4, ext=Scalar }; @@ -70,4 +70,4 @@ def macroop RCPPS_XMM_P { mrcp xmml, ufp1, size=4, ext=0 mrcp xmmh, ufp2, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_square_root.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_square_root.py index 2c68df7e4a..d56310b349 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_square_root.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_square_root.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # RSQRTPS # RSQRTPD -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/simultaneous_addition_and_subtraction.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/simultaneous_addition_and_subtraction.py index 0e23ac7ab7..d4540d605c 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/simultaneous_addition_and_subtraction.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/simultaneous_addition_and_subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # ADDSUBPS def macroop ADDSUBPD_XMM_XMM { @@ -55,4 +55,4 @@ def macroop ADDSUBPD_XMM_P { msubf xmmlm, xmml, ufp1, size=8, ext=0 maddf xmmhm, xmmh, ufp2, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/square_root.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/square_root.py index 1181bf7ac6..0d661812c1 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/square_root.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/square_root.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop SQRTSS_XMM_XMM { msqrt xmml, xmmlm, size=4, ext=Scalar }; @@ -103,4 +103,4 @@ def macroop SQRTPD_XMM_P { msqrt xmml, ufp1, size=8, ext=0 msqrt xmmh, ufp2, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/subtraction.py b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/subtraction.py index 765c10c6df..7989289219 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/subtraction.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop SUBSS_XMM_XMM { msubf xmml, xmml, xmmlm, size=4, ext=Scalar }; @@ -103,4 +103,4 @@ def macroop SUBPD_XMM_P { msubf xmml, xmml, ufp1, size=8, ext=0 msubf xmmh, xmmh, ufp2, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/compare/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/compare/__init__.py index 85b1c9fae1..d9f10dcedc 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/compare/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/compare/__init__.py @@ -33,13 +33,15 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["compare_and_write_mask", - "compare_and_write_minimum_or_maximum", - "compare_and_write_rflags"] +categories = [ + "compare_and_write_mask", + "compare_and_write_minimum_or_maximum", + "compare_and_write_rflags", +] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_mask.py b/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_mask.py index a439e59108..d48234cb4b 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_mask.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_mask.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CMPPS_XMM_XMM_I { mcmpf2r xmml, xmml, xmmlm, size=4, ext="IMMEDIATE & mask(3)" mcmpf2r xmmh, xmmh, xmmhm, size=4, ext="IMMEDIATE & mask(3)" @@ -103,4 +103,4 @@ def macroop CMPSD_XMM_P_I { ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 mcmpf2r xmml, xmml, ufp1, size=8, ext="IMMEDIATE |" + Scalar }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_minimum_or_maximum.py b/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_minimum_or_maximum.py index 0bbd7f4399..ec9bf0e06c 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_minimum_or_maximum.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_minimum_or_maximum.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MINPS_XMM_XMM { mminf xmml, xmml, xmmlm, ext=0, size=4 mminf xmmh, xmmh, xmmhm, ext=0, size=4 @@ -173,4 +173,4 @@ def macroop MAXSD_XMM_P { ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 mmaxf xmml, xmml, ufp1, ext=Scalar, size=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_rflags.py b/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_rflags.py index 4cf3378307..811d38d58b 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_rflags.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/compare/compare_and_write_rflags.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop UCOMISS_XMM_XMM { mcmpf2rf xmml, xmmlm, size=4 }; @@ -93,4 +93,4 @@ def macroop COMISD_XMM_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mcmpf2rf xmml, ufp1, size=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/__init__.py index 42afc9a7f9..c8a2d2f2b3 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/__init__.py @@ -33,14 +33,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["convert_floating_point_to_floating_point", - "convert_floating_point_to_xmm_integer", - "convert_floating_point_to_mmx_integer", - "convert_floating_point_to_gpr_integer"] +categories = [ + "convert_floating_point_to_floating_point", + "convert_floating_point_to_xmm_integer", + "convert_floating_point_to_mmx_integer", + "convert_floating_point_to_gpr_integer", +] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_floating_point.py b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_floating_point.py index 632af373b0..138fe1a9a7 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_floating_point.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_floating_point.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CVTSS2SD_XMM_XMM { cvtf2f xmml, xmmlm, destSize=8, srcSize=4, ext=Scalar }; @@ -104,4 +104,4 @@ def macroop CVTPD2PS_XMM_P { cvtf2f xmml, ufp2, destSize=4, srcSize=8, ext=2 lfpimm xmmh, 0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_gpr_integer.py b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_gpr_integer.py index a96591b818..81ff90084d 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_gpr_integer.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_gpr_integer.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CVTSS2SI_R_XMM { cvtf2i ufp1, xmmlm, srcSize=4, destSize=dsz, ext = Scalar + "| 4" mov2int reg, ufp1, size=dsz @@ -105,4 +105,4 @@ def macroop CVTTSD2SI_R_P { cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext=Scalar mov2int reg, ufp1, size=dsz }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_mmx_integer.py b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_mmx_integer.py index 52bf9ae51c..259426c469 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_mmx_integer.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_mmx_integer.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CVTPS2PI_MMX_XMM { cvtf2i mmx, xmmlm, size=4, ext=4 }; @@ -103,4 +103,4 @@ def macroop CVTTPD2PI_MMX_P { cvtf2i mmx, ufp1, srcSize=8, destSize=4, ext=0 cvtf2i mmx, ufp2, srcSize=8, destSize=4, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_xmm_integer.py b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_xmm_integer.py index a4d434f0ba..da9d89ec77 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_xmm_integer.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_xmm_integer.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CVTPS2DQ_XMM_XMM { cvtf2i xmml, xmmlm, size=4, ext=4 cvtf2i xmmh, xmmhm, size=4, ext=4 @@ -119,4 +119,4 @@ def macroop CVTTPD2DQ_XMM_P { cvtf2i xmml, ufp2, srcSize=8, destSize=4, ext=2 lfpimm xmmh, 0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/__init__.py index 78c6e941bf..2584d21ede 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["unpack_and_interleave", - "shuffle"] +categories = ["unpack_and_interleave", "shuffle"] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/shuffle.py b/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/shuffle.py index e7dbab00d9..e4885822fc 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/shuffle.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/shuffle.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop SHUFPS_XMM_XMM_I { shuffle ufp1, xmml, xmmh, size=4, ext="IMMEDIATE" shuffle xmmh, xmmlm, xmmhm, size=4, ext="IMMEDIATE >> 4" @@ -75,4 +75,4 @@ def macroop SHUFPD_XMM_P_I { shuffle xmml, xmml, xmmh, size=8, ext="IMMEDIATE" shuffle xmmh, ufp1, ufp2, size=8, ext="IMMEDIATE >> 1" }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/unpack_and_interleave.py b/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/unpack_and_interleave.py index 06b26e788d..7ed8f6e854 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/unpack_and_interleave.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_reordering/unpack_and_interleave.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop UNPCKLPS_XMM_XMM { unpack xmmh, xmml, xmmlm, ext=1, size=4 unpack xmml, xmml, xmmlm, ext=0, size=4 @@ -104,4 +104,4 @@ def macroop UNPCKHPD_XMM_P { movfp xmml, xmmh movfp xmmh, ufp1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/__init__.py index b9845a9f70..d1e90960ac 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/__init__.py @@ -33,14 +33,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["move", - "move_with_duplication", - "move_non_temporal", - "move_mask"] +categories = [ + "move", + "move_with_duplication", + "move_non_temporal", + "move_mask", +] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move.py b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move.py index 8d71f6dc40..2a4a152c9f 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MOVAPS_XMM_M { # Check low address. 
ldfp xmmh, seg, sib, "DISPLACEMENT + 8", dataSize=8 @@ -276,4 +276,4 @@ def macroop MOVSD_P_XMM { def macroop MOVSD_XMM_XMM { movfp xmml, xmmlm, dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_mask.py b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_mask.py index 5de44a71d8..628ee88c10 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_mask.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_mask.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MOVMSKPS_R_XMM { limm reg, 0 movsign reg, xmmlm, size=4, ext=0 @@ -45,4 +45,4 @@ def macroop MOVMSKPD_R_XMM { movsign reg, xmmlm, size=8, ext=0 movsign reg, xmmhm, size=8, ext=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_non_temporal.py b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_non_temporal.py index 0f979a5030..db64985a5e 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_non_temporal.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_non_temporal.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # movntps is basically the same as movaps, excepting the caching hint and # ordering constraints # We are ignoring the non-temporal hint. 
@@ -70,4 +70,4 @@ def macroop MOVNTPD_P_XMM { stfp xmml, seg, riprel, "DISPLACEMENT", dataSize=8 stfp xmmh, seg, riprel, "DISPLACEMENT + 8", dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_with_duplication.py b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_with_duplication.py index ebe20d6c04..3207d933fe 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_with_duplication.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_with_duplication.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MOVDDUP_XMM_XMM { movfp xmmh, xmmlm, dataSize=8 movfp xmml, xmmlm, dataSize=8 @@ -52,4 +52,4 @@ def macroop MOVDDUP_XMM_P { # MOVSLDUP # MOVSHDUP -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/logical/__init__.py b/src/arch/x86/isa/insts/simd128/floating_point/logical/__init__.py index 6fc94c2867..996aa74e93 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/logical/__init__.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/logical/__init__.py @@ -33,13 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["andp", - "orp", - "exclusive_or"] +categories = ["andp", "orp", "exclusive_or"] -microcode = ''' +microcode = """ # SSE instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/floating_point/logical/andp.py b/src/arch/x86/isa/insts/simd128/floating_point/logical/andp.py index d3e88f8b1e..d0f32254a9 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/logical/andp.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/logical/andp.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop ANDPS_XMM_XMM { mand xmml, xmml, xmmlm mand xmmh, xmmh, xmmhm @@ -121,4 +121,4 @@ def macroop ANDNPD_XMM_P { mandn xmml, xmml, ufp1 mandn xmmh, xmmh, ufp2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/logical/exclusive_or.py b/src/arch/x86/isa/insts/simd128/floating_point/logical/exclusive_or.py index 405c5bdc0a..976df19673 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/logical/exclusive_or.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/logical/exclusive_or.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop XORPD_XMM_XMM { mxor xmml, xmml, xmmlm mxor xmmh, xmmh, xmmhm @@ -73,4 +73,4 @@ def macroop XORPS_XMM_P { mxor xmml, xmml, ufp1 mxor xmmh, xmmh, ufp2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/floating_point/logical/orp.py b/src/arch/x86/isa/insts/simd128/floating_point/logical/orp.py index 8cdcd4df1a..e8b338abd8 100644 --- a/src/arch/x86/isa/insts/simd128/floating_point/logical/orp.py +++ b/src/arch/x86/isa/insts/simd128/floating_point/logical/orp.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop ORPS_XMM_XMM { mor xmml, xmml, xmmlm mor xmmh, xmmh, xmmhm @@ -77,4 +77,4 @@ def macroop ORPD_XMM_P { mor xmml, xmml, ufp1 mor xmmh, xmmh, ufp2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/__init__.py b/src/arch/x86/isa/insts/simd128/integer/__init__.py index ecf20086cd..cf4491f9bf 100644 --- a/src/arch/x86/isa/insts/simd128/integer/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/__init__.py @@ -33,18 +33,20 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["data_transfer", - "data_conversion", - "data_reordering", - "arithmetic", - "shift", - "compare", - "logical", - "save_and_restore_state"] +categories = [ + "data_transfer", + "data_conversion", + "data_reordering", + "arithmetic", + "shift", + "compare", + "logical", + "save_and_restore_state", +] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/__init__.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/__init__.py index 8b4681180f..01ae49f88e 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["addition", - "subtraction", - "multiplication", - "multiply_add", - "average", - "sum_of_absolute_differences"] +categories = [ + "addition", + "subtraction", + "multiplication", + "multiply_add", + "average", + "sum_of_absolute_differences", +] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/addition.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/addition.py index 706e122461..717e435f4a 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/addition.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/addition.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PADDB_XMM_XMM { maddi xmml, xmml, xmmlm, size=1, ext=0 maddi xmmh, xmmh, xmmhm, size=1, ext=0 @@ -193,4 +193,4 @@ def macroop PADDUSW_XMM_P { maddi xmml, xmml, ufp1, size=2, ext=2 maddi xmmh, xmmh, ufp2, size=2, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/average.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/average.py index a79e9eb20e..e1c0c2bce2 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/average.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/average.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PAVGB_XMM_XMM { mavg xmml, xmml, xmmlm, size=1, ext=0 mavg xmmh, xmmh, xmmhm, size=1, ext=0 @@ -93,4 +93,4 @@ def macroop PAVGW_XMM_P { mavg xmml, xmml, ufp1, size=2, ext=0 mavg xmmh, xmmh, ufp2, size=2, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiplication.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiplication.py index 3896bce1bc..3246686d2c 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiplication.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiplication.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PMULHW_XMM_XMM { mmuli xmml, xmml, xmmlm, size=2, ext = Signed + "|" + MultHi mmuli xmmh, xmmh, xmmhm, size=2, ext = Signed + "|" + MultHi @@ -113,4 +113,4 @@ def macroop PMULUDQ_XMM_P { mmuli xmml, xmml, ufp1, srcSize=4, destSize=8, ext=Scalar mmuli xmmh, xmmh, ufp2, srcSize=4, destSize=8, ext=Scalar }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiply_add.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiply_add.py index 78c1723566..a54a577f56 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiply_add.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/multiply_add.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PMADDWD_XMM_XMM { mmuli ufp3, xmml, xmmlm, srcSize=2, destSize=4, ext = Signed + "| 0x10 | 0x20" mmuli ufp4, xmml, xmmlm, srcSize=2, destSize=4, ext = Signed + "| 0x10" @@ -65,4 +65,4 @@ def macroop PMADDWD_XMM_P { mmuli ufp4, xmmh, ufp2, srcSize=2, destSize=4, ext = Signed + "| 0x10" maddi xmmh, ufp3, ufp4, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/subtraction.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/subtraction.py index 0da84bc4a0..dcbb3ace69 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/subtraction.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PSUBB_XMM_XMM { msubi xmml, xmml, xmmlm, size=1, ext=0 msubi xmmh, xmmh, xmmhm, size=1, ext=0 @@ -193,4 +193,4 @@ def macroop PSUBUSW_XMM_P { msubi xmml, xmml, ufp1, size=2, ext=2 msubi xmmh, xmmh, ufp2, size=2, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/arithmetic/sum_of_absolute_differences.py b/src/arch/x86/isa/insts/simd128/integer/arithmetic/sum_of_absolute_differences.py index e6dad7d852..a5c35ad163 100644 --- a/src/arch/x86/isa/insts/simd128/integer/arithmetic/sum_of_absolute_differences.py +++ b/src/arch/x86/isa/insts/simd128/integer/arithmetic/sum_of_absolute_differences.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PSADBW_XMM_XMM { msad xmml, xmml, xmmlm, srcSize=1, destSize=2 msad xmmh, xmmh, xmmhm, srcSize=1, destSize=2 @@ -53,4 +53,4 @@ def macroop PSADBW_XMM_P { msad xmml, xmml, ufp1, srcSize=1, destSize=2 msad xmmh, xmmh, ufp2, srcSize=1, destSize=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/compare/__init__.py b/src/arch/x86/isa/insts/simd128/integer/compare/__init__.py index 08c0e8632b..df0bc81b10 100644 --- a/src/arch/x86/isa/insts/simd128/integer/compare/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/compare/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["compare_and_write_mask", - "compare_and_write_minimum_or_maximum"] +categories = ["compare_and_write_mask", "compare_and_write_minimum_or_maximum"] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_mask.py b/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_mask.py index fad83988f7..7fb4fe621f 100644 --- a/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_mask.py +++ b/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_mask.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PCMPEQB_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=1, ext=0 mcmpi2r xmmh, xmmh, xmmhm, size=1, ext=0 @@ -153,4 +153,4 @@ def macroop PCMPGTD_XMM_P { mcmpi2r xmml, xmml, ufp1, size=4, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=4, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_minimum_or_maximum.py b/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_minimum_or_maximum.py index 0bf0492fa9..7e863091a0 100644 --- a/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_minimum_or_maximum.py +++ b/src/arch/x86/isa/insts/simd128/integer/compare/compare_and_write_minimum_or_maximum.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PMINUB_XMM_XMM { mmini xmml, xmml, xmmlm, size=1, ext=0 mmini xmmh, xmmh, xmmhm, size=1, ext=0 @@ -113,4 +113,4 @@ def macroop PMAXSW_XMM_P { mmaxi xmml, xmml, ufp1, size=2, ext=Signed mmaxi xmmh, xmmh, ufp2, size=2, ext=Signed }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_conversion/__init__.py b/src/arch/x86/isa/insts/simd128/integer/data_conversion/__init__.py index 8beaf881c6..3212cf0636 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_conversion/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_conversion/__init__.py @@ -33,13 +33,15 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["convert_integer_to_floating_point", - "convert_mmx_integer_to_floating_point", - "convert_gpr_integer_to_floating_point"] +categories = [ + "convert_integer_to_floating_point", + "convert_mmx_integer_to_floating_point", + "convert_gpr_integer_to_floating_point", +] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific conversion instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_gpr_integer_to_floating_point.py b/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_gpr_integer_to_floating_point.py index ad2b86e472..3671784d5b 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_gpr_integer_to_floating_point.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_gpr_integer_to_floating_point.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CVTSI2SS_XMM_R { mov2fp ufp1, regm, destSize=dsz, srcSize=dsz cvti2f xmml, ufp1, srcSize=dsz, destSize=4, ext=Scalar @@ -65,4 +65,4 @@ def macroop CVTSI2SD_XMM_P { ldfp ufp1, seg, riprel, disp, dataSize=8 cvti2f xmml, ufp1, srcSize=dsz, destSize=8, ext=Scalar }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_integer_to_floating_point.py b/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_integer_to_floating_point.py index 75f63683ad..0bbb55a08e 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_integer_to_floating_point.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_integer_to_floating_point.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CVTDQ2PS_XMM_XMM { cvti2f xmml, xmmlm, size=4, ext=0 cvti2f xmmh, xmmhm, size=4, ext=0 @@ -71,4 +71,4 @@ def macroop CVTDQ2PD_XMM_P { cvti2f xmml, ufp1, srcSize=4, destSize=8, ext=0 cvti2f xmmh, ufp1, srcSize=4, destSize=8, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_mmx_integer_to_floating_point.py b/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_mmx_integer_to_floating_point.py index e60735f307..ba77057fba 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_mmx_integer_to_floating_point.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_mmx_integer_to_floating_point.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop CVTPI2PS_XMM_MMX { cvti2f xmml, mmxm, size=4, ext=0 }; @@ -66,4 +66,4 @@ def macroop CVTPI2PD_XMM_P { cvti2f xmml, ufp1, srcSize=4, destSize=8, ext=0 cvti2f xmmh, ufp1, srcSize=4, destSize=8, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_reordering/__init__.py b/src/arch/x86/isa/insts/simd128/integer/data_reordering/__init__.py index ceb6e45ba4..1c0d574847 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_reordering/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_reordering/__init__.py @@ -33,14 +33,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["pack_with_saturation", - "unpack_and_interleave", - "extract_and_insert", - "shuffle"] +categories = [ + "pack_with_saturation", + "unpack_and_interleave", + "extract_and_insert", + "shuffle", +] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/data_reordering/extract_and_insert.py b/src/arch/x86/isa/insts/simd128/integer/data_reordering/extract_and_insert.py index 939f4a65d4..f955cbaa16 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_reordering/extract_and_insert.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_reordering/extract_and_insert.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PEXTRW_R_XMM_I { mov2int reg, xmmlm, "IMMEDIATE & mask(3)", size=2, ext=1 mov2int reg, xmmhm, "IMMEDIATE & mask(3)", size=2, ext=1 @@ -56,4 +56,4 @@ def macroop PINSRW_XMM_P_I { mov2fp xmml, t1, "IMMEDIATE & mask(3)", size=2, ext=1 mov2fp xmmh, t1, "IMMEDIATE & mask(3)", size=2, ext=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_reordering/pack_with_saturation.py b/src/arch/x86/isa/insts/simd128/integer/data_reordering/pack_with_saturation.py index 9514c8f1d9..2307ecfcef 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_reordering/pack_with_saturation.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_reordering/pack_with_saturation.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PACKSSDW_XMM_XMM { pack ufp1, xmml, xmmh, ext=Signed, srcSize=4, destSize=2 pack xmmh, xmmlm, xmmhm, ext=Signed, srcSize=4, destSize=2 @@ -96,4 +96,4 @@ def macroop PACKUSWB_XMM_P { pack xmml, xmml, xmmh, ext=0, srcSize=2, destSize=1 pack xmmh, ufp1, ufp2, ext=0, srcSize=2, destSize=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_reordering/shuffle.py b/src/arch/x86/isa/insts/simd128/integer/data_reordering/shuffle.py index 4187c4f59c..0fe09b6d37 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_reordering/shuffle.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_reordering/shuffle.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PSHUFD_XMM_XMM_I { shuffle ufp1, xmmlm, xmmhm, size=4, ext="IMMEDIATE" shuffle xmmh, xmmlm, xmmhm, size=4, ext="IMMEDIATE >> 4" @@ -112,4 +112,4 @@ def macroop PSHUFB_XMM_P { movfp xmml, ufp1, dataSize=8 movfp xmmh, ufp2, dataSize=8 }; -''' \ No newline at end of file +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_reordering/unpack_and_interleave.py b/src/arch/x86/isa/insts/simd128/integer/data_reordering/unpack_and_interleave.py index 54434fb074..36854742ca 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_reordering/unpack_and_interleave.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_reordering/unpack_and_interleave.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PUNPCKLBW_XMM_XMM { unpack xmmh, xmml, xmmlm, ext=1, size=1 unpack xmml, xmml, xmmlm, ext=0, size=1 @@ -180,4 +180,4 @@ def macroop PUNPCKLQDQ_XMM_P { rdip t7 ldfp xmmh, seg, riprel, disp, dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_transfer/__init__.py b/src/arch/x86/isa/insts/simd128/integer/data_transfer/__init__.py index 1231aa178d..a539b156fc 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_transfer/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_transfer/__init__.py @@ -33,13 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["move", - "move_non_temporal", - "move_mask"] +categories = ["move", "move_non_temporal", "move_mask"] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific data transfer instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/data_transfer/move.py b/src/arch/x86/isa/insts/simd128/integer/data_transfer/move.py index d1dd37aba9..576b5dc81c 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_transfer/move.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_transfer/move.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MOVQ_XMM_XMM { movfp xmml, xmmlm lfpimm xmmh, 0 @@ -132,4 +132,4 @@ def macroop LDDQU_XMM_P { ldfp xmml, seg, sib, "DISPLACEMENT", dataSize=8 ldfp xmmh, seg, sib, "DISPLACEMENT + 8", dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_mask.py b/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_mask.py index 33a8fe65e2..0190969f5b 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_mask.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_mask.py @@ -33,10 +33,10 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PMOVMSKB_R_XMM { limm reg, 0 movsign reg, xmmlm, size=1, ext=0 movsign reg, xmmhm, size=1, ext=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_non_temporal.py b/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_non_temporal.py index 6949110565..eee295734f 100644 --- a/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_non_temporal.py +++ b/src/arch/x86/isa/insts/simd128/integer/data_transfer/move_non_temporal.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop MOVNTDQ_M_XMM { warn_once "MOVNTDQ: Ignoring non-temporal hint, modeling as cacheable!" cda seg, sib, "DISPLACEMENT + 8", dataSize=8 @@ -57,4 +57,4 @@ def macroop MASKMOVDQU_XMM_XMM { stfp ufp1, ds, [1, t0, rdi], dataSize=8 stfp ufp2, ds, [1, t0, rdi], 8, dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/logical/__init__.py b/src/arch/x86/isa/insts/simd128/integer/logical/__init__.py index b1da9719a7..72fc2cfd56 100644 --- a/src/arch/x86/isa/insts/simd128/integer/logical/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/logical/__init__.py @@ -33,13 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["pand", - "por", - "exclusive_or"] +categories = ["pand", "por", "exclusive_or"] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/logical/exclusive_or.py b/src/arch/x86/isa/insts/simd128/integer/logical/exclusive_or.py index f5279497ad..24d3e56ad5 100644 --- a/src/arch/x86/isa/insts/simd128/integer/logical/exclusive_or.py +++ b/src/arch/x86/isa/insts/simd128/integer/logical/exclusive_or.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PXOR_XMM_XMM { mxor xmml, xmml, xmmlm mxor xmmh, xmmh, xmmhm @@ -55,4 +55,4 @@ def macroop PXOR_XMM_P { mxor xmml, xmml, ufp1 mxor xmmh, xmmh, ufp2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/logical/pand.py b/src/arch/x86/isa/insts/simd128/integer/logical/pand.py index 927cd043c9..ca81106054 100644 --- a/src/arch/x86/isa/insts/simd128/integer/logical/pand.py +++ b/src/arch/x86/isa/insts/simd128/integer/logical/pand.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PAND_XMM_XMM { mand xmml, xmml, xmmlm mand xmmh, xmmh, xmmhm @@ -77,4 +77,4 @@ def macroop PANDN_XMM_P { mandn xmml, xmml, ufp1 mandn xmmh, xmmh, ufp2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/logical/por.py b/src/arch/x86/isa/insts/simd128/integer/logical/por.py index 21df88efbf..10bf286df6 100644 --- a/src/arch/x86/isa/insts/simd128/integer/logical/por.py +++ b/src/arch/x86/isa/insts/simd128/integer/logical/por.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop POR_XMM_XMM { mor xmml, xmml, xmmlm mor xmmh, xmmh, xmmhm @@ -55,4 +55,4 @@ def macroop POR_XMM_P { mor xmml, xmml, ufp1 mor xmmh, xmmh, ufp2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/__init__.py b/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/__init__.py index 53d4819070..ee0ee06639 100644 --- a/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["save_and_restore_state", - "save_and_restore_control_and_status"] +categories = ["save_and_restore_state", "save_and_restore_control_and_status"] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_control_and_status.py b/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_control_and_status.py index 0fcc3dc935..6d6794ae29 100644 --- a/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_control_and_status.py +++ b/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_control_and_status.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop STMXCSR_M { rdval t1, ctrlRegIdx("misc_reg::Mxcsr") st t1, seg, sib, disp @@ -55,4 +55,4 @@ def macroop LDMXCSR_P { ld t1, seg, riprel, disp wrval ctrlRegIdx("misc_reg::Mxcsr"), t1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_state.py b/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_state.py index 9addadb78b..9351a40a8a 100644 --- a/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_state.py +++ b/src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_state.py @@ -29,46 +29,47 @@ # t7 == base address (RIP or SIB) -loadX87RegTemplate = ''' +loadX87RegTemplate = """ ld t1, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i", dataSize=8 ld t2, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i + 8", dataSize=2 cvtint_fp80 st(%(idx)i), t1, t2 -''' +""" -storeX87RegTemplate = ''' +storeX87RegTemplate = """ cvtfp80h_int t1, st(%(idx)i) cvtfp80l_int t2, st(%(idx)i) st t1, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i", dataSize=8 st t2, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i + 8", dataSize=2 -''' +""" -loadXMMRegTemplate = ''' +loadXMMRegTemplate = """ ldfp fpRegIdx("float_reg::xmmLow(%(idx)i)"), seg, %(mode)s, \ "DISPLACEMENT + 160 + 16 * %(idx)i", dataSize=8 ldfp fpRegIdx("float_reg::xmmHigh(%(idx)i)"), seg, %(mode)s, \ "DISPLACEMENT + 160 + 16 * %(idx)i + 8", dataSize=8 -''' +""" -storeXMMRegTemplate = ''' +storeXMMRegTemplate = """ stfp fpRegIdx("float_reg::xmmLow(%(idx)i)"), seg, %(mode)s, \ "DISPLACEMENT + 160 + 16 * %(idx)i", dataSize=8 stfp fpRegIdx("float_reg::xmmHigh(%(idx)i)"), seg, %(mode)s, \ "DISPLACEMENT + 160 + 16 * %(idx)i + 8", dataSize=8 -''' +""" -loadAllDataRegs = \ - "".join([loadX87RegTemplate % { "idx" : i, "mode" : "%(mode)s" } - for i in range(8)]) + \ - "".join([loadXMMRegTemplate % { "idx" : i, "mode" : "%(mode)s" } - for i in range(16)]) 
+loadAllDataRegs = "".join( + [loadX87RegTemplate % {"idx": i, "mode": "%(mode)s"} for i in range(8)] +) + "".join( + [loadXMMRegTemplate % {"idx": i, "mode": "%(mode)s"} for i in range(16)] +) -storeAllDataRegs = \ - "".join([storeX87RegTemplate % { "idx" : i, "mode" : "%(mode)s" } - for i in range(8)]) + \ - "".join([storeXMMRegTemplate % { "idx" : i, "mode" : "%(mode)s" } - for i in range(16)]) +storeAllDataRegs = "".join( + [storeX87RegTemplate % {"idx": i, "mode": "%(mode)s"} for i in range(8)] +) + "".join( + [storeXMMRegTemplate % {"idx": i, "mode": "%(mode)s"} for i in range(16)] +) -fxsaveCommonTemplate = """ +fxsaveCommonTemplate = ( + """ rdval t1, fcw st t1, seg, %(mode)s, "DISPLACEMENT + 0", dataSize=2 @@ -89,9 +90,12 @@ fxsaveCommonTemplate = """ # MXCSR_MASK, software assumes the default (0xFFBF) if 0. limm t1, 0xFFFF st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 12", dataSize=4 -""" + storeAllDataRegs +""" + + storeAllDataRegs +) -fxsave32Template = """ +fxsave32Template = ( + """ rdval t1, ctrlRegIdx("misc_reg::Fioff") st t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=4 @@ -103,17 +107,23 @@ fxsave32Template = """ rdval t1, ctrlRegIdx("misc_reg::Foseg") st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 4", dataSize=2 -""" + fxsaveCommonTemplate +""" + + fxsaveCommonTemplate +) -fxsave64Template = """ +fxsave64Template = ( + """ rdval t1, ctrlRegIdx("misc_reg::Fioff") st t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=8 rdval t1, ctrlRegIdx("misc_reg::Fooff") st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 0", dataSize=8 -""" + fxsaveCommonTemplate +""" + + fxsaveCommonTemplate +) -fxrstorCommonTemplate = """ +fxrstorCommonTemplate = ( + """ ld t1, seg, %(mode)s, "DISPLACEMENT + 0", dataSize=2 wrval fcw, t1 @@ -130,9 +140,12 @@ fxrstorCommonTemplate = """ ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 8", dataSize=4 wrval ctrlRegIdx("misc_reg::Mxcsr"), t1 -""" + loadAllDataRegs +""" + + loadAllDataRegs +) -fxrstor32Template = """ +fxrstor32Template = ( + """ ld t1, 
seg, %(mode)s, "DISPLACEMENT + 8", dataSize=4 wrval ctrlRegIdx("misc_reg::Fioff"), t1 @@ -144,9 +157,12 @@ fxrstor32Template = """ ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 4", dataSize=2 wrval ctrlRegIdx("misc_reg::Foseg"), t1 -""" + fxrstorCommonTemplate +""" + + fxrstorCommonTemplate +) -fxrstor64Template = """ +fxrstor64Template = ( + """ limm t2, 0, dataSize=8 ld t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=8 @@ -156,42 +172,62 @@ fxrstor64Template = """ ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 0", dataSize=8 wrval ctrlRegIdx("misc_reg::Fooff"), t1 wrval ctrlRegIdx("misc_reg::Foseg"), t2 -""" + fxrstorCommonTemplate +""" + + fxrstorCommonTemplate +) -microcode = ''' +microcode = ( + """ def macroop FXSAVE_M { -''' + fxsave32Template % { "mode" : "sib" } + ''' +""" + + fxsave32Template % {"mode": "sib"} + + """ }; def macroop FXSAVE_P { rdip t7 -''' + fxsave32Template % { "mode" : "riprel" } + ''' +""" + + fxsave32Template % {"mode": "riprel"} + + """ }; def macroop FXSAVE64_M { -''' + fxsave64Template % { "mode" : "sib" } + ''' +""" + + fxsave64Template % {"mode": "sib"} + + """ }; def macroop FXSAVE64_P { rdip t7 -''' + fxsave64Template % { "mode" : "riprel" } + ''' +""" + + fxsave64Template % {"mode": "riprel"} + + """ }; def macroop FXRSTOR_M { -''' + fxrstor32Template % { "mode" : "sib" } + ''' +""" + + fxrstor32Template % {"mode": "sib"} + + """ }; def macroop FXRSTOR_P { rdip t7 -''' + fxrstor32Template % { "mode" : "riprel" } + ''' +""" + + fxrstor32Template % {"mode": "riprel"} + + """ }; def macroop FXRSTOR64_M { -''' + fxrstor64Template % { "mode" : "sib" } + ''' +""" + + fxrstor64Template % {"mode": "sib"} + + """ }; def macroop FXRSTOR64_P { rdip t7 -''' + fxrstor64Template % { "mode" : "riprel" } + ''' +""" + + fxrstor64Template % {"mode": "riprel"} + + """ }; -''' +""" +) diff --git a/src/arch/x86/isa/insts/simd128/integer/shift/__init__.py b/src/arch/x86/isa/insts/simd128/integer/shift/__init__.py index a4a6d44f53..b3a35cb812 100644 --- 
a/src/arch/x86/isa/insts/simd128/integer/shift/__init__.py +++ b/src/arch/x86/isa/insts/simd128/integer/shift/__init__.py @@ -33,13 +33,15 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["left_logical_shift", - "right_logical_shift", - "right_arithmetic_shift"] +categories = [ + "left_logical_shift", + "right_logical_shift", + "right_arithmetic_shift", +] -microcode = ''' +microcode = """ # 128 bit multimedia and scientific instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd128/integer/shift/left_logical_shift.py b/src/arch/x86/isa/insts/simd128/integer/shift/left_logical_shift.py index 3c363693bf..21e40114d1 100644 --- a/src/arch/x86/isa/insts/simd128/integer/shift/left_logical_shift.py +++ b/src/arch/x86/isa/insts/simd128/integer/shift/left_logical_shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PSLLW_XMM_XMM { msll xmmh, xmmh, xmmlm, size=2, ext=0 msll xmml, xmml, xmmlm, size=2, ext=0 @@ -141,4 +141,4 @@ pslldq_less_8: pslldq_end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/shift/right_arithmetic_shift.py b/src/arch/x86/isa/insts/simd128/integer/shift/right_arithmetic_shift.py index 7191a065b2..ff1dd4a28e 100644 --- a/src/arch/x86/isa/insts/simd128/integer/shift/right_arithmetic_shift.py +++ b/src/arch/x86/isa/insts/simd128/integer/shift/right_arithmetic_shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PSRAW_XMM_XMM { msra xmmh, xmmh, xmmlm, size=2, ext=0 msra xmml, xmml, xmmlm, size=2, ext=0 @@ -79,4 +79,4 @@ def macroop PSRAD_XMM_I { msrai xmml, xmml, imm, size=4, ext=0 msrai xmmh, xmmh, imm, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd128/integer/shift/right_logical_shift.py b/src/arch/x86/isa/insts/simd128/integer/shift/right_logical_shift.py index 91370e1109..76a08ddab6 100644 --- a/src/arch/x86/isa/insts/simd128/integer/shift/right_logical_shift.py +++ b/src/arch/x86/isa/insts/simd128/integer/shift/right_logical_shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PSRLW_XMM_XMM { msrl xmmh, xmmh, xmmlm, size=2, ext=0 msrl xmml, xmml, xmmlm, size=2, ext=0 @@ -139,4 +139,4 @@ psrldq_less_8: psrldq_end: fault "NoFault" }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/__init__.py b/src/arch/x86/isa/insts/simd64/__init__.py index 461a2ff901..5109e99634 100644 --- a/src/arch/x86/isa/insts/simd64/__init__.py +++ b/src/arch/x86/isa/insts/simd64/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["integer", - "floating_point"] +categories = ["integer", "floating_point"] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/floating_point/__init__.py b/src/arch/x86/isa/insts/simd64/floating_point/__init__.py index dcdb8690da..1d4d70f700 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/__init__.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/__init__.py @@ -33,13 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["data_conversion", - "arithmetic", - "compare"] +categories = ["data_conversion", "arithmetic", "compare"] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/__init__.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/__init__.py index 553946778d..59cb06036f 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/__init__.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["addition", - "subtraction", - "multiplication", - "accumulation", - "reciprocal_estimation", - "reciprocal_square_root"] +categories = [ + "addition", + "subtraction", + "multiplication", + "accumulation", + "reciprocal_estimation", + "reciprocal_square_root", +] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/accumulation.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/accumulation.py index 67afba92ab..b97bad137f 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/accumulation.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/accumulation.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # PFACC # PFNACC # PFPNACC -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/addition.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/addition.py index c68a8acec2..f1cced066a 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/addition.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/addition.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # PFADD -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/multiplication.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/multiplication.py index 6fa375470e..47c6678c9d 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/multiplication.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/multiplication.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PFMUL_MMX_MMX { mmulf mmx, mmx, mmxm, size=4, ext=0 }; @@ -48,4 +48,4 @@ def macroop PFMUL_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mmulf mmx, mmx, ufp1, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_estimation.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_estimation.py index 1eb162a4cc..c40c6ffd45 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_estimation.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_estimation.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # PFRCP # PFRCPIT1 # PFRCPIT2 -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_square_root.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_square_root.py index f0b9021417..5f20805792 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_square_root.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/reciprocal_square_root.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # PFRSQRT # PFRSQIT1 -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/subtraction.py b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/subtraction.py index 03c33e7777..cf027baf53 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/subtraction.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PFSUB_MMX_MMX { msubf mmx, mmx, mmxm, size=4, ext=0 }; @@ -63,4 +63,4 @@ def macroop PFSUBR_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 msubf mmx, ufp1, mmx, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/compare/__init__.py b/src/arch/x86/isa/insts/simd64/floating_point/compare/__init__.py index 2f7365b34e..1226c61f98 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/compare/__init__.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/compare/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["compare_and_write_mask", - "compare_and_write_minimum_or_maximum"] +categories = ["compare_and_write_mask", "compare_and_write_minimum_or_maximum"] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_mask.py b/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_mask.py index a8039d4b07..63197a141e 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_mask.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_mask.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # PFCMPEQ # PFCMPGT # PFCMPGE -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_minimum_or_maximum.py b/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_minimum_or_maximum.py index 1f8f323088..bc4b0542a0 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_minimum_or_maximum.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_minimum_or_maximum.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # PFMAX # PFMIN -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/floating_point/data_conversion.py b/src/arch/x86/isa/insts/simd64/floating_point/data_conversion.py index 6212efa187..d45f2a7801 100644 --- a/src/arch/x86/isa/insts/simd64/floating_point/data_conversion.py +++ b/src/arch/x86/isa/insts/simd64/floating_point/data_conversion.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # CVTPS2PI, CVTTPS2PI, CVTPD2PI, and CVTTPD2PI are implemented in simd128 # PF2IW # PF2ID -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/__init__.py b/src/arch/x86/isa/insts/simd64/integer/__init__.py index 4c71d0dc6a..8e5209b926 100644 --- a/src/arch/x86/isa/insts/simd64/integer/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/__init__.py @@ -33,19 +33,21 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-categories = ["exit_media_state", - "data_transfer", - "data_conversion", - "data_reordering", - "arithmetic", - "shift", - "compare", - "logical", - "save_and_restore_state"] +categories = [ + "exit_media_state", + "data_transfer", + "data_conversion", + "data_reordering", + "arithmetic", + "shift", + "compare", + "logical", + "save_and_restore_state", +] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/__init__.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/__init__.py index 0e60ebad5a..4458ee80f6 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["addition", - "subtraction", - "multiplication", - "multiply_add", - "average", - "sum_of_absolute_differences"] +categories = [ + "addition", + "subtraction", + "multiplication", + "multiply_add", + "average", + "sum_of_absolute_differences", +] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/addition.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/addition.py index a990b5b8f9..2cfc6a1ba3 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/addition.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/addition.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PADDB_MMX_MMX { maddi mmx, mmx, mmxm, size=1, ext=0 }; @@ -153,4 +153,4 @@ def macroop PADDUSW_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 maddi mmx, mmx, ufp1, size=2, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/average.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/average.py index 57c9a6c56c..d057b6b38d 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/average.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/average.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PAVGB_MMX_MMX { mavg mmx, mmx, mmxm, size=1, ext=0 }; @@ -64,4 +64,4 @@ def macroop PAVGW_MMX_P { mavg mmx, mmx, ufp1, size=2, ext=0 }; # PAVGUSB -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiplication.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiplication.py index 8994ca4522..c068317470 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiplication.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiplication.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PMULHW_MMX_MMX { mmuli mmx, mmx, mmxm, size=2, ext = Signed + "|" + MultHi }; @@ -108,4 +108,4 @@ def macroop PMULUDQ_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mmuli mmx, mmx, ufp1, srcSize=4, destSize=8, ext=Scalar }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiply_add.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiply_add.py index fcb3710c02..9fabb6a722 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiply_add.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/multiply_add.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PMADDWD_MMX_MMX { mmuli ufp3, mmx, mmxm, srcSize=2, destSize=4, ext = Signed + "| 0x10 | 0x20" mmuli ufp4, mmx, mmxm, srcSize=2, destSize=4, ext = Signed + "| 0x10" @@ -54,4 +54,4 @@ def macroop PMADDWD_MMX_P { mmuli ufp4, mmx, ufp1, srcSize=2, destSize=4, ext = Signed + "| 0x10" maddi mmx, ufp3, ufp4, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/subtraction.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/subtraction.py index 34e8eb6a88..112f99499c 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/subtraction.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PSUBB_MMX_MMX { msubi mmx, mmx, mmxm, size=1, ext=0 }; @@ -153,4 +153,4 @@ def macroop PSUBUSW_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 msubi mmx, mmx, ufp1, size=2, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/arithmetic/sum_of_absolute_differences.py b/src/arch/x86/isa/insts/simd64/integer/arithmetic/sum_of_absolute_differences.py index 1ccd8d8c22..510d6767bc 100644 --- a/src/arch/x86/isa/insts/simd64/integer/arithmetic/sum_of_absolute_differences.py +++ b/src/arch/x86/isa/insts/simd64/integer/arithmetic/sum_of_absolute_differences.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PSADBW_MMX_MMX { msad mmx, mmx, mmxm, srcSize=1, destSize=2 }; @@ -48,4 +48,4 @@ def macroop PSADBW_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 msad mmx, mmx, ufp1, srcSize=1, destSize=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/compare/__init__.py b/src/arch/x86/isa/insts/simd64/integer/compare/__init__.py index 2f7365b34e..1226c61f98 100644 --- a/src/arch/x86/isa/insts/simd64/integer/compare/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/compare/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["compare_and_write_mask", - "compare_and_write_minimum_or_maximum"] +categories = ["compare_and_write_mask", "compare_and_write_minimum_or_maximum"] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_mask.py b/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_mask.py index cf8fe43fa8..55376d79cf 100644 --- a/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_mask.py +++ b/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_mask.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PCMPEQB_MMX_MMX { mcmpi2r mmx, mmx, mmxm, size=1, ext=0 }; @@ -123,4 +123,4 @@ def macroop PCMPGTD_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mcmpi2r mmx, mmx, ufp1, size=4, ext=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_minimum_or_maximum.py b/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_minimum_or_maximum.py index f456520427..ae13ca486e 100644 --- a/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_minimum_or_maximum.py +++ b/src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_minimum_or_maximum.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PMINUB_MMX_MMX { mmini mmx, mmx, mmxm, size=1, ext=0 }; @@ -93,4 +93,4 @@ def macroop PMAXSW_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mmaxi mmx, mmx, ufp1, size=2, ext=Signed }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/data_conversion.py b/src/arch/x86/isa/insts/simd64/integer/data_conversion.py index 31008569cf..9bbc525a8d 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_conversion.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_conversion.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # CVTPI2PS and CVTPI2PD are implemented in simd128 # PI2FW # PI2FD -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/data_reordering/__init__.py b/src/arch/x86/isa/insts/simd64/integer/data_reordering/__init__.py index ce2a85918f..a3ea862ec7 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_reordering/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_reordering/__init__.py @@ -33,14 +33,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["pack_with_saturation", - "unpack_and_interleave", - "extract_and_insert", - "shuffle_and_swap"] +categories = [ + "pack_with_saturation", + "unpack_and_interleave", + "extract_and_insert", + "shuffle_and_swap", +] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/data_reordering/extract_and_insert.py b/src/arch/x86/isa/insts/simd64/integer/data_reordering/extract_and_insert.py index c177b2d705..99182aded5 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_reordering/extract_and_insert.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_reordering/extract_and_insert.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PEXTRW_R_MMX_I { mov2int reg, mmxm, "IMMEDIATE & mask(2)", size=2, ext=0 }; @@ -52,4 +52,4 @@ def macroop PINSRW_MMX_P_I { ld t1, seg, riprel, disp, dataSize=2 mov2fp mmx, t1, "IMMEDIATE & mask(2)", size=2, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/data_reordering/pack_with_saturation.py b/src/arch/x86/isa/insts/simd64/integer/data_reordering/pack_with_saturation.py index 6dc58dc4f4..dbf793fd0d 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_reordering/pack_with_saturation.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_reordering/pack_with_saturation.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PACKSSDW_MMX_MMX { pack mmx, mmx, mmxm, ext=Signed, srcSize=4, destSize=2 }; @@ -78,4 +78,4 @@ def macroop PACKUSWB_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 pack mmx, mmx, ufp1, ext=0, srcSize=2, destSize=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/data_reordering/shuffle_and_swap.py b/src/arch/x86/isa/insts/simd64/integer/data_reordering/shuffle_and_swap.py index c1a0d4d87e..917b98ae13 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_reordering/shuffle_and_swap.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_reordering/shuffle_and_swap.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PSHUFW_MMX_MMX_I { shuffle mmx, mmxm, mmxm, size=2, ext=imm }; @@ -49,5 +49,5 @@ def macroop PSHUFW_MMX_P_I { shuffle mmx, ufp1, ufp1, size=2, ext=imm }; -''' +""" # PSWAPD diff --git a/src/arch/x86/isa/insts/simd64/integer/data_reordering/unpack_and_interleave.py b/src/arch/x86/isa/insts/simd64/integer/data_reordering/unpack_and_interleave.py index 1623ae8ea1..b47fb60206 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_reordering/unpack_and_interleave.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_reordering/unpack_and_interleave.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PUNPCKLBW_MMX_MMX { unpack mmx, mmx, mmxm, ext=0, size=1 }; @@ -123,4 +123,4 @@ def macroop PUNPCKHDQ_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 unpack mmx, mmx, ufp1, ext=1, size=4 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py b/src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py index 5c12feb829..1417770087 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py @@ -33,13 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["move", - "move_non_temporal", - "move_mask"] +categories = ["move", "move_non_temporal", "move_mask"] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/data_transfer/move.py b/src/arch/x86/isa/insts/simd64/integer/data_transfer/move.py index 560c85ab43..51df89cfc5 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_transfer/move.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_transfer/move.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop MOVD_MMX_R { mov2fp mmx, regm, srcSize=dsz, destSize=8 }; @@ -81,6 +81,6 @@ def macroop MOVQ_P_MMX { rdip t7 stfp mmx, seg, riprel, disp, dataSize=8 }; -''' +""" # MOVDQ2Q # MOVQ2DQ diff --git a/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_mask.py b/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_mask.py index bfdc92ca1b..1ed1dd31e5 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_mask.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_mask.py @@ -33,9 +33,9 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PMOVMSKB_R_MMX { limm reg, 0 movsign reg, mmxm, size=1, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_non_temporal.py b/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_non_temporal.py index 16a884ee2c..d30f326582 100644 --- a/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_non_temporal.py +++ b/src/arch/x86/isa/insts/simd64/integer/data_transfer/move_non_temporal.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop MOVNTQ_M_MMX { warn_once "MOVNTQ: Ignoring non-temporal hint, modeling as cacheable!" 
stfp mmx, seg, sib, "DISPLACEMENT", dataSize=8 @@ -50,4 +50,4 @@ def macroop MASKMOVQ_MMX_MMX { maskmov ufp1, mmx, mmxm, size=1 stfp ufp1, ds, [1, t0, rdi], dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/exit_media_state.py b/src/arch/x86/isa/insts/simd64/integer/exit_media_state.py index 3182feeddf..fe9f2ced98 100644 --- a/src/arch/x86/isa/insts/simd64/integer/exit_media_state.py +++ b/src/arch/x86/isa/insts/simd64/integer/exit_media_state.py @@ -33,10 +33,10 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop EMMS { emms }; # FEMMS -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/logical/__init__.py b/src/arch/x86/isa/insts/simd64/integer/logical/__init__.py index a77545ddb2..385af7ecac 100644 --- a/src/arch/x86/isa/insts/simd64/integer/logical/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/logical/__init__.py @@ -33,13 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["pand", - "por", - "exclusive_or"] +categories = ["pand", "por", "exclusive_or"] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/logical/exclusive_or.py b/src/arch/x86/isa/insts/simd64/integer/logical/exclusive_or.py index b6eae4e85a..fba5cbfbd6 100644 --- a/src/arch/x86/isa/insts/simd64/integer/logical/exclusive_or.py +++ b/src/arch/x86/isa/insts/simd64/integer/logical/exclusive_or.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PXOR_MMX_MMX { mxor mmx, mmx, mmxm }; @@ -48,4 +48,4 @@ def macroop PXOR_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mxor mmx, mmx, ufp1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/logical/pand.py b/src/arch/x86/isa/insts/simd64/integer/logical/pand.py index 47468fb68a..6405691ad7 100644 --- a/src/arch/x86/isa/insts/simd64/integer/logical/pand.py +++ b/src/arch/x86/isa/insts/simd64/integer/logical/pand.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PAND_MMX_MMX { mand mmx, mmx, mmxm }; @@ -63,4 +63,4 @@ def macroop PANDN_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mandn mmx, mmx, ufp1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/logical/por.py b/src/arch/x86/isa/insts/simd64/integer/logical/por.py index f5e25193ea..2391788780 100644 --- a/src/arch/x86/isa/insts/simd64/integer/logical/por.py +++ b/src/arch/x86/isa/insts/simd64/integer/logical/por.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop POR_MMX_MMX { mor mmx, mmx, mmxm }; @@ -48,4 +48,4 @@ def macroop POR_MMX_P { ldfp ufp1, seg, riprel, disp, dataSize=8 mor mmx, mmx, ufp1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/save_and_restore_state.py b/src/arch/x86/isa/insts/simd64/integer/save_and_restore_state.py index 15a0a3f996..2b1134f6c2 100644 --- a/src/arch/x86/isa/insts/simd64/integer/save_and_restore_state.py +++ b/src/arch/x86/isa/insts/simd64/integer/save_and_restore_state.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # FSAVE # FNSAVE # FRSTOR -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/shift/__init__.py b/src/arch/x86/isa/insts/simd64/integer/shift/__init__.py index bad162960c..fd8c75fa12 100644 --- a/src/arch/x86/isa/insts/simd64/integer/shift/__init__.py +++ b/src/arch/x86/isa/insts/simd64/integer/shift/__init__.py @@ -33,13 +33,15 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["left_logical_shift", - "right_logical_shift", - "right_arithmetic_shift"] +categories = [ + "left_logical_shift", + "right_logical_shift", + "right_arithmetic_shift", +] -microcode = ''' +microcode = """ # 64 bit multimedia instructions -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/simd64/integer/shift/left_logical_shift.py b/src/arch/x86/isa/insts/simd64/integer/shift/left_logical_shift.py index c4b3bb61ef..4cd581966d 100644 --- a/src/arch/x86/isa/insts/simd64/integer/shift/left_logical_shift.py +++ b/src/arch/x86/isa/insts/simd64/integer/shift/left_logical_shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PSLLW_MMX_MMX { msll mmx, mmx, mmxm, size=2, ext=0 }; @@ -90,4 +90,4 @@ def macroop PSLLQ_MMX_P { def macroop PSLLQ_MMX_I { mslli mmx, mmx, imm, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/shift/right_arithmetic_shift.py b/src/arch/x86/isa/insts/simd64/integer/shift/right_arithmetic_shift.py index f013427366..d517055d25 100644 --- a/src/arch/x86/isa/insts/simd64/integer/shift/right_arithmetic_shift.py +++ b/src/arch/x86/isa/insts/simd64/integer/shift/right_arithmetic_shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop PSRAW_MMX_MMX { msra mmx, mmx, mmxm, size=2, ext=0 }; @@ -71,4 +71,4 @@ def macroop PSRAD_MMX_P { def macroop PSRAD_MMX_I { msrai mmx, mmx, imm, size=4, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/simd64/integer/shift/right_logical_shift.py b/src/arch/x86/isa/insts/simd64/integer/shift/right_logical_shift.py index 76a54af66d..b18fba3bca 100644 --- a/src/arch/x86/isa/insts/simd64/integer/shift/right_logical_shift.py +++ b/src/arch/x86/isa/insts/simd64/integer/shift/right_logical_shift.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop PSRLW_MMX_MMX { msrl mmx, mmx, mmxm, size=2, ext=0 }; @@ -90,4 +90,4 @@ def macroop PSRLQ_MMX_P { def macroop PSRLQ_MMX_I { msrli mmx, mmx, imm, size=8, ext=0 }; -''' +""" diff --git a/src/arch/x86/isa/insts/system/__init__.py b/src/arch/x86/isa/insts/system/__init__.py index af1f1992b4..e84ee3e732 100644 --- a/src/arch/x86/isa/insts/system/__init__.py +++ b/src/arch/x86/isa/insts/system/__init__.py @@ -36,15 +36,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["control_registers", - "halt", - "invlpg", - "undefined_operation", - "msrs", - "segmentation"] +categories = [ + "control_registers", + "halt", + "invlpg", + "undefined_operation", + "msrs", + "segmentation", +] microcode = "" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode - diff --git a/src/arch/x86/isa/insts/system/control_registers.py b/src/arch/x86/isa/insts/system/control_registers.py index 610525824f..089123c0e5 100644 --- a/src/arch/x86/isa/insts/system/control_registers.py +++ b/src/arch/x86/isa/insts/system/control_registers.py @@ -24,7 +24,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop CLTS { rdcr t1, cr0, dataSize=8 andi t1, t1, 0xF7, dataSize=1 @@ -81,4 +81,4 @@ def macroop SMSW_P { rdip t7, dataSize=asz st t1, seg, riprel, disp, dataSize=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/system/halt.py b/src/arch/x86/isa/insts/system/halt.py index 3895018191..7a90f2d846 100644 --- a/src/arch/x86/isa/insts/system/halt.py +++ b/src/arch/x86/isa/insts/system/halt.py @@ -33,8 +33,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop HLT { halt }; -''' +""" diff --git a/src/arch/x86/isa/insts/system/invlpg.py b/src/arch/x86/isa/insts/system/invlpg.py index 68126ef287..2aa3b84ed0 100644 --- a/src/arch/x86/isa/insts/system/invlpg.py +++ b/src/arch/x86/isa/insts/system/invlpg.py @@ -36,7 +36,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop INVLPG_M { .serialize_after tia seg, sib, disp @@ -47,4 +47,4 @@ def macroop INVLPG_P { rdip t7 tia seg, riprel, disp }; -''' +""" diff --git a/src/arch/x86/isa/insts/system/msrs.py b/src/arch/x86/isa/insts/system/msrs.py index 78a9fa1315..ce99e93753 100644 --- a/src/arch/x86/isa/insts/system/msrs.py +++ b/src/arch/x86/isa/insts/system/msrs.py @@ -36,7 +36,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop RDMSR { ld t2, intseg, [8, rcx, t0], "IntAddrPrefixMSR << 3", \ @@ -73,4 +73,4 @@ def macroop RDTSCP srli rdx, t1, 32, dataSize=8 rdval rcx, ctrlRegIdx("misc_reg::TscAux"), dataSize=4 }; -''' +""" diff --git a/src/arch/x86/isa/insts/system/segmentation.py b/src/arch/x86/isa/insts/system/segmentation.py index 448f5c7886..8e3673cf7a 100644 --- a/src/arch/x86/isa/insts/system/segmentation.py +++ b/src/arch/x86/isa/insts/system/segmentation.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop LGDT_M { .serialize_after @@ -340,4 +340,4 @@ def macroop SWAPGS wrbase gs, t1, dataSize=8 wrval kernel_gs_base, t2, dataSize=8 }; -''' +""" diff --git a/src/arch/x86/isa/insts/system/undefined_operation.py b/src/arch/x86/isa/insts/system/undefined_operation.py index 301c0d6ee5..2b7c75854e 100644 --- a/src/arch/x86/isa/insts/system/undefined_operation.py +++ b/src/arch/x86/isa/insts/system/undefined_operation.py @@ -33,9 +33,9 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop UD2 { fault "std::make_shared()" }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/__init__.py b/src/arch/x86/isa/insts/x87/__init__.py index 95fd3be19a..169ac7275e 100644 --- a/src/arch/x86/isa/insts/x87/__init__.py +++ b/src/arch/x86/isa/insts/x87/__init__.py @@ -33,18 +33,20 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["data_transfer_and_conversion", - "load_constants", - "arithmetic", - "transcendental_functions", - "compare_and_test", - "stack_management", - "no_operation", - "control"] +categories = [ + "data_transfer_and_conversion", + "load_constants", + "arithmetic", + "transcendental_functions", + "compare_and_test", + "stack_management", + "no_operation", + "control", +] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/arithmetic/__init__.py b/src/arch/x86/isa/insts/x87/arithmetic/__init__.py index 62fca5e07d..a64665722d 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/__init__.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/__init__.py @@ -33,18 +33,20 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["addition", - "subtraction", - "multiplication", - "division", - "change_sign", - "round", - "partial_remainder", - "square_root"] +categories = [ + "addition", + "subtraction", + "multiplication", + "division", + "change_sign", + "round", + "partial_remainder", + "square_root", +] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/arithmetic/addition.py b/src/arch/x86/isa/insts/x87/arithmetic/addition.py index 1a59ac53d3..7c2f37af65 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/addition.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/addition.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop FADD1_R { addfp st(0), sti, st(0) @@ -92,4 +92,4 @@ def macroop FADDP_P }; # FIADD -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/change_sign.py b/src/arch/x86/isa/insts/x87/arithmetic/change_sign.py index 9756b1ae4e..b3f4aa6a7a 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/change_sign.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/change_sign.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop FABS { absfp st(0), st(0), SetStatus=True @@ -42,4 +42,4 @@ def macroop FABS { def macroop FCHS { chsfp st(0), st(0), SetStatus=True }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/division.py b/src/arch/x86/isa/insts/x87/arithmetic/division.py index df4a9fbb4f..d5aa9f23e4 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/division.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/division.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop FDIV1_R { divfp st(0), st(0), sti @@ -95,4 +95,4 @@ def macroop FDIVP_P # FDIVR # FDIVRP # FIDIVR -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/multiplication.py b/src/arch/x86/isa/insts/x87/arithmetic/multiplication.py index f8584d4811..815bacc10b 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/multiplication.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/multiplication.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop FMUL1_R { mulfp st(0), sti, st(0) @@ -92,4 +92,4 @@ def macroop FMULP_P }; # FIMUL -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/partial_remainder.py b/src/arch/x86/isa/insts/x87/arithmetic/partial_remainder.py index 4193e7d67b..b370c1200f 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/partial_remainder.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/partial_remainder.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop FPREM { premfp st(0), st(1), st(0), SetStatus=True }; @@ -41,4 +41,4 @@ def macroop FPREM { def macroop FPREM1 { premfp st(0), st(1), st(0), SetStatus=True }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/round.py b/src/arch/x86/isa/insts/x87/arithmetic/round.py index 675a3b4352..c9d93524c5 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/round.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/round.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FRNDINT -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/square_root.py b/src/arch/x86/isa/insts/x87/arithmetic/square_root.py index e81192bea4..d645188cd6 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/square_root.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/square_root.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FSQRT -''' +""" diff --git a/src/arch/x86/isa/insts/x87/arithmetic/subtraction.py b/src/arch/x86/isa/insts/x87/arithmetic/subtraction.py index dea12774e4..0835fcb4d8 100644 --- a/src/arch/x86/isa/insts/x87/arithmetic/subtraction.py +++ b/src/arch/x86/isa/insts/x87/arithmetic/subtraction.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop FSUB1_R { subfp st(0), st(0), sti @@ -99,4 +99,4 @@ def macroop FSUBRP_R # FISUB # FSUBR # FISUBR -''' +""" diff --git a/src/arch/x86/isa/insts/x87/compare_and_test/__init__.py b/src/arch/x86/isa/insts/x87/compare_and_test/__init__.py index cdef0380b7..cb4b1093fe 100644 --- a/src/arch/x86/isa/insts/x87/compare_and_test/__init__.py +++ b/src/arch/x86/isa/insts/x87/compare_and_test/__init__.py @@ -33,15 +33,17 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["floating_point_ordered_compare", - "floating_point_unordered_compare", - "integer_compare", - "test", - "classify"] +categories = [ + "floating_point_ordered_compare", + "floating_point_unordered_compare", + "integer_compare", + "test", + "classify", +] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/compare_and_test/classify.py b/src/arch/x86/isa/insts/x87/compare_and_test/classify.py index 89d7df874c..31ddb2fe96 100644 --- a/src/arch/x86/isa/insts/x87/compare_and_test/classify.py +++ b/src/arch/x86/isa/insts/x87/compare_and_test/classify.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # FXAM -''' +""" diff --git a/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_ordered_compare.py b/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_ordered_compare.py index cd348cd133..a9f849fd83 100644 --- a/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_ordered_compare.py +++ b/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_ordered_compare.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FCOM # FCOMP # FCOMPP @@ -44,4 +44,4 @@ def macroop FCOMI_R compfp st(0), sti }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_unordered_compare.py b/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_unordered_compare.py index 0e334e687a..aaed025cb9 100644 --- a/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_unordered_compare.py +++ b/src/arch/x86/isa/insts/x87/compare_and_test/floating_point_unordered_compare.py @@ -35,7 +35,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FUCOM # FUCOMP # FUCOMPP @@ -50,4 +50,4 @@ def macroop FUCOMIP_R { compfp st(0), sti, spm=1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/compare_and_test/integer_compare.py b/src/arch/x86/isa/insts/x87/compare_and_test/integer_compare.py index e8a772089a..593073fd5c 100644 --- a/src/arch/x86/isa/insts/x87/compare_and_test/integer_compare.py +++ b/src/arch/x86/isa/insts/x87/compare_and_test/integer_compare.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # FICOM # FICOMP -''' +""" diff --git a/src/arch/x86/isa/insts/x87/compare_and_test/test.py b/src/arch/x86/isa/insts/x87/compare_and_test/test.py index bb2cd3d23a..81badb6026 100644 --- a/src/arch/x86/isa/insts/x87/compare_and_test/test.py +++ b/src/arch/x86/isa/insts/x87/compare_and_test/test.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FTST -''' +""" diff --git a/src/arch/x86/isa/insts/x87/control/__init__.py b/src/arch/x86/isa/insts/x87/control/__init__.py index 01a1e68baf..00d86f468c 100644 --- a/src/arch/x86/isa/insts/x87/control/__init__.py +++ b/src/arch/x86/isa/insts/x87/control/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["initialize", - "wait_for_exceptions", - "clear_exceptions", - "save_and_restore_x87_control_word", - "save_x87_status_word", - "save_and_restore_x87_environment"] +categories = [ + "initialize", + "wait_for_exceptions", + "clear_exceptions", + "save_and_restore_x87_control_word", + "save_x87_status_word", + "save_and_restore_x87_environment", +] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/control/clear_exceptions.py b/src/arch/x86/isa/insts/x87/control/clear_exceptions.py index 8c1319cd9a..2a1b98f52e 100644 --- a/src/arch/x86/isa/insts/x87/control/clear_exceptions.py +++ b/src/arch/x86/isa/insts/x87/control/clear_exceptions.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # FCLEX # FNCLEX -''' +""" diff --git a/src/arch/x86/isa/insts/x87/control/initialize.py b/src/arch/x86/isa/insts/x87/control/initialize.py index daf089043b..2e3dd2ebff 100644 --- a/src/arch/x86/isa/insts/x87/control/initialize.py +++ b/src/arch/x86/isa/insts/x87/control/initialize.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FINIT # FNINIT -''' +""" diff --git a/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_control_word.py b/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_control_word.py index ed76799a0f..254c43e836 100644 --- a/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_control_word.py +++ b/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_control_word.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop FLDCW_M { ld t1, seg, sib, disp, dataSize=2 @@ -57,4 +57,4 @@ def macroop FNSTCW_P { rdval t1, fcw st t1, seg, riprel, disp, dataSize=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_environment.py b/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_environment.py index 4e49ff6847..c65a8c9241 100644 --- a/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_environment.py +++ b/src/arch/x86/isa/insts/x87/control/save_and_restore_x87_environment.py @@ -88,22 +88,32 @@ fnstenvTemplate = """ wrval fcw, t2 """ -microcode = ''' +microcode = ( + """ def macroop FLDENV_M { -''' + fldenvTemplate % { "mode" : "sib" } + ''' +""" + + fldenvTemplate % {"mode": "sib"} + + """ }; def macroop FLDENV_P { rdip t7 -''' + fldenvTemplate % { "mode" : "riprel" } + ''' +""" + + fldenvTemplate % {"mode": "riprel"} + + """ }; def macroop FNSTENV_M { -''' + fnstenvTemplate % { "mode" : "sib" } + ''' +""" + + fnstenvTemplate % {"mode": "sib"} + + """ }; def macroop FNSTENV_P { rdip t7 -''' + fnstenvTemplate % { "mode" : "riprel" } + ''' +""" + + fnstenvTemplate % {"mode": "riprel"} + + """ }; -''' +""" +) diff --git a/src/arch/x86/isa/insts/x87/control/save_x87_status_word.py b/src/arch/x86/isa/insts/x87/control/save_x87_status_word.py index 9b131bc995..c34107e483 100644 --- a/src/arch/x86/isa/insts/x87/control/save_x87_status_word.py +++ b/src/arch/x86/isa/insts/x87/control/save_x87_status_word.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # FSTSW @@ -52,4 +52,4 @@ def macroop FNSTSW_P { rdval t1, fsw st t1, seg, riprel, disp, dataSize=2 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/control/wait_for_exceptions.py b/src/arch/x86/isa/insts/x87/control/wait_for_exceptions.py index 3c2dc85913..eb2127753c 100644 --- a/src/arch/x86/isa/insts/x87/control/wait_for_exceptions.py +++ b/src/arch/x86/isa/insts/x87/control/wait_for_exceptions.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FWAIT -''' +""" diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/__init__.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/__init__.py index 5f1d4cce44..dcb581e158 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/__init__.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/__init__.py @@ -33,16 +33,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["load_or_store_floating_point", - "convert_and_load_or_store_integer", - "convert_and_load_or_store_bcd", - "conditional_move", - "exchange", - "extract"] +categories = [ + "load_or_store_floating_point", + "convert_and_load_or_store_integer", + "convert_and_load_or_store_bcd", + "conditional_move", + "exchange", + "extract", +] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . 
import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/conditional_move.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/conditional_move.py index c2bd3013bb..918f0d4321 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/conditional_move.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/conditional_move.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FCMOVB # FCMOVBE # FCMOVE @@ -42,4 +42,4 @@ microcode = ''' # FCMOVNE # FCMOVNU # FCMOVU -''' +""" diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_bcd.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_bcd.py index ff2b6eb47e..b85a85ab5c 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_bcd.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_bcd.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FBLD # FBSTP -''' +""" diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_integer.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_integer.py index e06a0865d0..36738034b9 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_integer.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_integer.py @@ -35,7 +35,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # fild common case def macroop FILD_M { ldifp87 ufp1, seg, sib, disp @@ -67,4 +67,4 @@ def macroop FISTP_P }; # FISTTP -''' +""" diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/exchange.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/exchange.py index 5e49f5d811..6ed407d0cb 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/exchange.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/exchange.py @@ -33,11 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop FXCH_R { movfp ufp1, sti movfp sti, st(0) movfp st(0), ufp1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/extract.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/extract.py index 2a9baab30b..7d8cf296b3 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/extract.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/extract.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FXTRACT -''' +""" diff --git a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/load_or_store_floating_point.py b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/load_or_store_floating_point.py index 2516c1a3cf..a9314a5b09 100644 --- a/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/load_or_store_floating_point.py +++ b/src/arch/x86/isa/insts/x87/data_transfer_and_conversion/load_or_store_floating_point.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop FLD_M { ldfp87 ufp1, seg, sib, disp movfp st(-1), ufp1, spm=-1 @@ -109,4 +109,4 @@ def macroop FST80P_P { pop87 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/load_constants/__init__.py b/src/arch/x86/isa/insts/x87/load_constants/__init__.py index bf57ce6b70..b89e81525f 100644 --- a/src/arch/x86/isa/insts/x87/load_constants/__init__.py +++ b/src/arch/x86/isa/insts/x87/load_constants/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["load_0_1_or_pi", - "load_logarithm"] +categories = ["load_0_1_or_pi", "load_logarithm"] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/load_constants/load_0_1_or_pi.py b/src/arch/x86/isa/insts/x87/load_constants/load_0_1_or_pi.py index 8a346f17a8..dcd43f5d64 100644 --- a/src/arch/x86/isa/insts/x87/load_constants/load_0_1_or_pi.py +++ b/src/arch/x86/isa/insts/x87/load_constants/load_0_1_or_pi.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop FLDZ { lfpimm ufp1, 0.0 @@ -51,4 +51,4 @@ def macroop FLDPI { movfp st(-1), ufp1, spm=-1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/load_constants/load_logarithm.py b/src/arch/x86/isa/insts/x87/load_constants/load_logarithm.py index df56f0011c..c7c944cc75 100644 --- a/src/arch/x86/isa/insts/x87/load_constants/load_logarithm.py +++ b/src/arch/x86/isa/insts/x87/load_constants/load_logarithm.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ def macroop FLDL2E { lfpimm ufp1, 1.44269504089 @@ -56,4 +56,4 @@ def macroop FLDLN2 { movfp st(-1), ufp1, spm=-1 }; -''' +""" diff --git a/src/arch/x86/isa/insts/x87/no_operation.py b/src/arch/x86/isa/insts/x87/no_operation.py index b88c2f3986..a1ebf24195 100644 --- a/src/arch/x86/isa/insts/x87/no_operation.py +++ b/src/arch/x86/isa/insts/x87/no_operation.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FNOP -''' +""" diff --git a/src/arch/x86/isa/insts/x87/stack_management/__init__.py b/src/arch/x86/isa/insts/x87/stack_management/__init__.py index 8135a9230f..ffbabaf89f 100644 --- a/src/arch/x86/isa/insts/x87/stack_management/__init__.py +++ b/src/arch/x86/isa/insts/x87/stack_management/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["stack_control", - "clear_state"] +categories = ["stack_control", "clear_state"] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/stack_management/clear_state.py b/src/arch/x86/isa/insts/x87/stack_management/clear_state.py index 8f5a1bafdf..d21b8fc306 100644 --- a/src/arch/x86/isa/insts/x87/stack_management/clear_state.py +++ b/src/arch/x86/isa/insts/x87/stack_management/clear_state.py @@ -33,6 +33,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # FFREE -''' +""" diff --git a/src/arch/x86/isa/insts/x87/stack_management/stack_control.py b/src/arch/x86/isa/insts/x87/stack_management/stack_control.py index e11fd27e8a..ef53e0309a 100644 --- a/src/arch/x86/isa/insts/x87/stack_management/stack_control.py +++ b/src/arch/x86/isa/insts/x87/stack_management/stack_control.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ # FDECSTP # FINCSTP -''' +""" diff --git a/src/arch/x86/isa/insts/x87/transcendental_functions/__init__.py b/src/arch/x86/isa/insts/x87/transcendental_functions/__init__.py index ac8dae2566..d8651fe2ae 100644 --- a/src/arch/x86/isa/insts/x87/transcendental_functions/__init__.py +++ b/src/arch/x86/isa/insts/x87/transcendental_functions/__init__.py @@ -33,12 +33,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -categories = ["trigonometric_functions", - "logarithmic_functions"] +categories = ["trigonometric_functions", "logarithmic_functions"] -microcode = ''' +microcode = """ # X86 microcode -''' +""" for category in categories: exec("from . import %s as cat" % category) microcode += cat.microcode diff --git a/src/arch/x86/isa/insts/x87/transcendental_functions/logarithmic_functions.py b/src/arch/x86/isa/insts/x87/transcendental_functions/logarithmic_functions.py index 39b7fd5ded..7077efc0c4 100644 --- a/src/arch/x86/isa/insts/x87/transcendental_functions/logarithmic_functions.py +++ b/src/arch/x86/isa/insts/x87/transcendental_functions/logarithmic_functions.py @@ -33,7 +33,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-microcode = ''' +microcode = """ # F2XM1 # FSCALE @@ -42,4 +42,4 @@ def macroop FYL2X { }; # FYL2XP1 -''' +""" diff --git a/src/arch/x86/isa/insts/x87/transcendental_functions/trigonometric_functions.py b/src/arch/x86/isa/insts/x87/transcendental_functions/trigonometric_functions.py index 639dfb3635..416ccce70e 100644 --- a/src/arch/x86/isa/insts/x87/transcendental_functions/trigonometric_functions.py +++ b/src/arch/x86/isa/insts/x87/transcendental_functions/trigonometric_functions.py @@ -34,7 +34,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -microcode = ''' +microcode = """ def macroop FSIN { sinfp st(0), st(0) }; @@ -57,4 +57,4 @@ def macroop FPTAN { }; # FPATAN -''' +""" diff --git a/src/arch/x86/isa/macroop.isa b/src/arch/x86/isa/macroop.isa index 6230760474..691e8d011c 100644 --- a/src/arch/x86/isa/macroop.isa +++ b/src/arch/x86/isa/macroop.isa @@ -43,7 +43,7 @@ // Execute method for macroops. 
def template MacroExecPanic {{ Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Tried to execute macroop directly!"); return NoFault; @@ -61,7 +61,7 @@ output header {{ {} Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { panic("Tried to execute macroop directly!"); } diff --git a/src/arch/x86/isa/microops/debug.isa b/src/arch/x86/isa/microops/debug.isa index 62c313f8df..44ccfbb0f9 100644 --- a/src/arch/x86/isa/microops/debug.isa +++ b/src/arch/x86/isa/microops/debug.isa @@ -53,14 +53,14 @@ def template MicroDebugFlagsDeclare {{ const char *inst_mnem, uint64_t set_flags, GenericISA::M5DebugFault *_fault, uint8_t _cc); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template MicroDebugFlagsExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *trace_data) const + trace::InstRecord *trace_data) const { %(op_decl)s %(op_rd)s diff --git a/src/arch/x86/isa/microops/fpop.isa b/src/arch/x86/isa/microops/fpop.isa index 6c6c5c151c..5365c587ec 100644 --- a/src/arch/x86/isa/microops/fpop.isa +++ b/src/arch/x86/isa/microops/fpop.isa @@ -45,7 +45,7 @@ def template MicroFpOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -82,7 +82,7 @@ def template MicroFpOpDeclare {{ uint64_t set_flags, uint8_t data_size, int8_t _spm, Args... 
args); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/x86/isa/microops/ldstop.isa b/src/arch/x86/isa/microops/ldstop.isa index 99a381a9c2..5336f3a5fc 100644 --- a/src/arch/x86/isa/microops/ldstop.isa +++ b/src/arch/x86/isa/microops/ldstop.isa @@ -47,7 +47,7 @@ def template MicroLeaExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; Addr EA; @@ -84,7 +84,7 @@ def template MicroLeaDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -93,7 +93,7 @@ def template MicroLeaDeclare {{ def template MicroLoadExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; Addr EA; @@ -122,7 +122,7 @@ def template MicroLoadExecute {{ def template MicroLoadInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; Addr EA; @@ -141,7 +141,7 @@ def template MicroLoadInitiateAcc {{ def template MicroLoadCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -165,7 +165,7 @@ def template MicroLoadCompleteAcc {{ def template MicroStoreExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -192,7 +192,7 @@ def template MicroStoreExecute {{ def template MicroStoreInitiateAcc {{ Fault %(class_name)s::initiateAcc(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault 
fault = NoFault; @@ -215,7 +215,7 @@ def template MicroStoreInitiateAcc {{ def template MicroStoreCompleteAcc {{ Fault %(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -244,10 +244,10 @@ def template MicroLdStOpDeclare {{ } - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; @@ -268,10 +268,10 @@ def template MicroLdStSplitOpDeclare {{ uint8_t data_size, uint8_t address_size, Request::FlagsType mem_flags); - Fault execute(ExecContext *, Trace::InstRecord *) const override; - Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; + Fault initiateAcc(ExecContext *, trace::InstRecord *) const override; Fault completeAcc(PacketPtr, ExecContext *, - Trace::InstRecord *) const override; + trace::InstRecord *) const override; }; }}; diff --git a/src/arch/x86/isa/microops/limmop.isa b/src/arch/x86/isa/microops/limmop.isa index c1312e6073..003407cfb1 100644 --- a/src/arch/x86/isa/microops/limmop.isa +++ b/src/arch/x86/isa/microops/limmop.isa @@ -42,7 +42,7 @@ def template MicroLimmOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -70,7 +70,7 @@ def template MicroLimmOpDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/x86/isa/microops/mediaop.isa 
b/src/arch/x86/isa/microops/mediaop.isa index 7d765b21fc..5fcf1d378b 100644 --- a/src/arch/x86/isa/microops/mediaop.isa +++ b/src/arch/x86/isa/microops/mediaop.isa @@ -29,7 +29,7 @@ def template MediaOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -64,7 +64,7 @@ def template MediaOpDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; diff --git a/src/arch/x86/isa/microops/regop.isa b/src/arch/x86/isa/microops/regop.isa index 404abb2a44..c7e9f46f10 100644 --- a/src/arch/x86/isa/microops/regop.isa +++ b/src/arch/x86/isa/microops/regop.isa @@ -42,7 +42,7 @@ def template MicroRegOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { Fault fault = NoFault; @@ -86,7 +86,7 @@ def template MicroRegOpDeclare {{ %(cond_control_flag_init)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; @@ -109,7 +109,7 @@ def template MicroRegOpBranchDeclare {{ %(cond_control_flag_init)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::unique_ptr branchTarget( const PCStateBase &branchPC) const override; @@ -898,8 +898,9 @@ let {{ uint64_t top = flags.cf << (dataSize * 8 - realShiftAmt); if (realShiftAmt > 1) top |= PSrcReg1 << (dataSize * 8 - realShiftAmt + 1); - uint64_t bottom = - bits(PSrcReg1, dataSize * 8 - 1, realShiftAmt); + uint64_t bottom = 0; + if (realShiftAmt != dataSize * 8) + bottom = bits(PSrcReg1, dataSize * 8 - 1, realShiftAmt); DestReg = merge(DestReg, dest, top | bottom, dataSize); } else DestReg = merge(DestReg, dest, DestReg, dataSize); diff --git 
a/src/arch/x86/isa/microops/seqop.isa b/src/arch/x86/isa/microops/seqop.isa index 1c8ce63dd4..b24ef9eede 100644 --- a/src/arch/x86/isa/microops/seqop.isa +++ b/src/arch/x86/isa/microops/seqop.isa @@ -49,7 +49,7 @@ def template BrDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; std::unique_ptr branchTarget(const PCStateBase &branch_pc) const override @@ -84,14 +84,14 @@ def template EretDeclare {{ %(constructor)s; } - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template SeqOpExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; diff --git a/src/arch/x86/isa/microops/specop.isa b/src/arch/x86/isa/microops/specop.isa index 0a720ee0f9..da2f340a27 100644 --- a/src/arch/x86/isa/microops/specop.isa +++ b/src/arch/x86/isa/microops/specop.isa @@ -50,14 +50,14 @@ def template MicroFaultDeclare {{ %(class_name)s(ExtMachInst mach_inst, const char *inst_mnem, uint64_t set_flags, Fault _fault, uint8_t _cc); - Fault execute(ExecContext *, Trace::InstRecord *) const override; + Fault execute(ExecContext *, trace::InstRecord *) const override; }; }}; def template MicroFaultExecute {{ Fault %(class_name)s::execute(ExecContext *xc, - Trace::InstRecord *traceData) const + trace::InstRecord *traceData) const { %(op_decl)s; %(op_rd)s; @@ -134,7 +134,7 @@ let {{ }}; def template MicroFenceOpDeclare {{ - class %(class_name)s : public X86ISA::X86MicroopBase + class %(class_name)s : public %(base_class)s { private: %(reg_idx_arr_decl)s; @@ -144,7 +144,7 @@ def template MicroFenceOpDeclare {{ uint64_t set_flags); Fault - execute(ExecContext *, Trace::InstRecord *) const override + execute(ExecContext *, trace::InstRecord *) const override { return NoFault; } @@ -190,4 
+190,95 @@ let {{ {"code" : ""}) header_output += MicroFenceOpDeclare.subst(iop) decoder_output += MicroFenceOpConstructor.subst(iop) + # exec_output += BasicExecute.subst(iop) +}}; + +let {{ + class SfenceOp(X86Microop): + def __init__(self): + self.className = "Sfence" + self.mnemonic = "sfence" + self.instFlags = "| (1ULL << StaticInst::IsWriteBarrier)" + + def getAllocator(self, microFlags): + allocString = ''' + (StaticInstPtr)(new %(class_name)s(machInst, + macrocodeBlock, %(flags)s)) + ''' + allocator = allocString % { + "class_name" : self.className, + "mnemonic" : self.mnemonic, + "flags" : self.microFlagsText(microFlags) + self.instFlags} + return allocator + + microopClasses["sfence"] = SfenceOp +}}; + +let {{ + # Build up the all register version of this micro op + iop = InstObjParams("sfence", "Sfence", 'X86MicroopBase', + {"code" : ""}) + header_output += MicroFenceOpDeclare.subst(iop) + decoder_output += MicroFenceOpConstructor.subst(iop) + # exec_output += BasicExecute.subst(iop) +}}; + +let {{ + class LfenceOp(X86Microop): + def __init__(self): + self.className = "Lfence" + self.mnemonic = "lfence" + self.instFlags = "| (1ULL << StaticInst::IsReadBarrier)" + + def getAllocator(self, microFlags): + allocString = ''' + (StaticInstPtr)(new %(class_name)s(machInst, + macrocodeBlock, %(flags)s)) + ''' + allocator = allocString % { + "class_name" : self.className, + "mnemonic" : self.mnemonic, + "flags" : self.microFlagsText(microFlags) + self.instFlags} + return allocator + + microopClasses["lfence"] = LfenceOp +}}; + +let {{ + # Build up the all register version of this micro op + iop = InstObjParams("lfence", "Lfence", 'X86MicroopBase', + {"code" : ""}) + header_output += MicroFenceOpDeclare.subst(iop) + decoder_output += MicroFenceOpConstructor.subst(iop) + # exec_output += BasicExecute.subst(iop) +}}; + +let {{ + class SerializeOp(X86Microop): + def __init__(self): + self.className = "Serialize" + self.mnemonic = "serialize" + self.instFlags = "| 
(1ULL << StaticInst::IsSerializeAfter)" + + def getAllocator(self, microFlags): + allocString = ''' + (StaticInstPtr)(new %(class_name)s(machInst, + macrocodeBlock, %(flags)s)) + ''' + allocator = allocString % { + "class_name" : self.className, + "mnemonic" : self.mnemonic, + "flags" : self.microFlagsText(microFlags) + self.instFlags} + return allocator + + microopClasses["serialize"] = SerializeOp +}}; + +let {{ + # Build up the all register version of this micro op + iop = InstObjParams("serialize", "Serialize", 'X86MicroopBase', + {"code" : ""}) + header_output += MicroFenceOpDeclare.subst(iop) + decoder_output += MicroFenceOpConstructor.subst(iop) + # exec_output += BasicExecute.subst(iop) }}; diff --git a/src/arch/x86/isa/operands.isa b/src/arch/x86/isa/operands.isa index 8c5df9b9e2..d8bc947085 100644 --- a/src/arch/x86/isa/operands.isa +++ b/src/arch/x86/isa/operands.isa @@ -58,7 +58,7 @@ let {{ @overrideInOperand def regId(self): return f'(({self.reg_spec}) == gem5::X86ISA::int_reg::T0) ? ' \ - f'RegId() : RegId({self.reg_class}, {self.reg_spec})' + f'RegId() : {self.reg_class}[{self.reg_spec}]' def __init__(self, idx, id, data_size='dataSize', *args, **kwargs): super().__init__('uqw', idx, 'IsInteger', id, *args, **kwargs) self.attrs['data_size'] = data_size diff --git a/src/arch/x86/isa/specialize.isa b/src/arch/x86/isa/specialize.isa index d1ce18ece9..a86d5126b6 100644 --- a/src/arch/x86/isa/specialize.isa +++ b/src/arch/x86/isa/specialize.isa @@ -151,7 +151,7 @@ let {{ else: regString = "env.reg" env.addToDisassembly( - "printReg(out, RegId(IntRegClass, %s), regSize);\n" % + "printReg(out, intRegClass[%s], regSize);\n" % regString) Name += "_R" @@ -170,7 +170,7 @@ let {{ # This refers to registers whose index is encoded as part of # the opcode. 
env.addToDisassembly( - "printReg(out, RegId(IntRegClass, %s), regSize);\n" % + "printReg(out, intRegClass[%s], regSize);\n" % InstRegIndex) Name += "_R" @@ -216,7 +216,7 @@ let {{ Name += "_XMM" else: regFormat = \ - "printReg(out, RegId(IntRegClass, %s), regSize);\n" + "printReg(out, intRegClass[%s], regSize);\n" Name += "_R" env.addToDisassembly(regFormat % ModRMRegIndex) elif opType.tag in ("E", "Q", "W"): @@ -238,7 +238,7 @@ let {{ regSuffix = "_XMM" else: regFormat = \ - "printReg(out, RegId(IntRegClass, %s), regSize);\n" + "printReg(out, intRegClass[%s], regSize);\n" regSuffix = "_R" env.addToDisassembly(regFormat % ModRMRegIndex) return doSplitDecode("MODRM_MOD", @@ -266,7 +266,7 @@ let {{ Name += "_XMM" else: regFormat = \ - "printReg(out, RegId(IntRegClass, %s), regSize);\n" + "printReg(out, intRegClass[%s], regSize);\n" Name += "_R" env.addToDisassembly(regFormat % ModRMRegIndex) elif opType.tag in ("X", "Y"): diff --git a/src/arch/x86/kvm/SConscript b/src/arch/x86/kvm/SConscript index 5dba1cb420..7a763fee2c 100644 --- a/src/arch/x86/kvm/SConscript +++ b/src/arch/x86/kvm/SConscript @@ -37,9 +37,5 @@ Import('*') -if not env['CONF']['USE_KVM'] or \ - env['CONF']['TARGET_ISA'] != env['CONF']['KVM_ISA']: - Return() - -SimObject('X86KvmCPU.py', sim_objects=['X86KvmCPU'], tags='x86 isa') -Source('x86_cpu.cc', tags='x86 isa') +SimObject('X86KvmCPU.py', sim_objects=['X86KvmCPU'], tags='x86 kvm') +Source('x86_cpu.cc', tags='x86 kvm') diff --git a/src/arch/arm/O3CPU.py b/src/arch/x86/kvm/SConsopts similarity index 76% rename from src/arch/arm/O3CPU.py rename to src/arch/x86/kvm/SConsopts index 54782e746e..3f8822b7ca 100644 --- a/src/arch/arm/O3CPU.py +++ b/src/arch/x86/kvm/SConsopts @@ -23,9 +23,23 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from m5.objects.ArmCPU import ArmO3CPU +Import('*') -O3CPU = ArmO3CPU +from gem5_scons import warning -# Deprecated -DerivO3CPU = O3CPU +import gem5_scons + +host_isa = None +try: + import platform + host_isa = platform.machine() +except: + pass + +if host_isa == 'x86_64': + with gem5_scons.Configure(main) as conf: + if conf.CheckTypeSize('struct kvm_xsave', + '#include ') != 0: + conf.env['CONF']['KVM_ISA'] = 'x86' + else: + warning("KVM on x86 requires xsave support in kernel headers.") diff --git a/src/arch/x86/kvm/X86KvmCPU.py b/src/arch/x86/kvm/X86KvmCPU.py index 59de5eafda..df32fe8b40 100644 --- a/src/arch/x86/kvm/X86KvmCPU.py +++ b/src/arch/x86/kvm/X86KvmCPU.py @@ -31,10 +31,11 @@ from m5.objects.BaseKvmCPU import BaseKvmCPU from m5.objects.X86CPU import X86CPU from m5.objects.X86MMU import X86MMU + class X86KvmCPU(BaseKvmCPU, X86CPU): - type = 'X86KvmCPU' + type = "X86KvmCPU" cxx_header = "arch/x86/kvm/x86_cpu.hh" - cxx_class = 'gem5::X86KvmCPU' + cxx_class = "gem5::X86KvmCPU" mmu = X86MMU() diff --git a/src/arch/x86/kvm/x86_cpu.cc b/src/arch/x86/kvm/x86_cpu.cc index cc70a57b38..7faa9159ab 100644 --- a/src/arch/x86/kvm/x86_cpu.cc +++ b/src/arch/x86/kvm/x86_cpu.cc @@ -37,6 +37,7 @@ #include "arch/x86/cpuid.hh" #include "arch/x86/faults.hh" #include "arch/x86/interrupts.hh" +#include "arch/x86/regs/float.hh" #include "arch/x86/regs/int.hh" #include "arch/x86/regs/msr.hh" #include "arch/x86/utility.hh" diff --git a/src/arch/x86/linux/linux.hh b/src/arch/x86/linux/linux.hh index 7f71b5fb0f..0c34d09330 100644 --- a/src/arch/x86/linux/linux.hh +++ b/src/arch/x86/linux/linux.hh @@ -242,6 +242,21 @@ class X86Linux64 : public X86Linux, public OpenFlagTable uint64_t mem_unit; /* Memory unit size in bytes */ }; + struct tgt_clone_args + { + uint64_t flags; + uint64_t pidfd; + uint64_t child_tid; + uint64_t parent_tid; + uint64_t exit_signal; + uint64_t stack; + uint64_t stack_size; + uint64_t tls; + uint64_t set_tid; + uint64_t set_tid_size; + uint64_t cgroup; + }; + 
}; class X86Linux32 : public X86Linux, public OpenFlagTable diff --git a/src/arch/x86/linux/se_workload.cc b/src/arch/x86/linux/se_workload.cc index 329dbbdc3e..c8961347d4 100644 --- a/src/arch/x86/linux/se_workload.cc +++ b/src/arch/x86/linux/se_workload.cc @@ -97,12 +97,12 @@ namespace X86ISA EmuLinux::EmuLinux(const Params &p) : SEWorkload(p, PageShift) {} -const std::vector EmuLinux::SyscallABI64::ArgumentRegs = { +const std::vector EmuLinux::SyscallABI64::ArgumentRegs = { int_reg::Rdi, int_reg::Rsi, int_reg::Rdx, int_reg::R10, int_reg::R8, int_reg::R9 }; -const std::vector EmuLinux::SyscallABI32::ArgumentRegs = { +const std::vector EmuLinux::SyscallABI32::ArgumentRegs = { int_reg::Ebx, int_reg::Ecx, int_reg::Edx, int_reg::Esi, int_reg::Edi, int_reg::Ebp }; diff --git a/src/arch/x86/linux/se_workload.hh b/src/arch/x86/linux/se_workload.hh index 4cb3ade261..f170776d82 100644 --- a/src/arch/x86/linux/se_workload.hh +++ b/src/arch/x86/linux/se_workload.hh @@ -57,7 +57,7 @@ namespace X86ISA class EmuLinux : public SEWorkload { public: - using Params = X86EmuLinuxParams; + PARAMS(X86EmuLinux); EmuLinux(const Params &p); @@ -65,7 +65,8 @@ class EmuLinux : public SEWorkload setSystem(System *sys) override { SEWorkload::setSystem(sys); - gdb = BaseRemoteGDB::build(system); + gdb = BaseRemoteGDB::build( + params().remote_gdb_port, system); } loader::Arch getArch() const override { return loader::X86_64; } @@ -79,13 +80,13 @@ class EmuLinux : public SEWorkload struct SyscallABI64 : public GenericSyscallABI64, public X86Linux::SyscallABI { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; struct SyscallABI32 : public GenericSyscallABI32, public X86Linux::SyscallABI { - static const std::vector ArgumentRegs; + static const std::vector ArgumentRegs; }; private: diff --git a/src/arch/x86/linux/syscall_tbl64.cc b/src/arch/x86/linux/syscall_tbl64.cc index 6b6fa2aa49..1e7274cc42 100644 --- a/src/arch/x86/linux/syscall_tbl64.cc +++ 
b/src/arch/x86/linux/syscall_tbl64.cc @@ -361,7 +361,50 @@ SyscallDescTable EmuLinux::syscallDescs64 = { { 311, "proess_vm_writev" }, { 312, "kcmp" }, { 313, "finit_module" }, - { 318, "getrandom", getrandomFunc } + { 318, "getrandom", getrandomFunc }, + { 319, "memfd_create" }, + { 320, "kexec_file_load" }, + { 321, "bpf" }, + { 322, "execveat" }, + { 323, "userfaultfd" }, + { 324, "membarrier" }, + { 325, "mlock2" }, + { 326, "copy_file_range" }, + { 327, "preadv2" }, + { 328, "pwritev2" }, + { 329, "pkey_mprotect" }, + { 330, "pkey_alloc" }, + { 331, "pkey_free" }, + { 332, "statx" }, + { 333, "io_pgetevents" }, + { 334, "rseq", ignoreFunc }, + { 424, "pidfd_send_signal" }, + { 425, "io_uring_setup" }, + { 426, "io_uring_enter" }, + { 427, "io_uring_register" }, + { 428, "open_tree" }, + { 429, "move_mount" }, + { 430, "fsopen" }, + { 431, "fsconfig" }, + { 432, "fsmount" }, + { 433, "fspick" }, + { 434, "pidfd_open" }, + { 435, "clone3", clone3Func }, + { 436, "close_range" }, + { 437, "openat2" }, + { 438, "pidfd_getfd" }, + { 439, "faccessat2" }, + { 440, "process_madvise" }, + { 441, "epoll_pwait2" }, + { 442, "mount_setattr" }, + { 443, "quotactl_fd" }, + { 444, "landlock_create_ruleset" }, + { 445, "landlock_add_rule" }, + { 446, "landlock_restrict_self" }, + { 447, "memfd_secret" }, + { 448, "process_mrelease" }, + { 449, "futex_waitv" }, + { 450, "set_mempolicy_home_node" } }; } // namespace X86ISA diff --git a/src/arch/x86/memhelpers.hh b/src/arch/x86/memhelpers.hh index 9ba4af883d..54cbadf697 100644 --- a/src/arch/x86/memhelpers.hh +++ b/src/arch/x86/memhelpers.hh @@ -45,7 +45,7 @@ namespace X86ISA /// Initiate a read from memory in timing mode. 
static Fault -initiateMemRead(ExecContext *xc, Trace::InstRecord *traceData, Addr addr, +initiateMemRead(ExecContext *xc, trace::InstRecord *traceData, Addr addr, unsigned dataSize, Request::Flags flags) { const std::vector byte_enable(dataSize, true); @@ -54,7 +54,7 @@ initiateMemRead(ExecContext *xc, Trace::InstRecord *traceData, Addr addr, static void getMem(PacketPtr pkt, uint64_t &mem, unsigned dataSize, - Trace::InstRecord *traceData) + trace::InstRecord *traceData) { switch (dataSize) { case 1: @@ -88,7 +88,7 @@ getPackedMem(PacketPtr pkt, std::array &mem, unsigned dataSize) template static void getMem(PacketPtr pkt, std::array &mem, unsigned dataSize, - Trace::InstRecord *traceData) + trace::InstRecord *traceData) { switch (dataSize) { case 4: @@ -106,7 +106,7 @@ getMem(PacketPtr pkt, std::array &mem, unsigned dataSize, static Fault -readMemAtomic(ExecContext *xc, Trace::InstRecord *traceData, Addr addr, +readMemAtomic(ExecContext *xc, trace::InstRecord *traceData, Addr addr, uint64_t &mem, unsigned dataSize, Request::Flags flags) { memset(&mem, 0, sizeof(mem)); @@ -145,7 +145,7 @@ readPackedMemAtomic(ExecContext *xc, Addr addr, std::array &mem, template static Fault -readMemAtomic(ExecContext *xc, Trace::InstRecord *traceData, Addr addr, +readMemAtomic(ExecContext *xc, trace::InstRecord *traceData, Addr addr, std::array &mem, unsigned dataSize, unsigned flags) { @@ -183,7 +183,7 @@ writePackedMem(ExecContext *xc, std::array &mem, Addr addr, } static Fault -writeMemTiming(ExecContext *xc, Trace::InstRecord *traceData, uint64_t mem, +writeMemTiming(ExecContext *xc, trace::InstRecord *traceData, uint64_t mem, unsigned dataSize, Addr addr, Request::Flags flags, uint64_t *res) { @@ -197,7 +197,7 @@ writeMemTiming(ExecContext *xc, Trace::InstRecord *traceData, uint64_t mem, template static Fault -writeMemTiming(ExecContext *xc, Trace::InstRecord *traceData, +writeMemTiming(ExecContext *xc, trace::InstRecord *traceData, std::array &mem, unsigned dataSize, Addr 
addr, unsigned flags, uint64_t *res) { @@ -215,7 +215,7 @@ writeMemTiming(ExecContext *xc, Trace::InstRecord *traceData, } static Fault -writeMemAtomic(ExecContext *xc, Trace::InstRecord *traceData, uint64_t mem, +writeMemAtomic(ExecContext *xc, trace::InstRecord *traceData, uint64_t mem, unsigned dataSize, Addr addr, Request::Flags flags, uint64_t *res) { @@ -232,7 +232,7 @@ writeMemAtomic(ExecContext *xc, Trace::InstRecord *traceData, uint64_t mem, template static Fault -writeMemAtomic(ExecContext *xc, Trace::InstRecord *traceData, +writeMemAtomic(ExecContext *xc, trace::InstRecord *traceData, std::array &mem, unsigned dataSize, Addr addr, unsigned flags, uint64_t *res) { diff --git a/src/arch/x86/nativetrace.cc b/src/arch/x86/nativetrace.cc index 864825c8cd..c999ffa6a7 100644 --- a/src/arch/x86/nativetrace.cc +++ b/src/arch/x86/nativetrace.cc @@ -39,7 +39,7 @@ namespace gem5 { -namespace Trace { +namespace trace { void X86NativeTrace::ThreadState::update(NativeTrace *parent) @@ -188,5 +188,5 @@ X86NativeTrace::check(NativeTraceRecord *record) checkXMM(15, mState.xmm, nState.xmm); } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/arch/x86/nativetrace.hh b/src/arch/x86/nativetrace.hh index 295be72736..a4e17bc3fd 100644 --- a/src/arch/x86/nativetrace.hh +++ b/src/arch/x86/nativetrace.hh @@ -37,7 +37,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { class X86NativeTrace : public NativeTrace { @@ -87,7 +87,7 @@ class X86NativeTrace : public NativeTrace void check(NativeTraceRecord *record); }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __ARCH_X86_NATIVETRACE_HH__ diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc index 705a50917e..bb1ccdb51b 100644 --- a/src/arch/x86/pagetable_walker.cc +++ b/src/arch/x86/pagetable_walker.cc @@ -504,8 +504,22 @@ Walker::WalkerState::stepWalk(PacketPtr &write) } if (doEndWalk) { if (doTLBInsert) - if (!functional) 
- walker->tlb->insert(entry.vaddr, entry); + if (!functional) { + + // Check if PCIDE is set in CR4 + CR4 cr4 = tc->readMiscRegNoEffect(misc_reg::Cr4); + if (cr4.pcide){ + CR3 cr3 = tc->readMiscRegNoEffect(misc_reg::Cr3); + walker->tlb->insert(entry.vaddr, entry, cr3.pcid); + } + else{ + // The current PCID is always 000H if PCIDE + // is not set [sec 4.10.1 of Intel's Software + // Developer Manual] + walker->tlb->insert(entry.vaddr, entry, 0x000); + } + } + endWalk(); } else { PacketPtr oldRead = read; @@ -546,6 +560,7 @@ Walker::WalkerState::setupWalk(Addr vaddr) { VAddr addr = vaddr; CR3 cr3 = tc->readMiscRegNoEffect(misc_reg::Cr3); + CR4 cr4 = tc->readMiscRegNoEffect(misc_reg::Cr4); // Check if we're in long mode or not Efer efer = tc->readMiscRegNoEffect(misc_reg::Efer); dataSize = 8; @@ -557,7 +572,6 @@ Walker::WalkerState::setupWalk(Addr vaddr) enableNX = efer.nxe; } else { // We're in some flavor of legacy mode. - CR4 cr4 = tc->readMiscRegNoEffect(misc_reg::Cr4); if (cr4.pae) { // Do legacy PAE. 
state = PAEPDP; @@ -581,7 +595,10 @@ Walker::WalkerState::setupWalk(Addr vaddr) entry.vaddr = vaddr; Request::Flags flags = Request::PHYSICAL; - if (cr3.pcd) + + // PCD can't be used if CR4.PCIDE=1 [sec 2.5 + // of Intel's Software Developer's manual] + if (!cr4.pcide && cr3.pcd) flags.set(Request::UNCACHEABLE); RequestPtr request = std::make_shared( diff --git a/src/arch/x86/process.cc b/src/arch/x86/process.cc index bca74d5e9c..a195fdf888 100644 --- a/src/arch/x86/process.cc +++ b/src/arch/x86/process.cc @@ -593,6 +593,17 @@ X86_64Process::initState() tc->setMiscReg(misc_reg::Cr0, cr0); tc->setMiscReg(misc_reg::Mxcsr, 0x1f80); + + // Setting CR3 to the process pid so that concatinated + // page addr with lower 12 bits of CR3 can be used in SE + // mode as well to avoid conflicts between tlb entries with + // same virtual addresses belonging to different processes + tc->setMiscReg(misc_reg::Cr3, pTable->pid()); + + // Setting pcide bit in CR4 + CR4 cr4 = tc->readMiscRegNoEffect(misc_reg::Cr4); + cr4.pcide = 1; + tc->setMiscReg(misc_reg::Cr4, cr4); } } } diff --git a/src/arch/x86/regs/SConscript b/src/arch/x86/regs/SConscript index 25e0677845..c3d35bd47b 100644 --- a/src/arch/x86/regs/SConscript +++ b/src/arch/x86/regs/SConscript @@ -28,4 +28,6 @@ Import('*') +Source('float.cc', tags='x86 isa') +Source('int.cc', tags='x86 isa') Source('msr.cc', tags='x86 isa') diff --git a/src/arch/x86/regs/ccr.hh b/src/arch/x86/regs/ccr.hh index 1073edd06c..8433ade44a 100644 --- a/src/arch/x86/regs/ccr.hh +++ b/src/arch/x86/regs/ccr.hh @@ -39,6 +39,7 @@ #define __ARCH_X86_REGS_CCR_HH__ #include "cpu/reg_class.hh" +#include "debug/CCRegs.hh" namespace gem5 { @@ -58,14 +59,23 @@ enum : RegIndex NumRegs }; +} // namespace cc_reg + +inline constexpr RegClass ccRegClass(CCRegClass, CCRegClassName, + cc_reg::NumRegs, debug::CCRegs); + +namespace cc_reg +{ + inline constexpr RegId - Zaps(CCRegClass, _ZapsIdx), - Cfof(CCRegClass, _CfofIdx), - Df(CCRegClass, _DfIdx), - Ecf(CCRegClass, 
_EcfIdx), - Ezf(CCRegClass, _EzfIdx); + Zaps = ccRegClass[_ZapsIdx], + Cfof = ccRegClass[_CfofIdx], + Df = ccRegClass[_DfIdx], + Ecf = ccRegClass[_EcfIdx], + Ezf = ccRegClass[_EzfIdx]; } // namespace cc_reg + } // namespace X86ISA } // namespace gem5 diff --git a/src/arch/x86/vecregs.hh b/src/arch/x86/regs/float.cc similarity index 66% rename from src/arch/x86/vecregs.hh rename to src/arch/x86/regs/float.cc index 64b7911cd2..44bb774dc8 100644 --- a/src/arch/x86/vecregs.hh +++ b/src/arch/x86/regs/float.cc @@ -1,6 +1,5 @@ /* * Copyright (c) 2007 The Hewlett-Packard Development Company - * Copyright (c) 2013 Advanced Micro Devices, Inc. * All rights reserved. * * The license below extends only to copyright in the software and shall @@ -36,23 +35,57 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __ARCH_X86_VECREGS_HH__ -#define __ARCH_X86_VECREGS_HH__ +#include -#include "arch/generic/vec_pred_reg.hh" -#include "arch/generic/vec_reg.hh" +#include "arch/x86/isa.hh" +#include "arch/x86/regs/misc.hh" namespace gem5 { - namespace X86ISA { -// Not applicable to x86 -using VecRegContainer = ::gem5::DummyVecRegContainer; -using VecPredRegContainer = ::gem5::DummyVecPredRegContainer; +std::string +FlatFloatRegClassOps::regName(const RegId &id) const +{ + std::ostringstream ss; + + RegIndex reg_idx = id.index(); + + if (reg_idx < NumMMXRegs) { + ccprintf(ss, "%%mmx%d", reg_idx); + return ss.str(); + } + reg_idx -= NumMMXRegs; + if (reg_idx < NumXMMRegs * 2) { + ccprintf(ss, "%%xmm%d_%s", reg_idx / 2, + (reg_idx % 2) ? 
"high": "low"); + return ss.str(); + } + reg_idx -= NumXMMRegs * 2; + if (reg_idx < NumMicroFpRegs) { + ccprintf(ss, "%%ufp%d", reg_idx); + return ss.str(); + } + reg_idx -= NumMicroFpRegs; + ccprintf(ss, "%%st(%d)", reg_idx); + + return ss.str(); +} + +RegId +FloatRegClassOps::flatten(const BaseISA &isa, const RegId &id) const +{ + RegIndex idx = id.index(); + + if (idx >= float_reg::NumRegs) { + auto &x86_isa = static_cast(isa); + auto x87_top = x86_isa.readMiscRegNoEffect(misc_reg::X87Top); + idx = float_reg::stack(idx - float_reg::NumRegs, x87_top); + } + + return {flatFloatRegClass, idx}; +} } // namespace X86ISA } // namespace gem5 - -#endif // __ARCH_X86_VECREGS_HH__ diff --git a/src/arch/x86/regs/float.hh b/src/arch/x86/regs/float.hh index 45e2169c0c..77f4517b06 100644 --- a/src/arch/x86/regs/float.hh +++ b/src/arch/x86/regs/float.hh @@ -35,11 +35,13 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __ARCH_X86_FLOATREGS_HH__ -#define __ARCH_X86_FLOATREGS_HH__ +#ifndef __ARCH_X86_REGS_FLOAT_HH__ +#define __ARCH_X86_REGS_FLOAT_HH__ #include "arch/x86/x86_traits.hh" #include "base/bitunion.hh" +#include "cpu/reg_class.hh" +#include "debug/FloatRegs.hh" namespace gem5 { @@ -117,40 +119,70 @@ enum FloatRegIndex NumRegs = MicrofpBase + NumMicroFpRegs }; +} // namespace float_reg + +class FlatFloatRegClassOps : public RegClassOps +{ + std::string regName(const RegId &id) const override; +}; + +inline constexpr FlatFloatRegClassOps flatFloatRegClassOps; + +inline constexpr RegClass flatFloatRegClass = + RegClass(FloatRegClass, FloatRegClassName, float_reg::NumRegs, + debug::FloatRegs). + ops(flatFloatRegClassOps); + +class FloatRegClassOps : public FlatFloatRegClassOps +{ + RegId flatten(const BaseISA &isa, const RegId &id) const override; +}; + +inline constexpr FloatRegClassOps floatRegClassOps; + +inline constexpr RegClass floatRegClass = + RegClass(FloatRegClass, FloatRegClassName, float_reg::NumRegs, + debug::FloatRegs). 
+ ops(floatRegClassOps). + needsFlattening(); + +namespace float_reg +{ + static inline RegId mmx(int index) { - return RegId(FloatRegClass, MmxBase + index); + return floatRegClass[MmxBase + index]; } static inline RegId fpr(int index) { - return RegId(FloatRegClass, FprBase + index); + return floatRegClass[FprBase + index]; } static inline RegId xmm(int index) { - return RegId(FloatRegClass, XmmBase + index); + return floatRegClass[XmmBase + index]; } static inline RegId xmmLow(int index) { - return RegId(FloatRegClass, XmmBase + 2 * index); + return floatRegClass[XmmBase + 2 * index]; } static inline RegId xmmHigh(int index) { - return RegId(FloatRegClass, XmmBase + 2 * index + 1); + return floatRegClass[XmmBase + 2 * index + 1]; } static inline RegId microfp(int index) { - return RegId(FloatRegClass, MicrofpBase + index); + return floatRegClass[MicrofpBase + index]; } static inline RegId @@ -164,4 +196,4 @@ stack(int index, int top) } // namespace X86ISA } // namespace gem5 -#endif // __ARCH_X86_FLOATREGS_HH__ +#endif // __ARCH_X86_REGS_FLOAT_HH__ diff --git a/src/arch/x86/regs/int.cc b/src/arch/x86/regs/int.cc new file mode 100644 index 0000000000..fa09c411c4 --- /dev/null +++ b/src/arch/x86/regs/int.cc @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2007 The Hewlett-Packard Development Company + * All rights reserved. + * + * The license below extends only to copyright in the software and shall + * not be construed as granting a license to any other intellectual + * property including but not limited to intellectual property relating + * to a hardware implementation of the functionality of the software + * licensed hereunder. You may use the software subject to the license + * terms below provided that you ensure that this notice is replicated + * unmodified and in its entirety in all distributions of the software, + * modified or unmodified, in source code or in binary form. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "arch/x86/regs/int.hh" + +#include + +namespace gem5 +{ + +namespace X86ISA +{ + +std::string +FlatIntRegClassOps::regName(const RegId &id) const +{ + constexpr const char *abcdFormats[9] = + {"", "%s", "%sx", "", "e%sx", "", "", "", "r%sx"}; + constexpr const char *piFormats[9] = + {"", "%s", "%s", "", "e%s", "", "", "", "r%s"}; + constexpr const char *longFormats[9] = + {"", "r%sb", "r%sw", "", "r%sd", "", "", "", "r%s"}; + constexpr const char *microFormats[9] = + {"", "t%db", "t%dw", "", "t%dd", "", "", "", "t%d"}; + + // Fix size at 8 for now. + constexpr unsigned size = 8; + + RegIndex reg_idx = id.index(); + + std::ostringstream ss; + + const char * suffix = ""; + bool fold = reg_idx & IntFoldBit; + reg_idx &= ~IntFoldBit; + + if (fold) + suffix = "h"; + else if (reg_idx < 8 && size == 1) + suffix = "l"; + + switch (reg_idx) { + case int_reg::Rax: + ccprintf(ss, abcdFormats[size], "a"); + break; + case int_reg::Rbx: + ccprintf(ss, abcdFormats[size], "b"); + break; + case int_reg::Rcx: + ccprintf(ss, abcdFormats[size], "c"); + break; + case int_reg::Rdx: + ccprintf(ss, abcdFormats[size], "d"); + break; + case int_reg::Rsp: + ccprintf(ss, piFormats[size], "sp"); + break; + case int_reg::Rbp: + ccprintf(ss, piFormats[size], "bp"); + break; + case int_reg::Rsi: + ccprintf(ss, piFormats[size], "si"); + break; + case int_reg::Rdi: + ccprintf(ss, piFormats[size], "di"); + break; + case int_reg::R8: + ccprintf(ss, longFormats[size], "8"); + break; + case int_reg::R9: + ccprintf(ss, longFormats[size], "9"); + break; + case int_reg::R10: + ccprintf(ss, longFormats[size], "10"); + break; + case int_reg::R11: + ccprintf(ss, longFormats[size], "11"); + break; + case int_reg::R12: + ccprintf(ss, longFormats[size], "12"); + break; + case int_reg::R13: + ccprintf(ss, longFormats[size], "13"); + break; + case int_reg::R14: + ccprintf(ss, longFormats[size], "14"); + break; + case int_reg::R15: + ccprintf(ss, longFormats[size], "15"); + break; + default: + 
ccprintf(ss, microFormats[size], + reg_idx - int_reg::MicroBegin); + } + ccprintf(ss, suffix); + + return ss.str(); +} + +RegId +IntRegClassOps::flatten(const BaseISA &isa, const RegId &id) const +{ + return {flatIntRegClass, (RegIndex)(id.index() & ~IntFoldBit)}; +} + +} // namespace X86ISA +} // namespace gem5 diff --git a/src/arch/x86/regs/int.hh b/src/arch/x86/regs/int.hh index 445e6a8c9e..06c6767a20 100644 --- a/src/arch/x86/regs/int.hh +++ b/src/arch/x86/regs/int.hh @@ -35,13 +35,14 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __ARCH_X86_INTREGS_HH__ -#define __ARCH_X86_INTREGS_HH__ +#ifndef __ARCH_X86_REGS_INT_HH__ +#define __ARCH_X86_REGS_INT_HH__ #include "arch/x86/x86_traits.hh" #include "base/bitunion.hh" #include "base/logging.hh" #include "cpu/reg_class.hh" +#include "debug/IntRegs.hh" namespace gem5 { @@ -99,30 +100,58 @@ enum : RegIndex NumRegs }; +} // namespace int_reg + +class FlatIntRegClassOps : public RegClassOps +{ + std::string regName(const RegId &id) const override; +}; + +inline constexpr FlatIntRegClassOps flatIntRegClassOps; + +inline constexpr RegClass flatIntRegClass = + RegClass(IntRegClass, IntRegClassName, int_reg::NumRegs, debug::IntRegs). + ops(flatIntRegClassOps); + +class IntRegClassOps : public FlatIntRegClassOps +{ + RegId flatten(const BaseISA &isa, const RegId &id) const override; +}; + +inline constexpr IntRegClassOps intRegClassOps; + +inline constexpr RegClass intRegClass = + RegClass(IntRegClass, IntRegClassName, int_reg::NumRegs, debug::IntRegs). + ops(intRegClassOps). 
+ needsFlattening(); + +namespace int_reg +{ + inline constexpr RegId - Rax(IntRegClass, _RaxIdx), - Rcx(IntRegClass, _RcxIdx), - Rdx(IntRegClass, _RdxIdx), - Rbx(IntRegClass, _RbxIdx), - Rsp(IntRegClass, _RspIdx), - Rbp(IntRegClass, _RbpIdx), - Rsi(IntRegClass, _RsiIdx), - Rdi(IntRegClass, _RdiIdx), - R8(IntRegClass, _R8Idx), - R9(IntRegClass, _R9Idx), - R10(IntRegClass, _R10Idx), - R11(IntRegClass, _R11Idx), - R12(IntRegClass, _R12Idx), - R13(IntRegClass, _R13Idx), - R14(IntRegClass, _R14Idx), - R15(IntRegClass, _R15Idx), - T0(IntRegClass, _T0Idx), - Prodlow(IntRegClass, _ProdlowIdx), - Prodhi(IntRegClass, _ProdhiIdx), - Quotient(IntRegClass, _QuotientIdx), - Remainder(IntRegClass, _RemainderIdx), - Divisor(IntRegClass, _DivisorIdx), - Doublebits(IntRegClass, _DoublebitsIdx); + Rax = intRegClass[_RaxIdx], + Rcx = intRegClass[_RcxIdx], + Rdx = intRegClass[_RdxIdx], + Rbx = intRegClass[_RbxIdx], + Rsp = intRegClass[_RspIdx], + Rbp = intRegClass[_RbpIdx], + Rsi = intRegClass[_RsiIdx], + Rdi = intRegClass[_RdiIdx], + R8 = intRegClass[_R8Idx], + R9 = intRegClass[_R9Idx], + R10 = intRegClass[_R10Idx], + R11 = intRegClass[_R11Idx], + R12 = intRegClass[_R12Idx], + R13 = intRegClass[_R13Idx], + R14 = intRegClass[_R14Idx], + R15 = intRegClass[_R15Idx], + T0 = intRegClass[_T0Idx], + Prodlow = intRegClass[_ProdlowIdx], + Prodhi = intRegClass[_ProdhiIdx], + Quotient = intRegClass[_QuotientIdx], + Remainder = intRegClass[_RemainderIdx], + Divisor = intRegClass[_DivisorIdx], + Doublebits = intRegClass[_DoublebitsIdx]; // Aliases for other register sizes. 
inline constexpr auto @@ -151,7 +180,7 @@ inline constexpr RegIndex IntFoldBit = 1 << 6; inline static constexpr RegId intRegMicro(int index) { - return RegId(IntRegClass, int_reg::MicroBegin + index); + return intRegClass[int_reg::MicroBegin + index]; } inline static constexpr RegId @@ -159,10 +188,10 @@ intRegFolded(RegIndex index, RegIndex foldBit) { if ((index & 0x1C) == 4 && foldBit) index = (index - 4) | foldBit; - return RegId(IntRegClass, index); + return intRegClass[index]; } } // namespace X86ISA } // namespace gem5 -#endif // __ARCH_X86_INTREGS_HH__ +#endif // __ARCH_X86_REGS_INT_HH__ diff --git a/src/arch/x86/regs/misc.hh b/src/arch/x86/regs/misc.hh index f9c526bca1..1784142dbe 100644 --- a/src/arch/x86/regs/misc.hh +++ b/src/arch/x86/regs/misc.hh @@ -42,6 +42,8 @@ #include "arch/x86/x86_traits.hh" #include "base/bitunion.hh" #include "base/logging.hh" +#include "cpu/reg_class.hh" +#include "debug/MiscRegs.hh" //These get defined in some system headers (at least termbits.h). That confuses //things here significantly. @@ -536,6 +538,9 @@ segAttr(int index) } // namespace misc_reg +inline constexpr RegClass miscRegClass(MiscRegClass, MiscRegClassName, + misc_reg::NumRegs, debug::MiscRegs); + /** * A type to describe the condition code bits of the RFLAGS register, * plus two flags, EZF and ECF, which are only visible to microcode. 
@@ -616,12 +621,14 @@ BitUnion64(CR3) // Base Address Bitfield<31, 5> paePdtb; // PAE Addressing Page-Directory-Table // Base Address + Bitfield<11, 0> pcid; // Process-Context Identifier Bitfield<4> pcd; // Page-Level Cache Disable Bitfield<3> pwt; // Page-Level Writethrough EndBitUnion(CR3) BitUnion64(CR4) Bitfield<18> osxsave; // Enable XSAVE and Proc Extended States + Bitfield<17> pcide; // PCID Enable Bitfield<16> fsgsbase; // Enable RDFSBASE, RDGSBASE, WRFSBASE, // WRGSBASE instructions Bitfield<10> osxmmexcpt; // Operating System Unmasked diff --git a/src/arch/x86/tlb.cc b/src/arch/x86/tlb.cc index 9d2665324f..5ccd3e832d 100644 --- a/src/arch/x86/tlb.cc +++ b/src/arch/x86/tlb.cc @@ -97,8 +97,14 @@ TLB::evictLRU() } TlbEntry * -TLB::insert(Addr vpn, const TlbEntry &entry) +TLB::insert(Addr vpn, const TlbEntry &entry, uint64_t pcid) { + //Adding pcid to the page address so + //that multiple processes using the same + //tlb do not conflict when using the same + //virtual addresses + vpn = concAddrPcid(vpn, pcid); + // If somebody beat us to it, just use that existing entry. TlbEntry *newEntry = trie.lookup(vpn); if (newEntry) { @@ -115,8 +121,14 @@ TLB::insert(Addr vpn, const TlbEntry &entry) *newEntry = entry; newEntry->lruSeq = nextSeq(); newEntry->vaddr = vpn; - newEntry->trieHandle = - trie.insert(vpn, TlbEntryTrie::MaxBits - entry.logBytes, newEntry); + if (FullSystem) { + newEntry->trieHandle = + trie.insert(vpn, TlbEntryTrie::MaxBits-entry.logBytes, newEntry); + } + else { + newEntry->trieHandle = + trie.insert(vpn, TlbEntryTrie::MaxBits, newEntry); + } return newEntry; } @@ -390,7 +402,22 @@ TLB::translate(const RequestPtr &req, if (m5Reg.paging) { DPRINTF(TLB, "Paging enabled.\n"); // The vaddr already has the segment base applied. 
- TlbEntry *entry = lookup(vaddr); + + //Appending the pcid (last 12 bits of CR3) to the + //page aligned vaddr if pcide is set + CR4 cr4 = tc->readMiscRegNoEffect(misc_reg::Cr4); + Addr pageAlignedVaddr = vaddr & (~mask(X86ISA::PageShift)); + CR3 cr3 = tc->readMiscRegNoEffect(misc_reg::Cr3); + uint64_t pcid; + + if (cr4.pcide) + pcid = cr3.pcid; + else + pcid = 0x000; + + pageAlignedVaddr = concAddrPcid(pageAlignedVaddr, pcid); + TlbEntry *entry = lookup(pageAlignedVaddr); + if (mode == BaseMMU::Read) { stats.rdAccesses++; } else { @@ -412,7 +439,7 @@ TLB::translate(const RequestPtr &req, delayedResponse = true; return fault; } - entry = lookup(vaddr); + entry = lookup(pageAlignedVaddr); assert(entry); } else { Process *p = tc->getProcessPtr(); @@ -428,7 +455,8 @@ TLB::translate(const RequestPtr &req, entry = insert(alignedVaddr, TlbEntry( p->pTable->pid(), alignedVaddr, pte->paddr, pte->flags & EmulationPageTable::Uncacheable, - pte->flags & EmulationPageTable::ReadOnly)); + pte->flags & EmulationPageTable::ReadOnly), + pcid); } DPRINTF(TLB, "Miss was serviced.\n"); } diff --git a/src/arch/x86/tlb.hh b/src/arch/x86/tlb.hh index 68fe259c4d..95c34824df 100644 --- a/src/arch/x86/tlb.hh +++ b/src/arch/x86/tlb.hh @@ -76,6 +76,11 @@ namespace X86ISA TlbEntry *lookup(Addr va, bool update_lru = true); void setConfigAddress(uint32_t addr); + //concatenate Page Addr and pcid + inline Addr concAddrPcid(Addr vpn, uint64_t pcid) + { + return (vpn | pcid); + } protected: @@ -156,7 +161,7 @@ namespace X86ISA Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) const override; - TlbEntry *insert(Addr vpn, const TlbEntry &entry); + TlbEntry *insert(Addr vpn, const TlbEntry &entry, uint64_t pcid); // Checkpointing void serialize(CheckpointOut &cp) const override; diff --git a/src/base/Graphics.py b/src/base/Graphics.py index c444bb9f85..b0bec3b137 100644 --- a/src/base/Graphics.py +++ b/src/base/Graphics.py @@ -38,4 +38,5 @@ from m5.params import * 
# Image Formats: # Auto option will let gem5 to choose the image format it prefers. -class ImageFormat(Enum): vals = ['Auto', 'Bitmap', 'Png'] +class ImageFormat(Enum): + vals = ["Auto", "Bitmap", "Png"] diff --git a/src/base/SConscript b/src/base/SConscript index e3091822d6..e751d0b5ef 100644 --- a/src/base/SConscript +++ b/src/base/SConscript @@ -66,8 +66,7 @@ Source('pixel.cc') GTest('pixel.test', 'pixel.test.cc', 'pixel.cc') Source('pollevent.cc') Source('random.cc') -if env['CONF']['TARGET_ISA'] != 'null': - Source('remote_gdb.cc') +Source('remote_gdb.cc') Source('socket.cc') GTest('socket.test', 'socket.test.cc', 'socket.cc') Source('statistics.cc') diff --git a/src/base/bitfield.test.cc b/src/base/bitfield.test.cc index 37830eabcf..1711ea68bf 100644 --- a/src/base/bitfield.test.cc +++ b/src/base/bitfield.test.cc @@ -456,4 +456,3 @@ TEST(BitfieldTest, CountLeadingZero64AllZeros) uint64_t value = 0; EXPECT_EQ(64, clz64(value)); } - diff --git a/src/base/filters/BloomFilters.py b/src/base/filters/BloomFilters.py index 7832f47dee..27630866e0 100644 --- a/src/base/filters/BloomFilters.py +++ b/src/base/filters/BloomFilters.py @@ -28,11 +28,12 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class BloomFilterBase(SimObject): - type = 'BloomFilterBase' + type = "BloomFilterBase" abstract = True cxx_header = "base/filters/base.hh" - cxx_class = 'gem5::bloom_filter::Base' + cxx_class = "gem5::bloom_filter::Base" size = Param.Int(4096, "Number of entries in the filter") @@ -43,19 +44,25 @@ class BloomFilterBase(SimObject): num_bits = Param.Int(1, "Number of bits in a filter entry") threshold = Param.Int(1, "Value at which an entry is considered as set") + class BloomFilterBlock(BloomFilterBase): - type = 'BloomFilterBlock' - cxx_class = 'gem5::bloom_filter::Block' + type = "BloomFilterBlock" + cxx_class = "gem5::bloom_filter::Block" cxx_header = "base/filters/block_bloom_filter.hh" - masks_lsbs = 
VectorParam.Unsigned([Self.offset_bits, - 2 * Self.offset_bits], "Position of the LSB of each mask") - masks_sizes = VectorParam.Unsigned([Self.offset_bits, Self.offset_bits], - "Size, in number of bits, of each mask") + masks_lsbs = VectorParam.Unsigned( + [Self.offset_bits, 2 * Self.offset_bits], + "Position of the LSB of each mask", + ) + masks_sizes = VectorParam.Unsigned( + [Self.offset_bits, Self.offset_bits], + "Size, in number of bits, of each mask", + ) + class BloomFilterMultiBitSel(BloomFilterBase): - type = 'BloomFilterMultiBitSel' - cxx_class = 'gem5::bloom_filter::MultiBitSel' + type = "BloomFilterMultiBitSel" + cxx_class = "gem5::bloom_filter::MultiBitSel" cxx_header = "base/filters/multi_bit_sel_bloom_filter.hh" num_hashes = Param.Int(4, "Number of hashes") @@ -63,19 +70,22 @@ class BloomFilterMultiBitSel(BloomFilterBase): skip_bits = Param.Int(2, "Offset from block number") is_parallel = Param.Bool(False, "Whether hashing is done in parallel") + class BloomFilterBulk(BloomFilterMultiBitSel): - type = 'BloomFilterBulk' - cxx_class = 'gem5::bloom_filter::Bulk' + type = "BloomFilterBulk" + cxx_class = "gem5::bloom_filter::Bulk" cxx_header = "base/filters/bulk_bloom_filter.hh" + class BloomFilterH3(BloomFilterMultiBitSel): - type = 'BloomFilterH3' - cxx_class = 'gem5::bloom_filter::H3' + type = "BloomFilterH3" + cxx_class = "gem5::bloom_filter::H3" cxx_header = "base/filters/h3_bloom_filter.hh" + class BloomFilterMulti(BloomFilterBase): - type = 'BloomFilterMulti' - cxx_class = 'gem5::bloom_filter::Multi' + type = "BloomFilterMulti" + cxx_class = "gem5::bloom_filter::Multi" cxx_header = "base/filters/multi_bloom_filter.hh" # The base filter should not be used, since this filter is the combination @@ -83,17 +93,21 @@ class BloomFilterMulti(BloomFilterBase): size = 1 # By default there are two sub-filters that hash sequential bitfields - filters = VectorParam.BloomFilterBase([ - BloomFilterBlock(size = 4096, masks_lsbs = [6, 12]), - BloomFilterBlock(size 
= 1024, masks_lsbs = [18, 24])], - "Sub-filters to be combined") + filters = VectorParam.BloomFilterBase( + [ + BloomFilterBlock(size=4096, masks_lsbs=[6, 12]), + BloomFilterBlock(size=1024, masks_lsbs=[18, 24]), + ], + "Sub-filters to be combined", + ) # By default match this with the number of sub-filters threshold = 2 + class BloomFilterPerfect(BloomFilterBase): - type = 'BloomFilterPerfect' - cxx_class = 'gem5::bloom_filter::Perfect' + type = "BloomFilterPerfect" + cxx_class = "gem5::bloom_filter::Perfect" cxx_header = "base/filters/perfect_bloom_filter.hh" # The base filter is not needed. Use a dummy value. diff --git a/src/base/filters/multi_bit_sel_bloom_filter.cc b/src/base/filters/multi_bit_sel_bloom_filter.cc index 4bb3d08315..f12d1f766d 100644 --- a/src/base/filters/multi_bit_sel_bloom_filter.cc +++ b/src/base/filters/multi_bit_sel_bloom_filter.cc @@ -100,4 +100,3 @@ MultiBitSel::hash(Addr addr, int hash_number) const } // namespace bloom_filter } // namespace gem5 - diff --git a/src/base/inet.cc b/src/base/inet.cc index ca83fa4a61..ab4bfe460c 100644 --- a/src/base/inet.cc +++ b/src/base/inet.cc @@ -46,6 +46,7 @@ #include #include +#include "base/compiler.hh" #include "base/cprintf.hh" #include "base/logging.hh" #include "base/types.hh" @@ -301,7 +302,7 @@ Ip6Hdr::extensionLength() const const uint8_t *data = bytes() + IP6_HDR_LEN; uint8_t nxt = ip6_nxt; int len = 0; - int all = plen(); + GEM5_VAR_USED int all = plen(); while (ip6Extension(nxt)) { const Ip6Opt *ext = (const Ip6Opt *)data; @@ -324,7 +325,7 @@ Ip6Hdr::getExt(uint8_t ext_type) const const uint8_t *data = bytes() + IP6_HDR_LEN; uint8_t nxt = ip6_nxt; Ip6Opt* opt = NULL; - int all = plen(); + GEM5_VAR_USED int all = plen(); while (ip6Extension(nxt)) { opt = (Ip6Opt *)data; @@ -349,7 +350,7 @@ Ip6Hdr::proto() const { const uint8_t *data = bytes() + IP6_HDR_LEN; uint8_t nxt = ip6_nxt; - int all = plen(); + GEM5_VAR_USED int all = plen(); while (ip6Extension(nxt)) { const Ip6Opt *ext = (const 
Ip6Opt *)data; diff --git a/src/base/remote_gdb.cc b/src/base/remote_gdb.cc index 798d09f535..da3f113f38 100644 --- a/src/base/remote_gdb.cc +++ b/src/base/remote_gdb.cc @@ -129,7 +129,9 @@ #include "base/remote_gdb.hh" +#include #include +#include #include #include @@ -190,7 +192,7 @@ class HardBreakpoint : public PCEvent DPRINTF(GDBMisc, "handling hardware breakpoint at %#x\n", pc()); if (tc == gdb->tc) - gdb->trap(tc->contextId(), SIGTRAP); + gdb->trap(tc->contextId(), SIGTRAP,""); } }; @@ -230,6 +232,39 @@ digit2i(char c) return -1; } +//convert a hex to a char +char +hex2c(char c0,char c1) +{ + char temp[3] = {c0,c1,'\0'}; + return std::stoi(temp,0,16); +} + +//this function will be used in a future patch +//convert a encoded string to a string +[[maybe_unused]] std::string +hexS2string(std::string hex_in) +{ + std::string out=""; + for (unsigned int i = 0; i + 1 < hex_in.length();i += 2){ + out.push_back(hex2c(hex_in[i],hex_in[i+1])); + } + return out; +} + +//convert a string to a hex encoded string +std::string +string2hexS(std::string in) +{ + std::string out = ""; + for (auto ch : in){ + char temp[3] = " "; + std::snprintf(temp,3,"%02hhx",ch); + out.append(temp); + } + return out; +} + // Convert the low 4 bits of an integer into an hex digit. char i2digit(int n) @@ -514,12 +549,18 @@ BaseRemoteGDB::selectThreadContext(ContextID id) // makes sense to use POSIX errno values, because that is what the // gdb/remote.c functions want to return. void -BaseRemoteGDB::trap(ContextID id, int signum) +BaseRemoteGDB::trap(ContextID id, int signum,const std::string& stopReason) { if (!attached) return; if (tc->contextId() != id) { + + //prevent thread switch when single stepping + if (singleStepEvent.scheduled()){ + return; + } + DPRINTF(GDBMisc, "Finishing thread switch"); if (!selectThreadContext(id)) return; } @@ -534,12 +575,22 @@ BaseRemoteGDB::trap(ContextID id, int signum) send("OK"); } else { // Tell remote host that an exception has occurred. 
- send("S%02x", signum); + sendTPacket(signum,id,stopReason); } processCommands(signum); } +bool +BaseRemoteGDB::sendMessage(std::string message) +{ + if (!attached) + return false; + DPRINTF(GDBMisc, "passing message %s\n", message); + sendOPacket(message); + return true; +} + void BaseRemoteGDB::incomingConnection(int revent) { @@ -562,9 +613,7 @@ BaseRemoteGDB::incomingData(int revent) } if (revent & POLLIN) { - trapEvent.type(SIGILL); - trapEvent.id(tc->contextId()); - scheduleInstCommitEvent(&trapEvent, 0); + scheduleTrapEvent(tc->contextId(),SIGILL,0,""); } else if (revent & POLLNVAL) { descheduleInstCommitEvent(&trapEvent); scheduleInstCommitEvent(&disconnectEvent, 0); @@ -575,12 +624,54 @@ uint8_t BaseRemoteGDB::getbyte() { uint8_t b; - if (::read(fd, &b, sizeof(b)) == sizeof(b)) - return b; - - throw BadClient("Couldn't read data from debugger."); + while (!try_getbyte(&b,-1));//no timeout + return b; } +bool +BaseRemoteGDB::try_getbyte(uint8_t* c,int timeout_ms) +{ + if (!c) + panic("try_getbyte called with a null pointer as c"); + int res,retval; + //Allow read to fail if it was interrupted by a signal (EINTR). + errno = 0; + //preparing fd_sets + fd_set rfds; + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + //setting up a timeout if timeout_ms is positive + struct timeval tv;struct timeval* tv_ptr; + if (timeout_ms >= 0){ + tv.tv_sec = timeout_ms/1000; + tv.tv_usec = timeout_ms%1000; + tv_ptr = &tv; + }else{ + tv_ptr = NULL; + } + //Using select to check if the FD is ready to be read. 
+ while(true){ + do { + errno = 0; + retval = ::select(fd + 1, &rfds, NULL, NULL, tv_ptr); + if (retval < 0 && errno != EINTR){//error + DPRINTF(GDBMisc,"getbyte failed errno=%i retval=%i\n", + errno,retval); + throw BadClient("Couldn't read data from debugger."); + } + //a EINTR error means that the select call was interrupted + //by another signal + }while (errno == EINTR); + if (retval == 0) + return false;//timed out + //reading (retval>0) + res = ::read(fd, c, sizeof(*c)); + if (res == sizeof(*c)) + return true;//read successfully + //read failed (?) retrying select + } +} void BaseRemoteGDB::putbyte(uint8_t b) { @@ -650,7 +741,8 @@ BaseRemoteGDB::send(const char *bp) uint8_t csum, c; DPRINTF(GDBSend, "send: %s\n", bp); - + //removing GDBBadP that could be waiting in the buffer + while (try_getbyte(&c,0)); do { p = bp; // Start sending a packet @@ -668,6 +760,8 @@ BaseRemoteGDB::send(const char *bp) // Try transmitting over and over again until the other end doesn't // send an error back. c = getbyte(); + if ((c & 0x7f) == GDBBadP) + DPRINTF(GDBSend, "PacketError\n"); } while ((c & 0x7f) == GDBBadP); } @@ -720,14 +814,38 @@ BaseRemoteGDB::processCommands(int signum) } } +bool +BaseRemoteGDB::readBlob(Addr vaddr, size_t size, char *data) +{ + TranslatingPortProxy fs_proxy(tc); + SETranslatingPortProxy se_proxy(tc); + PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy; + + virt_proxy.readBlob(vaddr, data, size); + return true; +} + +bool +BaseRemoteGDB::writeBlob(Addr vaddr, size_t size, const char *data) +{ + TranslatingPortProxy fs_proxy(tc); + SETranslatingPortProxy se_proxy(tc); + PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy; + + virt_proxy.writeBlob(vaddr, data, size); + return true; +} + // Read bytes from kernel address space for debugger. bool BaseRemoteGDB::read(Addr vaddr, size_t size, char *data) { DPRINTF(GDBRead, "read: addr=%#x, size=%d", vaddr, size); - (FullSystem ? TranslatingPortProxy(tc) : SETranslatingPortProxy(tc)). 
- readBlob(vaddr, data, size); + bool res = readBlob(vaddr, size, data); + + if (!res) + return false; #if TRACING_ON if (debug::GDBRead) { @@ -756,10 +874,7 @@ BaseRemoteGDB::write(Addr vaddr, size_t size, const char *data) } else DPRINTFNR("\n"); } - (FullSystem ? TranslatingPortProxy(tc) : SETranslatingPortProxy(tc)). - writeBlob(vaddr, data, size); - - return true; + return writeBlob(vaddr, size, data); } void @@ -836,9 +951,40 @@ BaseRemoteGDB::removeHardBreak(Addr addr, size_t kind) } void -BaseRemoteGDB::scheduleInstCommitEvent(Event *ev, int delta) +BaseRemoteGDB::sendTPacket(int errnum, ContextID id, + const std::string& stopReason) { - if (delta == 0 && tc->status() != ThreadContext::Active) { + if (!stopReason.empty()){ + send("T%02xcore:%x;thread:%x;%s;",errnum,id + 1,id + 1,stopReason); + }else{ + send("T%02xcore:%x;thread:%x;",errnum,id + 1,id + 1); + } +} +void +BaseRemoteGDB::sendSPacket(int errnum){ + send("S%02x",errnum); +} +void +BaseRemoteGDB::sendOPacket(const std::string message){ + send("O" + string2hexS(message)); +} + +void +BaseRemoteGDB::scheduleTrapEvent(ContextID id,int type,int delta, + std::string stopReason){ + ThreadContext* _tc = threads[id]; + panic_if(_tc == nullptr, "Unknown context id :%i",id); + trapEvent.id(id); + trapEvent.type(type); + trapEvent.stopReason(stopReason); + if (!trapEvent.scheduled()) + scheduleInstCommitEvent(&trapEvent,delta,_tc); +} + +void +BaseRemoteGDB::scheduleInstCommitEvent(Event *ev, int delta,ThreadContext* _tc) +{ + if (delta == 0 && _tc->status() != ThreadContext::Active) { // If delta is zero, we're just trying to wait for an instruction // boundary. If the CPU is not active, assume we're already at a // boundary without waiting for the CPU to eventually wake up. @@ -846,7 +992,7 @@ BaseRemoteGDB::scheduleInstCommitEvent(Event *ev, int delta) } else { // Here "ticks" aren't simulator ticks which measure time, they're // instructions committed by the CPU. 
- tc->scheduleInstCountEvent(ev, tc->getCurrentInstCount() + delta); + _tc->scheduleInstCountEvent(ev, _tc->getCurrentInstCount() + delta); } } @@ -903,7 +1049,9 @@ std::map BaseRemoteGDB::commandMap = { // signal and step { 'S', { "KGDB_ASYNC_STEP", &BaseRemoteGDB::cmdAsyncStep } }, // find out if the thread is alive - { 'T', { "KGDB_THREAD_ALIVE", &BaseRemoteGDB::cmdUnsupported } }, + { 'T', { "KGDB_THREAD_ALIVE", &BaseRemoteGDB::cmdIsThreadAlive } }, + //multi letter command + { 'v', { "KGDB_MULTI_LETTER", &BaseRemoteGDB::cmdMultiLetter } }, // target exited { 'W', { "KGDB_TARGET_EXIT", &BaseRemoteGDB::cmdUnsupported } }, // write memory @@ -934,7 +1082,7 @@ BaseRemoteGDB::cmdUnsupported(GdbCommand::Context &ctx) bool BaseRemoteGDB::cmdSignal(GdbCommand::Context &ctx) { - send("S%02x", ctx.type); + sendTPacket(ctx.type,tc->contextId(),""); return true; } @@ -1023,8 +1171,7 @@ BaseRemoteGDB::cmdSetThread(GdbCommand::Context &ctx) throw CmdError("E04"); // Line up on an instruction boundary in the new thread. threadSwitching = true; - trapEvent.id(tid); - scheduleInstCommitEvent(&trapEvent, 0); + scheduleTrapEvent(tid,0,0,""); return false; } } else { @@ -1035,6 +1182,22 @@ BaseRemoteGDB::cmdSetThread(GdbCommand::Context &ctx) return true; } +bool +BaseRemoteGDB::cmdIsThreadAlive(GdbCommand::Context &ctx) +{ + const char *p = ctx.data; + int tid = 0; + bool all, any; + if (!parseThreadId(&p, all, any, tid)) + throw CmdError("E01"); + if (all) + throw CmdError("E03"); + if (threads.find(tid) == threads.end()) + throw CmdError("E04"); + send("OK"); + return true; +} + bool BaseRemoteGDB::cmdMemR(GdbCommand::Context &ctx) { @@ -1083,6 +1246,67 @@ BaseRemoteGDB::cmdMemW(GdbCommand::Context &ctx) return true; } +bool +BaseRemoteGDB::cmdMultiLetter(GdbCommand::Context &ctx) +{ + GdbMultiLetterCommand::Context new_ctx; + new_ctx.type = ctx.type; + strtok(ctx.data,";?"); + char* sep = strtok(NULL,";:?"); + + int txt_len = (sep != NULL) ? 
(sep - ctx.data) : strlen(ctx.data); + DPRINTF(GDBMisc, "Multi-letter: %s , len=%i\n", ctx.data,txt_len); + new_ctx.cmdTxt = std::string(ctx.data,txt_len); + new_ctx.data = sep; + new_ctx.len = ctx.len - txt_len; + try { + auto cmd_it = multiLetterMap.find(new_ctx.cmdTxt); + if (cmd_it == multiLetterMap.end()) { + DPRINTF(GDBMisc, "Unknown command: %s\n", new_ctx.cmdTxt); + throw Unsupported(); + } + new_ctx.cmd = &(cmd_it->second); + + return (this->*(new_ctx.cmd->func))(new_ctx); + //catching errors: we don't need to catch anything else + //as it will be handled by processCommands + } catch (CmdError &e) { + send(e.error); + } + return false; +} + +std::map +BaseRemoteGDB::multiLetterMap = { + { "MustReplyEmpty", { "KGDB_REPLY_EMPTY", &BaseRemoteGDB::cmdReplyEmpty}}, + { "Kill", { "KGDB_VKILL", &BaseRemoteGDB::cmdVKill}}, +}; + + +bool +BaseRemoteGDB::cmdReplyEmpty(GdbMultiLetterCommand::Context &ctx) +{ + send(""); + return true; +} + +bool +BaseRemoteGDB::cmdVKill(GdbMultiLetterCommand::Context &ctx) +{ + warn("GDB command for kill received detaching instead"); + detach(); + return false; +} + +bool +BaseRemoteGDB::cmdMultiUnsupported(GdbMultiLetterCommand::Context &ctx) +{ + DPRINTF(GDBMisc, "Unsupported Multi name command : %s\n", + ctx.cmd->name); + DDUMP(GDBMisc, ctx.data, ctx.len); + throw Unsupported(); +} + namespace { std::pair @@ -1101,8 +1325,10 @@ splitAt(std::string str, const char * const delim) std::map BaseRemoteGDB::queryMap = { { "C", { &BaseRemoteGDB::queryC } }, + { "Attached", { &BaseRemoteGDB::queryAttached} }, { "Supported", { &BaseRemoteGDB::querySupported, ";" } }, { "Xfer", { &BaseRemoteGDB::queryXfer } }, + { "Symbol", { &BaseRemoteGDB::querySymbol ,":" } }, { "fThreadInfo", { &BaseRemoteGDB::queryFThreadInfo } }, { "sThreadInfo", { &BaseRemoteGDB::querySThreadInfo } }, }; @@ -1166,6 +1392,25 @@ BaseRemoteGDB::queryXfer(QuerySetCommand::Context &ctx) encodeXferResponse(content, encoded, offset, length); send(encoded); } +void 
+BaseRemoteGDB::querySymbol(QuerySetCommand::Context &ctx) +{ + //The target does not need to look up any (more) symbols. + send("OK"); +} + +void +BaseRemoteGDB::queryAttached(QuerySetCommand::Context &ctx) +{ + std::string pid=""; + if (!ctx.args.empty() && !ctx.args[0].empty()){ + pid=ctx.args[0]; + } + DPRINTF(GDBMisc, "QAttached : pid=%s\n",pid); + //The remote server is attached to an existing process. + send("1"); +} + void BaseRemoteGDB::queryFThreadInfo(QuerySetCommand::Context &ctx) diff --git a/src/base/remote_gdb.hh b/src/base/remote_gdb.hh index b297e0877f..ad64bc721c 100644 --- a/src/base/remote_gdb.hh +++ b/src/base/remote_gdb.hh @@ -171,15 +171,17 @@ class BaseRemoteGDB void replaceThreadContext(ThreadContext *_tc); bool selectThreadContext(ContextID id); - void trap(ContextID id, int signum); - + void trap(ContextID id, int signum,const std::string& stopReason=""); + bool sendMessage(std::string message); + //schedule a trap event with these properties + void scheduleTrapEvent(ContextID id,int type, int delta, + std::string stopReason); /** @} */ // end of api_remote_gdb template static BaseRemoteGDB * - build(Args... args) + build(int port, Args... args) { - int port = getRemoteGDBPort(); if (port) return new GDBStub(args..., port); else @@ -237,6 +239,7 @@ class BaseRemoteGDB // Transfer data to/from GDB. uint8_t getbyte(); + bool try_getbyte(uint8_t* c,int timeout=-1);//return true if successful void putbyte(uint8_t b); void recv(std::vector &bp); @@ -279,6 +282,7 @@ class BaseRemoteGDB protected: int _type; ContextID _id; + std::string _stopReason; BaseRemoteGDB *gdb; public: @@ -286,16 +290,18 @@ class BaseRemoteGDB {} void type(int t) { _type = t; } + void stopReason(std::string s) {_stopReason = s; } void id(ContextID id) { _id = id; } - void process() { gdb->trap(_id, _type); } + void process() { gdb->trap(_id, _type,_stopReason); } } trapEvent; /* * The interface to the simulated system. */ - // Machine memory. 
- bool read(Addr addr, size_t size, char *data); - bool write(Addr addr, size_t size, const char *data); + virtual bool readBlob(Addr vaddr, size_t size, char *data); + virtual bool writeBlob(Addr vaddr, size_t size, const char *data); + bool read(Addr vaddr, size_t size, char *data); + bool write(Addr vaddr, size_t size, const char *data); template T read(Addr addr); template void write(Addr addr, T data); @@ -308,7 +314,10 @@ class BaseRemoteGDB void setSingleStep(); /// Schedule an event which will be triggered "delta" instructions later. - void scheduleInstCommitEvent(Event *ev, int delta); + void scheduleInstCommitEvent(Event *ev, int delta,ThreadContext* _tc); + void scheduleInstCommitEvent(Event *ev, int delta){ + scheduleInstCommitEvent(ev, delta,tc); + }; /// Deschedule an instruction count based event. void descheduleInstCommitEvent(Event *ev); @@ -318,6 +327,10 @@ class BaseRemoteGDB void insertHardBreak(Addr addr, size_t kind); void removeHardBreak(Addr addr, size_t kind); + void sendTPacket(int errnum, ContextID id,const std::string& stopReason); + void sendSPacket(int errnum); + //The OPacket allow to send string to be displayed by the remote GDB + void sendOPacket(const std::string message); /* * GDB commands. 
*/ @@ -343,6 +356,30 @@ class BaseRemoteGDB static std::map commandMap; + struct GdbMultiLetterCommand + { + public: + struct Context + { + const GdbMultiLetterCommand *cmd; + std::string cmdTxt; + int type; + char *data; + int len; + }; + + typedef bool (BaseRemoteGDB::*Func)(Context &ctx); + + const char * const name; + const Func func; + + GdbMultiLetterCommand(const char *_name, Func _func) : + name(_name), func(_func) {} + }; + + + static std::map multiLetterMap; + bool cmdUnsupported(GdbCommand::Context &ctx); bool cmdSignal(GdbCommand::Context &ctx); @@ -352,6 +389,7 @@ class BaseRemoteGDB bool cmdRegR(GdbCommand::Context &ctx); bool cmdRegW(GdbCommand::Context &ctx); bool cmdSetThread(GdbCommand::Context &ctx); + bool cmdIsThreadAlive(GdbCommand::Context &ctx); bool cmdMemR(GdbCommand::Context &ctx); bool cmdMemW(GdbCommand::Context &ctx); bool cmdQueryVar(GdbCommand::Context &ctx); @@ -360,6 +398,13 @@ class BaseRemoteGDB bool cmdClrHwBkpt(GdbCommand::Context &ctx); bool cmdSetHwBkpt(GdbCommand::Context &ctx); bool cmdDumpPageTable(GdbCommand::Context &ctx); + bool cmdMultiLetter(GdbCommand::Context &ctx); + + //Multi letter command + bool cmdMultiUnsupported(GdbMultiLetterCommand::Context &ctx); + + bool cmdReplyEmpty(GdbMultiLetterCommand::Context &ctx); + bool cmdVKill(GdbMultiLetterCommand::Context &ctx); struct QuerySetCommand { @@ -386,6 +431,8 @@ class BaseRemoteGDB void queryC(QuerySetCommand::Context &ctx); void querySupported(QuerySetCommand::Context &ctx); void queryXfer(QuerySetCommand::Context &ctx); + void querySymbol(QuerySetCommand::Context &ctx); + void queryAttached(QuerySetCommand::Context &ctx); size_t threadInfoIdx = 0; void queryFThreadInfo(QuerySetCommand::Context &ctx); diff --git a/src/base/trace.cc b/src/base/trace.cc index b7a3a44ca0..52faa8d725 100644 --- a/src/base/trace.cc +++ b/src/base/trace.cc @@ -56,7 +56,7 @@ name() namespace gem5 { -namespace Trace +namespace trace { // This variable holds the output logger for debug 
information. Other @@ -170,5 +170,5 @@ OstreamLogger::logMessage(Tick when, const std::string &name, } } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/base/trace.hh b/src/base/trace.hh index 05ad70e08e..a7f52ebe23 100644 --- a/src/base/trace.hh +++ b/src/base/trace.hh @@ -51,7 +51,7 @@ const std::string &name(); namespace gem5 { -namespace Trace { +namespace trace { /** Debug logging base class. Handles formatting and outputting * time/name/message messages */ @@ -138,7 +138,7 @@ void setDebugLogger(Logger *logger); void enable(); void disable(); -} // namespace Trace +} // namespace trace // This silly little class allows us to wrap a string in a functor // object so that we can give a name() that DPRINTF will like @@ -179,48 +179,48 @@ struct StringWrap #define DDUMP(x, data, count) do { \ if (GEM5_UNLIKELY(TRACING_ON && ::gem5::debug::x)) \ - ::gem5::Trace::getDebugLogger()->dump( \ + ::gem5::trace::getDebugLogger()->dump( \ ::gem5::curTick(), name(), data, count, #x); \ } while (0) #define DPRINTF(x, ...) do { \ if (GEM5_UNLIKELY(TRACING_ON && ::gem5::debug::x)) { \ - ::gem5::Trace::getDebugLogger()->dprintf_flag( \ + ::gem5::trace::getDebugLogger()->dprintf_flag( \ ::gem5::curTick(), name(), #x, __VA_ARGS__); \ } \ } while (0) #define DPRINTFS(x, s, ...) do { \ if (GEM5_UNLIKELY(TRACING_ON && ::gem5::debug::x)) { \ - ::gem5::Trace::getDebugLogger()->dprintf_flag( \ + ::gem5::trace::getDebugLogger()->dprintf_flag( \ ::gem5::curTick(), (s)->name(), #x, __VA_ARGS__); \ } \ } while (0) #define DPRINTFR(x, ...) do { \ if (GEM5_UNLIKELY(TRACING_ON && ::gem5::debug::x)) { \ - ::gem5::Trace::getDebugLogger()->dprintf_flag( \ + ::gem5::trace::getDebugLogger()->dprintf_flag( \ (::gem5::Tick)-1, std::string(), #x, __VA_ARGS__); \ } \ } while (0) #define DPRINTFV(x, ...) 
do { \ if (GEM5_UNLIKELY(TRACING_ON && (x))) { \ - ::gem5::Trace::getDebugLogger()->dprintf_flag( \ + ::gem5::trace::getDebugLogger()->dprintf_flag( \ ::gem5::curTick(), name(), x.name(), __VA_ARGS__); \ } \ } while (0) #define DPRINTFN(...) do { \ if (TRACING_ON) { \ - ::gem5::Trace::getDebugLogger()->dprintf( \ + ::gem5::trace::getDebugLogger()->dprintf( \ ::gem5::curTick(), name(), __VA_ARGS__); \ } \ } while (0) #define DPRINTFNR(...) do { \ if (TRACING_ON) { \ - ::gem5::Trace::getDebugLogger()->dprintf( \ + ::gem5::trace::getDebugLogger()->dprintf( \ (::gem5::Tick)-1, "", __VA_ARGS__); \ } \ } while (0) @@ -229,7 +229,7 @@ struct StringWrap GEM5_DEPRECATED_MACRO_STMT(DPRINTF_UNCONDITIONAL, \ do { \ if (TRACING_ON) { \ - ::gem5::Trace::getDebugLogger()->dprintf_flag( \ + ::gem5::trace::getDebugLogger()->dprintf_flag( \ ::gem5::curTick(), name(), #x, __VA_ARGS__); \ } \ } while (0), \ diff --git a/src/base/trace.test.cc b/src/base/trace.test.cc index 526e8ddedd..c53dcd7324 100644 --- a/src/base/trace.test.cc +++ b/src/base/trace.test.cc @@ -43,7 +43,7 @@ using namespace gem5; // that getDebugLogger() returns a cerr-based logger, and all tests after // that test should assume that this logger is returned std::stringstream ss; -Trace::OstreamLogger main_logger(ss); +trace::OstreamLogger main_logger(ss); // Instantiate the mock class to have a valid curTick of 0 GTestTickHandler tickHandler; @@ -70,7 +70,7 @@ getString(std::ostream &os) /** @return The logger's ostream as a std::string. 
*/ std::string -getString(Trace::Logger *logger) +getString(trace::Logger *logger) { return getString(logger->getOstream()); } @@ -79,7 +79,7 @@ getString(Trace::Logger *logger) TEST(TraceTest, LogSimpleMessage) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.logMessage(Tick(100), "", "", "Test message"); ASSERT_EQ(getString(&logger), " 100: Test message"); @@ -89,7 +89,7 @@ TEST(TraceTest, LogSimpleMessage) TEST(TraceTest, LogMessageName) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.logMessage(Tick(100), "Foo", "", "Test message"); ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); @@ -99,7 +99,7 @@ TEST(TraceTest, LogMessageName) TEST(TraceTest, LogMessageMaxTick) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.logMessage(MaxTick, "Foo", "", "Test message"); ASSERT_EQ(getString(&logger), "Foo: Test message"); @@ -109,7 +109,7 @@ TEST(TraceTest, LogMessageMaxTick) TEST(TraceTest, LogMessageFlagDisabled) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.logMessage(Tick(100), "Foo", "Bar", "Test message"); ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); @@ -122,12 +122,12 @@ TEST(TraceTest, LogMessageFlagDisabled) TEST(TraceTest, LogMessageTickDisabledAndEnableDisable) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.logMessage(Tick(100), "Foo", "", "Test message"); ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("FmtTicksOff", true)); logger.logMessage(Tick(200), "Foo", "", "Test message"); @@ -138,7 +138,7 @@ TEST(TraceTest, LogMessageTickDisabledAndEnableDisable) #endif debug::changeFlag("FmtTicksOff", false); - Trace::disable(); + trace::disable(); logger.logMessage(Tick(300), "Foo", "", "Test message"); 
ASSERT_EQ(getString(&logger), " 300: Foo: Test message"); @@ -151,8 +151,8 @@ TEST(TraceTest, LogMessageTickDisabledAndEnableDisable) TEST(TraceTest, LogMessageFlagEnabled) { std::stringstream ss; - Trace::OstreamLogger logger(ss); - Trace::enable(); + trace::OstreamLogger logger(ss); + trace::enable(); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); logger.logMessage(Tick(100), "Foo", "Bar", "Test message"); @@ -163,14 +163,14 @@ TEST(TraceTest, LogMessageFlagEnabled) #endif debug::changeFlag("FmtFlag", false); - Trace::disable(); + trace::disable(); } /** Test that log messages are not displayed for ignored objects (single). */ TEST(TraceTest, LogMessageIgnoreOne) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); ObjectMatch ignore_foo("Foo"); ObjectMatch ignore_bar("Bar"); @@ -194,7 +194,7 @@ TEST(TraceTest, LogMessageIgnoreOne) TEST(TraceTest, LogMessageIgnoreMultiple) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); ObjectMatch ignore_foo("Foo"); ObjectMatch ignore_bar("Bar"); @@ -225,7 +225,7 @@ TEST(TraceTest, LogMessageIgnoreMultiple) TEST(TraceTest, DumpIgnored) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); ObjectMatch ignore_foo("Foo"); logger.setIgnore(ignore_foo); @@ -244,9 +244,9 @@ TEST(TraceTest, DumpIgnored) TEST(TraceTest, DumpSimple) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); std::string message = "Test message"; logger.dump(Tick(100), "Foo", message.c_str(), message.size(), "Bar"); @@ -272,7 +272,7 @@ TEST(TraceTest, DumpSimple) " Test message\n"); #endif debug::changeFlag("FmtFlag", false); - Trace::disable(); + trace::disable(); } /** @@ -282,7 +282,7 @@ TEST(TraceTest, DumpSimple) TEST(TraceTest, DumpMultiLine) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + 
trace::OstreamLogger logger(ss); std::string message = "This is a very long line that will span over multiple lines"; @@ -304,7 +304,7 @@ TEST(TraceTest, DumpMultiLine) */ TEST(TraceTest, DISABLED_GetNullLogger) { - Trace::Logger *logger = Trace::getDebugLogger(); + trace::Logger *logger = trace::getDebugLogger(); ASSERT_FALSE(logger == nullptr); gtestLogOutput.str(""); @@ -318,27 +318,27 @@ TEST(TraceTest, SetGetLogger) // NOTE: From now on getDebugLogger will use main_logger to avoid // having to check cerr. This assumes that tests are run in the order // they appear from line 1 to the last line of this file. - Trace::setDebugLogger(&main_logger); + trace::setDebugLogger(&main_logger); // Set message with local variable, and retrieve the string with // the debug-logger getter main_logger.logMessage(Tick(100), "Foo", "", "Test message"); - auto logger_from_getter = Trace::getDebugLogger(); + auto logger_from_getter = trace::getDebugLogger(); ASSERT_EQ(getString(logger_from_getter), " 100: Foo: Test message"); } /** Test that output() gets the ostream of the current debug logger. */ TEST(TraceTest, Output) { - Trace::getDebugLogger()->logMessage(Tick(100), "Foo", "", "Test message"); - ASSERT_EQ(getString(Trace::output()), " 100: Foo: Test message"); + trace::getDebugLogger()->logMessage(Tick(100), "Foo", "", "Test message"); + ASSERT_EQ(getString(trace::output()), " 100: Foo: Test message"); } /** Test dprintf_flag with ignored name. 
*/ TEST(TraceTest, DprintfFlagIgnore) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); ObjectMatch ignore_foo("Foo"); logger.setIgnore(ignore_foo); @@ -350,7 +350,7 @@ TEST(TraceTest, DprintfFlagIgnore) TEST(TraceTest, DprintfFlagZeroArgs) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.dprintf_flag(Tick(100), "Foo", "", "Test message"); ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); @@ -360,7 +360,7 @@ TEST(TraceTest, DprintfFlagZeroArgs) TEST(TraceTest, DprintfFlagOneArg) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.dprintf_flag(Tick(100), "Foo", "", "Test %s", "message"); ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); @@ -370,7 +370,7 @@ TEST(TraceTest, DprintfFlagOneArg) TEST(TraceTest, DprintfFlagMultipleArgs) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); logger.dprintf_flag(Tick(100), "Foo", "", "Test %s %c %d %x", "message", 'A', 217, 0x30); @@ -381,9 +381,9 @@ TEST(TraceTest, DprintfFlagMultipleArgs) TEST(TraceTest, DprintfFlagEnabled) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); logger.dprintf_flag(Tick(100), "Foo", "Bar", "Test %s", "message"); #if TRACING_ON @@ -392,14 +392,14 @@ TEST(TraceTest, DprintfFlagEnabled) ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); #endif debug::changeFlag("FmtFlag", false); - Trace::disable(); + trace::disable(); } /** Test dprintf with ignored name. 
*/ TEST(TraceTest, DprintfIgnore) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); ObjectMatch ignore_foo("Foo"); logger.setIgnore(ignore_foo); @@ -411,22 +411,22 @@ TEST(TraceTest, DprintfIgnore) TEST(TraceTest, DprintfEnabled) { std::stringstream ss; - Trace::OstreamLogger logger(ss); + trace::OstreamLogger logger(ss); - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); logger.dprintf(Tick(100), "Foo", "Test %s", "message"); ASSERT_EQ(getString(&logger), " 100: Foo: Test message"); debug::changeFlag("FmtFlag", false); - Trace::disable(); + trace::disable(); } /** Test that dprintf is just a flagless wrapper for dprintf_flag. */ TEST(TraceTest, DprintfWrapper) { std::stringstream ss, ss_flag; - Trace::OstreamLogger logger(ss); - Trace::OstreamLogger logger_flag(ss_flag); + trace::OstreamLogger logger(ss); + trace::OstreamLogger logger_flag(ss_flag); logger.dprintf(Tick(100), "Foo", "Test %s %c %d %x", "message", 'A', 217, 0x30); @@ -442,23 +442,23 @@ TEST(TraceTest, MacroDDUMP) std::string message = "Test message"; // Flag enabled - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", true)); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); DDUMP(TraceTestDebugFlag, message.c_str(), message.size()); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), + ASSERT_EQ(getString(trace::output()), " 0: TraceTestDebugFlag: Foo: 00000000 " "54 65 73 74 20 6d 65 73 73 61 67 65 Test message\n"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif // Flag disabled - Trace::disable(); + trace::disable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", false)); DDUMP(TraceTestDebugFlag, message.c_str(), message.size()); - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); } /** Test DPRINTF with tracing on. 
*/ @@ -467,22 +467,22 @@ TEST(TraceTest, MacroDPRINTF) StringWrap name("Foo"); // Flag enabled - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", true)); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); DPRINTF(TraceTestDebugFlag, "Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), + ASSERT_EQ(getString(trace::output()), " 0: TraceTestDebugFlag: Foo: Test message"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif // Flag disabled - Trace::disable(); + trace::disable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", false)); DPRINTF(TraceTestDebugFlag, "Test message"); - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); } /** Test DPRINTFS with tracing on. */ @@ -494,21 +494,21 @@ TEST(TraceTest, MacroDPRINTFS) #endif // Flag enabled - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", true)); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); #if TRACING_ON DPRINTFS(TraceTestDebugFlag, named_ptr, "Test message"); - ASSERT_EQ(getString(Trace::output()), + ASSERT_EQ(getString(trace::output()), " 0: TraceTestDebugFlag: Foo: Test message"); #endif // Flag disabled - Trace::disable(); + trace::disable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", false)); #if TRACING_ON DPRINTFS(TraceTestDebugFlag, named_ptr, "Test message"); - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif } @@ -516,21 +516,21 @@ TEST(TraceTest, MacroDPRINTFS) TEST(TraceTest, MacroDPRINTFR) { // Flag enabled - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", true)); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); DPRINTFR(TraceTestDebugFlag, "Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), "TraceTestDebugFlag: Test message"); + ASSERT_EQ(getString(trace::output()), "TraceTestDebugFlag: Test 
message"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif // Flag disabled - Trace::disable(); + trace::disable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", false)); DPRINTFR(TraceTestDebugFlag, "Test message"); - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); } /** Test DPRINTFN with tracing on. */ @@ -539,9 +539,9 @@ TEST(TraceTest, MacroDPRINTFN) StringWrap name("Foo"); DPRINTFN("Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), " 0: Foo: Test message"); + ASSERT_EQ(getString(trace::output()), " 0: Foo: Test message"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif } @@ -550,9 +550,9 @@ TEST(TraceTest, MacroDPRINTFNR) { DPRINTFNR("Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), "Test message"); + ASSERT_EQ(getString(trace::output()), "Test message"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif } @@ -562,25 +562,25 @@ TEST(TraceTest, MacroDPRINTF_UNCONDITIONAL) StringWrap name("Foo"); // Flag enabled - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", true)); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); DPRINTF_UNCONDITIONAL(TraceTestDebugFlag, "Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), + ASSERT_EQ(getString(trace::output()), " 0: TraceTestDebugFlag: Foo: Test message"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif // Flag disabled - Trace::disable(); + trace::disable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", false)); DPRINTF_UNCONDITIONAL(TraceTestDebugFlag, "Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), " 0: Foo: Test message"); + ASSERT_EQ(getString(trace::output()), " 0: Foo: Test message"); #else - 
ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif } @@ -591,20 +591,20 @@ TEST(TraceTest, MacroDPRINTF_UNCONDITIONAL) TEST(TraceTest, GlobalName) { // Flag enabled - Trace::enable(); + trace::enable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", true)); EXPECT_TRUE(debug::changeFlag("FmtFlag", true)); DPRINTF(TraceTestDebugFlag, "Test message"); #if TRACING_ON - ASSERT_EQ(getString(Trace::output()), + ASSERT_EQ(getString(trace::output()), " 0: TraceTestDebugFlag: global: Test message"); #else - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); #endif // Flag disabled - Trace::disable(); + trace::disable(); EXPECT_TRUE(debug::changeFlag("TraceTestDebugFlag", false)); DPRINTF(TraceTestDebugFlag, "Test message"); - ASSERT_EQ(getString(Trace::output()), ""); + ASSERT_EQ(getString(trace::output()), ""); } diff --git a/src/base/version.cc b/src/base/version.cc index fa29d1bee3..050aea091f 100644 --- a/src/base/version.cc +++ b/src/base/version.cc @@ -32,6 +32,6 @@ namespace gem5 /** * @ingroup api_base_utils */ -const char *gem5Version = "22.0.0.2"; +const char *gem5Version = "22.1.0.0"; } // namespace gem5 diff --git a/src/base/vnc/Vnc.py b/src/base/vnc/Vnc.py index 8421d34bdc..c0d621283c 100644 --- a/src/base/vnc/Vnc.py +++ b/src/base/vnc/Vnc.py @@ -39,17 +39,16 @@ from m5.objects.Graphics import * class VncInput(SimObject): - type = 'VncInput' + type = "VncInput" cxx_header = "base/vnc/vncinput.hh" - cxx_class = 'gem5::VncInput' + cxx_class = "gem5::VncInput" frame_capture = Param.Bool(False, "capture changed frames to files") - img_format = Param.ImageFormat( - "Auto", "Format of the dumped Framebuffer" - ) + img_format = Param.ImageFormat("Auto", "Format of the dumped Framebuffer") + class VncServer(VncInput): - type = 'VncServer' + type = "VncServer" cxx_header = "base/vnc/vncserver.hh" - cxx_class = 'gem5::VncServer' + cxx_class = "gem5::VncServer" port = Param.TcpPort(5900, 
"listen port") number = Param.Int(0, "vnc client number") diff --git a/src/cpu/BaseCPU.py b/src/cpu/BaseCPU.py index bf4d43c359..00374d8e54 100644 --- a/src/cpu/BaseCPU.py +++ b/src/cpu/BaseCPU.py @@ -56,11 +56,12 @@ from m5.objects.Platform import Platform default_tracer = ExeTracer() + class BaseCPU(ClockedObject): - type = 'BaseCPU' + type = "BaseCPU" abstract = True cxx_header = "cpu/base.hh" - cxx_class = 'gem5::BaseCPU' + cxx_class = "gem5::BaseCPU" cxx_exports = [ PyBindMethod("switchOut"), @@ -70,12 +71,14 @@ class BaseCPU(ClockedObject): PyBindMethod("totalInsts"), PyBindMethod("scheduleInstStop"), PyBindMethod("getCurrentInstCount"), + PyBindMethod("scheduleSimpointsInstStop"), + PyBindMethod("scheduleInstStopAnyThread"), ] @classmethod def memory_mode(cls): """Which memory mode does this CPU require?""" - return 'invalid' + return "invalid" @classmethod def require_caches(cls): @@ -94,17 +97,21 @@ class BaseCPU(ClockedObject): def takeOverFrom(self, old_cpu): self._ccObject.takeOverFrom(old_cpu._ccObject) - system = Param.System(Parent.any, "system object") cpu_id = Param.Int(-1, "CPU identifier") socket_id = Param.Unsigned(0, "Physical Socket identifier") numThreads = Param.Unsigned(1, "number of HW thread contexts") - pwr_gating_latency = Param.Cycles(300, - "Latency to enter power gating state when all contexts are suspended") + pwr_gating_latency = Param.Cycles( + 300, + "Latency to enter power gating state when all contexts are suspended", + ) - power_gating_on_idle = Param.Bool(False, "Control whether the core goes "\ - "to the OFF power state after all thread are disabled for "\ - "pwr_gating_latency cycles") + power_gating_on_idle = Param.Bool( + False, + "Control whether the core goes " + "to the OFF power state after all thread are disabled for " + "pwr_gating_latency cycles", + ) function_trace = Param.Bool(False, "Enable function trace") function_trace_start = Param.Tick(0, "Tick to start function trace") @@ -113,10 +120,12 @@ class 
BaseCPU(ClockedObject): syscallRetryLatency = Param.Cycles(10000, "Cycles to wait until retry") - do_checkpoint_insts = Param.Bool(True, - "enable checkpoint pseudo instructions") - do_statistics_insts = Param.Bool(True, - "enable statistics pseudo instructions") + do_checkpoint_insts = Param.Bool( + True, "enable checkpoint pseudo instructions" + ) + do_statistics_insts = Param.Bool( + True, "enable statistics pseudo instructions" + ) workload = VectorParam.Process([], "processes to run") @@ -125,93 +134,105 @@ class BaseCPU(ClockedObject): isa = VectorParam.BaseISA([], "ISA instance") decoder = VectorParam.InstDecoder([], "Decoder instance") - max_insts_all_threads = Param.Counter(0, - "terminate when all threads have reached this inst count") - max_insts_any_thread = Param.Counter(0, - "terminate when any thread reaches this inst count") - simpoint_start_insts = VectorParam.Counter([], - "starting instruction counts of simpoints") - progress_interval = Param.Frequency('0Hz', - "frequency to print out the progress message") + max_insts_all_threads = Param.Counter( + 0, "terminate when all threads have reached this inst count" + ) + max_insts_any_thread = Param.Counter( + 0, "terminate when any thread reaches this inst count" + ) + simpoint_start_insts = VectorParam.Counter( + [], "starting instruction counts of simpoints" + ) + progress_interval = Param.Frequency( + "0Hz", "frequency to print out the progress message" + ) - switched_out = Param.Bool(False, - "Leave the CPU switched out after startup (used when switching " \ - "between CPU models)") + switched_out = Param.Bool( + False, + "Leave the CPU switched out after startup (used when switching " + "between CPU models)", + ) tracer = Param.InstTracer(default_tracer, "Instruction tracer") icache_port = RequestPort("Instruction Port") dcache_port = RequestPort("Data Port") - _cached_ports = ['icache_port', 'dcache_port'] + _cached_ports = ["icache_port", "dcache_port"] _uncached_interrupt_response_ports = [] 
_uncached_interrupt_request_ports = [] def createInterruptController(self): self.interrupts = [ - self.ArchInterrupts() for i in range(self.numThreads)] + self.ArchInterrupts() for i in range(self.numThreads) + ] def connectCachedPorts(self, in_ports): for p in self._cached_ports: - exec('self.%s = in_ports' % p) + exec("self.%s = in_ports" % p) def connectUncachedPorts(self, in_ports, out_ports): for p in self._uncached_interrupt_response_ports: - exec('self.%s = out_ports' % p) + exec("self.%s = out_ports" % p) for p in self._uncached_interrupt_request_ports: - exec('self.%s = in_ports' % p) + exec("self.%s = in_ports" % p) def connectAllPorts(self, cached_in, uncached_in, uncached_out): self.connectCachedPorts(cached_in) self.connectUncachedPorts(uncached_in, uncached_out) def connectBus(self, bus): - self.connectAllPorts(bus.cpu_side_ports, - bus.cpu_side_ports, bus.mem_side_ports) + self.connectAllPorts( + bus.cpu_side_ports, bus.cpu_side_ports, bus.mem_side_ports + ) - def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None): + def addPrivateSplitL1Caches(self, ic, dc, iwc=None, dwc=None): self.icache = ic self.dcache = dc self.icache_port = ic.cpu_side self.dcache_port = dc.cpu_side - self._cached_ports = ['icache.mem_side', 'dcache.mem_side'] + self._cached_ports = ["icache.mem_side", "dcache.mem_side"] if iwc and dwc: self.itb_walker_cache = iwc self.dtb_walker_cache = dwc - self.mmu.connectWalkerPorts( - iwc.cpu_side, dwc.cpu_side) - self._cached_ports += ["itb_walker_cache.mem_side", \ - "dtb_walker_cache.mem_side"] + self.mmu.connectWalkerPorts(iwc.cpu_side, dwc.cpu_side) + self._cached_ports += [ + "itb_walker_cache.mem_side", + "dtb_walker_cache.mem_side", + ] else: self._cached_ports += self.ArchMMU.walkerPorts() # Checker doesn't need its own tlb caches because it does # functional accesses only if self.checker != NULL: - self._cached_ports += [ "checker." 
+ port - for port in self.ArchMMU.walkerPorts() ] + self._cached_ports += [ + "checker." + port for port in self.ArchMMU.walkerPorts() + ] - def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc=None, dwc=None, - xbar=None): + def addTwoLevelCacheHierarchy( + self, ic, dc, l2c, iwc=None, dwc=None, xbar=None + ): self.addPrivateSplitL1Caches(ic, dc, iwc, dwc) self.toL2Bus = xbar if xbar else L2XBar() self.connectCachedPorts(self.toL2Bus.cpu_side_ports) self.l2cache = l2c self.toL2Bus.mem_side_ports = self.l2cache.cpu_side - self._cached_ports = ['l2cache.mem_side'] + self._cached_ports = ["l2cache.mem_side"] def createThreads(self): # If no ISAs have been created, assume that the user wants the # default ISA. if len(self.isa) == 0: - self.isa = [ self.ArchISA() for i in range(self.numThreads) ] + self.isa = [self.ArchISA() for i in range(self.numThreads)] else: if len(self.isa) != int(self.numThreads): - raise RuntimeError("Number of ISA instances doesn't " - "match thread count") + raise RuntimeError( + "Number of ISA instances doesn't " "match thread count" + ) if len(self.decoder) != 0: raise RuntimeError("Decoders should not be set up manually") - self.decoder = list([ self.ArchDecoder(isa=isa) for isa in self.isa ]) + self.decoder = list([self.ArchDecoder(isa=isa) for isa in self.isa]) if self.checker != NULL: self.checker.createThreads() @@ -221,9 +242,9 @@ class BaseCPU(ClockedObject): def createPhandleKey(self, thread): # This method creates a unique key for this cpu as a function of a # certain thread - return 'CPU-%d-%d-%d' % (self.socket_id, self.cpu_id, thread) + return "CPU-%d-%d-%d" % (self.socket_id, self.cpu_id, thread) - #Generate simple CPU Device Tree structure + # Generate simple CPU Device Tree structure def generateDeviceTree(self, state): """Generate cpu nodes for each thread and the corresponding part of the cpu-map node. 
Note that this implementation does not support clusters @@ -235,14 +256,14 @@ class BaseCPU(ClockedObject): if bool(self.switched_out): return - cpus_node = FdtNode('cpus') + cpus_node = FdtNode("cpus") cpus_node.append(state.CPUCellsProperty()) - #Special size override of 0 - cpus_node.append(FdtPropertyWords('#size-cells', [0])) + # Special size override of 0 + cpus_node.append(FdtPropertyWords("#size-cells", [0])) # Generate cpu nodes for i in range(int(self.numThreads)): - reg = (int(self.socket_id)<<8) + int(self.cpu_id) + i + reg = (int(self.socket_id) << 8) + int(self.cpu_id) + i node = FdtNode("cpu@%x" % reg) node.append(FdtPropertyStrings("device_type", "cpu")) node.appendCompatible(["gem5,arm-cpu"]) @@ -251,8 +272,10 @@ class BaseCPU(ClockedObject): if found: platform.annotateCpuDeviceNode(node, state) else: - warn("Platform not found for device tree generation; " \ - "system or multiple CPUs may not start") + warn( + "Platform not found for device tree generation; " + "system or multiple CPUs may not start" + ) freq = int(self.clk_domain.unproxy(self).clock[0].frequency) node.append(FdtPropertyWords("clock-frequency", freq)) @@ -272,20 +295,23 @@ class BaseCPU(ClockedObject): def __init__(self, **kwargs): super().__init__(**kwargs) - self.power_state.possible_states=['ON', 'CLK_GATED', 'OFF'] + self.power_state.possible_states = ["ON", "CLK_GATED", "OFF"] self._cached_ports = self._cached_ports + self.ArchMMU.walkerPorts() # Practically speaking, these ports will exist on the x86 interrupt # controller class. 
if "pio" in self.ArchInterrupts._ports: - self._uncached_interrupt_response_ports = \ + self._uncached_interrupt_response_ports = ( self._uncached_interrupt_response_ports + ["interrupts[0].pio"] + ) if "int_responder" in self.ArchInterrupts._ports: - self._uncached_interrupt_response_ports = \ - self._uncached_interrupt_response_ports + [ - "interrupts[0].int_responder"] + self._uncached_interrupt_response_ports = ( + self._uncached_interrupt_response_ports + + ["interrupts[0].int_responder"] + ) if "int_requestor" in self.ArchInterrupts._ports: - self._uncached_interrupt_request_ports = \ - self._uncached_interrupt_request_ports + [ - "interrupts[0].int_requestor"] + self._uncached_interrupt_request_ports = ( + self._uncached_interrupt_request_ports + + ["interrupts[0].int_requestor"] + ) diff --git a/src/cpu/CPUTracers.py b/src/cpu/CPUTracers.py index 653c2ce99f..f491a0ef43 100644 --- a/src/cpu/CPUTracers.py +++ b/src/cpu/CPUTracers.py @@ -28,19 +28,21 @@ from m5.SimObject import SimObject from m5.params import * from m5.objects.InstTracer import InstTracer + class ExeTracer(InstTracer): - type = 'ExeTracer' - cxx_class = 'gem5::Trace::ExeTracer' + type = "ExeTracer" + cxx_class = "gem5::trace::ExeTracer" cxx_header = "cpu/exetrace.hh" + class IntelTrace(InstTracer): - type = 'IntelTrace' - cxx_class = 'gem5::Trace::IntelTrace' + type = "IntelTrace" + cxx_class = "gem5::trace::IntelTrace" cxx_header = "cpu/inteltrace.hh" + class NativeTrace(ExeTracer): abstract = True - type = 'NativeTrace' - cxx_class = 'gem5::Trace::NativeTrace' - cxx_header = 'cpu/nativetrace.hh' - + type = "NativeTrace" + cxx_class = "gem5::trace::NativeTrace" + cxx_header = "cpu/nativetrace.hh" diff --git a/src/cpu/CheckerCPU.py b/src/cpu/CheckerCPU.py index 14c98c2af2..beb04b79e1 100644 --- a/src/cpu/CheckerCPU.py +++ b/src/cpu/CheckerCPU.py @@ -29,17 +29,21 @@ from m5.params import * from m5.objects.BaseCPU import BaseCPU from m5.SimObject import SimObject + class CheckerCPU(BaseCPU): - 
type = 'CheckerCPU' + type = "CheckerCPU" abstract = True cxx_header = "cpu/checker/cpu.hh" - cxx_class = 'gem5::CheckerCPU' + cxx_class = "gem5::CheckerCPU" exitOnError = Param.Bool(False, "Exit on an error") - updateOnError = Param.Bool(False, - "Update the checker with the main CPU's state on an error") - warnOnlyOnLoadError = Param.Bool(True, - "If a load result is incorrect, only print a warning and do not exit") + updateOnError = Param.Bool( + False, "Update the checker with the main CPU's state on an error" + ) + warnOnlyOnLoadError = Param.Bool( + True, + "If a load result is incorrect, only print a warning and do not exit", + ) def generateDeviceTree(self, state): # The CheckerCPU is not a real CPU and shouldn't generate a DTB diff --git a/src/cpu/DummyChecker.py b/src/cpu/DummyChecker.py index 5687f7d736..916d25a92a 100644 --- a/src/cpu/DummyChecker.py +++ b/src/cpu/DummyChecker.py @@ -36,7 +36,8 @@ from m5.params import * from m5.objects.CheckerCPU import CheckerCPU + class DummyChecker(CheckerCPU): - type = 'DummyChecker' - cxx_header = 'cpu/dummy_checker.hh' - cxx_class = 'gem5::DummyChecker' + type = "DummyChecker" + cxx_header = "cpu/dummy_checker.hh" + cxx_class = "gem5::DummyChecker" diff --git a/src/cpu/FuncUnit.py b/src/cpu/FuncUnit.py index c5ba1e7e4c..c5137ac970 100644 --- a/src/cpu/FuncUnit.py +++ b/src/cpu/FuncUnit.py @@ -39,38 +39,83 @@ from m5.SimObject import SimObject from m5.params import * + class OpClass(Enum): - vals = ['No_OpClass', 'IntAlu', 'IntMult', 'IntDiv', 'FloatAdd', - 'FloatCmp', 'FloatCvt', 'FloatMult', 'FloatMultAcc', 'FloatDiv', - 'FloatMisc', 'FloatSqrt', - 'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt', - 'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc', - 'SimdDiv', 'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', - 'SimdFloatCmp', 'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', - 'SimdFloatMult', 'SimdFloatMultAcc', 'SimdFloatSqrt', - 'SimdReduceAdd', 'SimdReduceAlu', 'SimdReduceCmp', - 
'SimdFloatReduceAdd', 'SimdFloatReduceCmp', - 'SimdAes', 'SimdAesMix', 'SimdSha1Hash', 'SimdSha1Hash2', - 'SimdSha256Hash', 'SimdSha256Hash2', 'SimdShaSigma2', - 'SimdShaSigma3', - 'SimdPredAlu', - 'MemRead', 'MemWrite', 'FloatMemRead', 'FloatMemWrite', - 'IprAccess', 'InstPrefetch'] + vals = [ + "No_OpClass", + "IntAlu", + "IntMult", + "IntDiv", + "FloatAdd", + "FloatCmp", + "FloatCvt", + "FloatMult", + "FloatMultAcc", + "FloatDiv", + "FloatMisc", + "FloatSqrt", + "SimdAdd", + "SimdAddAcc", + "SimdAlu", + "SimdCmp", + "SimdCvt", + "SimdMisc", + "SimdMult", + "SimdMultAcc", + "SimdShift", + "SimdShiftAcc", + "SimdDiv", + "SimdSqrt", + "SimdFloatAdd", + "SimdFloatAlu", + "SimdFloatCmp", + "SimdFloatCvt", + "SimdFloatDiv", + "SimdFloatMisc", + "SimdFloatMult", + "SimdFloatMultAcc", + "SimdFloatSqrt", + "SimdReduceAdd", + "SimdReduceAlu", + "SimdReduceCmp", + "SimdFloatReduceAdd", + "SimdFloatReduceCmp", + "SimdAes", + "SimdAesMix", + "SimdSha1Hash", + "SimdSha1Hash2", + "SimdSha256Hash", + "SimdSha256Hash2", + "SimdShaSigma2", + "SimdShaSigma3", + "SimdPredAlu", + "MemRead", + "MemWrite", + "FloatMemRead", + "FloatMemWrite", + "IprAccess", + "InstPrefetch", + ] + class OpDesc(SimObject): - type = 'OpDesc' + type = "OpDesc" cxx_header = "cpu/func_unit.hh" - cxx_class = 'gem5::OpDesc' + cxx_class = "gem5::OpDesc" opClass = Param.OpClass("type of operation") opLat = Param.Cycles(1, "cycles until result is available") - pipelined = Param.Bool(True, "set to true when the functional unit for" - "this op is fully pipelined. False means not pipelined at all.") + pipelined = Param.Bool( + True, + "set to true when the functional unit for" + "this op is fully pipelined. 
False means not pipelined at all.", + ) + class FUDesc(SimObject): - type = 'FUDesc' + type = "FUDesc" cxx_header = "cpu/func_unit.hh" - cxx_class = 'gem5::FUDesc' + cxx_class = "gem5::FUDesc" count = Param.Int("number of these FU's available") opList = VectorParam.OpDesc("operation classes for this FU type") diff --git a/src/cpu/InstPBTrace.py b/src/cpu/InstPBTrace.py index e26a6ca085..167443d3cd 100644 --- a/src/cpu/InstPBTrace.py +++ b/src/cpu/InstPBTrace.py @@ -29,8 +29,9 @@ from m5.params import * from m5.objects.InstTracer import InstTracer + class InstPBTrace(InstTracer): - type = 'InstPBTrace' - cxx_class = 'gem5::Trace::InstPBTrace' - cxx_header = 'cpu/inst_pb_trace.hh' + type = "InstPBTrace" + cxx_class = "gem5::trace::InstPBTrace" + cxx_header = "cpu/inst_pb_trace.hh" file_name = Param.String("Instruction trace output file") diff --git a/src/cpu/SConscript b/src/cpu/SConscript index fad601eea2..0466f11433 100644 --- a/src/cpu/SConscript +++ b/src/cpu/SConscript @@ -64,6 +64,7 @@ DebugFlag('ExecAsid', 'Format: Include ASID in trace') DebugFlag('ExecFlags', 'Format: Include instruction flags in trace') DebugFlag('Fetch') DebugFlag('HtmCpu', 'Hardware Transactional Memory (CPU side)') +DebugFlag('InvalidReg') DebugFlag('O3PipeView') DebugFlag('PCEvent') DebugFlag('Quiesce') @@ -85,9 +86,6 @@ Source('pc_event.cc') SimObject('FuncUnit.py', sim_objects=['OpDesc', 'FUDesc'], enums=['OpClass']) SimObject('StaticInstFlags.py', enums=['StaticInstFlags']) -if env['CONF']['TARGET_ISA'] == 'null': - Return() - # Only build the protobuf instructions tracer if we have protobuf support. 
SimObject('InstPBTrace.py', sim_objects=['InstPBTrace'], tags='protobuf') Source('inst_pb_trace.cc', tags='protobuf') @@ -98,9 +96,8 @@ SimObject('BaseCPU.py', sim_objects=['BaseCPU']) SimObject('CPUTracers.py', sim_objects=[ 'ExeTracer', 'IntelTrace', 'NativeTrace']) SimObject('TimingExpr.py', sim_objects=[ - 'TimingExpr', 'TimingExprLiteral', 'TimingExprSrcReg', - 'TimingExprReadIntReg', 'TimingExprLet', 'TimingExprRef', 'TimingExprUn', - 'TimingExprBin', 'TimingExprIf'], + 'TimingExpr', 'TimingExprLiteral', 'TimingExprSrcReg', 'TimingExprLet', + 'TimingExprRef', 'TimingExprUn', 'TimingExprBin', 'TimingExprIf'], enums=['TimingExprOp']) Source('activity.cc') diff --git a/src/cpu/StaticInstFlags.py b/src/cpu/StaticInstFlags.py index 4775289407..b7e03a6fb9 100644 --- a/src/cpu/StaticInstFlags.py +++ b/src/cpu/StaticInstFlags.py @@ -40,60 +40,51 @@ from m5.params import * # Control will be set, and exactly one of IsCondControl or IsUncondControl # will be set. + class StaticInstFlags(Enum): - wrapper_name = 'StaticInstFlags' + wrapper_name = "StaticInstFlags" wrapper_is_struct = True - enum_name = 'Flags' + enum_name = "Flags" vals = [ - 'IsNop', # Is a no-op (no effect at all). - - 'IsInteger', # References integer regs. - 'IsFloating', # References FP regs. - 'IsVector', # References Vector regs. - 'IsVectorElem', # References Vector reg elems. - - 'IsLoad', # Reads from memory (load or prefetch). - 'IsStore', # Writes to memory. - 'IsAtomic', # Does atomic RMW to memory. - 'IsStoreConditional', # Store conditional instruction. - 'IsInstPrefetch', # Instruction-cache prefetch. - 'IsDataPrefetch', # Data-cache prefetch. - - 'IsControl', # Control transfer instruction. - 'IsDirectControl', # PC relative control transfer. - 'IsIndirectControl',# Register indirect control transfer. - 'IsCondControl', # Conditional control transfer. - 'IsUncondControl', # Unconditional control transfer. - 'IsCall', # Subroutine call. - 'IsReturn', # Subroutine return. 
- - 'IsSerializing', # Serializes pipeline: won't execute until all - # older instructions have committed. - 'IsSerializeBefore', - 'IsSerializeAfter', - 'IsWriteBarrier', # Is a write barrier - 'IsReadBarrier', # Is a read barrier - - 'IsNonSpeculative', # Should not be executed speculatively - 'IsQuiesce', # Is a quiesce instruction - - 'IsUnverifiable', # Can't be verified by a checker - - 'IsSyscall', # Causes a system call to be emulated in syscall - # emulation mode. - + "IsNop", # Is a no-op (no effect at all). + "IsInteger", # References integer regs. + "IsFloating", # References FP regs. + "IsVector", # References Vector regs. + "IsVectorElem", # References Vector reg elems. + "IsLoad", # Reads from memory (load or prefetch). + "IsStore", # Writes to memory. + "IsAtomic", # Does atomic RMW to memory. + "IsStoreConditional", # Store conditional instruction. + "IsInstPrefetch", # Instruction-cache prefetch. + "IsDataPrefetch", # Data-cache prefetch. + "IsControl", # Control transfer instruction. + "IsDirectControl", # PC relative control transfer. + "IsIndirectControl", # Register indirect control transfer. + "IsCondControl", # Conditional control transfer. + "IsUncondControl", # Unconditional control transfer. + "IsCall", # Subroutine call. + "IsReturn", # Subroutine return. + "IsSerializing", # Serializes pipeline: won't execute until all + # older instructions have committed. + "IsSerializeBefore", + "IsSerializeAfter", + "IsWriteBarrier", # Is a write barrier + "IsReadBarrier", # Is a read barrier + "IsNonSpeculative", # Should not be executed speculatively + "IsQuiesce", # Is a quiesce instruction + "IsUnverifiable", # Can't be verified by a checker + "IsSyscall", # Causes a system call to be emulated in syscall + # emulation mode. 
# Flags for microcode - 'IsMacroop', # Is a macroop containing microops - 'IsMicroop', # Is a microop - 'IsDelayedCommit', # This microop doesn't commit right away - 'IsLastMicroop', # This microop ends a microop sequence - 'IsFirstMicroop', # This microop begins a microop sequence - - 'IsSquashAfter', # Squash all uncommitted state after executed - + "IsMacroop", # Is a macroop containing microops + "IsMicroop", # Is a microop + "IsDelayedCommit", # This microop doesn't commit right away + "IsLastMicroop", # This microop ends a microop sequence + "IsFirstMicroop", # This microop begins a microop sequence + "IsSquashAfter", # Squash all uncommitted state after executed # hardware transactional memory - 'IsHtmStart', # Starts a HTM transaction - 'IsHtmStop', # Stops (commits) a HTM transaction - 'IsHtmCancel' # Explicitely aborts a HTM transaction - ] + "IsHtmStart", # Starts a HTM transaction + "IsHtmStop", # Stops (commits) a HTM transaction + "IsHtmCancel", # Explicitely aborts a HTM transaction + ] diff --git a/src/cpu/TimingExpr.py b/src/cpu/TimingExpr.py index 9c45097abd..a32b9efb56 100644 --- a/src/cpu/TimingExpr.py +++ b/src/cpu/TimingExpr.py @@ -43,17 +43,20 @@ from m5.SimObject import SimObject # Expressions, in evaluation, will have access to the ThreadContext and # a StaticInst + class TimingExpr(SimObject): - type = 'TimingExpr' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExpr' - abstract = True; + type = "TimingExpr" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExpr" + abstract = True + class TimingExprLiteral(TimingExpr): """Literal 64 bit unsigned value""" - type = 'TimingExprLiteral' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprLiteral' + + type = "TimingExprLiteral" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprLiteral" value = Param.UInt64("literal value") @@ -61,40 +64,33 @@ class TimingExprLiteral(TimingExpr): self.value = value return self + class 
TimingExpr0(TimingExprLiteral): """Convenient 0""" + value = 0 -class TimingExprSrcReg(TimingExpr): - """Find the source register number from the current inst""" - type = 'TimingExprSrcReg' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprSrcReg' - # index = Param.Unsigned("index into inst src regs") - index = Param.Unsigned("index into inst src regs") +class TimingExprSrcReg(TimingExpr): + """Read a source register from the current inst""" + + type = "TimingExprSrcReg" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprSrcReg" + + index = Param.Unsigned("index into inst src regs of the reg to read") def set_params(self, index): self.index = index return self -class TimingExprReadIntReg(TimingExpr): - """Read an architectural register""" - type = 'TimingExprReadIntReg' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprReadIntReg' - - reg = Param.TimingExpr("register raw index to read") - - def set_params(self, reg): - self.reg = reg - return self class TimingExprLet(TimingExpr): """Block of declarations""" - type = 'TimingExprLet' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprLet' + + type = "TimingExprLet" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprLet" defns = VectorParam.TimingExpr("expressions for bindings") expr = Param.TimingExpr("body expression") @@ -104,11 +100,13 @@ class TimingExprLet(TimingExpr): self.expr = expr return self + class TimingExprRef(TimingExpr): """Value of a bound sub-expression""" - type = 'TimingExprRef' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprRef' + + type = "TimingExprRef" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprRef" index = Param.Unsigned("expression index") @@ -116,31 +114,38 @@ class TimingExprRef(TimingExpr): self.index = index return self + class TimingExprOp(Enum): vals = [ - 'timingExprAdd', 'timingExprSub', - 'timingExprUMul', 'timingExprUDiv', - 'timingExprSMul', 
'timingExprSDiv', - 'timingExprUCeilDiv', # Unsigned divide rounding up - 'timingExprEqual', 'timingExprNotEqual', - 'timingExprULessThan', - 'timingExprUGreaterThan', - 'timingExprSLessThan', - 'timingExprSGreaterThan', - 'timingExprInvert', - 'timingExprNot', - 'timingExprAnd', - 'timingExprOr', - 'timingExprSizeInBits', - 'timingExprSignExtend32To64', - 'timingExprAbs' - ] + "timingExprAdd", + "timingExprSub", + "timingExprUMul", + "timingExprUDiv", + "timingExprSMul", + "timingExprSDiv", + "timingExprUCeilDiv", # Unsigned divide rounding up + "timingExprEqual", + "timingExprNotEqual", + "timingExprULessThan", + "timingExprUGreaterThan", + "timingExprSLessThan", + "timingExprSGreaterThan", + "timingExprInvert", + "timingExprNot", + "timingExprAnd", + "timingExprOr", + "timingExprSizeInBits", + "timingExprSignExtend32To64", + "timingExprAbs", + ] + class TimingExprUn(TimingExpr): """Unary operator""" - type = 'TimingExprUn' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprUn' + + type = "TimingExprUn" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprUn" op = Param.TimingExprOp("operator") arg = Param.TimingExpr("expression") @@ -150,11 +155,13 @@ class TimingExprUn(TimingExpr): self.arg = arg return self + class TimingExprBin(TimingExpr): """Binary operator""" - type = 'TimingExprBin' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprBin' + + type = "TimingExprBin" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprBin" op = Param.TimingExprOp("operator") left = Param.TimingExpr("LHS expression") @@ -166,11 +173,13 @@ class TimingExprBin(TimingExpr): self.right = right return self + class TimingExprIf(TimingExpr): """If-then-else operator""" - type = 'TimingExprIf' - cxx_header = 'cpu/timing_expr.hh' - cxx_class = 'gem5::TimingExprIf' + + type = "TimingExprIf" + cxx_header = "cpu/timing_expr.hh" + cxx_class = "gem5::TimingExprIf" cond = Param.TimingExpr("condition expression") trueExpr = 
Param.TimingExpr("true expression") diff --git a/src/cpu/base.cc b/src/cpu/base.cc index 5e8b7c1fb1..98c53d4895 100644 --- a/src/cpu/base.cc +++ b/src/cpu/base.cc @@ -275,9 +275,7 @@ BaseCPU::init() // Set up instruction-count-based termination events, if any. This needs // to happen after threadContexts has been constructed. if (params().max_insts_any_thread != 0) { - const char *cause = "a thread reached the max instruction count"; - for (ThreadID tid = 0; tid < numThreads; ++tid) - scheduleInstStop(tid, params().max_insts_any_thread, cause); + scheduleInstStopAnyThread(params().max_insts_any_thread); } // Set up instruction-count-based termination events for SimPoints @@ -285,13 +283,11 @@ BaseCPU::init() // Simulation.py is responsible to take the necessary actions upon // exitting the simulation loop. if (!params().simpoint_start_insts.empty()) { - const char *cause = "simpoint starting point found"; - for (size_t i = 0; i < params().simpoint_start_insts.size(); ++i) - scheduleInstStop(0, params().simpoint_start_insts[i], cause); + scheduleSimpointsInstStop(params().simpoint_start_insts); } if (params().max_insts_all_threads != 0) { - const char *cause = "all threads reached the max instruction count"; + std::string cause = "all threads reached the max instruction count"; // allocate & initialize shared downcounter: each event will // decrement this when triggered; simulation will terminate @@ -661,7 +657,7 @@ BaseCPU::unserialize(CheckpointIn &cp) } void -BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause) +BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, std::string cause) { const Tick now(getCurrentInstCount(tid)); Event *event(new LocalSimLoopExitEvent(cause, 0)); @@ -727,6 +723,23 @@ BaseCPU::traceFunctionsInternal(Addr pc) } } +void +BaseCPU::scheduleSimpointsInstStop(std::vector inst_starts) +{ + std::string cause = "simpoint starting point found"; + for (size_t i = 0; i < inst_starts.size(); ++i) { + scheduleInstStop(0, 
inst_starts[i], cause); + } +} + +void +BaseCPU::scheduleInstStopAnyThread(Counter max_insts) +{ + std::string cause = "a thread reached the max instruction count"; + for (ThreadID tid = 0; tid < numThreads; ++tid) { + scheduleInstStop(tid, max_insts, cause); + } +} BaseCPU::GlobalStats::GlobalStats(statistics::Group *parent) : statistics::Group(parent), diff --git a/src/cpu/base.hh b/src/cpu/base.hh index afee2365e9..0d56fbad89 100644 --- a/src/cpu/base.hh +++ b/src/cpu/base.hh @@ -44,13 +44,6 @@ #include -// Before we do anything else, check if this build is the NULL ISA, -// and if so stop here -#include "config/the_isa.hh" - -#if IS_NULL_ISA -#error Including BaseCPU in a system without CPU support -#else #include "arch/generic/interrupts.hh" #include "base/statistics.hh" #include "debug/Mwait.hh" @@ -262,7 +255,7 @@ class BaseCPU : public ClockedObject protected: std::vector threadContexts; - Trace::InstTracer * tracer; + trace::InstTracer * tracer; public: @@ -272,7 +265,7 @@ class BaseCPU : public ClockedObject static const uint32_t invldPid = std::numeric_limits::max(); /// Provide access to the tracer pointer - Trace::InstTracer * getTracer() { return tracer; } + trace::InstTracer * getTracer() { return tracer; } /// Notify the CPU that the indicated context is now active. virtual void activateContext(ThreadID thread_num); @@ -443,7 +436,28 @@ class BaseCPU : public ClockedObject * @param insts Number of instructions into the future. * @param cause Cause to signal in the exit event. */ - void scheduleInstStop(ThreadID tid, Counter insts, const char *cause); + void scheduleInstStop(ThreadID tid, Counter insts, std::string cause); + + /** + * Schedule simpoint events using the scheduleInstStop function. + * + * This is used to raise a SIMPOINT_BEGIN exit event in the gem5 standard + * library. 
+ * + * @param inst_starts A vector of number of instructions to start simpoints + */ + + void scheduleSimpointsInstStop(std::vector inst_starts); + + /** + * Schedule an exit event when any threads in the core reach the max_insts + * instructions using the scheduleInstStop function. + * + * This is used to raise a MAX_INSTS exit event in thegem5 standard library + * + * @param max_insts Number of instructions into the future. + */ + void scheduleInstStopAnyThread(Counter max_insts); /** * Get the number of instructions executed by the specified thread @@ -650,6 +664,4 @@ class BaseCPU : public ClockedObject } // namespace gem5 -#endif // !IS_NULL_ISA - #endif // __CPU_BASE_HH__ diff --git a/src/cpu/checker/cpu.hh b/src/cpu/checker/cpu.hh index c455cf52d9..698a0cbc52 100644 --- a/src/cpu/checker/cpu.hh +++ b/src/cpu/checker/cpu.hh @@ -200,8 +200,9 @@ class CheckerCPU : public BaseCPU, public ExecContext const RegId& id = si->destRegIdx(idx); if (id.is(InvalidRegClass)) return; - thread->setReg(id, val); - result.emplace(val); + const RegId flat = id.flatten(*thread->getIsaPtr()); + thread->setReg(flat, val); + result.emplace(flat.regClass(), val); } void @@ -210,8 +211,9 @@ class CheckerCPU : public BaseCPU, public ExecContext const RegId& id = si->destRegIdx(idx); if (id.is(InvalidRegClass)) return; - thread->setReg(id, val); - //TODO setVecResult, setVecPredResult setVecElemResult? 
+ const RegId flat = id.flatten(*thread->getIsaPtr()); + thread->setReg(flat, val); + result.emplace(flat.regClass(), val); } bool readPredicate() const override { return thread->readPredicate(); } diff --git a/src/cpu/checker/cpu_impl.hh b/src/cpu/checker/cpu_impl.hh index a68b6f4526..cb6e57c034 100644 --- a/src/cpu/checker/cpu_impl.hh +++ b/src/cpu/checker/cpu_impl.hh @@ -46,7 +46,6 @@ #include #include "base/refcnt.hh" -#include "config/the_isa.hh" #include "cpu/exetrace.hh" #include "cpu/null_static_inst.hh" #include "cpu/reg_class.hh" @@ -347,7 +346,7 @@ Checker::verify(const DynInstPtr &completed_inst) if (fault == NoFault) { // Execute Checker instruction and trace if (!unverifiedInst->isUnverifiable()) { - Trace::InstRecord *traceData = tracer->getInstRecord(curTick(), + trace::InstRecord *traceData = tracer->getInstRecord(curTick(), tc, curStaticInst, pcState(), @@ -466,24 +465,22 @@ Checker::validateExecution(const DynInstPtr &inst) InstResult inst_val; int idx = -1; bool result_mismatch = false; - bool scalar_mismatch = false; if (inst->isUnverifiable()) { // Unverifiable instructions assume they were executed // properly by the CPU. Grab the result from the // instruction and write it to the register. - copyResult(inst, InstResult((RegVal)0), idx); + copyResult(inst, InstResult(), idx); } else if (inst->numDestRegs() > 0 && !result.empty()) { DPRINTF(Checker, "Dest regs %d, number of checker dest regs %d\n", inst->numDestRegs(), result.size()); for (int i = 0; i < inst->numDestRegs() && !result.empty(); i++) { checker_val = result.front(); result.pop(); - inst_val = inst->popResult(InstResult((RegVal)0)); + inst_val = inst->popResult(); if (checker_val != inst_val) { result_mismatch = true; idx = i; - scalar_mismatch = checker_val.is(); } } } // Checker CPU checks all the saved results in the dyninst passed by @@ -493,12 +490,9 @@ Checker::validateExecution(const DynInstPtr &inst) // this is ok and not a bug. May be worthwhile to try and correct this. 
if (result_mismatch) { - if (scalar_mismatch) { - warn("%lli: Instruction results (%i) do not match! (Values may" - " not actually be integers) Inst: %#x, checker: %#x", - curTick(), idx, inst_val.asNoAssert(), - checker_val.as()); - } + warn("%lli: Instruction results (%i) do not match! Inst: %s, " + "checker: %s", + curTick(), idx, inst_val.asString(), checker_val.asString()); // It's useful to verify load values from memory, but in MP // systems the value obtained at execute may be different than @@ -580,56 +574,30 @@ Checker::copyResult( // so do the fix-up then start with the next dest reg; if (start_idx >= 0) { const RegId& idx = inst->destRegIdx(start_idx); - switch (idx.classValue()) { - case InvalidRegClass: - break; - case IntRegClass: - case FloatRegClass: - case VecElemClass: - case CCRegClass: - thread->setReg(idx, mismatch_val.as()); - break; - case VecRegClass: - { - auto val = mismatch_val.as(); - thread->setReg(idx, &val); - } - break; - case MiscRegClass: - thread->setMiscReg(idx.index(), mismatch_val.as()); - break; - default: - panic("Unknown register class: %d", (int)idx.classValue()); - } + + if (idx.classValue() == InvalidRegClass) + ; // Do nothing. + else if (idx.classValue() == MiscRegClass) + thread->setMiscReg(idx.index(), mismatch_val.asRegVal()); + else if (mismatch_val.isBlob()) + thread->setReg(idx, mismatch_val.asBlob()); + else + thread->setReg(idx, mismatch_val.asRegVal()); } start_idx++; InstResult res; for (int i = start_idx; i < inst->numDestRegs(); i++) { const RegId& idx = inst->destRegIdx(i); res = inst->popResult(); - switch (idx.classValue()) { - case InvalidRegClass: - break; - case IntRegClass: - case FloatRegClass: - case VecElemClass: - case CCRegClass: - thread->setReg(idx, res.as()); - break; - case VecRegClass: - { - auto val = res.as(); - thread->setReg(idx, &val); - } - break; - case MiscRegClass: - // Try to get the proper misc register index for ARM here... 
+ + if (idx.classValue() == InvalidRegClass) + ; // Do nothing. + else if (idx.classValue() == MiscRegClass) thread->setMiscReg(idx.index(), 0); - break; - // else Register is out of range... - default: - panic("Unknown register class: %d", (int)idx.classValue()); - } + else if (res.isBlob()) + thread->setReg(idx, res.asBlob()); + else + thread->setReg(idx, res.asRegVal()); } } diff --git a/src/cpu/checker/thread_context.hh b/src/cpu/checker/thread_context.hh index 13828f58b2..396ecfe43a 100644 --- a/src/cpu/checker/thread_context.hh +++ b/src/cpu/checker/thread_context.hh @@ -43,7 +43,6 @@ #define __CPU_CHECKER_THREAD_CONTEXT_HH__ #include "arch/generic/pcstate.hh" -#include "config/the_isa.hh" #include "cpu/checker/cpu.hh" #include "cpu/simple_thread.hh" #include "cpu/thread_context.hh" @@ -52,11 +51,6 @@ namespace gem5 { -namespace TheISA -{ - class Decoder; -} // namespace TheISA - /** * Derived ThreadContext class for use with the Checker. The template * parameter is the ThreadContext class used by the specific CPU being @@ -308,12 +302,6 @@ class CheckerThreadContext : public ThreadContext actualTC->setMiscReg(misc_reg, val); } - RegId - flattenRegId(const RegId& regId) const override - { - return actualTC->flattenRegId(regId); - } - unsigned readStCondFailures() const override { @@ -326,38 +314,6 @@ class CheckerThreadContext : public ThreadContext actualTC->setStCondFailures(sc_failures); } - RegVal - getRegFlat(const RegId ®) const override - { - return actualTC->getRegFlat(reg); - } - - void - getRegFlat(const RegId ®, void *val) const override - { - actualTC->getRegFlat(reg, val); - } - - void * - getWritableRegFlat(const RegId ®) override - { - return actualTC->getWritableRegFlat(reg); - } - - void - setRegFlat(const RegId ®, RegVal val) override - { - actualTC->setRegFlat(reg, val); - checkerTC->setRegFlat(reg, val); - } - - void - setRegFlat(const RegId ®, const void *val) override - { - actualTC->setRegFlat(reg, val); - checkerTC->setRegFlat(reg, 
val); - } - // hardware transactional memory void htmAbortTransaction(uint64_t htm_uid, HtmFailureFaultCause cause) override diff --git a/src/cpu/exec_context.hh b/src/cpu/exec_context.hh index 5fb2cac6f5..92448e2cc9 100644 --- a/src/cpu/exec_context.hh +++ b/src/cpu/exec_context.hh @@ -42,9 +42,7 @@ #ifndef __CPU_EXEC_CONTEXT_HH__ #define __CPU_EXEC_CONTEXT_HH__ -#include "arch/vecregs.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/reg_class.hh" #include "cpu/static_inst_fwd.hh" diff --git a/src/cpu/exetrace.cc b/src/cpu/exetrace.cc index f6edded9d7..22d0d4be69 100644 --- a/src/cpu/exetrace.cc +++ b/src/cpu/exetrace.cc @@ -44,7 +44,6 @@ #include #include "base/loader/symtab.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/static_inst.hh" #include "cpu/thread_context.hh" @@ -55,10 +54,10 @@ namespace gem5 { -namespace Trace { +namespace trace { void -Trace::ExeTracerRecord::traceInst(const StaticInstPtr &inst, bool ran) +ExeTracerRecord::traceInst(const StaticInstPtr &inst, bool ran) { std::stringstream outs; @@ -115,18 +114,11 @@ Trace::ExeTracerRecord::traceInst(const StaticInstPtr &inst, bool ran) outs << "Predicated False"; } - if (debug::ExecResult && data_status != DataInvalid) { - switch (data_status) { - case DataVec: - ccprintf(outs, " D=%s", *data.as_vec); - break; - case DataVecPred: - ccprintf(outs, " D=%s", *data.as_pred); - break; - default: - ccprintf(outs, " D=%#018x", data.as_int); - break; - } + if (debug::ExecResult && dataStatus != DataInvalid) { + if (dataStatus == DataReg) + ccprintf(outs, " D=%s", data.asReg.asString()); + else + ccprintf(outs, " D=%#018x", data.asInt); } if (debug::ExecEffAddr && getMemValid()) @@ -150,13 +142,13 @@ Trace::ExeTracerRecord::traceInst(const StaticInstPtr &inst, bool ran) // outs << std::endl; - Trace::getDebugLogger()->dprintf_flag( + trace::getDebugLogger()->dprintf_flag( when, thread->getCpuPtr()->name(), "ExecEnable", "%s", 
outs.str().c_str()); } void -Trace::ExeTracerRecord::dump() +ExeTracerRecord::dump() { /* * The behavior this check tries to achieve is that if ExecMacro is on, @@ -178,5 +170,5 @@ Trace::ExeTracerRecord::dump() } } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/cpu/exetrace.hh b/src/cpu/exetrace.hh index 7210241eec..143cfa0eb3 100644 --- a/src/cpu/exetrace.hh +++ b/src/cpu/exetrace.hh @@ -42,7 +42,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { class ExeTracerRecord : public InstRecord { @@ -79,7 +79,7 @@ class ExeTracer : public InstTracer } }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __CPU_EXETRACE_HH__ diff --git a/src/cpu/inst_pb_trace.cc b/src/cpu/inst_pb_trace.cc index 08a2389b7c..4a289f0b84 100644 --- a/src/cpu/inst_pb_trace.cc +++ b/src/cpu/inst_pb_trace.cc @@ -39,7 +39,6 @@ #include "base/callback.hh" #include "base/output.hh" -#include "config/the_isa.hh" #include "cpu/static_inst.hh" #include "cpu/thread_context.hh" #include "debug/ExecEnable.hh" @@ -51,7 +50,7 @@ namespace gem5 { -namespace Trace { +namespace trace { ProtoOutputStream *InstPBTrace::traceStream; @@ -178,5 +177,5 @@ InstPBTrace::traceMem(StaticInstPtr si, Addr a, Addr s, unsigned f) } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/cpu/inst_pb_trace.hh b/src/cpu/inst_pb_trace.hh index ee0ed6443c..52924f83c1 100644 --- a/src/cpu/inst_pb_trace.hh +++ b/src/cpu/inst_pb_trace.hh @@ -55,7 +55,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { /** * This in an instruction tracer that records the flow of instructions through @@ -136,7 +136,7 @@ class InstPBTrace : public InstTracer friend class InstPBTraceRecord; }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __CPU_INST_PB_TRACE_HH__ diff --git a/src/cpu/inst_res.hh b/src/cpu/inst_res.hh index fd84cb6c85..30bb2b262b 100644 --- a/src/cpu/inst_res.hh +++ 
b/src/cpu/inst_res.hh @@ -38,11 +38,15 @@ #ifndef __CPU_INST_RES_HH__ #define __CPU_INST_RES_HH__ -#include -#include +#include +#include +#include +#include +#include #include "base/logging.hh" #include "base/types.hh" +#include "cpu/reg_class.hh" namespace gem5 { @@ -50,65 +54,65 @@ namespace gem5 class InstResult { private: - std::any result; - std::function equals; + using BlobPtr = std::unique_ptr; + + std::variant value; + const RegClass *_regClass = nullptr; + + bool blob() const { return std::holds_alternative(value); } + bool valid() const { return _regClass != nullptr; } + + // Raw accessors with no safety checks. + RegVal getRegVal() const { return std::get(value); } + const void *getBlob() const { return std::get(value).get(); } + + // Store copies of blobs, not a pointer to the original. + void + set(const void *val) + { + uint8_t *temp = nullptr; + if (val) { + const size_t size = _regClass->regBytes(); + temp = new uint8_t[size]; + std::memcpy(temp, val, size); + } + value = BlobPtr(temp); + } + + void set(RegVal val) { value = val; } + + void + set(const InstResult &other) + { + other.blob() ? set(other.getBlob()) : set(other.getRegVal()); + } public: /** Default constructor creates an invalid result. */ - InstResult() : - // This InstResult is empty, and will only equal other InstResults - // which are also empty. - equals([](const std::any &a, const std::any &b) -> bool { - gem5_assert(!a.has_value()); - return !b.has_value(); - }) - {} - InstResult(const InstResult &) = default; - - template - explicit InstResult(T val) : result(val), - - // Set equals so it knows how to compare results of type T. - equals([](const std::any &a, const std::any &b) -> bool { - // If one has a value but the other doesn't, not equal. - if (a.has_value() != b.has_value()) - return false; - // If they are both empty, equal. - if (!a.has_value()) - return true; - // At least the local object should be of the right type. 
- gem5_assert(a.type() == typeid(T)); - // If these aren't the same type, not equal. - if (a.type() != b.type()) - return false; - // We now know these both hold a result of the right type. - return std::any_cast(a) == std::any_cast(b); - }) + InstResult() {} + InstResult(const InstResult &other) : _regClass(other._regClass) { - static_assert(!std::is_pointer_v, - "InstResult shouldn't point to external data."); - // Floating point values should be converted to/from ints using - // floatToBits and bitsToFloat, and not stored in InstResult directly. - static_assert(!std::is_floating_point_v, - "Floating point values should be converted to/from ints."); + set(other); } - // Convert floating point values to integers. - template , int> = 0> - explicit InstResult(T val) : InstResult(floatToBits(val)) {} + InstResult(const RegClass ®_class, RegVal val) : + _regClass(®_class) + { + set(val); + } - // Convert all integer types to RegVal. - template && !std::is_same_v, - int> = 0> - explicit InstResult(T val) : InstResult(static_cast(val)) {} + InstResult(const RegClass ®_class, const void *val) : + _regClass(®_class) + { + set(val); + } InstResult & - operator=(const InstResult& that) + operator=(const InstResult &that) { - result = that.result; - equals = that.equals; + _regClass = that._regClass; + set(that); + return *this; } @@ -119,7 +123,23 @@ class InstResult bool operator==(const InstResult& that) const { - return equals(result, that.result); + if (blob() != that.blob() || _regClass != that._regClass) + return false; + + if (blob()) { + const void *my_blob = getBlob(); + const void *their_blob = that.getBlob(); + + // Invalid results always differ. + if (!my_blob || !their_blob) + return false; + + // Check the contents of the blobs, not their addresses. 
+ return std::memcmp(getBlob(), that.getBlob(), + _regClass->regBytes()) == 0; + } else { + return getRegVal() == that.getRegVal(); + } } bool @@ -128,61 +148,34 @@ class InstResult return !operator==(that); } - /** Checks */ - /** @{ */ + const RegClass ®Class() const { return *_regClass; } + bool isValid() const { return valid(); } + bool isBlob() const { return blob(); } - template - bool - is() const + RegVal + asRegVal() const { - static_assert(!std::is_floating_point_v, - "Floating point values should be converted to/from ints."); - return result.type() == typeid(T); + assert(!blob()); + return getRegVal(); } - template - std::enable_if_t && !std::is_same_v, bool> - is() const + const void * + asBlob() const { - return is(); + assert(blob()); + return getBlob(); } - /** Is this a valid result?. */ - bool isValid() const { return result.has_value(); } - /** @} */ - - /** Explicit cast-like operations. */ - /** @{ */ - template - T - as() const + std::string + asString() const { - assert(is()); - return std::any_cast(result); + if (blob()) { + return _regClass->valString(getBlob()); + } else { + RegVal reg = getRegVal(); + return _regClass->valString(®); + } } - - template - std::enable_if_t && !std::is_same_v, - RegVal> - as() const - { - return as(); - } - - /** Cast to integer without checking type. - * This is required to have the o3 cpu checker happy, as it - * compares results as integers without being fully aware of - * their nature. 
*/ - template - T - asNoAssert() const - { - if (!is()) - return T{}; - return as(); - } - - /** @} */ }; } // namespace gem5 diff --git a/src/cpu/inteltrace.cc b/src/cpu/inteltrace.cc index 0c3c6c2ffd..3aea5c65cc 100644 --- a/src/cpu/inteltrace.cc +++ b/src/cpu/inteltrace.cc @@ -36,12 +36,12 @@ namespace gem5 { -namespace Trace { +namespace trace { void -Trace::IntelTraceRecord::dump() +IntelTraceRecord::dump() { - std::ostream &outs = Trace::output(); + std::ostream &outs = trace::output(); ccprintf(outs, "%7d ) ", when); outs << "0x" << std::hex << pc->instAddr() << ":\t"; if (staticInst->isLoad()) { @@ -52,5 +52,5 @@ Trace::IntelTraceRecord::dump() outs << std::endl; } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/cpu/inteltrace.hh b/src/cpu/inteltrace.hh index 4d57026241..3ccfe356f8 100644 --- a/src/cpu/inteltrace.hh +++ b/src/cpu/inteltrace.hh @@ -40,7 +40,7 @@ namespace gem5 { -namespace Trace { +namespace trace { class IntelTraceRecord : public InstRecord { @@ -75,7 +75,7 @@ class IntelTrace : public InstTracer } }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __CPU_INTELTRACE_HH__ diff --git a/src/cpu/kvm/BaseKvmCPU.py b/src/cpu/kvm/BaseKvmCPU.py index 58cb00b8df..f958e8126c 100644 --- a/src/cpu/kvm/BaseKvmCPU.py +++ b/src/cpu/kvm/BaseKvmCPU.py @@ -40,10 +40,11 @@ from m5.proxy import * from m5.objects.BaseCPU import BaseCPU from m5.objects.KvmVM import KvmVM + class BaseKvmCPU(BaseCPU): - type = 'BaseKvmCPU' + type = "BaseKvmCPU" cxx_header = "cpu/kvm/base.hh" - cxx_class = 'gem5::BaseKvmCPU' + cxx_class = "gem5::BaseKvmCPU" abstract = True @cxxMethod @@ -53,7 +54,7 @@ class BaseKvmCPU(BaseCPU): @classmethod def memory_mode(cls): - return 'atomic_noncaching' + return "atomic_noncaching" @classmethod def require_caches(cls): @@ -64,9 +65,12 @@ class BaseKvmCPU(BaseCPU): return True useCoalescedMMIO = Param.Bool(False, "Use coalesced MMIO (EXPERIMENTAL)") - usePerfOverflow = Param.Bool(False, 
"Use perf event overflow counters (EXPERIMENTAL)") - alwaysSyncTC = Param.Bool(False, - "Always sync thread contexts on entry/exit") + usePerfOverflow = Param.Bool( + False, "Use perf event overflow counters (EXPERIMENTAL)" + ) + alwaysSyncTC = Param.Bool( + False, "Always sync thread contexts on entry/exit" + ) hostFreq = Param.Clock("2GHz", "Host clock frequency") hostFactor = Param.Float(1.0, "Cycle scale factor") diff --git a/src/cpu/kvm/KvmVM.py b/src/cpu/kvm/KvmVM.py index aae9d988b2..cdb826cf6c 100644 --- a/src/cpu/kvm/KvmVM.py +++ b/src/cpu/kvm/KvmVM.py @@ -38,12 +38,14 @@ from m5.proxy import * from m5.SimObject import SimObject -class KvmVM(SimObject): - type = 'KvmVM' - cxx_header = "cpu/kvm/vm.hh" - cxx_class = 'gem5::KvmVM' - coalescedMMIO = \ - VectorParam.AddrRange([], "memory ranges for coalesced MMIO") +class KvmVM(SimObject): + type = "KvmVM" + cxx_header = "cpu/kvm/vm.hh" + cxx_class = "gem5::KvmVM" + + coalescedMMIO = VectorParam.AddrRange( + [], "memory ranges for coalesced MMIO" + ) system = Param.System(Parent.any, "system this VM belongs to") diff --git a/src/cpu/kvm/SConscript b/src/cpu/kvm/SConscript index 2a87c764ea..82a40d5997 100644 --- a/src/cpu/kvm/SConscript +++ b/src/cpu/kvm/SConscript @@ -37,26 +37,27 @@ Import('*') -if not env['CONF']['USE_KVM'] or \ - env['CONF']['TARGET_ISA'] != env['CONF']['KVM_ISA']: - Return() +if env['CONF']['USE_KVM']: + env.TagImplies('kvm', 'gem5 lib') + env.TagImplies(env.subst('${CONF["KVM_ISA"]} kvm'), + env.subst('${CONF["KVM_ISA"]} isa')) -SimObject('KvmVM.py', sim_objects=['KvmVM']) -SimObject('BaseKvmCPU.py', sim_objects=['BaseKvmCPU']) +SimObject('KvmVM.py', sim_objects=['KvmVM'], tags='kvm') +SimObject('BaseKvmCPU.py', sim_objects=['BaseKvmCPU'], tags='kvm') -Source('base.cc') -Source('device.cc') -Source('vm.cc') -Source('perfevent.cc') -Source('timer.cc') +Source('base.cc', tags='kvm') +Source('device.cc', tags='kvm') +Source('vm.cc', tags='kvm') +Source('perfevent.cc', tags='kvm') 
+Source('timer.cc', tags='kvm') -DebugFlag('Kvm', 'Basic KVM Functionality') -DebugFlag('KvmContext', 'KVM/gem5 context synchronization') -DebugFlag('KvmIO', 'KVM MMIO diagnostics') -DebugFlag('KvmInt', 'KVM Interrupt handling') -DebugFlag('KvmRun', 'KvmRun entry/exit diagnostics') -DebugFlag('KvmTimer', 'KVM timing') +DebugFlag('Kvm', 'Basic KVM Functionality', tags='kvm') +DebugFlag('KvmContext', 'KVM/gem5 context synchronization', tags='kvm') +DebugFlag('KvmIO', 'KVM MMIO diagnostics', tags='kvm') +DebugFlag('KvmInt', 'KVM Interrupt handling', tags='kvm') +DebugFlag('KvmRun', 'KvmRun entry/exit diagnostics', tags='kvm') +DebugFlag('KvmTimer', 'KVM timing', tags='kvm') CompoundFlag('KvmAll', [ 'Kvm', 'KvmContext', 'KvmRun', 'KvmIO', 'KvmInt', 'KvmTimer' ], - 'All KVM debug flags') + 'All KVM debug flags', tags='kvm') diff --git a/src/cpu/kvm/SConsopts b/src/cpu/kvm/SConsopts index 3bb549fbee..275eedaa54 100644 --- a/src/cpu/kvm/SConsopts +++ b/src/cpu/kvm/SConsopts @@ -28,39 +28,29 @@ Import('*') from gem5_scons import warning import gem5_scons -host_isa = None -try: - import platform - host_isa = platform.machine() -except: - pass + +# ISA code can set this to indicate what ISA KVM can target. +main['CONF'].setdefault('KVM_ISA', '') with gem5_scons.Configure(main) as conf: - # Check if we should enable KVM-based hardware virtualization. The API - # we rely on exists since version 2.6.36 of the kernel, but somehow - # the KVM_API_VERSION does not reflect the change. We test for one of - # the types as a fall back. - # The default value of KVM_ISA should serialize to a string in the - # C++ header and test False in Scons/Python. - conf.env['CONF']['KVM_ISA'] = '' + # Check if we should enable KVM-based hardware virtualization. The + # API we rely on exists since version 2.6.36 of the kernel, but + # somehow the KVM_API_VERSION does not reflect the change. We test + # for one of the types as a fall back. 
+ + main['CONF']['HAVE_KVM'] = False + if not conf.CheckHeader('linux/kvm.h', '<>'): - print("Info: Compatible header file not found, " - "disabling KVM support.") + warning("Info: Compatible header file not found, " + "disabling KVM support.") elif not conf.CheckLibWithHeader([None, 'rt'], [ 'time.h', 'signal.h' ], 'C', 'timer_create(CLOCK_MONOTONIC, NULL, NULL);'): warning("Cannot enable KVM, host doesn't support POSIX timers") - elif host_isa == 'x86_64': - if conf.CheckTypeSize('struct kvm_xsave', - '#include ') != 0: - conf.env['CONF']['KVM_ISA'] = 'x86' - else: - warning("KVM on x86 requires xsave support in kernel headers.") - elif host_isa in ('armv7l', 'aarch64'): - conf.env['CONF']['KVM_ISA'] = 'arm' else: - warning("Failed to determine host ISA.") + # Generic support is available. We'll let the ISAs figure out if + # it's really supported. + conf.env['CONF']['HAVE_KVM'] = True - if conf.env['CONF']['KVM_ISA']: # Check if the exclude_host attribute is available. We want this to # get accurate instruction counts in KVM. conf.env['CONF']['HAVE_PERF_ATTR_EXCLUDE_HOST'] = conf.CheckMember( @@ -71,9 +61,13 @@ with gem5_scons.Configure(main) as conf: warning("perf_event headers lack support for the exclude_host " "attribute. 
KVM instruction counts will be inaccurate.") -if main['CONF']['KVM_ISA']: - sticky_vars.Add(BoolVariable('USE_KVM', - 'Enable hardware virtualized (KVM) CPU models', True)) -else: - main['CONF']['USE_KVM'] = False - warning("Can not enable KVM, host seems to lack KVM support") + +def create_use_kvm_var(): + if main['CONF']['HAVE_KVM'] and main['CONF']['KVM_ISA']: + sticky_vars.Add(BoolVariable('USE_KVM', + 'Enable hardware virtualized (KVM) CPU models', True)) + else: + main['CONF']['USE_KVM'] = False + warning("Cannot enable KVM, host seems to lack KVM support") + +AfterSConsopts(create_use_kvm_var) diff --git a/src/cpu/kvm/vm.cc b/src/cpu/kvm/vm.cc index d3d8f1d1ba..e714a40b22 100644 --- a/src/cpu/kvm/vm.cc +++ b/src/cpu/kvm/vm.cc @@ -207,7 +207,7 @@ Kvm::capXSave() const bool Kvm::capIRQLineLayout2() const { -#if defined(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2) && defined(KVM_ARM_IRQ_VCPU2_SHIFT) +#if defined(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2) return checkExtension(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2) != 0; #else return false; diff --git a/src/cpu/minor/BaseMinorCPU.py b/src/cpu/minor/BaseMinorCPU.py index ac26743a7d..7d15ec4cd4 100644 --- a/src/cpu/minor/BaseMinorCPU.py +++ b/src/cpu/minor/BaseMinorCPU.py @@ -47,151 +47,229 @@ from m5.objects.TimingExpr import TimingExpr from m5.objects.FuncUnit import OpClass + class MinorOpClass(SimObject): """Boxing of OpClass to get around build problems and provide a hook for future additions to OpClass checks""" - type = 'MinorOpClass' + type = "MinorOpClass" cxx_header = "cpu/minor/func_unit.hh" - cxx_class = 'gem5::MinorOpClass' + cxx_class = "gem5::MinorOpClass" opClass = Param.OpClass("op class to match") + class MinorOpClassSet(SimObject): """A set of matchable op classes""" - type = 'MinorOpClassSet' + type = "MinorOpClassSet" cxx_header = "cpu/minor/func_unit.hh" - cxx_class = 'gem5::MinorOpClassSet' + cxx_class = "gem5::MinorOpClassSet" + + opClasses = VectorParam.MinorOpClass( + [], "op classes to be matched." 
" An empty list means any class" + ) - opClasses = VectorParam.MinorOpClass([], "op classes to be matched." - " An empty list means any class") class MinorFUTiming(SimObject): - type = 'MinorFUTiming' + type = "MinorFUTiming" cxx_header = "cpu/minor/func_unit.hh" - cxx_class = 'gem5::MinorFUTiming' + cxx_class = "gem5::MinorFUTiming" mask = Param.UInt64(0, "mask for testing ExtMachInst") - match = Param.UInt64(0, "match value for testing ExtMachInst:" - " (ext_mach_inst & mask) == match") - suppress = Param.Bool(False, "if true, this inst. is not executed by" - " this FU") - extraCommitLat = Param.Cycles(0, "extra cycles to stall commit for" - " this inst.") - extraCommitLatExpr = Param.TimingExpr(NULL, "extra cycles as a" - " run-time evaluated expression") - extraAssumedLat = Param.Cycles(0, "extra cycles to add to scoreboard" + match = Param.UInt64( + 0, + "match value for testing ExtMachInst:" + " (ext_mach_inst & mask) == match", + ) + suppress = Param.Bool( + False, "if true, this inst. is not executed by" " this FU" + ) + extraCommitLat = Param.Cycles( + 0, "extra cycles to stall commit for" " this inst." + ) + extraCommitLatExpr = Param.TimingExpr( + NULL, "extra cycles as a" " run-time evaluated expression" + ) + extraAssumedLat = Param.Cycles( + 0, + "extra cycles to add to scoreboard" " retire time for this insts dest registers once it leaves the" " functional unit. For mem refs, if this is 0, the result's time" - " is marked as unpredictable and no forwarding can take place.") - srcRegsRelativeLats = VectorParam.Cycles("the maximum number of cycles" + " is marked as unpredictable and no forwarding can take place.", + ) + srcRegsRelativeLats = VectorParam.Cycles( + "the maximum number of cycles" " after inst. issue that each src reg can be available for this" - " inst. to issue") - opClasses = Param.MinorOpClassSet(MinorOpClassSet(), + " inst. 
to issue" + ) + opClasses = Param.MinorOpClassSet( + MinorOpClassSet(), "op classes to be considered for this decode. An empty set means any" - " class") - description = Param.String('', "description string of the decoding/inst." - " class") + " class", + ) + description = Param.String( + "", "description string of the decoding/inst." " class" + ) + def minorMakeOpClassSet(op_classes): """Make a MinorOpClassSet from a list of OpClass enum value strings""" + def boxOpClass(op_class): return MinorOpClass(opClass=op_class) - return MinorOpClassSet(opClasses=[ boxOpClass(o) for o in op_classes ]) + return MinorOpClassSet(opClasses=[boxOpClass(o) for o in op_classes]) + class MinorFU(SimObject): - type = 'MinorFU' + type = "MinorFU" cxx_header = "cpu/minor/func_unit.hh" - cxx_class = 'gem5::MinorFU' + cxx_class = "gem5::MinorFU" - opClasses = Param.MinorOpClassSet(MinorOpClassSet(), "type of operations" - " allowed on this functional unit") + opClasses = Param.MinorOpClassSet( + MinorOpClassSet(), + "type of operations" " allowed on this functional unit", + ) opLat = Param.Cycles(1, "latency in cycles") - issueLat = Param.Cycles(1, "cycles until another instruction can be" - " issued") + issueLat = Param.Cycles( + 1, "cycles until another instruction can be" " issued" + ) timings = VectorParam.MinorFUTiming([], "extra decoding rules") - cantForwardFromFUIndices = VectorParam.Unsigned([], + cantForwardFromFUIndices = VectorParam.Unsigned( + [], "list of FU indices from which this FU can't receive and early" - " (forwarded) result") + " (forwarded) result", + ) + class MinorFUPool(SimObject): - type = 'MinorFUPool' + type = "MinorFUPool" cxx_header = "cpu/minor/func_unit.hh" - cxx_class = 'gem5::MinorFUPool' + cxx_class = "gem5::MinorFUPool" funcUnits = VectorParam.MinorFU("functional units") + class MinorDefaultIntFU(MinorFU): - opClasses = minorMakeOpClassSet(['IntAlu']) - timings = [MinorFUTiming(description="Int", - srcRegsRelativeLats=[2])] + opClasses = 
minorMakeOpClassSet(["IntAlu"]) + timings = [MinorFUTiming(description="Int", srcRegsRelativeLats=[2])] opLat = 3 + class MinorDefaultIntMulFU(MinorFU): - opClasses = minorMakeOpClassSet(['IntMult']) - timings = [MinorFUTiming(description='Mul', - srcRegsRelativeLats=[0])] + opClasses = minorMakeOpClassSet(["IntMult"]) + timings = [MinorFUTiming(description="Mul", srcRegsRelativeLats=[0])] opLat = 3 + class MinorDefaultIntDivFU(MinorFU): - opClasses = minorMakeOpClassSet(['IntDiv']) + opClasses = minorMakeOpClassSet(["IntDiv"]) issueLat = 9 opLat = 9 -class MinorDefaultFloatSimdFU(MinorFU): - opClasses = minorMakeOpClassSet([ - 'FloatAdd', 'FloatCmp', 'FloatCvt', 'FloatMisc', 'FloatMult', - 'FloatMultAcc', 'FloatDiv', 'FloatSqrt', - 'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt', - 'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc', - 'SimdDiv', 'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp', - 'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult', - 'SimdFloatMultAcc', 'SimdFloatSqrt', 'SimdReduceAdd', 'SimdReduceAlu', - 'SimdReduceCmp', 'SimdFloatReduceAdd', 'SimdFloatReduceCmp', - 'SimdAes', 'SimdAesMix', - 'SimdSha1Hash', 'SimdSha1Hash2', 'SimdSha256Hash', - 'SimdSha256Hash2', 'SimdShaSigma2', 'SimdShaSigma3']) - timings = [MinorFUTiming(description='FloatSimd', - srcRegsRelativeLats=[2])] +class MinorDefaultFloatSimdFU(MinorFU): + opClasses = minorMakeOpClassSet( + [ + "FloatAdd", + "FloatCmp", + "FloatCvt", + "FloatMisc", + "FloatMult", + "FloatMultAcc", + "FloatDiv", + "FloatSqrt", + "SimdAdd", + "SimdAddAcc", + "SimdAlu", + "SimdCmp", + "SimdCvt", + "SimdMisc", + "SimdMult", + "SimdMultAcc", + "SimdShift", + "SimdShiftAcc", + "SimdDiv", + "SimdSqrt", + "SimdFloatAdd", + "SimdFloatAlu", + "SimdFloatCmp", + "SimdFloatCvt", + "SimdFloatDiv", + "SimdFloatMisc", + "SimdFloatMult", + "SimdFloatMultAcc", + "SimdFloatSqrt", + "SimdReduceAdd", + "SimdReduceAlu", + "SimdReduceCmp", + "SimdFloatReduceAdd", + 
"SimdFloatReduceCmp", + "SimdAes", + "SimdAesMix", + "SimdSha1Hash", + "SimdSha1Hash2", + "SimdSha256Hash", + "SimdSha256Hash2", + "SimdShaSigma2", + "SimdShaSigma3", + ] + ) + + timings = [MinorFUTiming(description="FloatSimd", srcRegsRelativeLats=[2])] opLat = 6 + class MinorDefaultPredFU(MinorFU): - opClasses = minorMakeOpClassSet(['SimdPredAlu']) - timings = [MinorFUTiming(description="Pred", - srcRegsRelativeLats=[2])] + opClasses = minorMakeOpClassSet(["SimdPredAlu"]) + timings = [MinorFUTiming(description="Pred", srcRegsRelativeLats=[2])] opLat = 3 + class MinorDefaultMemFU(MinorFU): - opClasses = minorMakeOpClassSet(['MemRead', 'MemWrite', 'FloatMemRead', - 'FloatMemWrite']) - timings = [MinorFUTiming(description='Mem', - srcRegsRelativeLats=[1], extraAssumedLat=2)] + opClasses = minorMakeOpClassSet( + ["MemRead", "MemWrite", "FloatMemRead", "FloatMemWrite"] + ) + timings = [ + MinorFUTiming( + description="Mem", srcRegsRelativeLats=[1], extraAssumedLat=2 + ) + ] opLat = 1 + class MinorDefaultMiscFU(MinorFU): - opClasses = minorMakeOpClassSet(['IprAccess', 'InstPrefetch']) + opClasses = minorMakeOpClassSet(["IprAccess", "InstPrefetch"]) opLat = 1 -class MinorDefaultFUPool(MinorFUPool): - funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(), - MinorDefaultIntMulFU(), MinorDefaultIntDivFU(), - MinorDefaultFloatSimdFU(), MinorDefaultPredFU(), - MinorDefaultMemFU(), MinorDefaultMiscFU()] -class ThreadPolicy(Enum): vals = ['SingleThreaded', 'RoundRobin', 'Random'] +class MinorDefaultFUPool(MinorFUPool): + funcUnits = [ + MinorDefaultIntFU(), + MinorDefaultIntFU(), + MinorDefaultIntMulFU(), + MinorDefaultIntDivFU(), + MinorDefaultFloatSimdFU(), + MinorDefaultPredFU(), + MinorDefaultMemFU(), + MinorDefaultMiscFU(), + ] + + +class ThreadPolicy(Enum): + vals = ["SingleThreaded", "RoundRobin", "Random"] + class BaseMinorCPU(BaseCPU): - type = 'BaseMinorCPU' + type = "BaseMinorCPU" cxx_header = "cpu/minor/cpu.hh" - cxx_class = 'gem5::MinorCPU' + cxx_class = 
"gem5::MinorCPU" @classmethod def memory_mode(cls): - return 'timing' + return "timing" @classmethod def require_caches(cls): @@ -201,91 +279,131 @@ class BaseMinorCPU(BaseCPU): def support_take_over(cls): return True - threadPolicy = Param.ThreadPolicy('RoundRobin', - "Thread scheduling policy") - fetch1FetchLimit = Param.Unsigned(1, - "Number of line fetches allowable in flight at once") - fetch1LineSnapWidth = Param.Unsigned(0, + threadPolicy = Param.ThreadPolicy("RoundRobin", "Thread scheduling policy") + fetch1FetchLimit = Param.Unsigned( + 1, "Number of line fetches allowable in flight at once" + ) + fetch1LineSnapWidth = Param.Unsigned( + 0, "Fetch1 'line' fetch snap size in bytes" - " (0 means use system cache line size)") - fetch1LineWidth = Param.Unsigned(0, + " (0 means use system cache line size)", + ) + fetch1LineWidth = Param.Unsigned( + 0, "Fetch1 maximum fetch size in bytes (0 means use system cache" - " line size)") - fetch1ToFetch2ForwardDelay = Param.Cycles(1, - "Forward cycle delay from Fetch1 to Fetch2 (1 means next cycle)") - fetch1ToFetch2BackwardDelay = Param.Cycles(1, + " line size)", + ) + fetch1ToFetch2ForwardDelay = Param.Cycles( + 1, "Forward cycle delay from Fetch1 to Fetch2 (1 means next cycle)" + ) + fetch1ToFetch2BackwardDelay = Param.Cycles( + 1, "Backward cycle delay from Fetch2 to Fetch1 for branch prediction" - " signalling (0 means in the same cycle, 1 mean the next cycle)") + " signalling (0 means in the same cycle, 1 mean the next cycle)", + ) - fetch2InputBufferSize = Param.Unsigned(2, - "Size of input buffer to Fetch2 in cycles-worth of insts.") - fetch2ToDecodeForwardDelay = Param.Cycles(1, - "Forward cycle delay from Fetch2 to Decode (1 means next cycle)") - fetch2CycleInput = Param.Bool(True, + fetch2InputBufferSize = Param.Unsigned( + 2, "Size of input buffer to Fetch2 in cycles-worth of insts." 
+ ) + fetch2ToDecodeForwardDelay = Param.Cycles( + 1, "Forward cycle delay from Fetch2 to Decode (1 means next cycle)" + ) + fetch2CycleInput = Param.Bool( + True, "Allow Fetch2 to cross input lines to generate full output each" - " cycle") + " cycle", + ) - decodeInputBufferSize = Param.Unsigned(3, - "Size of input buffer to Decode in cycles-worth of insts.") - decodeToExecuteForwardDelay = Param.Cycles(1, - "Forward cycle delay from Decode to Execute (1 means next cycle)") - decodeInputWidth = Param.Unsigned(2, + decodeInputBufferSize = Param.Unsigned( + 3, "Size of input buffer to Decode in cycles-worth of insts." + ) + decodeToExecuteForwardDelay = Param.Cycles( + 1, "Forward cycle delay from Decode to Execute (1 means next cycle)" + ) + decodeInputWidth = Param.Unsigned( + 2, "Width (in instructions) of input to Decode (and implicitly" - " Decode's own width)") - decodeCycleInput = Param.Bool(True, + " Decode's own width)", + ) + decodeCycleInput = Param.Bool( + True, "Allow Decode to pack instructions from more than one input cycle" - " to fill its output each cycle") + " to fill its output each cycle", + ) - executeInputWidth = Param.Unsigned(2, - "Width (in instructions) of input to Execute") - executeCycleInput = Param.Bool(True, + executeInputWidth = Param.Unsigned( + 2, "Width (in instructions) of input to Execute" + ) + executeCycleInput = Param.Bool( + True, "Allow Execute to use instructions from more than one input cycle" - " each cycle") - executeIssueLimit = Param.Unsigned(2, - "Number of issuable instructions in Execute each cycle") - executeMemoryIssueLimit = Param.Unsigned(1, - "Number of issuable memory instructions in Execute each cycle") - executeCommitLimit = Param.Unsigned(2, - "Number of committable instructions in Execute each cycle") - executeMemoryCommitLimit = Param.Unsigned(1, - "Number of committable memory references in Execute each cycle") - executeInputBufferSize = Param.Unsigned(7, - "Size of input buffer to Execute in 
cycles-worth of insts.") - executeMemoryWidth = Param.Unsigned(0, + " each cycle", + ) + executeIssueLimit = Param.Unsigned( + 2, "Number of issuable instructions in Execute each cycle" + ) + executeMemoryIssueLimit = Param.Unsigned( + 1, "Number of issuable memory instructions in Execute each cycle" + ) + executeCommitLimit = Param.Unsigned( + 2, "Number of committable instructions in Execute each cycle" + ) + executeMemoryCommitLimit = Param.Unsigned( + 1, "Number of committable memory references in Execute each cycle" + ) + executeInputBufferSize = Param.Unsigned( + 7, "Size of input buffer to Execute in cycles-worth of insts." + ) + executeMemoryWidth = Param.Unsigned( + 0, "Width (and snap) in bytes of the data memory interface. (0 mean use" - " the system cacheLineSize)") - executeMaxAccessesInMemory = Param.Unsigned(2, + " the system cacheLineSize)", + ) + executeMaxAccessesInMemory = Param.Unsigned( + 2, "Maximum number of concurrent accesses allowed to the memory system" - " from the dcache port") - executeLSQMaxStoreBufferStoresPerCycle = Param.Unsigned(2, - "Maximum number of stores that the store buffer can issue per cycle") - executeLSQRequestsQueueSize = Param.Unsigned(1, - "Size of LSQ requests queue (address translation queue)") - executeLSQTransfersQueueSize = Param.Unsigned(2, - "Size of LSQ transfers queue (memory transaction queue)") - executeLSQStoreBufferSize = Param.Unsigned(5, - "Size of LSQ store buffer") - executeBranchDelay = Param.Cycles(1, + " from the dcache port", + ) + executeLSQMaxStoreBufferStoresPerCycle = Param.Unsigned( + 2, "Maximum number of stores that the store buffer can issue per cycle" + ) + executeLSQRequestsQueueSize = Param.Unsigned( + 1, "Size of LSQ requests queue (address translation queue)" + ) + executeLSQTransfersQueueSize = Param.Unsigned( + 2, "Size of LSQ transfers queue (memory transaction queue)" + ) + executeLSQStoreBufferSize = Param.Unsigned(5, "Size of LSQ store buffer") + executeBranchDelay = 
Param.Cycles( + 1, "Delay from Execute deciding to branch and Fetch1 reacting" - " (1 means next cycle)") + " (1 means next cycle)", + ) - executeFuncUnits = Param.MinorFUPool(MinorDefaultFUPool(), - "FUlines for this processor") + executeFuncUnits = Param.MinorFUPool( + MinorDefaultFUPool(), "FUlines for this processor" + ) - executeSetTraceTimeOnCommit = Param.Bool(True, - "Set inst. trace times to be commit times") - executeSetTraceTimeOnIssue = Param.Bool(False, - "Set inst. trace times to be issue times") + executeSetTraceTimeOnCommit = Param.Bool( + True, "Set inst. trace times to be commit times" + ) + executeSetTraceTimeOnIssue = Param.Bool( + False, "Set inst. trace times to be issue times" + ) - executeAllowEarlyMemoryIssue = Param.Bool(True, + executeAllowEarlyMemoryIssue = Param.Bool( + True, "Allow mem refs to be issued to the LSQ before reaching the head of" - " the in flight insts queue") + " the in flight insts queue", + ) - enableIdling = Param.Bool(True, - "Enable cycle skipping when the processor is idle\n"); + enableIdling = Param.Bool( + True, "Enable cycle skipping when the processor is idle\n" + ) - branchPred = Param.BranchPredictor(TournamentBP( - numThreads = Parent.numThreads), "Branch Predictor") + branchPred = Param.BranchPredictor( + TournamentBP(numThreads=Parent.numThreads), "Branch Predictor" + ) def addCheckerCpu(self): print("Checker not yet supported by MinorCPU") diff --git a/src/cpu/minor/MinorCPU.py b/src/cpu/minor/MinorCPU.py new file mode 100644 index 0000000000..836a73f618 --- /dev/null +++ b/src/cpu/minor/MinorCPU.py @@ -0,0 +1,44 @@ +# Copyright 2021 Google, Inc. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import m5.defines + +arch_vars = [ + "USE_ARM_ISA", + "USE_MIPS_ISA", + "USE_POWER_ISA", + "USE_RISCV_ISA", + "USE_SPARC_ISA", + "USE_X86_ISA", +] + +enabled = list(filter(lambda var: m5.defines.buildEnv[var], arch_vars)) + +if len(enabled) == 1: + arch = enabled[0] + if arch == "USE_ARM_ISA": + from m5.objects.ArmCPU import ArmMinorCPU as MinorCPU + if arch == "USE_RISCV_ISA": + from m5.objects.RiscvCPU import RiscvMinorCPU as MinorCPU diff --git a/src/cpu/minor/SConscript b/src/cpu/minor/SConscript index cd1b8e3e3d..9603b4114c 100644 --- a/src/cpu/minor/SConscript +++ b/src/cpu/minor/SConscript @@ -40,7 +40,7 @@ Import('*') -if env['CONF']['TARGET_ISA'] != 'null': +if not env['CONF']['USE_NULL_ISA']: SimObject('BaseMinorCPU.py', sim_objects=[ 'MinorOpClass', 'MinorOpClassSet', 'MinorFUTiming', 'MinorFU', 'MinorFUPool', 'BaseMinorCPU'], @@ -71,3 +71,6 @@ if env['CONF']['TARGET_ISA'] != 'null': CompoundFlag('Minor', [ 'MinorCPU', 'MinorExecute', 'MinorInterrupt', 'MinorMem', 'MinorScoreboard']) + + # For backwards compatibility + SimObject('MinorCPU.py', sim_objects=[]) diff --git a/src/cpu/minor/dyn_inst.cc b/src/cpu/minor/dyn_inst.cc index 45889f9da7..ac8f94835c 100644 --- a/src/cpu/minor/dyn_inst.cc +++ b/src/cpu/minor/dyn_inst.cc @@ -40,7 +40,6 @@ #include #include -#include "arch/isa.hh" #include "cpu/base.hh" #include "cpu/minor/trace.hh" #include "cpu/null_static_inst.hh" @@ -134,10 +133,8 @@ operator <<(std::ostream &os, const MinorDynInst &inst) /** Print a register in the form r, f, m() for integer, * float, and misc given an 'architectural register number' */ static void -printRegName(std::ostream &os, const RegId& reg, - const BaseISA::RegClasses ®_classes) +printRegName(std::ostream &os, const RegId& reg) { - const auto ®_class = reg_classes.at(reg.classValue()); switch (reg.classValue()) { case InvalidRegClass: os << 'z'; @@ -145,7 +142,7 @@ printRegName(std::ostream &os, const RegId& reg, case MiscRegClass: { RegIndex misc_reg = reg.index(); - os 
<< 'm' << misc_reg << '(' << reg_class.regName(reg) << ')'; + os << 'm' << misc_reg << '(' << reg << ')'; } break; case FloatRegClass: @@ -155,7 +152,7 @@ printRegName(std::ostream &os, const RegId& reg, os << 'v' << reg.index(); break; case VecElemClass: - os << reg_class.regName(reg); + os << reg; break; case IntRegClass: os << 'r' << reg.index(); @@ -169,8 +166,7 @@ printRegName(std::ostream &os, const RegId& reg, } void -MinorDynInst::minorTraceInst(const Named &named_object, - const BaseISA::RegClasses ®_classes) const +MinorDynInst::minorTraceInst(const Named &named_object) const { if (isFault()) { minorInst(named_object, "id=F;%s addr=0x%x fault=\"%s\"\n", @@ -188,8 +184,7 @@ MinorDynInst::minorTraceInst(const Named &named_object, unsigned int src_reg = 0; while (src_reg < num_src_regs) { - printRegName(regs_str, staticInst->srcRegIdx(src_reg), - reg_classes); + printRegName(regs_str, staticInst->srcRegIdx(src_reg)); src_reg++; if (src_reg != num_src_regs) @@ -200,8 +195,7 @@ MinorDynInst::minorTraceInst(const Named &named_object, unsigned int dest_reg = 0; while (dest_reg < num_dest_regs) { - printRegName(regs_str, staticInst->destRegIdx(dest_reg), - reg_classes); + printRegName(regs_str, staticInst->destRegIdx(dest_reg)); dest_reg++; if (dest_reg != num_dest_regs) diff --git a/src/cpu/minor/dyn_inst.hh b/src/cpu/minor/dyn_inst.hh index ec986cdc7f..d9a85f9db6 100644 --- a/src/cpu/minor/dyn_inst.hh +++ b/src/cpu/minor/dyn_inst.hh @@ -173,7 +173,7 @@ class MinorDynInst : public RefCounted InstId id; /** Trace information for this instruction's execution */ - Trace::InstRecord *traceData = nullptr; + trace::InstRecord *traceData = nullptr; /** The fetch address of this instruction */ std::unique_ptr pc; @@ -269,8 +269,7 @@ class MinorDynInst : public RefCounted /** Print (possibly verbose) instruction information for * MinorTrace using the given Named object's name */ - void minorTraceInst(const Named &named_object, - const BaseISA::RegClasses ®_classes) 
const; + void minorTraceInst(const Named &named_object) const; /** ReportIF interface */ void reportData(std::ostream &os) const; diff --git a/src/cpu/minor/execute.cc b/src/cpu/minor/execute.cc index d320e67af9..6eccec0be4 100644 --- a/src/cpu/minor/execute.cc +++ b/src/cpu/minor/execute.cc @@ -783,8 +783,7 @@ Execute::issue(ThreadID thread_id) /* Generate MinorTrace's MinorInst lines. Do this at commit * to allow better instruction annotation? */ if (debug::MinorTrace && !inst->isBubble()) { - inst->minorTraceInst(*this, - cpu.threads[0]->getIsaPtr()->regClasses()); + inst->minorTraceInst(*this); } /* Mark up barriers in the LSQ */ @@ -884,6 +883,39 @@ Execute::doInstCommitAccounting(MinorDynInstPtr inst) cpu.stats.committedInstType[inst->id.threadId] [inst->staticInst->opClass()]++; + /** Add a count for every control instruction */ + if (inst->staticInst->isControl()) { + if (inst->staticInst->isReturn()) { + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsReturn]++; + } + if (inst->staticInst->isCall()) { + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsCall]++; + } + if (inst->staticInst->isDirectCtrl()) { + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsDirectControl]++; + } + if (inst->staticInst->isIndirectCtrl()) { + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsIndirectControl]++; + } + if (inst->staticInst->isCondCtrl()) { + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsCondControl]++; + } + if (inst->staticInst->isUncondCtrl()) { + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsUncondControl]++; + + } + cpu.stats.committedControl[inst->id.threadId] + [gem5::StaticInstFlags::Flags::IsControl]++; + } + + + /* Set the CP SeqNum to the numOps commit number */ if (inst->traceData) inst->traceData->setCPSeq(thread->numOp); diff --git 
a/src/cpu/minor/fetch2.cc b/src/cpu/minor/fetch2.cc index b506bc0428..0ff0140518 100644 --- a/src/cpu/minor/fetch2.cc +++ b/src/cpu/minor/fetch2.cc @@ -486,10 +486,8 @@ Fetch2::evaluate() /* Output MinorTrace instruction info for * pre-microop decomposition macroops */ if (debug::MinorTrace && !dyn_inst->isFault() && - dyn_inst->staticInst->isMacroop()) - { - dyn_inst->minorTraceInst(*this, - cpu.threads[0]->getIsaPtr()->regClasses()); + dyn_inst->staticInst->isMacroop()) { + dyn_inst->minorTraceInst(*this); } } diff --git a/src/cpu/minor/scoreboard.cc b/src/cpu/minor/scoreboard.cc index 926d01d1be..2bb69668a7 100644 --- a/src/cpu/minor/scoreboard.cc +++ b/src/cpu/minor/scoreboard.cc @@ -89,13 +89,6 @@ Scoreboard::findIndex(const RegId& reg, Index &scoreboard_index) return ret; } -/** Flatten a RegId, irrespective of what reg type it's pointing to */ -static RegId -flattenRegIndex(const RegId& reg, ThreadContext *thread_context) -{ - return thread_context->flattenRegId(reg); -} - void Scoreboard::markupInstDests(MinorDynInstPtr inst, Cycles retire_time, ThreadContext *thread_context, bool mark_unpredictable) @@ -106,12 +99,13 @@ Scoreboard::markupInstDests(MinorDynInstPtr inst, Cycles retire_time, StaticInstPtr staticInst = inst->staticInst; unsigned int num_dests = staticInst->numDestRegs(); + auto *isa = thread_context->getIsaPtr(); + /** Mark each destination register */ for (unsigned int dest_index = 0; dest_index < num_dests; dest_index++) { - RegId reg = flattenRegIndex( - staticInst->destRegIdx(dest_index), thread_context); + RegId reg = staticInst->destRegIdx(dest_index).flatten(*isa); Index index; if (findIndex(reg, index)) { @@ -151,9 +145,10 @@ Scoreboard::execSeqNumToWaitFor(MinorDynInstPtr inst, StaticInstPtr staticInst = inst->staticInst; unsigned int num_srcs = staticInst->numSrcRegs(); + auto *isa = thread_context->getIsaPtr(); + for (unsigned int src_index = 0; src_index < num_srcs; src_index++) { - RegId reg = 
flattenRegIndex(staticInst->srcRegIdx(src_index), - thread_context); + RegId reg = staticInst->srcRegIdx(src_index).flatten(*isa); unsigned short int index; if (findIndex(reg, index)) { @@ -233,13 +228,14 @@ Scoreboard::canInstIssue(MinorDynInstPtr inst, [num_relative_latencies-1]; } + auto *isa = thread_context->getIsaPtr(); + /* For each source register, find the latest result */ unsigned int src_index = 0; while (src_index < num_srcs && /* More registers */ ret /* Still possible */) { - RegId reg = flattenRegIndex(staticInst->srcRegIdx(src_index), - thread_context); + RegId reg = staticInst->srcRegIdx(src_index).flatten(*isa); unsigned short int index; if (findIndex(reg, index)) { diff --git a/src/cpu/minor/scoreboard.hh b/src/cpu/minor/scoreboard.hh index 973be473ad..ac11533443 100644 --- a/src/cpu/minor/scoreboard.hh +++ b/src/cpu/minor/scoreboard.hh @@ -112,12 +112,12 @@ class Scoreboard : public Named Named(name), regClasses(reg_classes), intRegOffset(0), - floatRegOffset(intRegOffset + reg_classes.at(IntRegClass).numRegs()), - ccRegOffset(floatRegOffset + reg_classes.at(FloatRegClass).numRegs()), - vecRegOffset(ccRegOffset + reg_classes.at(CCRegClass).numRegs()), + floatRegOffset(intRegOffset + reg_classes.at(IntRegClass)->numRegs()), + ccRegOffset(floatRegOffset + reg_classes.at(FloatRegClass)->numRegs()), + vecRegOffset(ccRegOffset + reg_classes.at(CCRegClass)->numRegs()), vecPredRegOffset(vecRegOffset + - reg_classes.at(VecElemClass).numRegs()), - numRegs(vecPredRegOffset + reg_classes.at(VecPredRegClass).numRegs()), + reg_classes.at(VecElemClass)->numRegs()), + numRegs(vecPredRegOffset + reg_classes.at(VecPredRegClass)->numRegs()), numResults(numRegs, 0), numUnpredictableResults(numRegs, 0), fuIndices(numRegs, invalidFUIndex), diff --git a/src/cpu/minor/stats.cc b/src/cpu/minor/stats.cc index 3c68f14c82..187687d00c 100644 --- a/src/cpu/minor/stats.cc +++ b/src/cpu/minor/stats.cc @@ -65,7 +65,10 @@ MinorStats::MinorStats(BaseCPU *base_cpu) 
statistics::units::Count, statistics::units::Cycle>::get(), "IPC: instructions per cycle"), ADD_STAT(committedInstType, statistics::units::Count::get(), - "Class of committed instruction") + "Class of committed instruction"), + ADD_STAT(committedControl, statistics::units::Count::get(), + "Class of control type instructions committed") + { quiesceCycles.prereq(quiesceCycles); @@ -79,6 +82,11 @@ MinorStats::MinorStats(BaseCPU *base_cpu) .init(base_cpu->numThreads, enums::Num_OpClass) .flags(statistics::total | statistics::pdf | statistics::dist); committedInstType.ysubnames(enums::OpClassStrings); + + committedControl + .init(base_cpu->numThreads, StaticInstFlags::Flags::Num_Flags) + .flags(statistics::nozero); + committedControl.ysubnames(StaticInstFlags::FlagsStrings); } } // namespace minor diff --git a/src/cpu/minor/stats.hh b/src/cpu/minor/stats.hh index ed5f9538cd..47b9f0f30e 100644 --- a/src/cpu/minor/stats.hh +++ b/src/cpu/minor/stats.hh @@ -82,6 +82,9 @@ struct MinorStats : public statistics::Group /** Number of instructions by type (OpClass) */ statistics::Vector2d committedInstType; + /** Number of branches commited */ + statistics::Vector2d committedControl; + }; } // namespace minor diff --git a/src/cpu/nativetrace.cc b/src/cpu/nativetrace.cc index 0686bcab01..5b7d0b9895 100644 --- a/src/cpu/nativetrace.cc +++ b/src/cpu/nativetrace.cc @@ -36,7 +36,7 @@ namespace gem5 { -namespace Trace { +namespace trace { NativeTrace::NativeTrace(const Params &p) : ExeTracer(p) @@ -55,7 +55,7 @@ NativeTrace::NativeTrace(const Params &p) } void -Trace::NativeTraceRecord::dump() +NativeTraceRecord::dump() { //Don't print what happens for each micro-op, just print out //once at the last op, and for regular instructions. 
@@ -63,5 +63,5 @@ Trace::NativeTraceRecord::dump() parent->check(this); } -} // namespace Trace +} // namespace trace } // namespace gem5 diff --git a/src/cpu/nativetrace.hh b/src/cpu/nativetrace.hh index e2756bca11..a00e97a18e 100644 --- a/src/cpu/nativetrace.hh +++ b/src/cpu/nativetrace.hh @@ -44,7 +44,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { class NativeTrace; @@ -117,7 +117,7 @@ class NativeTrace : public ExeTracer check(NativeTraceRecord *record) = 0; }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __CPU_NATIVETRACE_HH__ diff --git a/src/cpu/nop_static_inst.cc b/src/cpu/nop_static_inst.cc index 4b73aa077c..929f297715 100644 --- a/src/cpu/nop_static_inst.cc +++ b/src/cpu/nop_static_inst.cc @@ -43,7 +43,7 @@ class NopStaticInst : public StaticInst NopStaticInst() : StaticInst("gem5 nop", No_OpClass) {} Fault - execute(ExecContext *xc, Trace::InstRecord *traceData) const override + execute(ExecContext *xc, trace::InstRecord *traceData) const override { return NoFault; } diff --git a/src/cpu/o3/BaseO3CPU.py b/src/cpu/o3/BaseO3CPU.py index c58f9fee38..79bd884b87 100644 --- a/src/cpu/o3/BaseO3CPU.py +++ b/src/cpu/o3/BaseO3CPU.py @@ -42,26 +42,31 @@ from m5.proxy import * from m5.objects.BaseCPU import BaseCPU from m5.objects.FUPool import * -#from m5.objects.O3Checker import O3Checker + +# from m5.objects.O3Checker import O3Checker from m5.objects.BranchPredictor import * + class SMTFetchPolicy(ScopedEnum): - vals = [ 'RoundRobin', 'Branch', 'IQCount', 'LSQCount' ] + vals = ["RoundRobin", "Branch", "IQCount", "LSQCount"] + class SMTQueuePolicy(ScopedEnum): - vals = [ 'Dynamic', 'Partitioned', 'Threshold' ] + vals = ["Dynamic", "Partitioned", "Threshold"] + class CommitPolicy(ScopedEnum): - vals = [ 'RoundRobin', 'OldestReady' ] + vals = ["RoundRobin", "OldestReady"] + class BaseO3CPU(BaseCPU): - type = 'BaseO3CPU' - cxx_class = 'gem5::o3::CPU' - cxx_header = 'cpu/o3/dyn_inst.hh' + type = "BaseO3CPU" 
+ cxx_class = "gem5::o3::CPU" + cxx_header = "cpu/o3/dyn_inst.hh" @classmethod def memory_mode(cls): - return 'timing' + return "timing" @classmethod def require_caches(cls): @@ -73,100 +78,122 @@ class BaseO3CPU(BaseCPU): activity = Param.Unsigned(0, "Initial count") - cacheStorePorts = Param.Unsigned(200, "Cache Ports. " - "Constrains stores only.") - cacheLoadPorts = Param.Unsigned(200, "Cache Ports. " - "Constrains loads only.") + cacheStorePorts = Param.Unsigned( + 200, "Cache Ports. " "Constrains stores only." + ) + cacheLoadPorts = Param.Unsigned( + 200, "Cache Ports. " "Constrains loads only." + ) decodeToFetchDelay = Param.Cycles(1, "Decode to fetch delay") - renameToFetchDelay = Param.Cycles(1 ,"Rename to fetch delay") - iewToFetchDelay = Param.Cycles(1, "Issue/Execute/Writeback to fetch " - "delay") + renameToFetchDelay = Param.Cycles(1, "Rename to fetch delay") + iewToFetchDelay = Param.Cycles( + 1, "Issue/Execute/Writeback to fetch " "delay" + ) commitToFetchDelay = Param.Cycles(1, "Commit to fetch delay") fetchWidth = Param.Unsigned(8, "Fetch width") fetchBufferSize = Param.Unsigned(64, "Fetch buffer size in bytes") - fetchQueueSize = Param.Unsigned(32, "Fetch queue size in micro-ops " - "per-thread") + fetchQueueSize = Param.Unsigned( + 32, "Fetch queue size in micro-ops " "per-thread" + ) renameToDecodeDelay = Param.Cycles(1, "Rename to decode delay") - iewToDecodeDelay = Param.Cycles(1, "Issue/Execute/Writeback to decode " - "delay") + iewToDecodeDelay = Param.Cycles( + 1, "Issue/Execute/Writeback to decode " "delay" + ) commitToDecodeDelay = Param.Cycles(1, "Commit to decode delay") fetchToDecodeDelay = Param.Cycles(1, "Fetch to decode delay") decodeWidth = Param.Unsigned(8, "Decode width") - iewToRenameDelay = Param.Cycles(1, "Issue/Execute/Writeback to rename " - "delay") + iewToRenameDelay = Param.Cycles( + 1, "Issue/Execute/Writeback to rename " "delay" + ) commitToRenameDelay = Param.Cycles(1, "Commit to rename delay") decodeToRenameDelay = 
Param.Cycles(1, "Decode to rename delay") renameWidth = Param.Unsigned(8, "Rename width") - commitToIEWDelay = Param.Cycles(1, "Commit to " - "Issue/Execute/Writeback delay") - renameToIEWDelay = Param.Cycles(2, "Rename to " - "Issue/Execute/Writeback delay") - issueToExecuteDelay = Param.Cycles(1, "Issue to execute delay (internal " - "to the IEW stage)") + commitToIEWDelay = Param.Cycles( + 1, "Commit to " "Issue/Execute/Writeback delay" + ) + renameToIEWDelay = Param.Cycles( + 2, "Rename to " "Issue/Execute/Writeback delay" + ) + issueToExecuteDelay = Param.Cycles( + 1, "Issue to execute delay (internal " "to the IEW stage)" + ) dispatchWidth = Param.Unsigned(8, "Dispatch width") issueWidth = Param.Unsigned(8, "Issue width") wbWidth = Param.Unsigned(8, "Writeback width") fuPool = Param.FUPool(DefaultFUPool(), "Functional Unit pool") - iewToCommitDelay = Param.Cycles(1, "Issue/Execute/Writeback to commit " - "delay") + iewToCommitDelay = Param.Cycles( + 1, "Issue/Execute/Writeback to commit " "delay" + ) renameToROBDelay = Param.Cycles(1, "Rename to reorder buffer delay") commitWidth = Param.Unsigned(8, "Commit width") squashWidth = Param.Unsigned(8, "Squash width") trapLatency = Param.Cycles(13, "Trap latency") fetchTrapLatency = Param.Cycles(1, "Fetch trap latency") - backComSize = Param.Unsigned(5, - "Time buffer size for backwards communication") - forwardComSize = Param.Unsigned(5, - "Time buffer size for forward communication") + backComSize = Param.Unsigned( + 5, "Time buffer size for backwards communication" + ) + forwardComSize = Param.Unsigned( + 5, "Time buffer size for forward communication" + ) LQEntries = Param.Unsigned(32, "Number of load queue entries") SQEntries = Param.Unsigned(32, "Number of store queue entries") - LSQDepCheckShift = Param.Unsigned(4, - "Number of places to shift addr before check") - LSQCheckLoads = Param.Bool(True, + LSQDepCheckShift = Param.Unsigned( + 4, "Number of places to shift addr before check" + ) + LSQCheckLoads = 
Param.Bool( + True, "Should dependency violations be checked for " - "loads & stores or just stores") - store_set_clear_period = Param.Unsigned(250000, - "Number of load/store insts before the dep predictor " - "should be invalidated") + "loads & stores or just stores", + ) + store_set_clear_period = Param.Unsigned( + 250000, + "Number of load/store insts before the dep predictor " + "should be invalidated", + ) LFSTSize = Param.Unsigned(1024, "Last fetched store table size") SSITSize = Param.Unsigned(1024, "Store set ID table size") - numRobs = Param.Unsigned(1, "Number of Reorder Buffers"); + numRobs = Param.Unsigned(1, "Number of Reorder Buffers") - numPhysIntRegs = Param.Unsigned(256, - "Number of physical integer registers") - numPhysFloatRegs = Param.Unsigned(256, "Number of physical floating point " - "registers") - numPhysVecRegs = Param.Unsigned(256, "Number of physical vector " - "registers") - numPhysVecPredRegs = Param.Unsigned(32, "Number of physical predicate " - "registers") + numPhysIntRegs = Param.Unsigned( + 256, "Number of physical integer registers" + ) + numPhysFloatRegs = Param.Unsigned( + 256, "Number of physical floating point " "registers" + ) + numPhysVecRegs = Param.Unsigned( + 256, "Number of physical vector " "registers" + ) + numPhysVecPredRegs = Param.Unsigned( + 32, "Number of physical predicate " "registers" + ) # most ISAs don't use condition-code regs, so default is 0 numPhysCCRegs = Param.Unsigned(0, "Number of physical cc registers") numIQEntries = Param.Unsigned(64, "Number of instruction queue entries") numROBEntries = Param.Unsigned(192, "Number of reorder buffer entries") smtNumFetchingThreads = Param.Unsigned(1, "SMT Number of Fetching Threads") - smtFetchPolicy = Param.SMTFetchPolicy('RoundRobin', "SMT Fetch policy") - smtLSQPolicy = Param.SMTQueuePolicy('Partitioned', - "SMT LSQ Sharing Policy") + smtFetchPolicy = Param.SMTFetchPolicy("RoundRobin", "SMT Fetch policy") + smtLSQPolicy = Param.SMTQueuePolicy( + 
"Partitioned", "SMT LSQ Sharing Policy" + ) smtLSQThreshold = Param.Int(100, "SMT LSQ Threshold Sharing Parameter") - smtIQPolicy = Param.SMTQueuePolicy('Partitioned', - "SMT IQ Sharing Policy") + smtIQPolicy = Param.SMTQueuePolicy("Partitioned", "SMT IQ Sharing Policy") smtIQThreshold = Param.Int(100, "SMT IQ Threshold Sharing Parameter") - smtROBPolicy = Param.SMTQueuePolicy('Partitioned', - "SMT ROB Sharing Policy") + smtROBPolicy = Param.SMTQueuePolicy( + "Partitioned", "SMT ROB Sharing Policy" + ) smtROBThreshold = Param.Int(100, "SMT ROB Threshold Sharing Parameter") - smtCommitPolicy = Param.CommitPolicy('RoundRobin', "SMT Commit Policy") + smtCommitPolicy = Param.CommitPolicy("RoundRobin", "SMT Commit Policy") - branchPred = Param.BranchPredictor(TournamentBP(numThreads = - Parent.numThreads), - "Branch Predictor") + branchPred = Param.BranchPredictor( + TournamentBP(numThreads=Parent.numThreads), "Branch Predictor" + ) needsTSO = Param.Bool(False, "Enable TSO Memory model") diff --git a/src/cpu/o3/BaseO3Checker.py b/src/cpu/o3/BaseO3Checker.py index 6365491950..7b480f8057 100644 --- a/src/cpu/o3/BaseO3Checker.py +++ b/src/cpu/o3/BaseO3Checker.py @@ -27,7 +27,8 @@ from m5.params import * from m5.objects.CheckerCPU import CheckerCPU + class BaseO3Checker(CheckerCPU): - type = 'BaseO3Checker' - cxx_class = 'gem5::o3::Checker' - cxx_header = 'cpu/o3/checker.hh' + type = "BaseO3Checker" + cxx_class = "gem5::o3::Checker" + cxx_header = "cpu/o3/checker.hh" diff --git a/src/cpu/o3/FUPool.py b/src/cpu/o3/FUPool.py index e9d606ef11..4e18094ef4 100644 --- a/src/cpu/o3/FUPool.py +++ b/src/cpu/o3/FUPool.py @@ -41,12 +41,24 @@ from m5.params import * from m5.objects.FuncUnit import * from m5.objects.FuncUnitConfig import * + class FUPool(SimObject): - type = 'FUPool' - cxx_class = 'gem5::o3::FUPool' + type = "FUPool" + cxx_class = "gem5::o3::FUPool" cxx_header = "cpu/o3/fu_pool.hh" FUList = VectorParam.FUDesc("list of FU's for this pool") + class DefaultFUPool(FUPool): 
- FUList = [ IntALU(), IntMultDiv(), FP_ALU(), FP_MultDiv(), ReadPort(), - SIMD_Unit(), PredALU(), WritePort(), RdWrPort(), IprPort() ] + FUList = [ + IntALU(), + IntMultDiv(), + FP_ALU(), + FP_MultDiv(), + ReadPort(), + SIMD_Unit(), + PredALU(), + WritePort(), + RdWrPort(), + IprPort(), + ] diff --git a/src/cpu/o3/FuncUnitConfig.py b/src/cpu/o3/FuncUnitConfig.py index ccbefc58cd..3d626c2275 100644 --- a/src/cpu/o3/FuncUnitConfig.py +++ b/src/cpu/o3/FuncUnitConfig.py @@ -42,86 +42,98 @@ from m5.params import * from m5.objects.FuncUnit import * + class IntALU(FUDesc): - opList = [ OpDesc(opClass='IntAlu') ] + opList = [OpDesc(opClass="IntAlu")] count = 6 + class IntMultDiv(FUDesc): - opList = [ OpDesc(opClass='IntMult', opLat=3), - OpDesc(opClass='IntDiv', opLat=20, pipelined=False) ] + opList = [ + OpDesc(opClass="IntMult", opLat=3), + OpDesc(opClass="IntDiv", opLat=20, pipelined=False), + ] - # DIV and IDIV instructions in x86 are implemented using a loop which - # issues division microops. The latency of these microops should really be - # one (or a small number) cycle each since each of these computes one bit - # of the quotient. 
- if buildEnv['TARGET_ISA'] in ('x86'): - opList[1].opLat=1 - - count=2 - -class FP_ALU(FUDesc): - opList = [ OpDesc(opClass='FloatAdd', opLat=2), - OpDesc(opClass='FloatCmp', opLat=2), - OpDesc(opClass='FloatCvt', opLat=2) ] - count = 4 - -class FP_MultDiv(FUDesc): - opList = [ OpDesc(opClass='FloatMult', opLat=4), - OpDesc(opClass='FloatMultAcc', opLat=5), - OpDesc(opClass='FloatMisc', opLat=3), - OpDesc(opClass='FloatDiv', opLat=12, pipelined=False), - OpDesc(opClass='FloatSqrt', opLat=24, pipelined=False) ] count = 2 -class SIMD_Unit(FUDesc): - opList = [ OpDesc(opClass='SimdAdd'), - OpDesc(opClass='SimdAddAcc'), - OpDesc(opClass='SimdAlu'), - OpDesc(opClass='SimdCmp'), - OpDesc(opClass='SimdCvt'), - OpDesc(opClass='SimdMisc'), - OpDesc(opClass='SimdMult'), - OpDesc(opClass='SimdMultAcc'), - OpDesc(opClass='SimdShift'), - OpDesc(opClass='SimdShiftAcc'), - OpDesc(opClass='SimdDiv'), - OpDesc(opClass='SimdSqrt'), - OpDesc(opClass='SimdFloatAdd'), - OpDesc(opClass='SimdFloatAlu'), - OpDesc(opClass='SimdFloatCmp'), - OpDesc(opClass='SimdFloatCvt'), - OpDesc(opClass='SimdFloatDiv'), - OpDesc(opClass='SimdFloatMisc'), - OpDesc(opClass='SimdFloatMult'), - OpDesc(opClass='SimdFloatMultAcc'), - OpDesc(opClass='SimdFloatSqrt'), - OpDesc(opClass='SimdReduceAdd'), - OpDesc(opClass='SimdReduceAlu'), - OpDesc(opClass='SimdReduceCmp'), - OpDesc(opClass='SimdFloatReduceAdd'), - OpDesc(opClass='SimdFloatReduceCmp') ] + +class FP_ALU(FUDesc): + opList = [ + OpDesc(opClass="FloatAdd", opLat=2), + OpDesc(opClass="FloatCmp", opLat=2), + OpDesc(opClass="FloatCvt", opLat=2), + ] count = 4 + +class FP_MultDiv(FUDesc): + opList = [ + OpDesc(opClass="FloatMult", opLat=4), + OpDesc(opClass="FloatMultAcc", opLat=5), + OpDesc(opClass="FloatMisc", opLat=3), + OpDesc(opClass="FloatDiv", opLat=12, pipelined=False), + OpDesc(opClass="FloatSqrt", opLat=24, pipelined=False), + ] + count = 2 + + +class SIMD_Unit(FUDesc): + opList = [ + OpDesc(opClass="SimdAdd"), + OpDesc(opClass="SimdAddAcc"), + 
OpDesc(opClass="SimdAlu"), + OpDesc(opClass="SimdCmp"), + OpDesc(opClass="SimdCvt"), + OpDesc(opClass="SimdMisc"), + OpDesc(opClass="SimdMult"), + OpDesc(opClass="SimdMultAcc"), + OpDesc(opClass="SimdShift"), + OpDesc(opClass="SimdShiftAcc"), + OpDesc(opClass="SimdDiv"), + OpDesc(opClass="SimdSqrt"), + OpDesc(opClass="SimdFloatAdd"), + OpDesc(opClass="SimdFloatAlu"), + OpDesc(opClass="SimdFloatCmp"), + OpDesc(opClass="SimdFloatCvt"), + OpDesc(opClass="SimdFloatDiv"), + OpDesc(opClass="SimdFloatMisc"), + OpDesc(opClass="SimdFloatMult"), + OpDesc(opClass="SimdFloatMultAcc"), + OpDesc(opClass="SimdFloatSqrt"), + OpDesc(opClass="SimdReduceAdd"), + OpDesc(opClass="SimdReduceAlu"), + OpDesc(opClass="SimdReduceCmp"), + OpDesc(opClass="SimdFloatReduceAdd"), + OpDesc(opClass="SimdFloatReduceCmp"), + ] + count = 4 + + class PredALU(FUDesc): - opList = [ OpDesc(opClass='SimdPredAlu') ] + opList = [OpDesc(opClass="SimdPredAlu")] count = 1 + class ReadPort(FUDesc): - opList = [ OpDesc(opClass='MemRead'), - OpDesc(opClass='FloatMemRead') ] + opList = [OpDesc(opClass="MemRead"), OpDesc(opClass="FloatMemRead")] count = 0 + class WritePort(FUDesc): - opList = [ OpDesc(opClass='MemWrite'), - OpDesc(opClass='FloatMemWrite') ] + opList = [OpDesc(opClass="MemWrite"), OpDesc(opClass="FloatMemWrite")] count = 0 + class RdWrPort(FUDesc): - opList = [ OpDesc(opClass='MemRead'), OpDesc(opClass='MemWrite'), - OpDesc(opClass='FloatMemRead'), OpDesc(opClass='FloatMemWrite')] + opList = [ + OpDesc(opClass="MemRead"), + OpDesc(opClass="MemWrite"), + OpDesc(opClass="FloatMemRead"), + OpDesc(opClass="FloatMemWrite"), + ] count = 4 -class IprPort(FUDesc): - opList = [ OpDesc(opClass='IprAccess', opLat = 3, pipelined = False) ] - count = 1 +class IprPort(FUDesc): + opList = [OpDesc(opClass="IprAccess", opLat=3, pipelined=False)] + count = 1 diff --git a/src/cpu/o3/O3CPU.py b/src/cpu/o3/O3CPU.py new file mode 100644 index 0000000000..ee660d62ed --- /dev/null +++ b/src/cpu/o3/O3CPU.py @@ -0,0 +1,54 @@ 
+# Copyright 2021 Google, Inc. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import m5.defines + +arch_vars = [ + "USE_ARM_ISA", + "USE_MIPS_ISA", + "USE_POWER_ISA", + "USE_RISCV_ISA", + "USE_SPARC_ISA", + "USE_X86_ISA", +] + +enabled = list(filter(lambda var: m5.defines.buildEnv[var], arch_vars)) + +if len(enabled) == 1: + arch = enabled[0] + if arch == "USE_ARM_ISA": + from m5.objects.ArmCPU import ArmO3CPU as O3CPU + elif arch == "USE_MIPS_ISA": + from m5.objects.MipsCPU import MipsO3CPU as O3CPU + elif arch == "USE_POWER_ISA": + from m5.objects.PowerCPU import PowerO3CPU as O3CPU + elif arch == "USE_RISCV_ISA": + from m5.objects.RiscvCPU import RiscvO3CPU as O3CPU + elif arch == "USE_SPARC_ISA": + from m5.objects.SparcCPU import SparcO3CPU as O3CPU + elif arch == "USE_X86_ISA": + from m5.objects.X86CPU import X86O3CPU as O3CPU + + DerivO3CPU = O3CPU diff --git a/src/arch/arm/O3Checker.py b/src/cpu/o3/O3Checker.py similarity index 80% rename from src/arch/arm/O3Checker.py rename to src/cpu/o3/O3Checker.py index 0ac7ab42a7..83febfaa32 100644 --- a/src/arch/arm/O3Checker.py +++ b/src/cpu/o3/O3Checker.py @@ -23,6 +23,20 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from m5.objects.ArmCPU import ArmO3Checker +import m5.defines -O3Checker = ArmO3Checker +arch_vars = [ + "USE_ARM_ISA", + "USE_MIPS_ISA", + "USE_POWER_ISA", + "USE_RISCV_ISA", + "USE_SPARC_ISA", + "USE_X86_ISA", +] + +enabled = list(filter(lambda var: m5.defines.buildEnv[var], arch_vars)) + +if len(enabled) == 1: + arch = enabled[0] + if arch == "USE_ARM_ISA": + from m5.objects.ArmCPU import ArmO3Checker as O3Checker diff --git a/src/cpu/o3/SConscript b/src/cpu/o3/SConscript index e255d895b9..2ac703b041 100755 --- a/src/cpu/o3/SConscript +++ b/src/cpu/o3/SConscript @@ -30,7 +30,7 @@ import sys Import('*') -if env['CONF']['TARGET_ISA'] != 'null': +if not env['CONF']['USE_NULL_ISA']: SimObject('FUPool.py', sim_objects=['FUPool']) SimObject('FuncUnitConfig.py', sim_objects=[]) SimObject('BaseO3CPU.py', sim_objects=['BaseO3CPU'], enums=[ @@ -76,3 +76,7 @@ if env['CONF']['TARGET_ISA'] != 'null': SimObject('BaseO3Checker.py', sim_objects=['BaseO3Checker']) Source('checker.cc') + + # For backwards compatibility + SimObject('O3CPU.py', sim_objects=[]) + SimObject('O3Checker.py', sim_objects=[]) diff --git a/src/cpu/o3/comm.hh b/src/cpu/o3/comm.hh index f2c6e60615..c112e98f49 100644 --- a/src/cpu/o3/comm.hh +++ b/src/cpu/o3/comm.hh @@ -46,7 +46,6 @@ #include "arch/generic/pcstate.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/inst_seq.hh" #include "cpu/o3/dyn_inst_ptr.hh" #include "cpu/o3/limits.hh" diff --git a/src/cpu/o3/commit.cc b/src/cpu/o3/commit.cc index 97def7e59a..38dce831b1 100644 --- a/src/cpu/o3/commit.cc +++ b/src/cpu/o3/commit.cc @@ -48,7 +48,6 @@ #include "base/compiler.hh" #include "base/loader/symtab.hh" #include "base/logging.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/checker/cpu.hh" #include "cpu/exetrace.hh" diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc index 301733c1b0..48ccd94b54 100644 --- a/src/cpu/o3/cpu.cc +++ b/src/cpu/o3/cpu.cc @@ -42,7 +42,6 @@ #include "cpu/o3/cpu.hh" -#include 
"config/the_isa.hh" #include "cpu/activity.hh" #include "cpu/checker/cpu.hh" #include "cpu/checker/thread_context.hh" @@ -194,19 +193,19 @@ CPU::CPU(const BaseO3CPUParams ¶ms) const auto ®Classes = params.isa[0]->regClasses(); assert(params.numPhysIntRegs >= - numThreads * regClasses.at(IntRegClass).numRegs()); + numThreads * regClasses.at(IntRegClass)->numRegs()); assert(params.numPhysFloatRegs >= - numThreads * regClasses.at(FloatRegClass).numRegs()); + numThreads * regClasses.at(FloatRegClass)->numRegs()); assert(params.numPhysVecRegs >= - numThreads * regClasses.at(VecRegClass).numRegs()); + numThreads * regClasses.at(VecRegClass)->numRegs()); assert(params.numPhysVecPredRegs >= - numThreads * regClasses.at(VecPredRegClass).numRegs()); + numThreads * regClasses.at(VecPredRegClass)->numRegs()); assert(params.numPhysCCRegs >= - numThreads * regClasses.at(CCRegClass).numRegs()); + numThreads * regClasses.at(CCRegClass)->numRegs()); // Just make this a warning and go ahead anyway, to keep from having to // add checks everywhere. - warn_if(regClasses.at(CCRegClass).numRegs() == 0 && + warn_if(regClasses.at(CCRegClass)->numRegs() == 0 && params.numPhysCCRegs != 0, "Non-zero number of physical CC regs specified, even though\n" " ISA does not use them."); @@ -216,7 +215,7 @@ CPU::CPU(const BaseO3CPUParams ¶ms) // Setup the rename map for whichever stages need it. 
for (ThreadID tid = 0; tid < numThreads; tid++) { - isa[tid] = dynamic_cast(params.isa[tid]); + isa[tid] = params.isa[tid]; commitRenameMap[tid].init(regClasses, ®File, &freeList); renameMap[tid].init(regClasses, ®File, &freeList); } @@ -226,14 +225,12 @@ CPU::CPU(const BaseO3CPUParams ¶ms) for (ThreadID tid = 0; tid < active_threads; tid++) { for (auto type = (RegClassType)0; type <= CCRegClass; type = (RegClassType)(type + 1)) { - for (RegIndex ridx = 0; ridx < regClasses.at(type).numRegs(); - ++ridx) { + for (auto &id: *regClasses.at(type)) { // Note that we can't use the rename() method because we don't // want special treatment for the zero register at this point - RegId rid = RegId(type, ridx); PhysRegIdPtr phys_reg = freeList.getReg(type); - renameMap[tid].setEntry(rid, phys_reg); - commitRenameMap[tid].setEntry(rid, phys_reg); + renameMap[tid].setEntry(id, phys_reg); + commitRenameMap[tid].setEntry(id, phys_reg); } } } @@ -694,9 +691,9 @@ CPU::insertThread(ThreadID tid) for (auto type = (RegClassType)0; type <= CCRegClass; type = (RegClassType)(type + 1)) { - for (RegIndex idx = 0; idx < regClasses.at(type).numRegs(); idx++) { + for (auto &id: *regClasses.at(type)) { PhysRegIdPtr phys_reg = freeList.getReg(type); - renameMap[tid].setEntry(RegId(type, idx), phys_reg); + renameMap[tid].setEntry(id, phys_reg); scoreboard.setReg(phys_reg); } } @@ -1159,35 +1156,40 @@ CPU::setReg(PhysRegIdPtr phys_reg, const void *val) RegVal CPU::getArchReg(const RegId ®, ThreadID tid) { - PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(reg); + const RegId flat = reg.flatten(*isa[tid]); + PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(flat); return regFile.getReg(phys_reg); } void CPU::getArchReg(const RegId ®, void *val, ThreadID tid) { - PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(reg); + const RegId flat = reg.flatten(*isa[tid]); + PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(flat); regFile.getReg(phys_reg, val); } void * CPU::getWritableArchReg(const 
RegId ®, ThreadID tid) { - PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(reg); + const RegId flat = reg.flatten(*isa[tid]); + PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(flat); return regFile.getWritableReg(phys_reg); } void CPU::setArchReg(const RegId ®, RegVal val, ThreadID tid) { - PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(reg); + const RegId flat = reg.flatten(*isa[tid]); + PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(flat); regFile.setReg(phys_reg, val); } void CPU::setArchReg(const RegId ®, const void *val, ThreadID tid) { - PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(reg); + const RegId flat = reg.flatten(*isa[tid]); + PhysRegIdPtr phys_reg = commitRenameMap[tid].lookup(flat); regFile.setReg(phys_reg, val); } diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh index db3474da58..08a1312e73 100644 --- a/src/cpu/o3/cpu.hh +++ b/src/cpu/o3/cpu.hh @@ -51,7 +51,6 @@ #include "arch/generic/pcstate.hh" #include "base/statistics.hh" -#include "config/the_isa.hh" #include "cpu/o3/comm.hh" #include "cpu/o3/commit.hh" #include "cpu/o3/decode.hh" @@ -442,7 +441,7 @@ class CPU : public BaseCPU /** Integer Register Scoreboard */ Scoreboard scoreboard; - std::vector isa; + std::vector isa; public: /** Enum to give each stage a specific index, so when calling diff --git a/src/cpu/o3/decode.cc b/src/cpu/o3/decode.cc index 40c929949a..9555e32c23 100644 --- a/src/cpu/o3/decode.cc +++ b/src/cpu/o3/decode.cc @@ -42,7 +42,6 @@ #include "arch/generic/pcstate.hh" #include "base/trace.hh" -#include "config/the_isa.hh" #include "cpu/inst_seq.hh" #include "cpu/o3/dyn_inst.hh" #include "cpu/o3/limits.hh" @@ -301,7 +300,7 @@ Decode::squash(const DynInstPtr &inst, ThreadID tid) // Using PCState::branching() will send execution on the // fallthrough and this will not be caught at execution (since // branch was correctly predicted taken) - toFetch->decodeInfo[tid].branchTaken = inst->readPredTaken() | + toFetch->decodeInfo[tid].branchTaken = 
inst->readPredTaken() || inst->isUncondCtrl(); toFetch->decodeInfo[tid].squashInst = inst; diff --git a/src/cpu/o3/dyn_inst.hh b/src/cpu/o3/dyn_inst.hh index c49581b40f..ab165bbcd5 100644 --- a/src/cpu/o3/dyn_inst.hh +++ b/src/cpu/o3/dyn_inst.hh @@ -50,7 +50,6 @@ #include "base/refcnt.hh" #include "base/trace.hh" -#include "config/the_isa.hh" #include "cpu/checker/cpu.hh" #include "cpu/exec_context.hh" #include "cpu/exetrace.hh" @@ -138,7 +137,7 @@ class DynInst : public ExecContext, public RefCounted Fault fault = NoFault; /** InstRecord that tracks this instructions. */ - Trace::InstRecord *traceData = nullptr; + trace::InstRecord *traceData = nullptr; protected: enum Status @@ -713,10 +712,10 @@ class DynInst : public ExecContext, public RefCounted /** @{ */ template void - setResult(T &&t) + setResult(const RegClass ®_class, T &&t) { if (instFlags[RecordResult]) { - instResult.emplace(std::forward(t)); + instResult.emplace(reg_class, std::forward(t)); } } /** @} */ @@ -1078,38 +1077,19 @@ class DynInst : public ExecContext, public RefCounted for (int idx = 0; idx < numDestRegs(); idx++) { PhysRegIdPtr prev_phys_reg = prevDestIdx(idx); const RegId& original_dest_reg = staticInst->destRegIdx(idx); - switch (original_dest_reg.classValue()) { - case IntRegClass: - case FloatRegClass: - case CCRegClass: + const auto bytes = original_dest_reg.regClass().regBytes(); + + // Registers which aren't renamed don't need to be forwarded. 
+ if (!original_dest_reg.isRenameable()) + continue; + + if (bytes == sizeof(RegVal)) { setRegOperand(staticInst.get(), idx, cpu->getReg(prev_phys_reg)); - break; - case VecRegClass: - { - TheISA::VecRegContainer val; - cpu->getReg(prev_phys_reg, &val); - setRegOperand(staticInst.get(), idx, &val); - } - break; - case VecElemClass: - setRegOperand(staticInst.get(), idx, - cpu->getReg(prev_phys_reg)); - break; - case VecPredRegClass: - { - TheISA::VecPredRegContainer val; - cpu->getReg(prev_phys_reg, &val); - setRegOperand(staticInst.get(), idx, &val); - } - break; - case InvalidRegClass: - case MiscRegClass: - // no need to forward misc reg values - break; - default: - panic("Unknown register class: %d", - (int)original_dest_reg.classValue()); + } else { + uint8_t val[original_dest_reg.regClass().regBytes()]; + cpu->getReg(prev_phys_reg, val); + setRegOperand(staticInst.get(), idx, val); } } } @@ -1163,7 +1143,7 @@ class DynInst : public ExecContext, public RefCounted if (reg->is(InvalidRegClass)) return; cpu->setReg(reg, val); - setResult(val); + setResult(reg->regClass(), val); } void @@ -1173,7 +1153,7 @@ class DynInst : public ExecContext, public RefCounted if (reg->is(InvalidRegClass)) return; cpu->setReg(reg, val); - //TODO setResult + setResult(reg->regClass(), val); } }; diff --git a/src/cpu/o3/fetch.cc b/src/cpu/o3/fetch.cc index 5358b3313e..49416bf754 100644 --- a/src/cpu/o3/fetch.cc +++ b/src/cpu/o3/fetch.cc @@ -50,7 +50,6 @@ #include "arch/generic/tlb.hh" #include "base/random.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/exetrace.hh" #include "cpu/nop_static_inst.hh" diff --git a/src/cpu/o3/fetch.hh b/src/cpu/o3/fetch.hh index 1ca812b8d5..cd311913f5 100644 --- a/src/cpu/o3/fetch.hh +++ b/src/cpu/o3/fetch.hh @@ -44,7 +44,6 @@ #include "arch/generic/decoder.hh" #include "arch/generic/mmu.hh" #include "base/statistics.hh" -#include "config/the_isa.hh" #include "cpu/o3/comm.hh" #include 
"cpu/o3/dyn_inst_ptr.hh" #include "cpu/o3/limits.hh" diff --git a/src/cpu/o3/iew.cc b/src/cpu/o3/iew.cc index 5c507f01a4..7cf6c54542 100644 --- a/src/cpu/o3/iew.cc +++ b/src/cpu/o3/iew.cc @@ -47,7 +47,6 @@ #include -#include "config/the_isa.hh" #include "cpu/checker/cpu.hh" #include "cpu/o3/dyn_inst.hh" #include "cpu/o3/fu_pool.hh" diff --git a/src/cpu/o3/inst_queue.cc b/src/cpu/o3/inst_queue.cc index 96669261fc..72cb7356ef 100644 --- a/src/cpu/o3/inst_queue.cc +++ b/src/cpu/o3/inst_queue.cc @@ -105,8 +105,8 @@ InstructionQueue::InstructionQueue(CPU *cpu_ptr, IEW *iew_ptr, numPhysRegs = params.numPhysIntRegs + params.numPhysFloatRegs + params.numPhysVecRegs + params.numPhysVecRegs * ( - reg_classes.at(VecElemClass).numRegs() / - reg_classes.at(VecRegClass).numRegs()) + + reg_classes.at(VecElemClass)->numRegs() / + reg_classes.at(VecRegClass)->numRegs()) + params.numPhysVecPredRegs + params.numPhysCCRegs; diff --git a/src/cpu/o3/lsq_unit.cc b/src/cpu/o3/lsq_unit.cc index 52cf8cb81a..139e0de337 100644 --- a/src/cpu/o3/lsq_unit.cc +++ b/src/cpu/o3/lsq_unit.cc @@ -43,7 +43,6 @@ #include "arch/generic/debugfaults.hh" #include "base/str.hh" -#include "config/the_isa.hh" #include "cpu/checker/cpu.hh" #include "cpu/o3/dyn_inst.hh" #include "cpu/o3/limits.hh" diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh index c0899baa6e..b807179f2d 100644 --- a/src/cpu/o3/lsq_unit.hh +++ b/src/cpu/o3/lsq_unit.hh @@ -51,7 +51,6 @@ #include "arch/generic/debugfaults.hh" #include "arch/generic/vec_reg.hh" #include "base/circular_queue.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/inst_seq.hh" #include "cpu/o3/comm.hh" diff --git a/src/cpu/o3/probe/ElasticTrace.py b/src/cpu/o3/probe/ElasticTrace.py index 5386292b8a..ca4fa4ec46 100644 --- a/src/cpu/o3/probe/ElasticTrace.py +++ b/src/cpu/o3/probe/ElasticTrace.py @@ -35,27 +35,36 @@ from m5.objects.Probe import * + class ElasticTrace(ProbeListenerObject): - type = 'ElasticTrace' - cxx_class = 
'gem5::o3::ElasticTrace' - cxx_header = 'cpu/o3/probe/elastic_trace.hh' + type = "ElasticTrace" + cxx_class = "gem5::o3::ElasticTrace" + cxx_header = "cpu/o3/probe/elastic_trace.hh" # Trace files for the following params are created in the output directory. # User is forced to provide these when an instance of this class is created. - instFetchTraceFile = Param.String(desc="Protobuf trace file name for " \ - "instruction fetch tracing") - dataDepTraceFile = Param.String(desc="Protobuf trace file name for " \ - "data dependency tracing") + instFetchTraceFile = Param.String( + desc="Protobuf trace file name for " "instruction fetch tracing" + ) + dataDepTraceFile = Param.String( + desc="Protobuf trace file name for " "data dependency tracing" + ) # The dependency window size param must be equal to or greater than the # number of entries in the O3CPU ROB, a typical value is 3 times ROB size - depWindowSize = Param.Unsigned(desc="Instruction window size used for " \ - "recording and processing data " \ - "dependencies") + depWindowSize = Param.Unsigned( + desc="Instruction window size used for " + "recording and processing data " + "dependencies" + ) # The committed instruction count from which to start tracing - startTraceInst = Param.UInt64(0, "The number of committed instructions " \ - "after which to start tracing. Default " \ - "zero means start tracing from first " \ - "committed instruction.") + startTraceInst = Param.UInt64( + 0, + "The number of committed instructions " + "after which to start tracing. Default " + "zero means start tracing from first " + "committed instruction.", + ) # Whether to trace virtual addresses for memory accesses - traceVirtAddr = Param.Bool(False, "Set to true if virtual addresses are " \ - "to be traced.") + traceVirtAddr = Param.Bool( + False, "Set to true if virtual addresses are " "to be traced." 
+ ) diff --git a/src/cpu/o3/probe/SConscript b/src/cpu/o3/probe/SConscript index b2bbb3e222..6039ef2eb9 100644 --- a/src/cpu/o3/probe/SConscript +++ b/src/cpu/o3/probe/SConscript @@ -37,7 +37,7 @@ Import('*') -if env['CONF']['TARGET_ISA'] != 'null': +if not env['CONF']['USE_NULL_ISA']: SimObject('SimpleTrace.py', sim_objects=['SimpleTrace']) Source('simple_trace.cc') DebugFlag('SimpleTrace') diff --git a/src/cpu/o3/probe/SimpleTrace.py b/src/cpu/o3/probe/SimpleTrace.py index 9d36beccc3..a073057f94 100644 --- a/src/cpu/o3/probe/SimpleTrace.py +++ b/src/cpu/o3/probe/SimpleTrace.py @@ -35,7 +35,8 @@ from m5.objects.Probe import * + class SimpleTrace(ProbeListenerObject): - type = 'SimpleTrace' - cxx_class = 'gem5::o3::SimpleTrace' - cxx_header = 'cpu/o3/probe/simple_trace.hh' + type = "SimpleTrace" + cxx_class = "gem5::o3::SimpleTrace" + cxx_header = "cpu/o3/probe/simple_trace.hh" diff --git a/src/cpu/o3/regfile.cc b/src/cpu/o3/regfile.cc index 0e5cc0618d..fecb891ca6 100644 --- a/src/cpu/o3/regfile.cc +++ b/src/cpu/o3/regfile.cc @@ -55,20 +55,21 @@ PhysRegFile::PhysRegFile(unsigned _numPhysicalIntRegs, unsigned _numPhysicalVecPredRegs, unsigned _numPhysicalCCRegs, const BaseISA::RegClasses ®_classes) - : intRegFile(reg_classes.at(IntRegClass), _numPhysicalIntRegs), - floatRegFile(reg_classes.at(FloatRegClass), _numPhysicalFloatRegs), - vectorRegFile(reg_classes.at(VecRegClass), _numPhysicalVecRegs), - vectorElemRegFile(reg_classes.at(VecElemClass), _numPhysicalVecRegs * ( - reg_classes.at(VecElemClass).numRegs() / - reg_classes.at(VecRegClass).numRegs())), - vecPredRegFile(reg_classes.at(VecPredRegClass), _numPhysicalVecPredRegs), - ccRegFile(reg_classes.at(CCRegClass), _numPhysicalCCRegs), + : intRegFile(*reg_classes.at(IntRegClass), _numPhysicalIntRegs), + floatRegFile(*reg_classes.at(FloatRegClass), _numPhysicalFloatRegs), + vectorRegFile(*reg_classes.at(VecRegClass), _numPhysicalVecRegs), + vectorElemRegFile(*reg_classes.at(VecElemClass), _numPhysicalVecRegs * ( + 
reg_classes.at(VecElemClass)->numRegs() / + reg_classes.at(VecRegClass)->numRegs())), + vecPredRegFile(*reg_classes.at(VecPredRegClass), + _numPhysicalVecPredRegs), + ccRegFile(*reg_classes.at(CCRegClass), _numPhysicalCCRegs), numPhysicalIntRegs(_numPhysicalIntRegs), numPhysicalFloatRegs(_numPhysicalFloatRegs), numPhysicalVecRegs(_numPhysicalVecRegs), numPhysicalVecElemRegs(_numPhysicalVecRegs * ( - reg_classes.at(VecElemClass).numRegs() / - reg_classes.at(VecRegClass).numRegs())), + reg_classes.at(VecElemClass)->numRegs() / + reg_classes.at(VecRegClass)->numRegs())), numPhysicalVecPredRegs(_numPhysicalVecPredRegs), numPhysicalCCRegs(_numPhysicalCCRegs), totalNumRegs(_numPhysicalIntRegs @@ -83,42 +84,48 @@ PhysRegFile::PhysRegFile(unsigned _numPhysicalIntRegs, // The initial batch of registers are the integer ones for (phys_reg = 0; phys_reg < numPhysicalIntRegs; phys_reg++) { - intRegIds.emplace_back(IntRegClass, phys_reg, flat_reg_idx++); + intRegIds.emplace_back(*reg_classes.at(IntRegClass), + phys_reg, flat_reg_idx++); } // The next batch of the registers are the floating-point physical // registers; put them onto the floating-point free list. for (phys_reg = 0; phys_reg < numPhysicalFloatRegs; phys_reg++) { - floatRegIds.emplace_back(FloatRegClass, phys_reg, flat_reg_idx++); + floatRegIds.emplace_back(*reg_classes.at(FloatRegClass), + phys_reg, flat_reg_idx++); } // The next batch of the registers are the vector physical // registers; put them onto the vector free list. for (phys_reg = 0; phys_reg < numPhysicalVecRegs; phys_reg++) { - vecRegIds.emplace_back(VecRegClass, phys_reg, flat_reg_idx++); + vecRegIds.emplace_back(*reg_classes.at(VecRegClass), phys_reg, + flat_reg_idx++); } // The next batch of the registers are the vector element physical // registers; put them onto the vector free list. 
for (phys_reg = 0; phys_reg < numPhysicalVecElemRegs; phys_reg++) { - vecElemIds.emplace_back(VecElemClass, phys_reg, flat_reg_idx++); + vecElemIds.emplace_back(*reg_classes.at(VecElemClass), phys_reg, + flat_reg_idx++); } // The next batch of the registers are the predicate physical // registers; put them onto the predicate free list. for (phys_reg = 0; phys_reg < numPhysicalVecPredRegs; phys_reg++) { - vecPredRegIds.emplace_back(VecPredRegClass, phys_reg, flat_reg_idx++); + vecPredRegIds.emplace_back(*reg_classes.at(VecPredRegClass), phys_reg, + flat_reg_idx++); } // The rest of the registers are the condition-code physical // registers; put them onto the condition-code free list. for (phys_reg = 0; phys_reg < numPhysicalCCRegs; phys_reg++) { - ccRegIds.emplace_back(CCRegClass, phys_reg, flat_reg_idx++); + ccRegIds.emplace_back(*reg_classes.at(CCRegClass), phys_reg, + flat_reg_idx++); } // Misc regs have a fixed mapping but still need PhysRegIds. - for (phys_reg = 0; phys_reg < reg_classes.at(MiscRegClass).numRegs(); + for (phys_reg = 0; phys_reg < reg_classes.at(MiscRegClass)->numRegs(); phys_reg++) { - miscRegIds.emplace_back(MiscRegClass, phys_reg, 0); + miscRegIds.emplace_back(*reg_classes.at(MiscRegClass), phys_reg, 0); } } diff --git a/src/cpu/o3/regfile.hh b/src/cpu/o3/regfile.hh index 18d2d518b2..3ddf1a2a79 100644 --- a/src/cpu/o3/regfile.hh +++ b/src/cpu/o3/regfile.hh @@ -46,9 +46,7 @@ #include #include "arch/generic/isa.hh" -#include "arch/vecregs.hh" #include "base/trace.hh" -#include "config/the_isa.hh" #include "cpu/o3/comm.hh" #include "cpu/regfile.hh" #include "debug/IEW.hh" diff --git a/src/cpu/o3/rename.cc b/src/cpu/o3/rename.cc index 89d4542db4..f3783d402a 100644 --- a/src/cpu/o3/rename.cc +++ b/src/cpu/o3/rename.cc @@ -1008,15 +1008,17 @@ Rename::renameSrcRegs(const DynInstPtr &inst, ThreadID tid) gem5::ThreadContext *tc = inst->tcBase(); UnifiedRenameMap *map = renameMap[tid]; unsigned num_src_regs = inst->numSrcRegs(); + auto *isa = 
tc->getIsaPtr(); // Get the architectual register numbers from the source and // operands, and redirect them to the right physical register. for (int src_idx = 0; src_idx < num_src_regs; src_idx++) { const RegId& src_reg = inst->srcRegIdx(src_idx); + const RegId flat_reg = src_reg.flatten(*isa); PhysRegIdPtr renamed_reg; - renamed_reg = map->lookup(tc->flattenRegId(src_reg)); - switch (src_reg.classValue()) { + renamed_reg = map->lookup(flat_reg); + switch (flat_reg.classValue()) { case InvalidRegClass: break; case IntRegClass: @@ -1037,13 +1039,13 @@ Rename::renameSrcRegs(const DynInstPtr &inst, ThreadID tid) break; default: - panic("Invalid register class: %d.", src_reg.classValue()); + panic("Invalid register class: %d.", flat_reg.classValue()); } DPRINTF(Rename, "[tid:%i] " "Looking up %s arch reg %i, got phys reg %i (%s)\n", - tid, src_reg.className(), + tid, flat_reg.className(), src_reg.index(), renamed_reg->index(), renamed_reg->className()); @@ -1076,13 +1078,14 @@ Rename::renameDestRegs(const DynInstPtr &inst, ThreadID tid) gem5::ThreadContext *tc = inst->tcBase(); UnifiedRenameMap *map = renameMap[tid]; unsigned num_dest_regs = inst->numDestRegs(); + auto *isa = tc->getIsaPtr(); // Rename the destination registers. 
for (int dest_idx = 0; dest_idx < num_dest_regs; dest_idx++) { const RegId& dest_reg = inst->destRegIdx(dest_idx); UnifiedRenameMap::RenameInfo rename_result; - RegId flat_dest_regid = tc->flattenRegId(dest_reg); + RegId flat_dest_regid = dest_reg.flatten(*isa); flat_dest_regid.setNumPinnedWrites(dest_reg.getNumPinnedWrites()); rename_result = map->rename(flat_dest_regid); diff --git a/src/cpu/o3/rename_map.cc b/src/cpu/o3/rename_map.cc index 40e45f201a..a02cfc1f72 100644 --- a/src/cpu/o3/rename_map.cc +++ b/src/cpu/o3/rename_map.cc @@ -43,7 +43,6 @@ #include -#include "arch/vecregs.hh" #include "cpu/o3/dyn_inst.hh" #include "cpu/reg_class.hh" #include "debug/Rename.hh" @@ -114,7 +113,7 @@ UnifiedRenameMap::init(const BaseISA::RegClasses ®Classes, regFile = _regFile; for (int i = 0; i < renameMaps.size(); i++) - renameMaps[i].init(regClasses.at(i), &(freeList->freeLists[i])); + renameMaps[i].init(*regClasses.at(i), &(freeList->freeLists[i])); } bool diff --git a/src/cpu/o3/rob.hh b/src/cpu/o3/rob.hh index d36db733e1..b3aa5b3ebb 100644 --- a/src/cpu/o3/rob.hh +++ b/src/cpu/o3/rob.hh @@ -47,7 +47,6 @@ #include "base/statistics.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/inst_seq.hh" #include "cpu/o3/dyn_inst_ptr.hh" #include "cpu/o3/limits.hh" diff --git a/src/cpu/o3/thread_context.cc b/src/cpu/o3/thread_context.cc index e5395211b0..06210de07e 100644 --- a/src/cpu/o3/thread_context.cc +++ b/src/cpu/o3/thread_context.cc @@ -41,8 +41,6 @@ #include "cpu/o3/thread_context.hh" -#include "arch/vecregs.hh" -#include "config/the_isa.hh" #include "debug/O3CPU.hh" namespace gem5 @@ -150,32 +148,32 @@ ThreadContext::clearArchRegs() } RegVal -ThreadContext::getRegFlat(const RegId ®) const +ThreadContext::getReg(const RegId ®) const { return cpu->getArchReg(reg, thread->threadId()); } void * -ThreadContext::getWritableRegFlat(const RegId ®) +ThreadContext::getWritableReg(const RegId ®) { return cpu->getWritableArchReg(reg, thread->threadId()); } void 
-ThreadContext::getRegFlat(const RegId ®, void *val) const +ThreadContext::getReg(const RegId ®, void *val) const { cpu->getArchReg(reg, val, thread->threadId()); } void -ThreadContext::setRegFlat(const RegId ®, RegVal val) +ThreadContext::setReg(const RegId ®, RegVal val) { cpu->setArchReg(reg, val, thread->threadId()); conditionalSquash(); } void -ThreadContext::setRegFlat(const RegId ®, const void *val) +ThreadContext::setReg(const RegId ®, const void *val) { cpu->setArchReg(reg, val, thread->threadId()); conditionalSquash(); @@ -197,12 +195,6 @@ ThreadContext::pcStateNoRecord(const PCStateBase &val) conditionalSquash(); } -RegId -ThreadContext::flattenRegId(const RegId& regId) const -{ - return cpu->isa[thread->threadId()]->flattenRegId(regId); -} - void ThreadContext::setMiscRegNoEffect(RegIndex misc_reg, RegVal val) { diff --git a/src/cpu/o3/thread_context.hh b/src/cpu/o3/thread_context.hh index ae37dc994d..8d12a1388e 100644 --- a/src/cpu/o3/thread_context.hh +++ b/src/cpu/o3/thread_context.hh @@ -42,7 +42,6 @@ #ifndef __CPU_O3_THREAD_CONTEXT_HH__ #define __CPU_O3_THREAD_CONTEXT_HH__ -#include "config/the_isa.hh" #include "cpu/o3/cpu.hh" #include "cpu/thread_context.hh" @@ -210,8 +209,6 @@ class ThreadContext : public gem5::ThreadContext * write might have as defined by the architecture. */ void setMiscReg(RegIndex misc_reg, RegVal val) override; - RegId flattenRegId(const RegId& regId) const override; - /** Returns the number of consecutive store conditional failures. */ // @todo: Figure out where these store cond failures should go. 
unsigned @@ -239,12 +236,12 @@ class ThreadContext : public gem5::ThreadContext cpu->squashFromTC(thread->threadId()); } - RegVal getRegFlat(const RegId ®) const override; - void getRegFlat(const RegId ®, void *val) const override; - void *getWritableRegFlat(const RegId ®) override; + RegVal getReg(const RegId ®) const override; + void getReg(const RegId ®, void *val) const override; + void *getWritableReg(const RegId ®) override; - void setRegFlat(const RegId ®, RegVal val) override; - void setRegFlat(const RegId ®, const void *val) override; + void setReg(const RegId ®, RegVal val) override; + void setReg(const RegId ®, const void *val) override; // hardware transactional memory void htmAbortTransaction(uint64_t htm_uid, diff --git a/src/cpu/pred/BranchPredictor.py b/src/cpu/pred/BranchPredictor.py index c6abebb6ab..d18ca3f821 100644 --- a/src/cpu/pred/BranchPredictor.py +++ b/src/cpu/pred/BranchPredictor.py @@ -29,17 +29,19 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class IndirectPredictor(SimObject): - type = 'IndirectPredictor' - cxx_class = 'gem5::branch_prediction::IndirectPredictor' + type = "IndirectPredictor" + cxx_class = "gem5::branch_prediction::IndirectPredictor" cxx_header = "cpu/pred/indirect.hh" abstract = True numThreads = Param.Unsigned(Parent.numThreads, "Number of threads") + class SimpleIndirectPredictor(IndirectPredictor): - type = 'SimpleIndirectPredictor' - cxx_class = 'gem5::branch_prediction::SimpleIndirectPredictor' + type = "SimpleIndirectPredictor" + cxx_class = "gem5::branch_prediction::SimpleIndirectPredictor" cxx_header = "cpu/pred/simple_indirect.hh" indirectHashGHR = Param.Bool(True, "Hash branch predictor GHR") @@ -47,14 +49,16 @@ class SimpleIndirectPredictor(IndirectPredictor): indirectSets = Param.Unsigned(256, "Cache sets for indirect predictor") indirectWays = Param.Unsigned(2, "Ways for indirect predictor") indirectTagSize = Param.Unsigned(16, "Indirect target cache tag bits") - 
indirectPathLength = Param.Unsigned(3, - "Previous indirect targets to use for path history") + indirectPathLength = Param.Unsigned( + 3, "Previous indirect targets to use for path history" + ) indirectGHRBits = Param.Unsigned(13, "Indirect GHR number of bits") instShiftAmt = Param.Unsigned(2, "Number of bits to shift instructions by") + class BranchPredictor(SimObject): - type = 'BranchPredictor' - cxx_class = 'gem5::branch_prediction::BPredUnit' + type = "BranchPredictor" + cxx_class = "gem5::branch_prediction::BPredUnit" cxx_header = "cpu/pred/bpred_unit.hh" abstract = True @@ -64,12 +68,15 @@ class BranchPredictor(SimObject): RASSize = Param.Unsigned(16, "RAS size") instShiftAmt = Param.Unsigned(2, "Number of bits to shift instructions by") - indirectBranchPred = Param.IndirectPredictor(SimpleIndirectPredictor(), - "Indirect branch predictor, set to NULL to disable indirect predictions") + indirectBranchPred = Param.IndirectPredictor( + SimpleIndirectPredictor(), + "Indirect branch predictor, set to NULL to disable indirect predictions", + ) + class LocalBP(BranchPredictor): - type = 'LocalBP' - cxx_class = 'gem5::branch_prediction::LocalBP' + type = "LocalBP" + cxx_class = "gem5::branch_prediction::LocalBP" cxx_header = "cpu/pred/2bit_local.hh" localPredictorSize = Param.Unsigned(2048, "Size of local predictor") @@ -77,8 +84,8 @@ class LocalBP(BranchPredictor): class TournamentBP(BranchPredictor): - type = 'TournamentBP' - cxx_class = 'gem5::branch_prediction::TournamentBP' + type = "TournamentBP" + cxx_class = "gem5::branch_prediction::TournamentBP" cxx_header = "cpu/pred/tournament.hh" localPredictorSize = Param.Unsigned(2048, "Size of local predictor") @@ -91,8 +98,8 @@ class TournamentBP(BranchPredictor): class BiModeBP(BranchPredictor): - type = 'BiModeBP' - cxx_class = 'gem5::branch_prediction::BiModeBP' + type = "BiModeBP" + cxx_class = "gem5::branch_prediction::BiModeBP" cxx_header = "cpu/pred/bi_mode.hh" globalPredictorSize = Param.Unsigned(8192, 
"Size of global predictor") @@ -100,58 +107,71 @@ class BiModeBP(BranchPredictor): choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor") choiceCtrBits = Param.Unsigned(2, "Bits of choice counters") + class TAGEBase(SimObject): - type = 'TAGEBase' - cxx_class = 'gem5::branch_prediction::TAGEBase' + type = "TAGEBase" + cxx_class = "gem5::branch_prediction::TAGEBase" cxx_header = "cpu/pred/tage_base.hh" numThreads = Param.Unsigned(Parent.numThreads, "Number of threads") - instShiftAmt = Param.Unsigned(Parent.instShiftAmt, - "Number of bits to shift instructions by") + instShiftAmt = Param.Unsigned( + Parent.instShiftAmt, "Number of bits to shift instructions by" + ) nHistoryTables = Param.Unsigned(7, "Number of history tables") minHist = Param.Unsigned(5, "Minimum history size of TAGE") maxHist = Param.Unsigned(130, "Maximum history size of TAGE") tagTableTagWidths = VectorParam.Unsigned( - [0, 9, 9, 10, 10, 11, 11, 12], "Tag size in TAGE tag tables") + [0, 9, 9, 10, 10, 11, 11, 12], "Tag size in TAGE tag tables" + ) logTagTableSizes = VectorParam.Int( - [13, 9, 9, 9, 9, 9, 9, 9], "Log2 of TAGE table sizes") - logRatioBiModalHystEntries = Param.Unsigned(2, - "Log num of prediction entries for a shared hysteresis bit " \ - "for the Bimodal") + [13, 9, 9, 9, 9, 9, 9, 9], "Log2 of TAGE table sizes" + ) + logRatioBiModalHystEntries = Param.Unsigned( + 2, + "Log num of prediction entries for a shared hysteresis bit " + "for the Bimodal", + ) tagTableCounterBits = Param.Unsigned(3, "Number of tag table counter bits") tagTableUBits = Param.Unsigned(2, "Number of tag table u bits") - histBufferSize = Param.Unsigned(2097152, - "A large number to track all branch histories(2MEntries default)") + histBufferSize = Param.Unsigned( + 2097152, + "A large number to track all branch histories(2MEntries default)", + ) pathHistBits = Param.Unsigned(16, "Path history size") - logUResetPeriod = Param.Unsigned(18, - "Log period in number of branches to reset TAGE useful 
counters") + logUResetPeriod = Param.Unsigned( + 18, "Log period in number of branches to reset TAGE useful counters" + ) numUseAltOnNa = Param.Unsigned(1, "Number of USE_ALT_ON_NA counters") initialTCounterValue = Param.Int(1 << 17, "Initial value of tCounter") useAltOnNaBits = Param.Unsigned(4, "Size of the USE_ALT_ON_NA counter(s)") - maxNumAlloc = Param.Unsigned(1, - "Max number of TAGE entries allocted on mispredict") + maxNumAlloc = Param.Unsigned( + 1, "Max number of TAGE entries allocted on mispredict" + ) # List of enabled TAGE tables. If empty, all are enabled noSkip = VectorParam.Bool([], "Vector of enabled TAGE tables") - speculativeHistUpdate = Param.Bool(True, - "Use speculative update for histories") + speculativeHistUpdate = Param.Bool( + True, "Use speculative update for histories" + ) + # TAGE branch predictor as described in https://www.jilp.org/vol8/v8paper1.pdf # The default sizes below are for the 8C-TAGE configuration (63.5 Kbits) class TAGE(BranchPredictor): - type = 'TAGE' - cxx_class = 'gem5::branch_prediction::TAGE' + type = "TAGE" + cxx_class = "gem5::branch_prediction::TAGE" cxx_header = "cpu/pred/tage.hh" tage = Param.TAGEBase(TAGEBase(), "Tage object") + class LTAGE_TAGE(TAGEBase): nHistoryTables = 12 minHist = 4 @@ -160,16 +180,18 @@ class LTAGE_TAGE(TAGEBase): logTagTableSizes = [14, 10, 10, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9] logUResetPeriod = 19 + class LoopPredictor(SimObject): - type = 'LoopPredictor' - cxx_class = 'gem5::branch_prediction::LoopPredictor' - cxx_header = 'cpu/pred/loop_predictor.hh' + type = "LoopPredictor" + cxx_class = "gem5::branch_prediction::LoopPredictor" + cxx_header = "cpu/pred/loop_predictor.hh" logSizeLoopPred = Param.Unsigned(8, "Log size of the loop predictor") withLoopBits = Param.Unsigned(7, "Size of the WITHLOOP counter") loopTableAgeBits = Param.Unsigned(8, "Number of age bits per loop entry") - loopTableConfidenceBits = Param.Unsigned(2, - "Number of confidence bits per loop entry") + 
loopTableConfidenceBits = Param.Unsigned( + 2, "Number of confidence bits per loop entry" + ) loopTableTagBits = Param.Unsigned(14, "Number of tag bits per loop entry") loopTableIterBits = Param.Unsigned(14, "Nuber of iteration bits per loop") logLoopTableAssoc = Param.Unsigned(2, "Log loop predictor associativity") @@ -192,17 +214,20 @@ class LoopPredictor(SimObject): # If true, use random to decide whether to allocate or not, and only try # with one entry - restrictAllocation = Param.Bool(False, - "Restrict the allocation conditions") + restrictAllocation = Param.Bool( + False, "Restrict the allocation conditions" + ) initialLoopIter = Param.Unsigned(1, "Initial iteration number") initialLoopAge = Param.Unsigned(255, "Initial age value") - optionalAgeReset = Param.Bool(True, - "Reset age bits optionally in some cases") + optionalAgeReset = Param.Bool( + True, "Reset age bits optionally in some cases" + ) + class TAGE_SC_L_TAGE(TAGEBase): - type = 'TAGE_SC_L_TAGE' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_TAGE' + type = "TAGE_SC_L_TAGE" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_TAGE" cxx_header = "cpu/pred/tage_sc_l.hh" abstract = True @@ -223,10 +248,12 @@ class TAGE_SC_L_TAGE(TAGEBase): logTagTableSize = Param.Unsigned("Log size of each tag table") shortTagsTageFactor = Param.Unsigned( - "Factor for calculating the total number of short tags TAGE entries") + "Factor for calculating the total number of short tags TAGE entries" + ) longTagsTageFactor = Param.Unsigned( - "Factor for calculating the total number of long tags TAGE entries") + "Factor for calculating the total number of long tags TAGE entries" + ) shortTagsSize = Param.Unsigned(8, "Size of the short tags") @@ -234,13 +261,14 @@ class TAGE_SC_L_TAGE(TAGEBase): firstLongTagTable = Param.Unsigned("First table with long tags") - truncatePathHist = Param.Bool(True, - "Truncate the path history to its configured size") + truncatePathHist = Param.Bool( + True, "Truncate the path history to 
its configured size" + ) class TAGE_SC_L_TAGE_64KB(TAGE_SC_L_TAGE): - type = 'TAGE_SC_L_TAGE_64KB' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_TAGE_64KB' + type = "TAGE_SC_L_TAGE_64KB" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_TAGE_64KB" cxx_header = "cpu/pred/tage_sc_l_64KB.hh" nHistoryTables = 36 @@ -258,8 +286,45 @@ class TAGE_SC_L_TAGE_64KB(TAGE_SC_L_TAGE): # Entry 0 is for the bimodal and it is ignored # Note: For this implementation, some odd entries are also set to 0 to save # some bits - noSkip = [0,0,1,0,0,0,1,0,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,0,1,0,1,0,1,0,0,0,1,0,0,0,1] + noSkip = [ + 0, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 1, + ] logTagTableSize = 10 shortTagsTageFactor = 10 @@ -269,9 +334,10 @@ class TAGE_SC_L_TAGE_64KB(TAGE_SC_L_TAGE): firstLongTagTable = 13 + class TAGE_SC_L_TAGE_8KB(TAGE_SC_L_TAGE): - type = 'TAGE_SC_L_TAGE_8KB' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_TAGE_8KB' + type = "TAGE_SC_L_TAGE_8KB" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_TAGE_8KB" cxx_header = "cpu/pred/tage_sc_l_8KB.hh" nHistoryTables = 30 @@ -290,26 +356,60 @@ class TAGE_SC_L_TAGE_8KB(TAGE_SC_L_TAGE): truncatePathHist = False - noSkip = [0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,0,1,0,1,0,1] + noSkip = [ + 0, + 0, + 1, + 0, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + ] tagTableUBits = 2 + # LTAGE branch predictor as described in # https://www.irisa.fr/caps/people/seznec/L-TAGE.pdf # It is basically a TAGE predictor plus a loop predictor # The differnt TAGE sizes are updated according to the paper values (256 Kbits) class LTAGE(TAGE): - type = 'LTAGE' - cxx_class = 'gem5::branch_prediction::LTAGE' + type = "LTAGE" + cxx_class = "gem5::branch_prediction::LTAGE" cxx_header = "cpu/pred/ltage.hh" tage = 
LTAGE_TAGE() loop_predictor = Param.LoopPredictor(LoopPredictor(), "Loop predictor") + class TAGE_SC_L_LoopPredictor(LoopPredictor): - type = 'TAGE_SC_L_LoopPredictor' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_LoopPredictor' + type = "TAGE_SC_L_LoopPredictor" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_LoopPredictor" cxx_header = "cpu/pred/tage_sc_l.hh" loopTableAgeBits = 4 @@ -324,56 +424,68 @@ class TAGE_SC_L_LoopPredictor(LoopPredictor): initialLoopAge = 7 optionalAgeReset = False + class StatisticalCorrector(SimObject): - type = 'StatisticalCorrector' - cxx_class = 'gem5::branch_prediction::StatisticalCorrector' + type = "StatisticalCorrector" + cxx_class = "gem5::branch_prediction::StatisticalCorrector" cxx_header = "cpu/pred/statistical_corrector.hh" abstract = True # Statistical corrector parameters numEntriesFirstLocalHistories = Param.Unsigned( - "Number of entries for first local histories") + "Number of entries for first local histories" + ) bwnb = Param.Unsigned("Num global backward branch GEHL lengths") bwm = VectorParam.Int("Global backward branch GEHL lengths") logBwnb = Param.Unsigned("Log num of global backward branch GEHL entries") bwWeightInitValue = Param.Int( - "Initial value of the weights of the global backward branch GEHL entries") + "Initial value of the weights of the global backward branch GEHL entries" + ) lnb = Param.Unsigned("Num first local history GEHL lenghts") lm = VectorParam.Int("First local history GEHL lengths") logLnb = Param.Unsigned("Log number of first local history GEHL entries") lWeightInitValue = Param.Int( - "Initial value of the weights of the first local history GEHL entries") + "Initial value of the weights of the first local history GEHL entries" + ) inb = Param.Unsigned(1, "Num IMLI GEHL lenghts") im = VectorParam.Int([8], "IMLI history GEHL lengths") logInb = Param.Unsigned("Log number of IMLI GEHL entries") iWeightInitValue = Param.Int( - "Initial value of the weights of the IMLI history GEHL 
entries") + "Initial value of the weights of the IMLI history GEHL entries" + ) logBias = Param.Unsigned("Log size of Bias tables") - logSizeUp = Param.Unsigned(6, - "Log size of update threshold counters tables") + logSizeUp = Param.Unsigned( + 6, "Log size of update threshold counters tables" + ) - chooserConfWidth = Param.Unsigned(7, - "Number of bits for the chooser counters") + chooserConfWidth = Param.Unsigned( + 7, "Number of bits for the chooser counters" + ) - updateThresholdWidth = Param.Unsigned(12, - "Number of bits for the update threshold counter") + updateThresholdWidth = Param.Unsigned( + 12, "Number of bits for the update threshold counter" + ) - pUpdateThresholdWidth = Param.Unsigned(8, - "Number of bits for the pUpdate threshold counters") + pUpdateThresholdWidth = Param.Unsigned( + 8, "Number of bits for the pUpdate threshold counters" + ) - extraWeightsWidth = Param.Unsigned(6, - "Number of bits for the extra weights") + extraWeightsWidth = Param.Unsigned( + 6, "Number of bits for the extra weights" + ) scCountersWidth = Param.Unsigned(6, "Statistical corrector counters width") - initialUpdateThresholdValue = Param.Int(0, - "Initial pUpdate threshold counter value") + initialUpdateThresholdValue = Param.Int( + 0, "Initial pUpdate threshold counter value" + ) + # TAGE-SC-L branch predictor as desribed in # https://www.jilp.org/cbp2016/paper/AndreSeznecLimited.pdf @@ -388,48 +500,55 @@ class StatisticalCorrector(SimObject): # Note that as it is now, this branch predictor does not handle any type # of speculation: All the structures/histories are updated at commit time class TAGE_SC_L(LTAGE): - type = 'TAGE_SC_L' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L' + type = "TAGE_SC_L" + cxx_class = "gem5::branch_prediction::TAGE_SC_L" cxx_header = "cpu/pred/tage_sc_l.hh" abstract = True - statistical_corrector = Param.StatisticalCorrector( - "Statistical Corrector") + statistical_corrector = Param.StatisticalCorrector("Statistical Corrector") + 
class TAGE_SC_L_64KB_LoopPredictor(TAGE_SC_L_LoopPredictor): logSizeLoopPred = 5 + class TAGE_SC_L_8KB_LoopPredictor(TAGE_SC_L_LoopPredictor): logSizeLoopPred = 3 + class TAGE_SC_L_64KB_StatisticalCorrector(StatisticalCorrector): - type = 'TAGE_SC_L_64KB_StatisticalCorrector' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_64KB_StatisticalCorrector' + type = "TAGE_SC_L_64KB_StatisticalCorrector" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_64KB_StatisticalCorrector" cxx_header = "cpu/pred/tage_sc_l_64KB.hh" pnb = Param.Unsigned(3, "Num variation global branch GEHL lengths") pm = VectorParam.Int([25, 16, 9], "Variation global branch GEHL lengths") - logPnb = Param.Unsigned(9, - "Log number of variation global branch GEHL entries") + logPnb = Param.Unsigned( + 9, "Log number of variation global branch GEHL entries" + ) snb = Param.Unsigned(3, "Num second local history GEHL lenghts") sm = VectorParam.Int([16, 11, 6], "Second local history GEHL lengths") - logSnb = Param.Unsigned(9, - "Log number of second local history GEHL entries") + logSnb = Param.Unsigned( + 9, "Log number of second local history GEHL entries" + ) tnb = Param.Unsigned(2, "Num third local history GEHL lenghts") tm = VectorParam.Int([9, 4], "Third local history GEHL lengths") - logTnb = Param.Unsigned(10, - "Log number of third local history GEHL entries") + logTnb = Param.Unsigned( + 10, "Log number of third local history GEHL entries" + ) imnb = Param.Unsigned(2, "Num second IMLI GEHL lenghts") imm = VectorParam.Int([10, 4], "Second IMLI history GEHL lengths") logImnb = Param.Unsigned(9, "Log number of second IMLI GEHL entries") - numEntriesSecondLocalHistories = Param.Unsigned(16, - "Number of entries for second local histories") - numEntriesThirdLocalHistories = Param.Unsigned(16, - "Number of entries for second local histories") + numEntriesSecondLocalHistories = Param.Unsigned( + 16, "Number of entries for second local histories" + ) + numEntriesThirdLocalHistories = Param.Unsigned( + 
16, "Number of entries for second local histories" + ) numEntriesFirstLocalHistories = 256 @@ -448,9 +567,10 @@ class TAGE_SC_L_64KB_StatisticalCorrector(StatisticalCorrector): logInb = 8 iWeightInitValue = 7 + class TAGE_SC_L_8KB_StatisticalCorrector(StatisticalCorrector): - type = 'TAGE_SC_L_8KB_StatisticalCorrector' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_8KB_StatisticalCorrector' + type = "TAGE_SC_L_8KB_StatisticalCorrector" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_8KB_StatisticalCorrector" cxx_header = "cpu/pred/tage_sc_l_8KB.hh" gnb = Param.Unsigned(2, "Num global branch GEHL lengths") @@ -474,95 +594,121 @@ class TAGE_SC_L_8KB_StatisticalCorrector(StatisticalCorrector): logInb = 7 iWeightInitValue = 7 + # 64KB TAGE-SC-L branch predictor as described in # http://www.jilp.org/cbp2016/paper/AndreSeznecLimited.pdf class TAGE_SC_L_64KB(TAGE_SC_L): - type = 'TAGE_SC_L_64KB' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_64KB' + type = "TAGE_SC_L_64KB" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_64KB" cxx_header = "cpu/pred/tage_sc_l_64KB.hh" tage = TAGE_SC_L_TAGE_64KB() loop_predictor = TAGE_SC_L_64KB_LoopPredictor() statistical_corrector = TAGE_SC_L_64KB_StatisticalCorrector() + # 8KB TAGE-SC-L branch predictor as described in # http://www.jilp.org/cbp2016/paper/AndreSeznecLimited.pdf class TAGE_SC_L_8KB(TAGE_SC_L): - type = 'TAGE_SC_L_8KB' - cxx_class = 'gem5::branch_prediction::TAGE_SC_L_8KB' + type = "TAGE_SC_L_8KB" + cxx_class = "gem5::branch_prediction::TAGE_SC_L_8KB" cxx_header = "cpu/pred/tage_sc_l_8KB.hh" tage = TAGE_SC_L_TAGE_8KB() loop_predictor = TAGE_SC_L_8KB_LoopPredictor() statistical_corrector = TAGE_SC_L_8KB_StatisticalCorrector() + class MultiperspectivePerceptron(BranchPredictor): - type = 'MultiperspectivePerceptron' - cxx_class = 'gem5::branch_prediction::MultiperspectivePerceptron' - cxx_header = 'cpu/pred/multiperspective_perceptron.hh' + type = "MultiperspectivePerceptron" + cxx_class = 
"gem5::branch_prediction::MultiperspectivePerceptron" + cxx_header = "cpu/pred/multiperspective_perceptron.hh" abstract = True num_filter_entries = Param.Int("Number of filter entries") num_local_histories = Param.Int("Number of local history entries") - local_history_length = Param.Int(11, - "Length in bits of each history entry") + local_history_length = Param.Int( + 11, "Length in bits of each history entry" + ) - block_size = Param.Int(21, + block_size = Param.Int( + 21, "number of ghist bits in a 'block'; this is the width of an initial " - "hash of ghist") + "hash of ghist", + ) pcshift = Param.Int(-10, "Shift for hashing PC") threshold = Param.Int(1, "Threshold for deciding low/high confidence") - bias0 = Param.Int(-5, - "Bias perceptron output this much on all-bits-zero local history") - bias1 = Param.Int(5, - "Bias perceptron output this much on all-bits-one local history") - biasmostly0 = Param.Int(-1, + bias0 = Param.Int( + -5, "Bias perceptron output this much on all-bits-zero local history" + ) + bias1 = Param.Int( + 5, "Bias perceptron output this much on all-bits-one local history" + ) + biasmostly0 = Param.Int( + -1, "Bias perceptron output this much on almost-all-bits-zero local " - "history") - biasmostly1 = Param.Int(1, + "history", + ) + biasmostly1 = Param.Int( + 1, "Bias perceptron output this much on almost-all-bits-one local " - "history") - nbest = Param.Int(20, + "history", + ) + nbest = Param.Int( + 20, "Use this many of the top performing tables on a low-confidence " - "branch") + "branch", + ) tunebits = Param.Int(24, "Number of bits in misprediction counters") - hshift = Param.Int(-6, - "How much to shift initial feauture hash before XORing with PC bits") + hshift = Param.Int( + -6, + "How much to shift initial feauture hash before XORing with PC bits", + ) imli_mask1 = Param.UInt64( "Which tables should have their indices hashed with the first IMLI " - "counter") + "counter" + ) imli_mask4 = Param.UInt64( "Which tables should have 
their indices hashed with the fourth IMLI " - "counter") + "counter" + ) recencypos_mask = Param.UInt64( "Which tables should have their indices hashed with the recency " - "position") + "position" + ) fudge = Param.Float(0.245, "Fudge factor to multiply by perceptron output") n_sign_bits = Param.Int(2, "Number of sign bits per magnitude") pcbit = Param.Int(2, "Bit from the PC to use for hashing global history") decay = Param.Int(0, "Whether and how often to decay a random weight") - record_mask = Param.Int(191, - "Which histories are updated with filtered branch outcomes") - hash_taken = Param.Bool(False, - "Hash the taken/not taken value with a PC bit") - tuneonly = Param.Bool(True, - "If true, only count mispredictions of low-confidence branches") - extra_rounds = Param.Int(1, + record_mask = Param.Int( + 191, "Which histories are updated with filtered branch outcomes" + ) + hash_taken = Param.Bool( + False, "Hash the taken/not taken value with a PC bit" + ) + tuneonly = Param.Bool( + True, "If true, only count mispredictions of low-confidence branches" + ) + extra_rounds = Param.Int( + 1, "Number of extra rounds of training a single weight on a " - "low-confidence prediction") + "low-confidence prediction", + ) speed = Param.Int(9, "Adaptive theta learning speed") initial_theta = Param.Int(10, "Initial theta") budgetbits = Param.Int("Hardware budget in bits") - speculative_update = Param.Bool(False, - "Use speculative update for histories") + speculative_update = Param.Bool( + False, "Use speculative update for histories" + ) initial_ghist_length = Param.Int(1, "Initial GHist length value") ignore_path_size = Param.Bool(False, "Ignore the path storage") + class MultiperspectivePerceptron8KB(MultiperspectivePerceptron): - type = 'MultiperspectivePerceptron8KB' - cxx_class = 'gem5::branch_prediction::MultiperspectivePerceptron8KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_8KB.hh' + type = "MultiperspectivePerceptron8KB" + cxx_class = 
"gem5::branch_prediction::MultiperspectivePerceptron8KB" + cxx_header = "cpu/pred/multiperspective_perceptron_8KB.hh" budgetbits = 8192 * 8 + 2048 num_local_histories = 48 @@ -571,44 +717,81 @@ class MultiperspectivePerceptron8KB(MultiperspectivePerceptron): imli_mask4 = 0x4400 recencypos_mask = 0x100000090 + class MultiperspectivePerceptron64KB(MultiperspectivePerceptron): - type = 'MultiperspectivePerceptron64KB' - cxx_class = 'gem5::branch_prediction::MultiperspectivePerceptron64KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_64KB.hh' + type = "MultiperspectivePerceptron64KB" + cxx_class = "gem5::branch_prediction::MultiperspectivePerceptron64KB" + cxx_header = "cpu/pred/multiperspective_perceptron_64KB.hh" budgetbits = 65536 * 8 + 2048 num_local_histories = 510 num_filter_entries = 18025 - imli_mask1 = 0xc1000 + imli_mask1 = 0xC1000 imli_mask4 = 0x80008000 recencypos_mask = 0x100000090 + class MPP_TAGE(TAGEBase): - type = 'MPP_TAGE' - cxx_class = 'gem5::branch_prediction::MPP_TAGE' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage.hh' + type = "MPP_TAGE" + cxx_class = "gem5::branch_prediction::MPP_TAGE" + cxx_header = "cpu/pred/multiperspective_perceptron_tage.hh" nHistoryTables = 15 pathHistBits = 27 instShiftAmt = 0 histBufferSize = 16384 - maxHist = 4096; - tagTableTagWidths = [0, 7, 9, 9, 9, 10, 11, 11, 12, 12, - 12, 13, 14, 15, 15, 15] - logTagTableSizes = [14, 10, 11, 11, 11, 11, 11, 12, 12, - 10, 11, 11, 9, 7, 7, 8] - tunedHistoryLengths = VectorParam.Unsigned([0, 5, 12, 15, 21, 31, 43, 64, - 93, 137, 200, 292, 424, 612, 877, 1241], "Tuned history lengths") + maxHist = 4096 + tagTableTagWidths = [ + 0, + 7, + 9, + 9, + 9, + 10, + 11, + 11, + 12, + 12, + 12, + 13, + 14, + 15, + 15, + 15, + ] + logTagTableSizes = [ + 14, + 10, + 11, + 11, + 11, + 11, + 11, + 12, + 12, + 10, + 11, + 11, + 9, + 7, + 7, + 8, + ] + tunedHistoryLengths = VectorParam.Unsigned( + [0, 5, 12, 15, 21, 31, 43, 64, 93, 137, 200, 292, 424, 612, 877, 1241], + "Tuned 
history lengths", + ) logUResetPeriod = 10 initialTCounterValue = 0 numUseAltOnNa = 512 speculativeHistUpdate = False + class MPP_LoopPredictor(LoopPredictor): - type = 'MPP_LoopPredictor' - cxx_class = 'gem5::branch_prediction::MPP_LoopPredictor' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage.hh' + type = "MPP_LoopPredictor" + cxx_class = "gem5::branch_prediction::MPP_LoopPredictor" + cxx_header = "cpu/pred/multiperspective_perceptron_tage.hh" useDirectionBit = True useHashing = True @@ -623,15 +806,16 @@ class MPP_LoopPredictor(LoopPredictor): logSizeLoopPred = 6 loopTableTagBits = 10 + class MPP_StatisticalCorrector(StatisticalCorrector): - type = 'MPP_StatisticalCorrector' - cxx_class = 'gem5::branch_prediction::MPP_StatisticalCorrector' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage.hh' + type = "MPP_StatisticalCorrector" + cxx_class = "gem5::branch_prediction::MPP_StatisticalCorrector" + cxx_header = "cpu/pred/multiperspective_perceptron_tage.hh" abstract = True # Unused in this Statistical Corrector bwnb = 0 - bwm = [ ] + bwm = [] logBwnb = 0 bwWeightInitValue = -1 @@ -654,15 +838,18 @@ class MPP_StatisticalCorrector(StatisticalCorrector): logGnb = Param.Unsigned(10, "Log number of global branch GEHL entries") pnb = Param.Unsigned(4, "Num variation global branch GEHL lengths") - pm = VectorParam.Int([16, 11, 6, 3], - "Variation global branch GEHL lengths") - logPnb = Param.Unsigned(9, - "Log number of variation global branch GEHL entries") + pm = VectorParam.Int( + [16, 11, 6, 3], "Variation global branch GEHL lengths" + ) + logPnb = Param.Unsigned( + 9, "Log number of variation global branch GEHL entries" + ) + class MultiperspectivePerceptronTAGE(MultiperspectivePerceptron): - type = 'MultiperspectivePerceptronTAGE' - cxx_class = 'gem5::branch_prediction::MultiperspectivePerceptronTAGE' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage.hh' + type = "MultiperspectivePerceptronTAGE" + cxx_class = 
"gem5::branch_prediction::MultiperspectivePerceptronTAGE" + cxx_header = "cpu/pred/multiperspective_perceptron_tage.hh" abstract = True instShiftAmt = 4 @@ -671,44 +858,50 @@ class MultiperspectivePerceptronTAGE(MultiperspectivePerceptron): imli_mask4 = 0 num_filter_entries = 0 num_local_histories = 0 - recencypos_mask = 0 # Unused + recencypos_mask = 0 # Unused threshold = -1 initial_ghist_length = 0 ignore_path_size = True - n_sign_bits = 1; + n_sign_bits = 1 tage = Param.TAGEBase("Tage object") loop_predictor = Param.LoopPredictor("Loop predictor") statistical_corrector = Param.StatisticalCorrector("Statistical Corrector") + class MPP_StatisticalCorrector_64KB(MPP_StatisticalCorrector): - type = 'MPP_StatisticalCorrector_64KB' - cxx_class = 'gem5::branch_prediction::MPP_StatisticalCorrector_64KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage_64KB.hh' + type = "MPP_StatisticalCorrector_64KB" + cxx_class = "gem5::branch_prediction::MPP_StatisticalCorrector_64KB" + cxx_header = "cpu/pred/multiperspective_perceptron_tage_64KB.hh" logBias = 8 snb = Param.Unsigned(4, "Num second local history GEHL lenghts") sm = VectorParam.Int([16, 11, 6, 3], "Second local history GEHL lengths") - logSnb = Param.Unsigned(9, - "Log number of second local history GEHL entries") + logSnb = Param.Unsigned( + 9, "Log number of second local history GEHL entries" + ) tnb = Param.Unsigned(3, "Num third local history GEHL lenghts") tm = VectorParam.Int([22, 17, 14], "Third local history GEHL lengths") - logTnb = Param.Unsigned(9, - "Log number of third local history GEHL entries") + logTnb = Param.Unsigned( + 9, "Log number of third local history GEHL entries" + ) - numEntriesSecondLocalHistories = Param.Unsigned(16, - "Number of entries for second local histories") - numEntriesThirdLocalHistories = Param.Unsigned(16, - "Number of entries for second local histories") + numEntriesSecondLocalHistories = Param.Unsigned( + 16, "Number of entries for second local histories" + ) + 
numEntriesThirdLocalHistories = Param.Unsigned( + 16, "Number of entries for second local histories" + ) numEntriesFirstLocalHistories = 256 + class MultiperspectivePerceptronTAGE64KB(MultiperspectivePerceptronTAGE): - type = 'MultiperspectivePerceptronTAGE64KB' - cxx_class = 'gem5::branch_prediction::MultiperspectivePerceptronTAGE64KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage_64KB.hh' + type = "MultiperspectivePerceptronTAGE64KB" + cxx_class = "gem5::branch_prediction::MultiperspectivePerceptronTAGE64KB" + cxx_header = "cpu/pred/multiperspective_perceptron_tage_64KB.hh" budgetbits = 65536 * 8 + 2048 @@ -716,28 +909,31 @@ class MultiperspectivePerceptronTAGE64KB(MultiperspectivePerceptronTAGE): loop_predictor = MPP_LoopPredictor() statistical_corrector = MPP_StatisticalCorrector_64KB() + class MPP_TAGE_8KB(MPP_TAGE): - type = 'MPP_TAGE_8KB' - cxx_class = 'gem5::branch_prediction::MPP_TAGE_8KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage_8KB.hh' + type = "MPP_TAGE_8KB" + cxx_class = "gem5::branch_prediction::MPP_TAGE_8KB" + cxx_header = "cpu/pred/multiperspective_perceptron_tage_8KB.hh" nHistoryTables = 10 tagTableTagWidths = [0, 7, 7, 7, 8, 9, 10, 10, 11, 13, 13] logTagTableSizes = [12, 8, 8, 9, 9, 8, 8, 8, 7, 6, 7] tunedHistoryLengths = [0, 4, 8, 13, 23, 36, 56, 93, 145, 226, 359] + class MPP_LoopPredictor_8KB(MPP_LoopPredictor): - type = 'MPP_LoopPredictor_8KB' - cxx_class = 'gem5::branch_prediction::MPP_LoopPredictor_8KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage_8KB.hh' + type = "MPP_LoopPredictor_8KB" + cxx_class = "gem5::branch_prediction::MPP_LoopPredictor_8KB" + cxx_header = "cpu/pred/multiperspective_perceptron_tage_8KB.hh" loopTableIterBits = 10 logSizeLoopPred = 4 + class MPP_StatisticalCorrector_8KB(MPP_StatisticalCorrector): - type = 'MPP_StatisticalCorrector_8KB' - cxx_class = 'gem5::branch_prediction::MPP_StatisticalCorrector_8KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage_8KB.hh' + type = 
"MPP_StatisticalCorrector_8KB" + cxx_class = "gem5::branch_prediction::MPP_StatisticalCorrector_8KB" + cxx_header = "cpu/pred/multiperspective_perceptron_tage_8KB.hh" logBias = 7 @@ -751,10 +947,11 @@ class MPP_StatisticalCorrector_8KB(MPP_StatisticalCorrector): numEntriesFirstLocalHistories = 64 + class MultiperspectivePerceptronTAGE8KB(MultiperspectivePerceptronTAGE): - type = 'MultiperspectivePerceptronTAGE8KB' - cxx_class = 'gem5::branch_prediction::MultiperspectivePerceptronTAGE8KB' - cxx_header = 'cpu/pred/multiperspective_perceptron_tage_8KB.hh' + type = "MultiperspectivePerceptronTAGE8KB" + cxx_class = "gem5::branch_prediction::MultiperspectivePerceptronTAGE8KB" + cxx_header = "cpu/pred/multiperspective_perceptron_tage_8KB.hh" budgetbits = 8192 * 8 + 2048 diff --git a/src/cpu/pred/SConscript b/src/cpu/pred/SConscript index 0437bda0f2..f4b6870ec5 100644 --- a/src/cpu/pred/SConscript +++ b/src/cpu/pred/SConscript @@ -28,9 +28,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - SimObject('BranchPredictor.py', sim_objects=[ 'IndirectPredictor', 'SimpleIndirectPredictor', 'BranchPredictor', 'LocalBP', 'TournamentBP', 'BiModeBP', 'TAGEBase', 'TAGE', 'LoopPredictor', diff --git a/src/cpu/pred/bpred_unit.cc b/src/cpu/pred/bpred_unit.cc index ac5156707c..ec751f7dc6 100644 --- a/src/cpu/pred/bpred_unit.cc +++ b/src/cpu/pred/bpred_unit.cc @@ -47,7 +47,6 @@ #include "arch/generic/pcstate.hh" #include "base/compiler.hh" #include "base/trace.hh" -#include "config/the_isa.hh" #include "debug/Branch.hh" namespace gem5 @@ -83,6 +82,8 @@ BPredUnit::BPredUnitStats::BPredUnitStats(statistics::Group *parent) "Number of conditional branches incorrect"), ADD_STAT(BTBLookups, statistics::units::Count::get(), "Number of BTB lookups"), + ADD_STAT(BTBUpdates, statistics::units::Count::get(), + "Number of BTB updates"), ADD_STAT(BTBHits, statistics::units::Count::get(), "Number of BTB hits"), ADD_STAT(BTBHitRatio, statistics::units::Ratio::get(), "BTB Hit Ratio", 
BTBHits / BTBLookups), @@ -174,6 +175,8 @@ BPredUnit::predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, // Now lookup in the BTB or RAS. if (pred_taken) { + // Note: The RAS may be both popped and pushed to + // support coroutines. if (inst->isReturn()) { ++stats.RASUsed; predict_record.wasReturn = true; @@ -193,22 +196,25 @@ BPredUnit::predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, DPRINTF(Branch, "[tid:%i] [sn:%llu] Instruction %s is a return, " "RAS predicted target: %s, RAS index: %i\n", tid, seqNum, pc, *target, predict_record.RASIndex); - } else { + } - if (inst->isCall()) { - RAS[tid].push(pc); - predict_record.pushedRAS = true; + if (inst->isCall()) { + RAS[tid].push(pc); + predict_record.pushedRAS = true; - // Record that it was a call so that the top RAS entry can - // be popped off if the speculation is incorrect. - predict_record.wasCall = true; + // Record that it was a call so that the top RAS entry can + // be popped off if the speculation is incorrect. + predict_record.wasCall = true; - DPRINTF(Branch, - "[tid:%i] [sn:%llu] Instruction %s was a call, adding " - "%s to the RAS index: %i\n", - tid, seqNum, pc, pc, RAS[tid].topIdx()); - } + DPRINTF(Branch, + "[tid:%i] [sn:%llu] Instruction %s was a call, adding " + "%s to the RAS index: %i\n", + tid, seqNum, pc, pc, RAS[tid].topIdx()); + } + // The target address is not predicted by RAS. + // Thus, BTB/IndirectBranch Predictor is employed. + if (!inst->isReturn()) { if (inst->isDirectCtrl() || !iPred) { ++stats.BTBLookups; // Check BTB on direct branches @@ -334,6 +340,13 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid) while (!pred_hist.empty() && pred_hist.front().seqNum > squashed_sn) { + if (pred_hist.front().wasCall && pred_hist.front().pushedRAS) { + // Was a call but predicated false. 
Pop RAS here + DPRINTF(Branch, "[tid:%i] [squash sn:%llu] Squashing" + " Call [sn:%llu] PC: %s Popping RAS\n", tid, squashed_sn, + pred_hist.front().seqNum, pred_hist.front().pc); + RAS[tid].pop(); + } if (pred_hist.front().usedRAS) { if (pred_hist.front().RASTarget != nullptr) { DPRINTF(Branch, "[tid:%i] [squash sn:%llu]" @@ -351,12 +364,6 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid) RAS[tid].restore(pred_hist.front().RASIndex, pred_hist.front().RASTarget.get()); - } else if (pred_hist.front().wasCall && pred_hist.front().pushedRAS) { - // Was a call but predicated false. Pop RAS here - DPRINTF(Branch, "[tid:%i] [squash sn:%llu] Squashing" - " Call [sn:%llu] PC: %s Popping RAS\n", tid, squashed_sn, - pred_hist.front().seqNum, pred_hist.front().pc); - RAS[tid].pop(); } // This call should delete the bpHistory. @@ -474,10 +481,22 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, "PC %#x\n", tid, squashed_sn, hist_it->seqNum, hist_it->pc); + ++stats.BTBUpdates; BTB.update(hist_it->pc, corr_target, tid); } } else { //Actually not Taken + if (hist_it->wasCall && hist_it->pushedRAS) { + //Was a Call but predicated false. Pop RAS here + DPRINTF(Branch, + "[tid:%i] [squash sn:%llu] " + "Incorrectly predicted " + "Call [sn:%llu] PC: %s Popping RAS\n", + tid, squashed_sn, + hist_it->seqNum, hist_it->pc); + RAS[tid].pop(); + hist_it->pushedRAS = false; + } if (hist_it->usedRAS) { DPRINTF(Branch, "[tid:%i] [squash sn:%llu] Incorrectly predicted " @@ -490,16 +509,6 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, hist_it->RASIndex, *hist_it->RASTarget); RAS[tid].restore(hist_it->RASIndex, hist_it->RASTarget.get()); hist_it->usedRAS = false; - } else if (hist_it->wasCall && hist_it->pushedRAS) { - //Was a Call but predicated false. 
Pop RAS here - DPRINTF(Branch, - "[tid:%i] [squash sn:%llu] " - "Incorrectly predicted " - "Call [sn:%llu] PC: %s Popping RAS\n", - tid, squashed_sn, - hist_it->seqNum, hist_it->pc); - RAS[tid].pop(); - hist_it->pushedRAS = false; } } } else { diff --git a/src/cpu/pred/bpred_unit.hh b/src/cpu/pred/bpred_unit.hh index e57f8e4e18..4af1d876a8 100644 --- a/src/cpu/pred/bpred_unit.hh +++ b/src/cpu/pred/bpred_unit.hh @@ -191,6 +191,7 @@ class BPredUnit : public SimObject void BTBUpdate(Addr instPC, const PCStateBase &target) { + ++stats.BTBUpdates; BTB.update(instPC, target, 0); } @@ -314,6 +315,8 @@ class BPredUnit : public SimObject statistics::Scalar condIncorrect; /** Stat for number of BTB lookups. */ statistics::Scalar BTBLookups; + /** Stat for number of BTB updates. */ + statistics::Scalar BTBUpdates; /** Stat for number of BTB hits. */ statistics::Scalar BTBHits; /** Stat for the ratio between BTB hits and BTB lookups. */ diff --git a/src/cpu/pred/btb.hh b/src/cpu/pred/btb.hh index 206ee8d911..9213053d77 100644 --- a/src/cpu/pred/btb.hh +++ b/src/cpu/pred/btb.hh @@ -32,7 +32,6 @@ #include "arch/generic/pcstate.hh" #include "base/logging.hh" #include "base/types.hh" -#include "config/the_isa.hh" namespace gem5 { diff --git a/src/cpu/pred/indirect.hh b/src/cpu/pred/indirect.hh index 7bec64f542..5f855b14fd 100644 --- a/src/cpu/pred/indirect.hh +++ b/src/cpu/pred/indirect.hh @@ -30,7 +30,6 @@ #define __CPU_PRED_INDIRECT_BASE_HH__ #include "arch/generic/pcstate.hh" -#include "config/the_isa.hh" #include "cpu/inst_seq.hh" #include "params/IndirectPredictor.hh" #include "sim/sim_object.hh" diff --git a/src/cpu/pred/simple_indirect.hh b/src/cpu/pred/simple_indirect.hh index 8587047911..7f7a73bdeb 100644 --- a/src/cpu/pred/simple_indirect.hh +++ b/src/cpu/pred/simple_indirect.hh @@ -31,7 +31,6 @@ #include -#include "config/the_isa.hh" #include "cpu/inst_seq.hh" #include "cpu/pred/indirect.hh" #include "params/SimpleIndirectPredictor.hh" diff --git a/src/cpu/reg_class.cc 
b/src/cpu/reg_class.cc index 444d1fff14..12d1c7f2e0 100644 --- a/src/cpu/reg_class.cc +++ b/src/cpu/reg_class.cc @@ -52,7 +52,7 @@ namespace gem5 std::string RegClassOps::regName(const RegId &id) const { - return csprintf("r%d", id.index()); + return csprintf("%s[%d]", id.className(), id.index()); } std::string @@ -71,14 +71,4 @@ RegClassOps::valString(const void *val, size_t size) const return printByteBuf(val, size, ByteOrder::big); } -const char *RegId::regClassStrings[] = { - "IntRegClass", - "FloatRegClass", - "VecRegClass", - "VecElemClass", - "VecPredRegClass", - "CCRegClass", - "MiscRegClass" -}; - } // namespace gem5 diff --git a/src/cpu/reg_class.hh b/src/cpu/reg_class.hh index 3372fce403..080c758413 100644 --- a/src/cpu/reg_class.hh +++ b/src/cpu/reg_class.hh @@ -42,12 +42,14 @@ #define __CPU__REG_CLASS_HH__ #include +#include #include #include "base/cprintf.hh" #include "base/debug.hh" #include "base/intmath.hh" #include "base/types.hh" +#include "debug/InvalidReg.hh" namespace gem5 { @@ -67,56 +69,18 @@ enum RegClassType InvalidRegClass = -1 }; -class RegId; +// "Standard" register class names. Using these is encouraged but optional. +inline constexpr char IntRegClassName[] = "integer"; +inline constexpr char FloatRegClassName[] = "floating_point"; +inline constexpr char VecRegClassName[] = "vector"; +inline constexpr char VecElemClassName[] = "vector_element"; +inline constexpr char VecPredRegClassName[] = "vector_predicate"; +inline constexpr char CCRegClassName[] = "condition_code"; +inline constexpr char MiscRegClassName[] = "miscellaneous"; -class RegClassOps -{ - public: - /** Print the name of the register specified in id. */ - virtual std::string regName(const RegId &id) const; - /** Print the value of a register pointed to by val of size size. 
*/ - virtual std::string valString(const void *val, size_t size) const; -}; - -class RegClass -{ - private: - size_t _numRegs; - size_t _regBytes; - // This is how much to shift an index by to get an offset of a register in - // a register file from the register index, which would otherwise need to - // be calculated with a multiply. - size_t _regShift; - - static inline RegClassOps defaultOps; - RegClassOps *_ops = &defaultOps; - const debug::Flag &debugFlag; - - public: - constexpr RegClass(size_t num_regs, const debug::Flag &debug_flag, - size_t reg_bytes=sizeof(RegVal)) : - _numRegs(num_regs), _regBytes(reg_bytes), - _regShift(ceilLog2(reg_bytes)), debugFlag(debug_flag) - {} - constexpr RegClass(size_t num_regs, RegClassOps &new_ops, - const debug::Flag &debug_flag, size_t reg_bytes=sizeof(RegVal)) : - RegClass(num_regs, debug_flag, reg_bytes) - { - _ops = &new_ops; - } - - constexpr size_t numRegs() const { return _numRegs; } - constexpr size_t regBytes() const { return _regBytes; } - constexpr size_t regShift() const { return _regShift; } - constexpr const debug::Flag &debug() const { return debugFlag; } - - std::string regName(const RegId &id) const { return _ops->regName(id); } - std::string - valString(const void *val) const - { - return _ops->valString(val, regBytes()); - } -}; +class RegClass; +class RegClassIterator; +class BaseISA; /** Register ID: describe an architectural register with its class and index. 
* This structure is used instead of just the register index to disambiguate @@ -126,18 +90,18 @@ class RegClass class RegId { protected: - static const char* regClassStrings[]; - RegClassType regClass; + const RegClass *_regClass = nullptr; RegIndex regIdx; int numPinnedWrites; friend struct std::hash; + friend class RegClassIterator; public: - constexpr RegId() : RegId(InvalidRegClass, 0) {} + inline constexpr RegId(); - constexpr RegId(RegClassType reg_class, RegIndex reg_idx) - : regClass(reg_class), regIdx(reg_idx), numPinnedWrites(0) + constexpr RegId(const RegClass ®_class, RegIndex reg_idx) + : _regClass(®_class), regIdx(reg_idx), numPinnedWrites(0) {} constexpr operator RegIndex() const @@ -148,7 +112,7 @@ class RegId constexpr bool operator==(const RegId& that) const { - return regClass == that.classValue() && regIdx == that.index(); + return classValue() == that.classValue() && regIdx == that.index(); } constexpr bool @@ -163,8 +127,8 @@ class RegId constexpr bool operator<(const RegId& that) const { - return regClass < that.classValue() || - (regClass == that.classValue() && (regIdx < that.index())); + return classValue() < that.classValue() || + (classValue() == that.classValue() && (regIdx < that.index())); } /** @@ -173,39 +137,221 @@ class RegId constexpr bool isRenameable() const { - return regClass != MiscRegClass && regClass != InvalidRegClass; + return classValue() != MiscRegClass && classValue() != InvalidRegClass; } /** @return true if it is of the specified class. 
*/ - constexpr bool - is(RegClassType reg_class) const - { - return regClass == reg_class; - } + inline constexpr bool is(RegClassType reg_class) const; /** Index accessors */ /** @{ */ constexpr RegIndex index() const { return regIdx; } /** Class accessor */ - constexpr RegClassType classValue() const { return regClass; } + constexpr const RegClass ®Class() const { return *_regClass; } + inline constexpr RegClassType classValue() const; /** Return a const char* with the register class name. */ - constexpr const char* - className() const - { - return regClassStrings[regClass]; - } + inline constexpr const char* className() const; + + inline constexpr bool isFlat() const; + inline RegId flatten(const BaseISA &isa) const; int getNumPinnedWrites() const { return numPinnedWrites; } void setNumPinnedWrites(int num_writes) { numPinnedWrites = num_writes; } - friend std::ostream& - operator<<(std::ostream& os, const RegId& rid) + friend inline std::ostream& operator<<(std::ostream& os, const RegId& rid); +}; + +class RegClassOps +{ + public: + /** Print the name of the register specified in id. */ + virtual std::string regName(const RegId &id) const; + /** Print the value of a register pointed to by val of size size. */ + virtual std::string valString(const void *val, size_t size) const; + /** Flatten register id id using information in the ISA object isa. */ + virtual RegId + flatten(const BaseISA &isa, const RegId &id) const { - return os << rid.className() << "{" << rid.index() << "}"; + return id; } }; +class RegClassIterator; + +class RegClass +{ + private: + RegClassType _type; + const char *_name; + + size_t _numRegs; + size_t _regBytes = sizeof(RegVal); + // This is how much to shift an index by to get an offset of a register in + // a register file from the register index, which would otherwise need to + // be calculated with a multiply. 
+ size_t _regShift = ceilLog2(sizeof(RegVal)); + + static inline RegClassOps defaultOps; + const RegClassOps *_ops = &defaultOps; + const debug::Flag &debugFlag; + + bool _flat = true; + + public: + constexpr RegClass(RegClassType type, const char *new_name, + size_t num_regs, const debug::Flag &debug_flag) : + _type(type), _name(new_name), _numRegs(num_regs), debugFlag(debug_flag) + {} + + constexpr RegClass + needsFlattening() const + { + RegClass reg_class = *this; + reg_class._flat = false; + return reg_class; + } + + constexpr RegClass + ops(const RegClassOps &new_ops) const + { + RegClass reg_class = *this; + reg_class._ops = &new_ops; + return reg_class; + } + + template + constexpr RegClass + regType() const + { + RegClass reg_class = *this; + reg_class._regBytes = sizeof(RegType); + reg_class._regShift = ceilLog2(reg_class._regBytes); + return reg_class; + } + + constexpr RegClassType type() const { return _type; } + constexpr const char *name() const { return _name; } + constexpr size_t numRegs() const { return _numRegs; } + constexpr size_t regBytes() const { return _regBytes; } + constexpr size_t regShift() const { return _regShift; } + constexpr const debug::Flag &debug() const { return debugFlag; } + constexpr bool isFlat() const { return _flat; } + + std::string regName(const RegId &id) const { return _ops->regName(id); } + std::string + valString(const void *val) const + { + return _ops->valString(val, regBytes()); + } + RegId + flatten(const BaseISA &isa, const RegId &id) const + { + return isFlat() ? 
id : _ops->flatten(isa, id); + } + + using iterator = RegClassIterator; + + inline iterator begin() const; + inline iterator end() const; + + inline constexpr RegId operator[](RegIndex idx) const; +}; + +inline constexpr RegClass + invalidRegClass(InvalidRegClass, "invalid", 0, debug::InvalidReg); + +constexpr RegId::RegId() : RegId(invalidRegClass, 0) {} + +constexpr bool +RegId::is(RegClassType reg_class) const +{ + return _regClass->type() == reg_class; +} + +constexpr RegClassType RegId::classValue() const { return _regClass->type(); } +constexpr const char* RegId::className() const { return _regClass->name(); } + +constexpr bool RegId::isFlat() const { return _regClass->isFlat(); } +RegId +RegId::flatten(const BaseISA &isa) const +{ + return _regClass->flatten(isa, *this); +} + +std::ostream& +operator<<(std::ostream& os, const RegId& rid) +{ + return os << rid.regClass().regName(rid); +} + +class RegClassIterator +{ + private: + RegId id; + + RegClassIterator(const RegClass ®_class, RegIndex idx) : + id(reg_class, idx) + {} + + friend class RegClass; + + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = std::size_t; + using value_type = const RegId; + using pointer = value_type *; + using reference = value_type &; + + reference operator*() const { return id; } + pointer operator->() { return &id; } + + RegClassIterator & + operator++() + { + id.regIdx++; + return *this; + } + + RegClassIterator + operator++(int) + { + auto tmp = *this; + ++(*this); + return tmp; + } + + bool + operator==(const RegClassIterator &other) const + { + return id == other.id; + } + + bool + operator!=(const RegClassIterator &other) const + { + return id != other.id; + } +}; + +RegClassIterator +RegClass::begin() const +{ + return RegClassIterator(*this, 0); +} + +RegClassIterator +RegClass::end() const +{ + return RegClassIterator(*this, numRegs()); +} + +constexpr RegId +RegClass::operator[](RegIndex idx) const +{ + return RegId(*this, idx); 
+} + template class TypedRegClassOps : public RegClassOps { @@ -250,20 +396,21 @@ class PhysRegId : private RegId bool pinned; public: - explicit PhysRegId() : RegId(InvalidRegClass, -1), flatIdx(-1), + explicit PhysRegId() : RegId(invalidRegClass, -1), flatIdx(-1), numPinnedWritesToComplete(0) {} /** Scalar PhysRegId constructor. */ - explicit PhysRegId(RegClassType _regClass, RegIndex _regIdx, + explicit PhysRegId(const RegClass ®_class, RegIndex _regIdx, RegIndex _flatIdx) - : RegId(_regClass, _regIdx), flatIdx(_flatIdx), + : RegId(reg_class, _regIdx), flatIdx(_flatIdx), numPinnedWritesToComplete(0), pinned(false) {} /** Visible RegId methods */ /** @{ */ using RegId::index; + using RegId::regClass; using RegId::classValue; using RegId::className; using RegId::is; @@ -351,7 +498,7 @@ struct hash { // Extract unique integral values for the effective fields of a RegId. const size_t index = static_cast(reg_id.index()); - const size_t class_num = static_cast(reg_id.regClass); + const size_t class_num = static_cast(reg_id.classValue()); const size_t shifted_class_num = class_num << (sizeof(gem5::RegIndex) << 3); diff --git a/src/cpu/simple/AtomicSimpleCPU.py b/src/cpu/simple/AtomicSimpleCPU.py new file mode 100644 index 0000000000..984db9dd2e --- /dev/null +++ b/src/cpu/simple/AtomicSimpleCPU.py @@ -0,0 +1,52 @@ +# Copyright 2021 Google, Inc. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import m5.defines + +arch_vars = [ + "USE_ARM_ISA", + "USE_MIPS_ISA", + "USE_POWER_ISA", + "USE_RISCV_ISA", + "USE_SPARC_ISA", + "USE_X86_ISA", +] + +enabled = list(filter(lambda var: m5.defines.buildEnv[var], arch_vars)) + +if len(enabled) == 1: + arch = enabled[0] + if arch == "USE_ARM_ISA": + from m5.objects.ArmCPU import ArmAtomicSimpleCPU as AtomicSimpleCPU + elif arch == "USE_MIPS_ISA": + from m5.objects.MipsCPU import MipsAtomicSimpleCPU as AtomicSimpleCPU + elif arch == "USE_POWER_ISA": + from m5.objects.PowerCPU import PowerAtomicSimpleCPU as AtomicSimpleCPU + elif arch == "USE_RISCV_ISA": + from m5.objects.RiscvCPU import RiscvAtomicSimpleCPU as AtomicSimpleCPU + elif arch == "USE_SPARC_ISA": + from m5.objects.SparcCPU import SparcAtomicSimpleCPU as AtomicSimpleCPU + elif arch == "USE_X86_ISA": + from m5.objects.X86CPU import X86AtomicSimpleCPU as AtomicSimpleCPU diff --git a/src/cpu/simple/BaseAtomicSimpleCPU.py b/src/cpu/simple/BaseAtomicSimpleCPU.py index ba3b8121f1..4ee53aef0f 100644 --- a/src/cpu/simple/BaseAtomicSimpleCPU.py +++ b/src/cpu/simple/BaseAtomicSimpleCPU.py @@ -40,18 +40,19 @@ from m5.params import * from m5.objects.BaseSimpleCPU import BaseSimpleCPU from m5.objects.SimPoint import SimPoint + class BaseAtomicSimpleCPU(BaseSimpleCPU): """Simple CPU model executing a configurable number of instructions per cycle. 
This model uses the simplified 'atomic' memory mode.""" - type = 'BaseAtomicSimpleCPU' + type = "BaseAtomicSimpleCPU" cxx_header = "cpu/simple/atomic.hh" - cxx_class = 'gem5::AtomicSimpleCPU' + cxx_class = "gem5::AtomicSimpleCPU" @classmethod def memory_mode(cls): - return 'atomic' + return "atomic" @classmethod def support_take_over(cls): diff --git a/src/cpu/simple/BaseNonCachingSimpleCPU.py b/src/cpu/simple/BaseNonCachingSimpleCPU.py index f5cf1c7a75..58a7324068 100644 --- a/src/cpu/simple/BaseNonCachingSimpleCPU.py +++ b/src/cpu/simple/BaseNonCachingSimpleCPU.py @@ -36,6 +36,7 @@ from m5.params import * from m5.objects.BaseAtomicSimpleCPU import BaseAtomicSimpleCPU + class BaseNonCachingSimpleCPU(BaseAtomicSimpleCPU): """Simple CPU model based on the atomic CPU. Unlike the atomic CPU, this model causes the memory system to bypass caches and is @@ -45,15 +46,15 @@ class BaseNonCachingSimpleCPU(BaseAtomicSimpleCPU): """ - type = 'BaseNonCachingSimpleCPU' + type = "BaseNonCachingSimpleCPU" cxx_header = "cpu/simple/noncaching.hh" - cxx_class = 'gem5::NonCachingSimpleCPU' + cxx_class = "gem5::NonCachingSimpleCPU" numThreads = 1 @classmethod def memory_mode(cls): - return 'atomic_noncaching' + return "atomic_noncaching" @classmethod def support_take_over(cls): diff --git a/src/cpu/simple/BaseSimpleCPU.py b/src/cpu/simple/BaseSimpleCPU.py index 67ba739767..fe7ad751a7 100644 --- a/src/cpu/simple/BaseSimpleCPU.py +++ b/src/cpu/simple/BaseSimpleCPU.py @@ -31,10 +31,11 @@ from m5.objects.BaseCPU import BaseCPU from m5.objects.DummyChecker import DummyChecker from m5.objects.BranchPredictor import * + class BaseSimpleCPU(BaseCPU): - type = 'BaseSimpleCPU' + type = "BaseSimpleCPU" abstract = True cxx_header = "cpu/simple/base.hh" - cxx_class = 'gem5::BaseSimpleCPU' + cxx_class = "gem5::BaseSimpleCPU" branchPred = Param.BranchPredictor(NULL, "Branch Predictor") diff --git a/src/cpu/simple/BaseTimingSimpleCPU.py b/src/cpu/simple/BaseTimingSimpleCPU.py index 
1f317a87be..5761816fa7 100644 --- a/src/cpu/simple/BaseTimingSimpleCPU.py +++ b/src/cpu/simple/BaseTimingSimpleCPU.py @@ -28,14 +28,15 @@ from m5.params import * from m5.objects.BaseSimpleCPU import BaseSimpleCPU + class BaseTimingSimpleCPU(BaseSimpleCPU): - type = 'BaseTimingSimpleCPU' + type = "BaseTimingSimpleCPU" cxx_header = "cpu/simple/timing.hh" - cxx_class = 'gem5::TimingSimpleCPU' + cxx_class = "gem5::TimingSimpleCPU" @classmethod def memory_mode(cls): - return 'timing' + return "timing" @classmethod def support_take_over(cls): diff --git a/src/cpu/simple/NonCachingSimpleCPU.py b/src/cpu/simple/NonCachingSimpleCPU.py new file mode 100644 index 0000000000..ed1f624486 --- /dev/null +++ b/src/cpu/simple/NonCachingSimpleCPU.py @@ -0,0 +1,64 @@ +# Copyright 2021 Google, Inc. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import m5.defines + +arch_vars = [ + "USE_ARM_ISA", + "USE_MIPS_ISA", + "USE_POWER_ISA", + "USE_RISCV_ISA", + "USE_SPARC_ISA", + "USE_X86_ISA", +] + +enabled = list(filter(lambda var: m5.defines.buildEnv[var], arch_vars)) + +if len(enabled) == 1: + arch = enabled[0] + if arch == "USE_ARM_ISA": + from m5.objects.ArmCPU import ( + ArmNonCachingSimpleCPU as NonCachingSimpleCPU, + ) + elif arch == "USE_MIPS_ISA": + from m5.objects.MipsCPU import ( + MipsNonCachingSimpleCPU as NonCachingSimpleCPU, + ) + elif arch == "USE_POWER_ISA": + from m5.objects.PowerCPU import ( + PowerNonCachingSimpleCPU as NonCachingSimpleCPU, + ) + elif arch == "USE_RISCV_ISA": + from m5.objects.RiscvCPU import ( + RiscvNonCachingSimpleCPU as NonCachingSimpleCPU, + ) + elif arch == "USE_SPARC_ISA": + from m5.objects.SparcCPU import ( + SparcNonCachingSimpleCPU as NonCachingSimpleCPU, + ) + elif arch == "USE_X86_ISA": + from m5.objects.X86CPU import ( + X86NonCachingSimpleCPU as NonCachingSimpleCPU, + ) diff --git a/src/cpu/simple/SConscript b/src/cpu/simple/SConscript index 66e43d4df5..ffa6467e9b 100644 --- a/src/cpu/simple/SConscript +++ b/src/cpu/simple/SConscript @@ -28,7 +28,7 @@ Import('*') -if env['CONF']['TARGET_ISA'] != 'null': +if not env['CONF']['USE_NULL_ISA']: SimObject('BaseAtomicSimpleCPU.py', sim_objects=['BaseAtomicSimpleCPU']) Source('atomic.cc') @@ -46,3 +46,8 @@ if env['CONF']['TARGET_ISA'] != 'null': Source('base.cc') SimObject('BaseSimpleCPU.py', 
sim_objects=['BaseSimpleCPU']) + + # For backwards compatibility + SimObject('AtomicSimpleCPU.py', sim_objects=[]) + SimObject('NonCachingSimpleCPU.py', sim_objects=[]) + SimObject('TimingSimpleCPU.py', sim_objects=[]) diff --git a/src/cpu/simple/TimingSimpleCPU.py b/src/cpu/simple/TimingSimpleCPU.py new file mode 100644 index 0000000000..679c86cad9 --- /dev/null +++ b/src/cpu/simple/TimingSimpleCPU.py @@ -0,0 +1,52 @@ +# Copyright 2021 Google, Inc. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import m5.defines + +arch_vars = [ + "USE_ARM_ISA", + "USE_MIPS_ISA", + "USE_POWER_ISA", + "USE_RISCV_ISA", + "USE_SPARC_ISA", + "USE_X86_ISA", +] + +enabled = list(filter(lambda var: m5.defines.buildEnv[var], arch_vars)) + +if len(enabled) == 1: + arch = enabled[0] + if arch == "USE_ARM_ISA": + from m5.objects.ArmCPU import ArmTimingSimpleCPU as TimingSimpleCPU + elif arch == "USE_MIPS_ISA": + from m5.objects.MipsCPU import MipsTimingSimpleCPU as TimingSimpleCPU + elif arch == "USE_POWER_ISA": + from m5.objects.PowerCPU import PowerTimingSimpleCPU as TimingSimpleCPU + elif arch == "USE_RISCV_ISA": + from m5.objects.RiscvCPU import RiscvTimingSimpleCPU as TimingSimpleCPU + elif arch == "USE_SPARC_ISA": + from m5.objects.SparcCPU import SparcTimingSimpleCPU as TimingSimpleCPU + elif arch == "USE_X86_ISA": + from m5.objects.X86CPU import X86TimingSimpleCPU as TimingSimpleCPU diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc index 9cf7a294b9..d6638b3654 100644 --- a/src/cpu/simple/atomic.cc +++ b/src/cpu/simple/atomic.cc @@ -43,7 +43,6 @@ #include "arch/generic/decoder.hh" #include "base/output.hh" -#include "config/the_isa.hh" #include "cpu/exetrace.hh" #include "cpu/utils.hh" #include "debug/Drain.hh" diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc index 426aa44be2..ab67f39496 100644 --- a/src/cpu/simple/base.cc +++ b/src/cpu/simple/base.cc @@ -49,7 +49,6 @@ #include "base/pollevent.hh" #include "base/trace.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/checker/cpu.hh" #include "cpu/checker/thread_context.hh" diff --git a/src/cpu/simple/base.hh b/src/cpu/simple/base.hh index 3fdd019774..df5290cf3c 100644 --- a/src/cpu/simple/base.hh +++ b/src/cpu/simple/base.hh @@ -68,7 +68,7 @@ class Process; class Processor; class ThreadContext; -namespace Trace +namespace trace { class InstRecord; } @@ -94,7 +94,7 @@ class BaseSimpleCPU : public BaseCPU virtual ~BaseSimpleCPU(); void wakeup(ThreadID 
tid) override; public: - Trace::InstRecord *traceData; + trace::InstRecord *traceData; CheckerCPU *checker; std::vector threadInfo; diff --git a/src/cpu/simple/exec_context.hh b/src/cpu/simple/exec_context.hh index fa3c61c7fb..e51ec88dce 100644 --- a/src/cpu/simple/exec_context.hh +++ b/src/cpu/simple/exec_context.hh @@ -41,9 +41,7 @@ #ifndef __CPU_SIMPLE_EXEC_CONTEXT_HH__ #define __CPU_SIMPLE_EXEC_CONTEXT_HH__ -#include "arch/vecregs.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/exec_context.hh" #include "cpu/reg_class.hh" diff --git a/src/cpu/simple/probes/SConscript b/src/cpu/simple/probes/SConscript index 8b1aa9a7fb..e9fbbb306c 100644 --- a/src/cpu/simple/probes/SConscript +++ b/src/cpu/simple/probes/SConscript @@ -28,6 +28,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] != 'null': +if not env['CONF']['USE_NULL_ISA']: SimObject('SimPoint.py', sim_objects=['SimPoint']) Source('simpoint.cc') diff --git a/src/cpu/simple/probes/SimPoint.py b/src/cpu/simple/probes/SimPoint.py index 9dd3077e55..6f2f13ba74 100644 --- a/src/cpu/simple/probes/SimPoint.py +++ b/src/cpu/simple/probes/SimPoint.py @@ -36,12 +36,13 @@ from m5.params import * from m5.objects.Probe import ProbeListenerObject + class SimPoint(ProbeListenerObject): """Probe for collecting SimPoint Basic Block Vectors (BBVs).""" - type = 'SimPoint' + type = "SimPoint" cxx_header = "cpu/simple/probes/simpoint.hh" - cxx_class = 'gem5::SimPoint' + cxx_class = "gem5::SimPoint" interval = Param.UInt64(100000000, "Interval Size (insts)") profile_file = Param.String("simpoint.bb.gz", "BBV (output) file") diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc index 188611f3d3..c6348da16a 100644 --- a/src/cpu/simple/timing.cc +++ b/src/cpu/simple/timing.cc @@ -43,7 +43,6 @@ #include "arch/generic/decoder.hh" #include "base/compiler.hh" -#include "config/the_isa.hh" #include "cpu/exetrace.hh" #include "debug/Config.hh" #include "debug/Drain.hh" diff --git 
a/src/cpu/simple_thread.cc b/src/cpu/simple_thread.cc index e9ffa21e8b..4c4e7dcdb6 100644 --- a/src/cpu/simple_thread.cc +++ b/src/cpu/simple_thread.cc @@ -48,7 +48,6 @@ #include "base/cprintf.hh" #include "base/output.hh" #include "base/trace.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/simple/base.hh" #include "cpu/thread_context.hh" @@ -71,14 +70,14 @@ SimpleThread::SimpleThread(BaseCPU *_cpu, int _thread_num, System *_sys, BaseISA *_isa, InstDecoder *_decoder) : ThreadState(_cpu, _thread_num, _process), regFiles{{ - {_isa->regClasses().at(IntRegClass)}, - {_isa->regClasses().at(FloatRegClass)}, - {_isa->regClasses().at(VecRegClass)}, - {_isa->regClasses().at(VecElemClass)}, - {_isa->regClasses().at(VecPredRegClass)}, - {_isa->regClasses().at(CCRegClass)} + {*_isa->regClasses().at(IntRegClass)}, + {*_isa->regClasses().at(FloatRegClass)}, + {*_isa->regClasses().at(VecRegClass)}, + {*_isa->regClasses().at(VecElemClass)}, + {*_isa->regClasses().at(VecPredRegClass)}, + {*_isa->regClasses().at(CCRegClass)} }}, - isa(dynamic_cast(_isa)), + isa(_isa), predicate(true), memAccPredicate(true), comInstEventQueue("instruction-based event queue"), system(_sys), mmu(_mmu), decoder(_decoder), diff --git a/src/cpu/simple_thread.hh b/src/cpu/simple_thread.hh index 1de5b370ae..5a60d2ac16 100644 --- a/src/cpu/simple_thread.hh +++ b/src/cpu/simple_thread.hh @@ -49,11 +49,8 @@ #include "arch/generic/mmu.hh" #include "arch/generic/pcstate.hh" #include "arch/generic/tlb.hh" -#include "arch/isa.hh" -#include "arch/vecregs.hh" #include "base/logging.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/regfile.hh" #include "cpu/thread_context.hh" #include "cpu/thread_state.hh" @@ -100,7 +97,7 @@ class SimpleThread : public ThreadState, public ThreadContext protected: std::array regFiles; - TheISA::ISA *const isa; // one "instance" of the current ISA. + BaseISA *const isa; // one "instance" of the current ISA. 
std::unique_ptr _pcState; @@ -291,12 +288,6 @@ class SimpleThread : public ThreadState, public ThreadContext return isa->setMiscReg(misc_reg, val); } - RegId - flattenRegId(const RegId& regId) const override - { - return isa->flattenRegId(regId); - } - unsigned readStCondFailures() const override { return storeCondFailures; } bool @@ -320,7 +311,7 @@ class SimpleThread : public ThreadState, public ThreadContext RegVal getReg(const RegId &arch_reg) const override { - const RegId reg = flattenRegId(arch_reg); + const RegId reg = arch_reg.flatten(*isa); const RegIndex idx = reg.index(); @@ -333,24 +324,10 @@ class SimpleThread : public ThreadState, public ThreadContext return val; } - RegVal - getRegFlat(const RegId ®) const override - { - const RegIndex idx = reg.index(); - - const auto ®_file = regFiles[reg.classValue()]; - const auto ®_class = reg_file.regClass; - - RegVal val = reg_file.reg(idx); - DPRINTFV(reg_class.debug(), "Reading %s reg %d as %#x.\n", - reg.className(), idx, val); - return val; - } - void getReg(const RegId &arch_reg, void *val) const override { - const RegId reg = flattenRegId(arch_reg); + const RegId reg = arch_reg.flatten(*isa); const RegIndex idx = reg.index(); @@ -363,32 +340,10 @@ class SimpleThread : public ThreadState, public ThreadContext reg_class.valString(val)); } - void - getRegFlat(const RegId ®, void *val) const override - { - const RegIndex idx = reg.index(); - - const auto ®_file = regFiles[reg.classValue()]; - const auto ®_class = reg_file.regClass; - - reg_file.get(idx, val); - DPRINTFV(reg_class.debug(), "Reading %s register %d as %s.\n", - reg.className(), idx, reg_class.valString(val)); - } - void * getWritableReg(const RegId &arch_reg) override { - const RegId reg = flattenRegId(arch_reg); - const RegIndex idx = reg.index(); - auto ®_file = regFiles[reg.classValue()]; - - return reg_file.ptr(idx); - } - - void * - getWritableRegFlat(const RegId ®) override - { + const RegId reg = arch_reg.flatten(*isa); const RegIndex 
idx = reg.index(); auto ®_file = regFiles[reg.classValue()]; @@ -398,7 +353,7 @@ class SimpleThread : public ThreadState, public ThreadContext void setReg(const RegId &arch_reg, RegVal val) override { - const RegId reg = flattenRegId(arch_reg); + const RegId reg = arch_reg.flatten(*isa); if (reg.is(InvalidRegClass)) return; @@ -413,26 +368,10 @@ class SimpleThread : public ThreadState, public ThreadContext reg_file.reg(idx) = val; } - void - setRegFlat(const RegId ®, RegVal val) override - { - if (reg.is(InvalidRegClass)) - return; - - const RegIndex idx = reg.index(); - - auto ®_file = regFiles[reg.classValue()]; - const auto ®_class = reg_file.regClass; - - DPRINTFV(reg_class.debug(), "Setting %s register %d to %#x.\n", - reg.className(), idx, val); - reg_file.reg(idx) = val; - } - void setReg(const RegId &arch_reg, const void *val) override { - const RegId reg = flattenRegId(arch_reg); + const RegId reg = arch_reg.flatten(*isa); const RegIndex idx = reg.index(); @@ -445,19 +384,6 @@ class SimpleThread : public ThreadState, public ThreadContext reg_file.set(idx, val); } - void - setRegFlat(const RegId ®, const void *val) override - { - const RegIndex idx = reg.index(); - - auto ®_file = regFiles[reg.classValue()]; - const auto ®_class = reg_file.regClass; - - DPRINTFV(reg_class.debug(), "Setting %s register %d to %s.\n", - reg.className(), idx, reg_class.valString(val)); - reg_file.set(idx, val); - } - // hardware transactional memory void htmAbortTransaction(uint64_t htm_uid, HtmFailureFaultCause cause) override; diff --git a/src/cpu/static_inst.hh b/src/cpu/static_inst.hh index af5975ecc1..21ce2aaf8b 100644 --- a/src/cpu/static_inst.hh +++ b/src/cpu/static_inst.hh @@ -72,10 +72,10 @@ namespace loader class SymbolTable; } // namespace loader -namespace Trace +namespace trace { class InstRecord; -} // namespace Trace +} // namespace trace /** * Base, ISA-independent static instruction class. 
@@ -283,17 +283,17 @@ class StaticInst : public RefCounted, public StaticInstFlags virtual ~StaticInst() {}; virtual Fault execute(ExecContext *xc, - Trace::InstRecord *traceData) const = 0; + trace::InstRecord *traceData) const = 0; virtual Fault - initiateAcc(ExecContext *xc, Trace::InstRecord *traceData) const + initiateAcc(ExecContext *xc, trace::InstRecord *traceData) const { panic("initiateAcc not defined!"); } virtual Fault completeAcc(Packet *pkt, ExecContext *xc, - Trace::InstRecord *trace_data) const + trace::InstRecord *trace_data) const { panic("completeAcc not defined!"); } diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.py b/src/cpu/testers/directedtest/RubyDirectedTester.py index ec7797e5ab..b9297b058b 100644 --- a/src/cpu/testers/directedtest/RubyDirectedTester.py +++ b/src/cpu/testers/directedtest/RubyDirectedTester.py @@ -30,36 +30,41 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class DirectedGenerator(SimObject): - type = 'DirectedGenerator' + type = "DirectedGenerator" abstract = True cxx_header = "cpu/testers/directedtest/DirectedGenerator.hh" - cxx_class = 'gem5::DirectedGenerator' + cxx_class = "gem5::DirectedGenerator" num_cpus = Param.Int("num of cpus") system = Param.System(Parent.any, "System we belong to") + class SeriesRequestGenerator(DirectedGenerator): - type = 'SeriesRequestGenerator' + type = "SeriesRequestGenerator" cxx_header = "cpu/testers/directedtest/SeriesRequestGenerator.hh" - cxx_class = 'gem5::SeriesRequestGenerator' + cxx_class = "gem5::SeriesRequestGenerator" addr_increment_size = Param.Int(64, "address increment size") - num_series = Param.UInt32(1, - "number of different address streams to generate") + num_series = Param.UInt32( + 1, "number of different address streams to generate" + ) percent_writes = Param.Percent(50, "percent of access that are writes") + class InvalidateGenerator(DirectedGenerator): - type = 'InvalidateGenerator' + type = "InvalidateGenerator" 
cxx_header = "cpu/testers/directedtest/InvalidateGenerator.hh" - cxx_class = 'gem5::InvalidateGenerator' + cxx_class = "gem5::InvalidateGenerator" addr_increment_size = Param.Int(64, "address increment size") + class RubyDirectedTester(ClockedObject): - type = 'RubyDirectedTester' + type = "RubyDirectedTester" cxx_header = "cpu/testers/directedtest/RubyDirectedTester.hh" - cxx_class = 'gem5::RubyDirectedTester' + cxx_class = "gem5::RubyDirectedTester" cpuPort = VectorRequestPort("the cpu ports") requests_to_complete = Param.Int("checks to complete") diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py index f23a141b30..6b7e7b8ab4 100644 --- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py +++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py @@ -28,30 +28,50 @@ from m5.objects.ClockedObject import ClockedObject from m5.params import * from m5.proxy import * + class GarnetSyntheticTraffic(ClockedObject): - type = 'GarnetSyntheticTraffic' - cxx_header = \ + type = "GarnetSyntheticTraffic" + cxx_header = ( "cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh" - cxx_class = 'gem5::GarnetSyntheticTraffic' + ) + cxx_class = "gem5::GarnetSyntheticTraffic" block_offset = Param.Int(6, "block offset in bits") num_dest = Param.Int(1, "Number of Destinations") memory_size = Param.Int(65536, "memory size") sim_cycles = Param.Int(1000, "Number of simulation cycles") - num_packets_max = Param.Int(-1, "Max number of packets to send. \ - Default is to keep sending till simulation ends") - single_sender = Param.Int(-1, "Send only from this node. \ - By default every node sends") - single_dest = Param.Int(-1, "Send only to this dest. \ - Default depends on traffic_type") + num_packets_max = Param.Int( + -1, + "Max number of packets to send. 
\ + Default is to keep sending till simulation ends", + ) + single_sender = Param.Int( + -1, + "Send only from this node. \ + By default every node sends", + ) + single_dest = Param.Int( + -1, + "Send only to this dest. \ + Default depends on traffic_type", + ) traffic_type = Param.String("uniform_random", "Traffic type") inj_rate = Param.Float(0.1, "Packet injection rate") - inj_vnet = Param.Int(-1, "Vnet to inject in. \ + inj_vnet = Param.Int( + -1, + "Vnet to inject in. \ 0 and 1 are 1-flit, 2 is 5-flit. \ - Default is to inject in all three vnets") - precision = Param.Int(3, "Number of digits of precision \ - after decimal point") - response_limit = Param.Cycles(5000000, "Cycles before exiting \ - due to lack of progress") + Default is to inject in all three vnets", + ) + precision = Param.Int( + 3, + "Number of digits of precision \ + after decimal point", + ) + response_limit = Param.Cycles( + 5000000, + "Cycles before exiting \ + due to lack of progress", + ) test = RequestPort("Port to the memory system to test") system = Param.System(Parent.any, "System we belong to") diff --git a/src/cpu/testers/gpu_ruby_test/CpuThread.py b/src/cpu/testers/gpu_ruby_test/CpuThread.py index a9fa2efecd..f40df272a4 100644 --- a/src/cpu/testers/gpu_ruby_test/CpuThread.py +++ b/src/cpu/testers/gpu_ruby_test/CpuThread.py @@ -32,7 +32,8 @@ from m5.proxy import * from m5.objects.TesterThread import TesterThread + class CpuThread(TesterThread): - type = 'CpuThread' + type = "CpuThread" cxx_header = "cpu/testers/gpu_ruby_test/cpu_thread.hh" - cxx_class = 'gem5::CpuThread' + cxx_class = "gem5::CpuThread" diff --git a/src/cpu/testers/gpu_ruby_test/DmaThread.py b/src/cpu/testers/gpu_ruby_test/DmaThread.py index c5ab564e21..0a3dbc7289 100644 --- a/src/cpu/testers/gpu_ruby_test/DmaThread.py +++ b/src/cpu/testers/gpu_ruby_test/DmaThread.py @@ -32,7 +32,8 @@ from m5.proxy import * from m5.objects.TesterThread import TesterThread + class DmaThread(TesterThread): - type = 'DmaThread' + type 
= "DmaThread" cxx_header = "cpu/testers/gpu_ruby_test/dma_thread.hh" - cxx_class = 'gem5::DmaThread' + cxx_class = "gem5::DmaThread" diff --git a/src/cpu/testers/gpu_ruby_test/GpuWavefront.py b/src/cpu/testers/gpu_ruby_test/GpuWavefront.py index f71ed3c14f..625af91fa4 100644 --- a/src/cpu/testers/gpu_ruby_test/GpuWavefront.py +++ b/src/cpu/testers/gpu_ruby_test/GpuWavefront.py @@ -32,9 +32,10 @@ from m5.proxy import * from m5.objects.TesterThread import TesterThread + class GpuWavefront(TesterThread): - type = 'GpuWavefront' + type = "GpuWavefront" cxx_header = "cpu/testers/gpu_ruby_test/gpu_wavefront.hh" - cxx_class = 'gem5::GpuWavefront' + cxx_class = "gem5::GpuWavefront" cu_id = Param.Int("Compute Unit ID") diff --git a/src/cpu/testers/gpu_ruby_test/ProtocolTester.py b/src/cpu/testers/gpu_ruby_test/ProtocolTester.py index 3a87337d31..178376bee9 100644 --- a/src/cpu/testers/gpu_ruby_test/ProtocolTester.py +++ b/src/cpu/testers/gpu_ruby_test/ProtocolTester.py @@ -31,10 +31,11 @@ from m5.objects.ClockedObject import ClockedObject from m5.params import * from m5.proxy import * + class ProtocolTester(ClockedObject): - type = 'ProtocolTester' + type = "ProtocolTester" cxx_header = "cpu/testers/gpu_ruby_test/protocol_tester.hh" - cxx_class = 'gem5::ProtocolTester' + cxx_class = "gem5::ProtocolTester" cpu_ports = VectorRequestPort("Ports for CPUs") dma_ports = VectorRequestPort("Ports for DMAs") @@ -49,23 +50,30 @@ class ProtocolTester(ClockedObject): wavefronts_per_cu = Param.Int(1, "Number of wavefronts per CU") workitems_per_wavefront = Param.Int(64, "Number of workitems per wf") - max_cu_tokens = Param.Int(4, "Maximum number of tokens, i.e., the number" - " of instructions that can be uncoalesced" - " before back-pressure occurs from the" - " coalescer.") + max_cu_tokens = Param.Int( + 4, + "Maximum number of tokens, i.e., the number" + " of instructions that can be uncoalesced" + " before back-pressure occurs from the" + " coalescer.", + ) cpu_threads = 
VectorParam.CpuThread("All cpus") dma_threads = VectorParam.DmaThread("All DMAs") wavefronts = VectorParam.GpuWavefront("All wavefronts") num_atomic_locations = Param.Int(2, "Number of atomic locations") - num_normal_locs_per_atomic = Param.Int(1000, \ - "Number of normal locations per atomic") + num_normal_locs_per_atomic = Param.Int( + 1000, "Number of normal locations per atomic" + ) episode_length = Param.Int(10, "Number of actions per episode") max_num_episodes = Param.Int(20, "Maximum number of episodes") debug_tester = Param.Bool(False, "Are we debugging the tester?") - random_seed = Param.Int(0, "Random seed number. Default value (0) means \ - using runtime-specific value.") + random_seed = Param.Int( + 0, + "Random seed number. Default value (0) means \ + using runtime-specific value.", + ) log_file = Param.String("Log file's name") system = Param.System(Parent.any, "System we belong to") diff --git a/src/cpu/testers/gpu_ruby_test/README b/src/cpu/testers/gpu_ruby_test/README index 73fd55421c..00e4c8e781 100644 --- a/src/cpu/testers/gpu_ruby_test/README +++ b/src/cpu/testers/gpu_ruby_test/README @@ -124,4 +124,4 @@ For more detail, please see the following paper: T. Ta, X. Zhang, A. Gutierrez and B. M. Beckmann, "Autonomous Data-Race-Free GPU Testing," 2019 IEEE International Symposium on Workload Characterization (IISWC), Orlando, FL, USA, 2019, pp. 81-92, doi: -10.1109/IISWC47752.2019.9042019. \ No newline at end of file +10.1109/IISWC47752.2019.9042019. 
diff --git a/src/cpu/testers/gpu_ruby_test/TesterDma.py b/src/cpu/testers/gpu_ruby_test/TesterDma.py index 1555393804..81aa1831fc 100644 --- a/src/cpu/testers/gpu_ruby_test/TesterDma.py +++ b/src/cpu/testers/gpu_ruby_test/TesterDma.py @@ -29,7 +29,8 @@ from m5.objects.Device import DmaDevice + class TesterDma(DmaDevice): - type = 'TesterDma' + type = "TesterDma" cxx_header = "cpu/testers/gpu_ruby_test/tester_dma.hh" - cxx_class = 'gem5::TesterDma' + cxx_class = "gem5::TesterDma" diff --git a/src/cpu/testers/gpu_ruby_test/TesterThread.py b/src/cpu/testers/gpu_ruby_test/TesterThread.py index c743dd5d95..49388a76e1 100644 --- a/src/cpu/testers/gpu_ruby_test/TesterThread.py +++ b/src/cpu/testers/gpu_ruby_test/TesterThread.py @@ -31,11 +31,12 @@ from m5.objects.ClockedObject import ClockedObject from m5.params import * from m5.proxy import * + class TesterThread(ClockedObject): - type = 'TesterThread' + type = "TesterThread" abstract = True cxx_header = "cpu/testers/gpu_ruby_test/tester_thread.hh" - cxx_class = 'gem5::TesterThread' + cxx_class = "gem5::TesterThread" thread_id = Param.Int("Unique TesterThread ID") num_lanes = Param.Int("Number of lanes this thread has") diff --git a/src/cpu/testers/memtest/MemTest.py b/src/cpu/testers/memtest/MemTest.py index 7be4a76337..e8492b5402 100644 --- a/src/cpu/testers/memtest/MemTest.py +++ b/src/cpu/testers/memtest/MemTest.py @@ -41,10 +41,11 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class MemTest(ClockedObject): - type = 'MemTest' + type = "MemTest" cxx_header = "cpu/testers/memtest/memtest.hh" - cxx_class = 'gem5::MemTest' + cxx_class = "gem5::MemTest" # Interval of packet injection, the size of the memory range # touched, and an optional stop condition @@ -53,7 +54,8 @@ class MemTest(ClockedObject): base_addr_1 = Param.Addr(0x100000, "Start of the first testing region") base_addr_2 = Param.Addr(0x400000, "Start of the second testing region") uncacheable_base_addr = Param.Addr( - 0x800000, 
"Start of the uncacheable testing region") + 0x800000, "Start of the uncacheable testing region" + ) max_loads = Param.Counter(0, "Number of loads to execute before exiting") # Control the mix of packets and if functional accesses are part of @@ -64,15 +66,18 @@ class MemTest(ClockedObject): # Determine how often to print progress messages and what timeout # to use for checking progress of both requests and responses - progress_interval = Param.Counter(1000000, - "Progress report interval (in accesses)") - progress_check = Param.Cycles(5000000, "Cycles before exiting " \ - "due to lack of progress") + progress_interval = Param.Counter( + 1000000, "Progress report interval (in accesses)" + ) + progress_check = Param.Cycles( + 5000000, "Cycles before exiting " "due to lack of progress" + ) port = RequestPort("Port to the memory system") system = Param.System(Parent.any, "System this tester is part of") # Add the ability to supress error responses on functional # accesses as Ruby needs this - suppress_func_errors = Param.Bool(False, "Suppress panic when "\ - "functional accesses fail.") + suppress_func_errors = Param.Bool( + False, "Suppress panic when " "functional accesses fail." 
+ ) diff --git a/src/cpu/testers/rubytest/RubyTester.py b/src/cpu/testers/rubytest/RubyTester.py index 408dc04260..3fabece8c4 100644 --- a/src/cpu/testers/rubytest/RubyTester.py +++ b/src/cpu/testers/rubytest/RubyTester.py @@ -30,14 +30,16 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class RubyTester(ClockedObject): - type = 'RubyTester' + type = "RubyTester" cxx_header = "cpu/testers/rubytest/RubyTester.hh" - cxx_class = 'gem5::RubyTester' + cxx_class = "gem5::RubyTester" num_cpus = Param.Int("number of cpus / RubyPorts") - cpuInstDataPort = VectorRequestPort("cpu combo ports to inst & " - "data caches") + cpuInstDataPort = VectorRequestPort( + "cpu combo ports to inst & " "data caches" + ) cpuInstPort = VectorRequestPort("cpu ports to only inst caches") cpuDataPort = VectorRequestPort("cpu ports to only data caches") checks_to_complete = Param.Int(100, "checks to complete") diff --git a/src/cpu/testers/traffic_gen/BaseTrafficGen.py b/src/cpu/testers/traffic_gen/BaseTrafficGen.py index ae4df35552..b8de198f9a 100644 --- a/src/cpu/testers/traffic_gen/BaseTrafficGen.py +++ b/src/cpu/testers/traffic_gen/BaseTrafficGen.py @@ -42,7 +42,9 @@ from m5.objects.ClockedObject import ClockedObject # and are meant to initialize the stream and substream IDs for # every memory request, regardless of how the packet has been # generated (Random, Linear, Trace etc) -class StreamGenType(ScopedEnum): vals = [ 'none', 'fixed', 'random' ] +class StreamGenType(ScopedEnum): + vals = ["none", "fixed", "random"] + # The traffic generator is a requestor module that generates stimuli for # the memory system, based on a collection of simple behaviours that @@ -52,10 +54,10 @@ class StreamGenType(ScopedEnum): vals = [ 'none', 'fixed', 'random' ] # components that are not yet modelled in detail, e.g. a video engine # or baseband subsystem in an SoC. 
class BaseTrafficGen(ClockedObject): - type = 'BaseTrafficGen' + type = "BaseTrafficGen" abstract = True cxx_header = "cpu/testers/traffic_gen/traffic_gen.hh" - cxx_class = 'gem5::BaseTrafficGen' + cxx_class = "gem5::BaseTrafficGen" # Port used for sending requests and receiving responses port = RequestPort("This port sends requests and receives responses") @@ -66,23 +68,27 @@ class BaseTrafficGen(ClockedObject): # Should requests respond to back-pressure or not, if true, the # rate of the traffic generator will be slowed down if requests # are not immediately accepted - elastic_req = Param.Bool(False, - "Slow down requests in case of backpressure") + elastic_req = Param.Bool( + False, "Slow down requests in case of backpressure" + ) # Maximum number of requests waiting for response. Set to 0 for an # unlimited number of outstanding requests. - max_outstanding_reqs = Param.Int(0, - "Maximum number of outstanding requests") + max_outstanding_reqs = Param.Int( + 0, "Maximum number of outstanding requests" + ) # Let the user know if we have waited for a retry and not made any # progress for a long period of time. The default value is # somewhat arbitrary and may well have to be tuned. 
- progress_check = Param.Latency('1ms', "Time before exiting " \ - "due to lack of progress") + progress_check = Param.Latency( + "1ms", "Time before exiting " "due to lack of progress" + ) # Generator type used for applying Stream and/or Substream IDs to requests - stream_gen = Param.StreamGenType('none', - "Generator for adding Stream and/or Substream ID's to requests") + stream_gen = Param.StreamGenType( + "none", "Generator for adding Stream and/or Substream ID's to requests" + ) # Sources for Stream/Substream IDs to apply to requests sids = VectorParam.Unsigned([], "StreamIDs to use") @@ -96,7 +102,7 @@ class BaseTrafficGen(ClockedObject): @classmethod def memory_mode(cls): - return 'timing' + return "timing" @classmethod def require_caches(cls): @@ -109,9 +115,9 @@ class BaseTrafficGen(ClockedObject): pass def connectCachedPorts(self, in_ports): - if hasattr(self, '_cached_ports') and (len(self._cached_ports) > 0): + if hasattr(self, "_cached_ports") and (len(self._cached_ports) > 0): for p in self._cached_ports: - exec('self.%s = in_ports' % p) + exec("self.%s = in_ports" % p) else: self.port = in_ports @@ -119,11 +125,12 @@ class BaseTrafficGen(ClockedObject): self.connectCachedPorts(cached_in) def connectBus(self, bus): - self.connectAllPorts(bus.cpu_side_ports, - bus.cpu_side_ports, bus.mem_side_ports) + self.connectAllPorts( + bus.cpu_side_ports, bus.cpu_side_ports, bus.mem_side_ports + ) - def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None): + def addPrivateSplitL1Caches(self, ic, dc, iwc=None, dwc=None): self.dcache = dc self.port = dc.cpu_side - self._cached_ports = ['dcache.mem_side'] + self._cached_ports = ["dcache.mem_side"] self._uncached_ports = [] diff --git a/src/cpu/testers/traffic_gen/GUPSGen.py b/src/cpu/testers/traffic_gen/GUPSGen.py index dafc86de08..31b5ed3e10 100644 --- a/src/cpu/testers/traffic_gen/GUPSGen.py +++ b/src/cpu/testers/traffic_gen/GUPSGen.py @@ -29,32 +29,44 @@ from m5.params import * from m5.proxy import * 
from m5.objects.ClockedObject import ClockedObject + class GUPSGen(ClockedObject): """ This ClockedObject implements the RandomAccess benchmark specified by HPCC benchmarks in https://icl.utk.edu/projectsfiles/hpcc/RandomAccess. """ - type = 'GUPSGen' + + type = "GUPSGen" cxx_header = "cpu/testers/traffic_gen/gups_gen.hh" cxx_class = "gem5::GUPSGen" - system = Param.System(Parent.any, 'System this generator is a part of') + system = Param.System(Parent.any, "System this generator is a part of") - port = RequestPort('Port that should be connected to other components') + port = RequestPort("Port that should be connected to other components") - start_addr = Param.Addr(0, 'Start address for allocating update table,' - ' should be a multiple of block_size') + start_addr = Param.Addr( + 0, + "Start address for allocating update table," + " should be a multiple of block_size", + ) - mem_size = Param.MemorySize('Size for allocating update table, based on' - ' randomAccess benchmark specification, this' - ' should be equal to half of total system memory' - ' ,also should be a power of 2') + mem_size = Param.MemorySize( + "Size for allocating update table, based on" + " randomAccess benchmark specification, this" + " should be equal to half of total system memory" + " ,also should be a power of 2" + ) - update_limit = Param.Int(0, 'The number of updates to issue before the' - ' simulation is over') + update_limit = Param.Int( + 0, "The number of updates to issue before the" " simulation is over" + ) - request_queue_size = Param.Int(1024, 'Maximum number of parallel' - ' outstanding requests') + request_queue_size = Param.Int( + 1024, "Maximum number of parallel" " outstanding requests" + ) - init_memory = Param.Bool(False, 'Whether or not to initialize the memory,' - ' it does not effect the performance') + init_memory = Param.Bool( + False, + "Whether or not to initialize the memory," + " it does not effect the performance", + ) diff --git 
a/src/cpu/testers/traffic_gen/PyTrafficGen.py b/src/cpu/testers/traffic_gen/PyTrafficGen.py index a3097a536f..c3a660f053 100644 --- a/src/cpu/testers/traffic_gen/PyTrafficGen.py +++ b/src/cpu/testers/traffic_gen/PyTrafficGen.py @@ -38,10 +38,11 @@ from m5.SimObject import * from m5.objects.BaseTrafficGen import * + class PyTrafficGen(BaseTrafficGen): - type = 'PyTrafficGen' + type = "PyTrafficGen" cxx_header = "cpu/testers/traffic_gen/pygen.hh" - cxx_class = 'gem5::PyTrafficGen' + cxx_class = "gem5::PyTrafficGen" @cxxMethod def start(self, meta_generator): @@ -61,14 +62,17 @@ class PyTrafficGen(BaseTrafficGen): PyBindMethod("createDramRot"), PyBindMethod("createHybrid"), PyBindMethod("createNvm"), - PyBindMethod("createStrided") + PyBindMethod("createStrided"), ] @cxxMethod(override=True) def createTrace(self, duration, trace_file, addr_offset=0): - if buildEnv['HAVE_PROTOBUF']: - return self.getCCObject().createTrace(duration, trace_file, - addr_offset=addr_offset) + if buildEnv["HAVE_PROTOBUF"]: + return self.getCCObject().createTrace( + duration, trace_file, addr_offset=addr_offset + ) else: - raise NotImplementedError("Trace playback requires that gem5 " - "was built with protobuf support.") + raise NotImplementedError( + "Trace playback requires that gem5 " + "was built with protobuf support." 
+ ) diff --git a/src/cpu/testers/traffic_gen/SConscript b/src/cpu/testers/traffic_gen/SConscript index 098bd7b1d7..f31bd53cdd 100644 --- a/src/cpu/testers/traffic_gen/SConscript +++ b/src/cpu/testers/traffic_gen/SConscript @@ -68,4 +68,3 @@ if env['USE_PYTHON']: SimObject('TrafficGen.py', sim_objects=['TrafficGen'], tags='protobuf') Source('trace_gen.cc', tags='protobuf') Source('traffic_gen.cc', tags='protobuf') - diff --git a/src/cpu/testers/traffic_gen/TrafficGen.py b/src/cpu/testers/traffic_gen/TrafficGen.py index 5a4a0ea70e..6f1aa67bfd 100644 --- a/src/cpu/testers/traffic_gen/TrafficGen.py +++ b/src/cpu/testers/traffic_gen/TrafficGen.py @@ -45,9 +45,9 @@ from m5.objects.BaseTrafficGen import * # arranging them in graphs. The graph transitions can also be # annotated with probabilities, effectively making it a Markov Chain. class TrafficGen(BaseTrafficGen): - type = 'TrafficGen' + type = "TrafficGen" cxx_header = "cpu/testers/traffic_gen/traffic_gen.hh" - cxx_class = 'gem5::TrafficGen' + cxx_class = "gem5::TrafficGen" # Config file to parse for the state descriptions config_file = Param.String("Configuration file describing the behaviour") diff --git a/src/cpu/thread_context.cc b/src/cpu/thread_context.cc index 400bc165a7..69094f87af 100644 --- a/src/cpu/thread_context.cc +++ b/src/cpu/thread_context.cc @@ -46,7 +46,6 @@ #include "arch/generic/vec_pred_reg.hh" #include "base/logging.hh" #include "base/trace.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "debug/Context.hh" #include "debug/Quiesce.hh" @@ -65,56 +64,52 @@ ThreadContext::compare(ThreadContext *one, ThreadContext *two) DPRINTF(Context, "Comparing thread contexts\n"); // First loop through the integer registers. 
- for (int i = 0; i < regClasses.at(IntRegClass).numRegs(); ++i) { - RegVal t1 = one->readIntReg(i); - RegVal t2 = two->readIntReg(i); + for (auto &id: *regClasses.at(IntRegClass)) { + RegVal t1 = one->getReg(id); + RegVal t2 = two->getReg(id); if (t1 != t2) panic("Int reg idx %d doesn't match, one: %#x, two: %#x", - i, t1, t2); + id.index(), t1, t2); } // Then loop through the floating point registers. - for (int i = 0; i < regClasses.at(FloatRegClass).numRegs(); ++i) { - RegVal t1 = one->readFloatReg(i); - RegVal t2 = two->readFloatReg(i); + for (auto &id: *regClasses.at(FloatRegClass)) { + RegVal t1 = one->getReg(id); + RegVal t2 = two->getReg(id); if (t1 != t2) panic("Float reg idx %d doesn't match, one: %#x, two: %#x", - i, t1, t2); + id.index(), t1, t2); } // Then loop through the vector registers. - const auto &vec_class = regClasses.at(VecRegClass); - std::vector vec1(vec_class.regBytes()); - std::vector vec2(vec_class.regBytes()); - for (int i = 0; i < vec_class.numRegs(); ++i) { - RegId rid(VecRegClass, i); - - one->getReg(rid, vec1.data()); - two->getReg(rid, vec2.data()); + const auto *vec_class = regClasses.at(VecRegClass); + std::vector vec1(vec_class->regBytes()); + std::vector vec2(vec_class->regBytes()); + for (auto &id: *regClasses.at(VecRegClass)) { + one->getReg(id, vec1.data()); + two->getReg(id, vec2.data()); if (vec1 != vec2) { panic("Vec reg idx %d doesn't match, one: %#x, two: %#x", - i, vec_class.valString(vec1.data()), - vec_class.valString(vec2.data())); + id.index(), vec_class->valString(vec1.data()), + vec_class->valString(vec2.data())); } } // Then loop through the predicate registers. 
- const auto &vec_pred_class = regClasses.at(VecPredRegClass); - std::vector pred1(vec_pred_class.regBytes()); - std::vector pred2(vec_pred_class.regBytes()); - for (int i = 0; i < vec_pred_class.numRegs(); ++i) { - RegId rid(VecPredRegClass, i); - - one->getReg(rid, pred1.data()); - two->getReg(rid, pred2.data()); + const auto *vec_pred_class = regClasses.at(VecPredRegClass); + std::vector pred1(vec_pred_class->regBytes()); + std::vector pred2(vec_pred_class->regBytes()); + for (auto &id: *regClasses.at(VecPredRegClass)) { + one->getReg(id, pred1.data()); + two->getReg(id, pred2.data()); if (pred1 != pred2) { panic("Pred reg idx %d doesn't match, one: %s, two: %s", - i, vec_pred_class.valString(pred1.data()), - vec_pred_class.valString(pred2.data())); + id.index(), vec_pred_class->valString(pred1.data()), + vec_pred_class->valString(pred2.data())); } } - for (int i = 0; i < regClasses.at(MiscRegClass).numRegs(); ++i) { + for (int i = 0; i < regClasses.at(MiscRegClass)->numRegs(); ++i) { RegVal t1 = one->readMiscRegNoEffect(i); RegVal t2 = two->readMiscRegNoEffect(i); if (t1 != t2) @@ -123,12 +118,12 @@ ThreadContext::compare(ThreadContext *one, ThreadContext *two) } // loop through the Condition Code registers. 
- for (int i = 0; i < regClasses.at(CCRegClass).numRegs(); ++i) { - RegVal t1 = one->readCCReg(i); - RegVal t2 = two->readCCReg(i); + for (auto &id: *regClasses.at(CCRegClass)) { + RegVal t1 = one->getReg(id); + RegVal t2 = two->getReg(id); if (t1 != t2) panic("CC reg idx %d doesn't match, one: %#x, two: %#x", - i, t1, t2); + id.index(), t1, t2); } if (one->pcState() != two->pcState()) panic("PC state doesn't match."); @@ -170,89 +165,38 @@ ThreadContext::quiesceTick(Tick resume) RegVal ThreadContext::getReg(const RegId ®) const { - return getRegFlat(flattenRegId(reg)); -} - -void * -ThreadContext::getWritableReg(const RegId ®) -{ - return getWritableRegFlat(flattenRegId(reg)); + RegVal val; + getReg(reg, &val); + return val; } void ThreadContext::setReg(const RegId ®, RegVal val) { - setRegFlat(flattenRegId(reg), val); -} - -void -ThreadContext::getReg(const RegId ®, void *val) const -{ - getRegFlat(flattenRegId(reg), val); -} - -void -ThreadContext::setReg(const RegId ®, const void *val) -{ - setRegFlat(flattenRegId(reg), val); -} - -RegVal -ThreadContext::getRegFlat(const RegId ®) const -{ - RegVal val; - getRegFlat(reg, &val); - return val; -} - -void -ThreadContext::setRegFlat(const RegId ®, RegVal val) -{ - setRegFlat(reg, &val); + setReg(reg, &val); } void serialize(const ThreadContext &tc, CheckpointOut &cp) { - // Cast away the const so we can get the non-const ISA ptr, which we then - // use to get the const register classes. - auto &nc_tc = const_cast(tc); - const auto ®Classes = nc_tc.getIsaPtr()->regClasses(); + for (const auto *reg_class: tc.getIsaPtr()->regClasses()) { + // MiscRegs are serialized elsewhere. + if (reg_class->type() == MiscRegClass) + continue; - const size_t numFloats = regClasses.at(FloatRegClass).numRegs(); - RegVal floatRegs[numFloats]; - for (int i = 0; i < numFloats; ++i) - floatRegs[i] = tc.readFloatRegFlat(i); - // This is a bit ugly, but needed to maintain backwards - // compatibility. 
- arrayParamOut(cp, "floatRegs.i", floatRegs, numFloats); + const size_t reg_bytes = reg_class->regBytes(); + const size_t reg_count = reg_class->numRegs(); + const size_t array_bytes = reg_bytes * reg_count; - const size_t numVecs = regClasses.at(VecRegClass).numRegs(); - std::vector vecRegs(numVecs); - for (int i = 0; i < numVecs; ++i) { - vecRegs[i] = tc.readVecRegFlat(i); - } - SERIALIZE_CONTAINER(vecRegs); + uint8_t regs[array_bytes]; + auto *reg_ptr = regs; + for (const auto &id: *reg_class) { + tc.getReg(id, reg_ptr); + reg_ptr += reg_bytes; + } - const size_t numPreds = regClasses.at(VecPredRegClass).numRegs(); - std::vector vecPredRegs(numPreds); - for (int i = 0; i < numPreds; ++i) { - tc.getRegFlat(RegId(VecPredRegClass, i), &vecPredRegs[i]); - } - SERIALIZE_CONTAINER(vecPredRegs); - - const size_t numInts = regClasses.at(IntRegClass).numRegs(); - RegVal intRegs[numInts]; - for (int i = 0; i < numInts; ++i) - intRegs[i] = tc.readIntRegFlat(i); - SERIALIZE_ARRAY(intRegs, numInts); - - const size_t numCcs = regClasses.at(CCRegClass).numRegs(); - if (numCcs) { - RegVal ccRegs[numCcs]; - for (int i = 0; i < numCcs; ++i) - ccRegs[i] = tc.readCCRegFlat(i); - SERIALIZE_ARRAY(ccRegs, numCcs); + arrayParamOut(cp, std::string("regs.") + reg_class->name(), regs, + array_bytes); } tc.pcState().serialize(cp); @@ -263,42 +207,24 @@ serialize(const ThreadContext &tc, CheckpointOut &cp) void unserialize(ThreadContext &tc, CheckpointIn &cp) { - const auto ®Classes = tc.getIsaPtr()->regClasses(); + for (const auto *reg_class: tc.getIsaPtr()->regClasses()) { + // MiscRegs are serialized elsewhere. + if (reg_class->type() == MiscRegClass) + continue; - const size_t numFloats = regClasses.at(FloatRegClass).numRegs(); - RegVal floatRegs[numFloats]; - // This is a bit ugly, but needed to maintain backwards - // compatibility. 
- arrayParamIn(cp, "floatRegs.i", floatRegs, numFloats); - for (int i = 0; i < numFloats; ++i) - tc.setFloatRegFlat(i, floatRegs[i]); + const size_t reg_bytes = reg_class->regBytes(); + const size_t reg_count = reg_class->numRegs(); + const size_t array_bytes = reg_bytes * reg_count; - const size_t numVecs = regClasses.at(VecRegClass).numRegs(); - std::vector vecRegs(numVecs); - UNSERIALIZE_CONTAINER(vecRegs); - for (int i = 0; i < numVecs; ++i) { - tc.setVecRegFlat(i, vecRegs[i]); - } + uint8_t regs[array_bytes]; + arrayParamIn(cp, std::string("regs.") + reg_class->name(), regs, + array_bytes); - const size_t numPreds = regClasses.at(VecPredRegClass).numRegs(); - std::vector vecPredRegs(numPreds); - UNSERIALIZE_CONTAINER(vecPredRegs); - for (int i = 0; i < numPreds; ++i) { - tc.setRegFlat(RegId(VecPredRegClass, i), &vecPredRegs[i]); - } - - const size_t numInts = regClasses.at(IntRegClass).numRegs(); - RegVal intRegs[numInts]; - UNSERIALIZE_ARRAY(intRegs, numInts); - for (int i = 0; i < numInts; ++i) - tc.setIntRegFlat(i, intRegs[i]); - - const size_t numCcs = regClasses.at(CCRegClass).numRegs(); - if (numCcs) { - RegVal ccRegs[numCcs]; - UNSERIALIZE_ARRAY(ccRegs, numCcs); - for (int i = 0; i < numCcs; ++i) - tc.setCCRegFlat(i, ccRegs[i]); + auto *reg_ptr = regs; + for (const auto &id: *reg_class) { + tc.setReg(id, reg_ptr); + reg_ptr += reg_bytes; + } } std::unique_ptr pc_state(tc.pcState().clone()); diff --git a/src/cpu/thread_context.hh b/src/cpu/thread_context.hh index 835ac4625e..3f4dc0379c 100644 --- a/src/cpu/thread_context.hh +++ b/src/cpu/thread_context.hh @@ -48,9 +48,7 @@ #include "arch/generic/htm.hh" #include "arch/generic/isa.hh" #include "arch/generic/pcstate.hh" -#include "arch/vecregs.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/pc_event.hh" #include "cpu/reg_class.hh" @@ -59,10 +57,6 @@ namespace gem5 // @todo: Figure out a more architecture independent way to obtain the ITB and // DTB pointers. 
-namespace TheISA -{ - class Decoder; -} class BaseCPU; class BaseMMU; class BaseTLB; @@ -194,78 +188,11 @@ class ThreadContext : public PCEventScope // New accessors for new decoder. // virtual RegVal getReg(const RegId ®) const; - virtual void getReg(const RegId ®, void *val) const; - virtual void *getWritableReg(const RegId ®); + virtual void getReg(const RegId ®, void *val) const = 0; + virtual void *getWritableReg(const RegId ®) = 0; virtual void setReg(const RegId ®, RegVal val); - virtual void setReg(const RegId ®, const void *val); - - RegVal - readIntReg(RegIndex reg_idx) const - { - return getReg(RegId(IntRegClass, reg_idx)); - } - - RegVal - readFloatReg(RegIndex reg_idx) const - { - return getReg(RegId(FloatRegClass, reg_idx)); - } - - TheISA::VecRegContainer - readVecReg(const RegId ®) const - { - TheISA::VecRegContainer val; - getReg(reg, &val); - return val; - } - TheISA::VecRegContainer& - getWritableVecReg(const RegId& reg) - { - return *(TheISA::VecRegContainer *)getWritableReg(reg); - } - - RegVal - readVecElem(const RegId& reg) const - { - return getReg(reg); - } - - RegVal - readCCReg(RegIndex reg_idx) const - { - return getReg(RegId(CCRegClass, reg_idx)); - } - - void - setIntReg(RegIndex reg_idx, RegVal val) - { - setReg(RegId(IntRegClass, reg_idx), val); - } - - void - setFloatReg(RegIndex reg_idx, RegVal val) - { - setReg(RegId(FloatRegClass, reg_idx), val); - } - - void - setVecReg(const RegId& reg, const TheISA::VecRegContainer &val) - { - setReg(reg, &val); - } - - void - setVecElem(const RegId& reg, RegVal val) - { - setReg(reg, val); - } - - void - setCCReg(RegIndex reg_idx, RegVal val) - { - setReg(RegId(CCRegClass, reg_idx), val); - } + virtual void setReg(const RegId ®, const void *val) = 0; virtual const PCStateBase &pcState() const = 0; @@ -287,8 +214,6 @@ class ThreadContext : public PCEventScope virtual void setMiscReg(RegIndex misc_reg, RegVal val) = 0; - virtual RegId flattenRegId(const RegId& reg_id) const = 0; - // Also not 
necessarily the best location for these two. Hopefully will go // away once we decide upon where st cond failures goes. virtual unsigned readStCondFailures() const = 0; @@ -303,89 +228,6 @@ class ThreadContext : public PCEventScope /** function to compare two thread contexts (for debugging) */ static void compare(ThreadContext *one, ThreadContext *two); - /** @{ */ - /** - * Flat register interfaces - * - * Some architectures have different registers visible in - * different modes. Such architectures "flatten" a register (see - * flattenRegId()) to map it into the - * gem5 register file. This interface provides a flat interface to - * the underlying register file, which allows for example - * serialization code to access all registers. - */ - - virtual RegVal getRegFlat(const RegId ®) const; - virtual void getRegFlat(const RegId ®, void *val) const = 0; - virtual void *getWritableRegFlat(const RegId ®) = 0; - - virtual void setRegFlat(const RegId ®, RegVal val); - virtual void setRegFlat(const RegId ®, const void *val) = 0; - - RegVal - readIntRegFlat(RegIndex idx) const - { - return getRegFlat(RegId(IntRegClass, idx)); - } - void - setIntRegFlat(RegIndex idx, RegVal val) - { - setRegFlat(RegId(IntRegClass, idx), val); - } - - RegVal - readFloatRegFlat(RegIndex idx) const - { - return getRegFlat(RegId(FloatRegClass, idx)); - } - void - setFloatRegFlat(RegIndex idx, RegVal val) - { - setRegFlat(RegId(FloatRegClass, idx), val); - } - - TheISA::VecRegContainer - readVecRegFlat(RegIndex idx) const - { - TheISA::VecRegContainer val; - getRegFlat(RegId(VecRegClass, idx), &val); - return val; - } - TheISA::VecRegContainer& - getWritableVecRegFlat(RegIndex idx) - { - return *(TheISA::VecRegContainer *) - getWritableRegFlat(RegId(VecRegClass, idx)); - } - void - setVecRegFlat(RegIndex idx, const TheISA::VecRegContainer& val) - { - setRegFlat(RegId(VecRegClass, idx), &val); - } - - RegVal - readVecElemFlat(RegIndex idx) const - { - return getRegFlat(RegId(VecElemClass, 
idx)); - } - void - setVecElemFlat(RegIndex idx, RegVal val) - { - setRegFlat(RegId(VecElemClass, idx), val); - } - - RegVal - readCCRegFlat(RegIndex idx) const - { - return getRegFlat(RegId(CCRegClass, idx)); - } - void - setCCRegFlat(RegIndex idx, RegVal val) - { - setRegFlat(RegId(CCRegClass, idx), val); - } - /** @} */ - // hardware transactional memory virtual void htmAbortTransaction(uint64_t htm_uid, HtmFailureFaultCause cause) = 0; diff --git a/src/cpu/timing_expr.cc b/src/cpu/timing_expr.cc index 41868a5ac9..d1f8186f88 100644 --- a/src/cpu/timing_expr.cc +++ b/src/cpu/timing_expr.cc @@ -59,13 +59,7 @@ TimingExprEvalContext::TimingExprEvalContext(const StaticInstPtr &inst_, uint64_t TimingExprSrcReg::eval(TimingExprEvalContext &context) { - return context.inst->srcRegIdx(index).index(); -} - -uint64_t -TimingExprReadIntReg::eval(TimingExprEvalContext &context) -{ - return context.thread->readIntReg(reg->eval(context)); + return context.thread->getReg(context.inst->srcRegIdx(index)); } uint64_t diff --git a/src/cpu/timing_expr.hh b/src/cpu/timing_expr.hh index 170364e281..76212bdf49 100644 --- a/src/cpu/timing_expr.hh +++ b/src/cpu/timing_expr.hh @@ -55,7 +55,6 @@ #include "params/TimingExprIf.hh" #include "params/TimingExprLet.hh" #include "params/TimingExprLiteral.hh" -#include "params/TimingExprReadIntReg.hh" #include "params/TimingExprRef.hh" #include "params/TimingExprSrcReg.hh" #include "params/TimingExprUn.hh" @@ -124,19 +123,6 @@ class TimingExprSrcReg : public TimingExpr uint64_t eval(TimingExprEvalContext &context); }; -class TimingExprReadIntReg : public TimingExpr -{ - public: - TimingExpr *reg; - - TimingExprReadIntReg(const TimingExprReadIntRegParams ¶ms) : - TimingExpr(params), - reg(params.reg) - { } - - uint64_t eval(TimingExprEvalContext &context); -}; - class TimingExprLet : public TimingExpr { public: diff --git a/src/cpu/trace/SConscript b/src/cpu/trace/SConscript index ad77009123..223bbd1e51 100644 --- a/src/cpu/trace/SConscript +++ 
b/src/cpu/trace/SConscript @@ -1,8 +1,5 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - # Only build TraceCPU if we have support for protobuf as TraceCPU relies on it SimObject('TraceCPU.py', sim_objects=['TraceCPU'], tags='protobuf') Source('trace_cpu.cc', tags='protobuf') diff --git a/src/cpu/trace/TraceCPU.py b/src/cpu/trace/TraceCPU.py index 713496899b..e2dc1db6c5 100644 --- a/src/cpu/trace/TraceCPU.py +++ b/src/cpu/trace/TraceCPU.py @@ -36,23 +36,25 @@ from m5.params import * from m5.objects.BaseCPU import BaseCPU + class TraceCPU(BaseCPU): """Trace CPU model which replays traces generated in a prior simulation - using DerivO3CPU or its derived classes. It interfaces with L1 caches. + using DerivO3CPU or its derived classes. It interfaces with L1 caches. """ - type = 'TraceCPU' + + type = "TraceCPU" cxx_header = "cpu/trace/trace_cpu.hh" - cxx_class = 'gem5::TraceCPU' + cxx_class = "gem5::TraceCPU" @classmethod def memory_mode(cls): - return 'timing' + return "timing" @classmethod def require_caches(cls): return True - def addPMU(self, pmu = None): + def addPMU(self, pmu=None): pass @classmethod @@ -61,26 +63,32 @@ class TraceCPU(BaseCPU): instTraceFile = Param.String("", "Instruction trace file") dataTraceFile = Param.String("", "Data dependency trace file") - sizeStoreBuffer = Param.Unsigned(16, "Number of entries in the store "\ - "buffer") + sizeStoreBuffer = Param.Unsigned( + 16, "Number of entries in the store " "buffer" + ) sizeLoadBuffer = Param.Unsigned(16, "Number of entries in the load buffer") - sizeROB = Param.Unsigned(40, "Number of entries in the re-order buffer") + sizeROB = Param.Unsigned(40, "Number of entries in the re-order buffer") # Frequency multiplier used to effectively scale the Trace CPU frequency # either up or down. Note that the Trace CPU's clock domain must also be # changed when frequency is scaled. A default value of 1.0 means the same # frequency as was used for generating the traces. 
- freqMultiplier = Param.Float(1.0, "Multiplier scale the Trace CPU "\ - "frequency up or down") + freqMultiplier = Param.Float( + 1.0, "Multiplier scale the Trace CPU " "frequency up or down" + ) # Enable exiting when any one Trace CPU completes execution which is set to # false by default - enableEarlyExit = Param.Bool(False, "Exit when any one Trace CPU "\ - "completes execution") + enableEarlyExit = Param.Bool( + False, "Exit when any one Trace CPU " "completes execution" + ) # If progress msg interval is set to a non-zero value, it is treated as # the interval of committed instructions at which an info message is # printed. - progressMsgInterval = Param.Unsigned(0, "Interval of committed "\ - "instructions at which to print a"\ - " progress msg") + progressMsgInterval = Param.Unsigned( + 0, + "Interval of committed " + "instructions at which to print a" + " progress msg", + ) diff --git a/src/dev/BadDevice.py b/src/dev/BadDevice.py index 75509b745f..2b630c087e 100644 --- a/src/dev/BadDevice.py +++ b/src/dev/BadDevice.py @@ -27,9 +27,10 @@ from m5.params import * from m5.objects.Device import BasicPioDevice + class BadDevice(BasicPioDevice): - type = 'BadDevice' + type = "BadDevice" cxx_header = "dev/baddev.hh" - cxx_class = 'gem5::BadDevice' + cxx_class = "gem5::BadDevice" devicename = Param.String("Name of device to error on") diff --git a/src/dev/Device.py b/src/dev/Device.py index 050261821b..5c3a4193a1 100644 --- a/src/dev/Device.py +++ b/src/dev/Device.py @@ -42,59 +42,75 @@ from m5.util.fdthelper import * from m5.objects.ClockedObject import ClockedObject + class PioDevice(ClockedObject): - type = 'PioDevice' + type = "PioDevice" cxx_header = "dev/io_device.hh" - cxx_class = 'gem5::PioDevice' + cxx_class = "gem5::PioDevice" abstract = True pio = ResponsePort("Programmed I/O port") system = Param.System(Parent.any, "System this device is part of") - def generateBasicPioDeviceNode(self, state, name, pio_addr, - size, interrupts = None): + def 
generateBasicPioDeviceNode( + self, state, name, pio_addr, size, interrupts=None + ): node = FdtNode("%s@%x" % (name, int(pio_addr))) - node.append(FdtPropertyWords("reg", - state.addrCells(pio_addr) + - state.sizeCells(size) )) + node.append( + FdtPropertyWords( + "reg", state.addrCells(pio_addr) + state.sizeCells(size) + ) + ) if interrupts: if any([i.num < 32 for i in interrupts]): - raise(("Interrupt number smaller than 32 "+ - " in PioDevice %s") % name) + raise ( + ("Interrupt number smaller than 32 " + " in PioDevice %s") + % name + ) gic = self._parent.unproxy(self).gic - node.append(FdtPropertyWords("interrupts", sum( - [ i.generateFdtProperty(gic) for i in interrupts], []) )) + node.append( + FdtPropertyWords( + "interrupts", + sum([i.generateFdtProperty(gic) for i in interrupts], []), + ) + ) return node + class BasicPioDevice(PioDevice): - type = 'BasicPioDevice' + type = "BasicPioDevice" cxx_header = "dev/io_device.hh" - cxx_class = 'gem5::BasicPioDevice' + cxx_class = "gem5::BasicPioDevice" abstract = True pio_addr = Param.Addr("Device Address") - pio_latency = Param.Latency('100ns', "Programmed IO latency") + pio_latency = Param.Latency("100ns", "Programmed IO latency") + class DmaDevice(PioDevice): - type = 'DmaDevice' + type = "DmaDevice" cxx_header = "dev/dma_device.hh" - cxx_class = 'gem5::DmaDevice' + cxx_class = "gem5::DmaDevice" abstract = True dma = RequestPort("DMA port") _iommu = None - sid = Param.Unsigned(0, + sid = Param.Unsigned( + 0, "Stream identifier used by an IOMMU to distinguish amongst " - "several devices attached to it") - ssid = Param.Unsigned(0, + "several devices attached to it", + ) + ssid = Param.Unsigned( + 0, "Substream identifier used by an IOMMU to distinguish amongst " - "several devices attached to it") + "several devices attached to it", + ) def addIommuProperty(self, state, node): """ @@ -105,19 +121,24 @@ class DmaDevice(PioDevice): a dma device and the iommu. 
""" if self._iommu is not None: - node.append(FdtPropertyWords("iommus", - [ state.phandle(self._iommu), self.sid ])) + node.append( + FdtPropertyWords( + "iommus", [state.phandle(self._iommu), self.sid] + ) + ) + class DmaVirtDevice(DmaDevice): - type = 'DmaVirtDevice' + type = "DmaVirtDevice" cxx_header = "dev/dma_virt_device.hh" - cxx_class = 'gem5::DmaVirtDevice' + cxx_class = "gem5::DmaVirtDevice" abstract = True + class IsaFake(BasicPioDevice): - type = 'IsaFake' + type = "IsaFake" cxx_header = "dev/isa_fake.hh" - cxx_class = 'gem5::IsaFake' + cxx_class = "gem5::IsaFake" pio_size = Param.Addr(0x8, "Size of address range") ret_data8 = Param.UInt8(0xFF, "Default data to return") @@ -125,13 +146,16 @@ class IsaFake(BasicPioDevice): ret_data32 = Param.UInt32(0xFFFFFFFF, "Default data to return") ret_data64 = Param.UInt64(0xFFFFFFFFFFFFFFFF, "Default data to return") ret_bad_addr = Param.Bool(False, "Return pkt status bad address on access") - update_data = Param.Bool(False, "Update the data that is returned on writes") + update_data = Param.Bool( + False, "Update the data that is returned on writes" + ) warn_access = Param.String("", "String to print when device is accessed") - fake_mem = Param.Bool(False, - "Is this device acting like a memory and thus may get a cache line sized req") + fake_mem = Param.Bool( + False, + "Is this device acting like a memory and thus may get a cache line sized req", + ) + class BadAddr(IsaFake): pio_addr = 0 ret_bad_addr = Param.Bool(True, "Return pkt status bad address on access") - - diff --git a/src/dev/IntPin.py b/src/dev/IntPin.py index 80618ce251..9336a89900 100644 --- a/src/dev/IntPin.py +++ b/src/dev/IntPin.py @@ -25,8 +25,8 @@ from m5.params import Port, VectorPort -INT_SOURCE_ROLE = 'Int Source Pin' -INT_SINK_ROLE = 'Int Sink Pin' +INT_SOURCE_ROLE = "Int Source Pin" +INT_SINK_ROLE = "Int Sink Pin" Port.compat(INT_SOURCE_ROLE, INT_SINK_ROLE) # A source pin generally represents a single pin which might connect to @@ -35,6 
+35,7 @@ class IntSourcePin(VectorPort): def __init__(self, desc): super().__init__(INT_SOURCE_ROLE, desc, is_source=True) + # A vector of source pins which might represent a bank of physical pins. Unlike # IntSourcePin, each source pin in VectorIntSourcePin can only connect to a # single sink pin. VectorIntSourcePin has the same definition as IntSourcePin @@ -46,12 +47,14 @@ class VectorIntSourcePin(VectorPort): def __init__(self, desc): super().__init__(INT_SOURCE_ROLE, desc, is_source=True) + # Each "physical" pin can be driven by a single source pin since there are no # provisions for resolving competing signals running to the same pin. class IntSinkPin(Port): def __init__(self, desc): super().__init__(INT_SINK_ROLE, desc) + # A vector of sink pins represents a bank of physical pins. For instance, an # interrupt controller with many numbered input interrupts could represent them # as a VectorIntSinkPin. diff --git a/src/dev/Platform.py b/src/dev/Platform.py index 4f28db39fe..5a18f83010 100644 --- a/src/dev/Platform.py +++ b/src/dev/Platform.py @@ -28,11 +28,12 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class Platform(SimObject): - type = 'Platform' + type = "Platform" abstract = True cxx_header = "dev/platform.hh" - cxx_class = 'gem5::Platform' + cxx_class = "gem5::Platform" system = Param.System(Parent.any, "system") diff --git a/src/dev/ResetPort.py b/src/dev/ResetPort.py index d7140c501f..f35bc117c0 100644 --- a/src/dev/ResetPort.py +++ b/src/dev/ResetPort.py @@ -25,8 +25,8 @@ from m5.params import Port, VectorPort -RESET_REQUEST_ROLE = 'Reset Request' -RESET_RESPONSE_ROLE = 'Reset Response' +RESET_REQUEST_ROLE = "Reset Request" +RESET_RESPONSE_ROLE = "Reset Response" Port.compat(RESET_REQUEST_ROLE, RESET_RESPONSE_ROLE) # ResetRequestPort is an artifact request port for reset purpose. 
@@ -34,12 +34,14 @@ class ResetRequestPort(Port): def __init__(self, desc): super().__init__(RESET_REQUEST_ROLE, desc, is_source=True) + # ResetResponsePort is an artifact response port for reset purpose. # The owner should perform whole reset when receiving a request. class ResetResponsePort(Port): def __init__(self, desc): super().__init__(RESET_RESPONSE_ROLE, desc) + # VectorResetRequestPort presents a bank of artifact reset request # ports. class VectorResetRequestPort(VectorPort): diff --git a/src/dev/SConscript b/src/dev/SConscript index 44a7cc9a04..6a6ce40dcc 100644 --- a/src/dev/SConscript +++ b/src/dev/SConscript @@ -47,9 +47,6 @@ DebugFlag('DMA') SimObject('Platform.py', sim_objects=['Platform']) Source('platform.cc') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - SimObject('BadDevice.py', sim_objects=['BadDevice']) Source('baddev.cc') diff --git a/src/dev/amdgpu/AMDGPU.py b/src/dev/amdgpu/AMDGPU.py index 6afce0fa82..c834d3be92 100644 --- a/src/dev/amdgpu/AMDGPU.py +++ b/src/dev/amdgpu/AMDGPU.py @@ -42,9 +42,9 @@ from m5.objects.ClockedObject import ClockedObject # device registers and memory. It is intended only to be used in full-system # simulation under Linux where the amdgpu driver is modprobed. class AMDGPUDevice(PciDevice): - type = 'AMDGPUDevice' + type = "AMDGPUDevice" cxx_header = "dev/amdgpu/amdgpu_device.hh" - cxx_class = 'gem5::AMDGPUDevice' + cxx_class = "gem5::AMDGPUDevice" # IDs for AMD Vega 10 VendorID = 0x1002 @@ -59,12 +59,12 @@ class AMDGPUDevice(PciDevice): ProgIF = 0x00 # Use max possible BAR size for Vega 10. 
We can override with driver param - BAR0 = PciMemBar(size='16GiB') + BAR0 = PciMemBar(size="16GiB") BAR1 = PciMemUpperBar() - BAR2 = PciMemBar(size='2MiB') + BAR2 = PciMemBar(size="2MiB") BAR3 = PciMemUpperBar() - BAR4 = PciLegacyIoBar(addr=0xf000, size='256B') - BAR5 = PciMemBar(size='512KiB') + BAR4 = PciLegacyIoBar(addr=0xF000, size="256B") + BAR5 = PciMemBar(size="512KiB") InterruptLine = 14 InterruptPin = 2 @@ -72,8 +72,9 @@ class AMDGPUDevice(PciDevice): rom_binary = Param.String("ROM binary dumped from hardware") trace_file = Param.String("MMIO trace collected on hardware") - checkpoint_before_mmios = Param.Bool(False, "Take a checkpoint before the" - " device begins sending MMIOs") + checkpoint_before_mmios = Param.Bool( + False, "Take a checkpoint before the" " device begins sending MMIOs" + ) # Specific to Vega10: Vega10 has two SDMA engines these do not have any # assigned function and are referenced by ID so they are given the generic @@ -90,33 +91,38 @@ class AMDGPUDevice(PciDevice): memories = VectorParam.AbstractMemory([], "All memories in the device") device_ih = Param.AMDGPUInterruptHandler("GPU Interrupt handler") -class SDMAEngine(DmaVirtDevice): - type = 'SDMAEngine' - cxx_header = "dev/amdgpu/sdma_engine.hh" - cxx_class = 'gem5::SDMAEngine' - gpu_device = Param.AMDGPUDevice(NULL, 'GPU Controller') +class SDMAEngine(DmaVirtDevice): + type = "SDMAEngine" + cxx_header = "dev/amdgpu/sdma_engine.hh" + cxx_class = "gem5::SDMAEngine" + + gpu_device = Param.AMDGPUDevice(NULL, "GPU Controller") walker = Param.VegaPagetableWalker("Page table walker") + class PM4PacketProcessor(DmaVirtDevice): - type = 'PM4PacketProcessor' + type = "PM4PacketProcessor" cxx_header = "dev/amdgpu/pm4_packet_processor.hh" - cxx_class = 'gem5::PM4PacketProcessor' + cxx_class = "gem5::PM4PacketProcessor" + class AMDGPUMemoryManager(ClockedObject): - type = 'AMDGPUMemoryManager' - cxx_header = 'dev/amdgpu/memory_manager.hh' - cxx_class = 'gem5::AMDGPUMemoryManager' + type = 
"AMDGPUMemoryManager" + cxx_header = "dev/amdgpu/memory_manager.hh" + cxx_class = "gem5::AMDGPUMemoryManager" + + port = RequestPort("Memory Port to access VRAM (device memory)") + system = Param.System(Parent.any, "System the dGPU belongs to") - port = RequestPort('Memory Port to access VRAM (device memory)') - system = Param.System(Parent.any, 'System the dGPU belongs to') class AMDGPUInterruptHandler(DmaDevice): - type = 'AMDGPUInterruptHandler' + type = "AMDGPUInterruptHandler" cxx_header = "dev/amdgpu/interrupt_handler.hh" - cxx_class = 'gem5::AMDGPUInterruptHandler' + cxx_class = "gem5::AMDGPUInterruptHandler" + class AMDGPUSystemHub(DmaDevice): - type = 'AMDGPUSystemHub' - cxx_class = 'gem5::AMDGPUSystemHub' + type = "AMDGPUSystemHub" + cxx_class = "gem5::AMDGPUSystemHub" cxx_header = "dev/amdgpu/system_hub.hh" diff --git a/src/dev/amdgpu/SConscript b/src/dev/amdgpu/SConscript index bece7c31f1..713f0a6efe 100644 --- a/src/dev/amdgpu/SConscript +++ b/src/dev/amdgpu/SConscript @@ -51,3 +51,4 @@ DebugFlag('AMDGPUDevice', tags='x86 isa') DebugFlag('AMDGPUMem', tags='x86 isa') DebugFlag('PM4PacketProcessor', tags='x86 isa') DebugFlag('SDMAEngine', tags='x86 isa') +DebugFlag('SDMAData', tags='x86 isa') diff --git a/src/dev/amdgpu/amdgpu_device.cc b/src/dev/amdgpu/amdgpu_device.cc index 132a81a461..2b58b200ea 100644 --- a/src/dev/amdgpu/amdgpu_device.cc +++ b/src/dev/amdgpu/amdgpu_device.cc @@ -40,6 +40,7 @@ #include "dev/amdgpu/sdma_engine.hh" #include "dev/hsa/hw_scheduler.hh" #include "gpu-compute/gpu_command_processor.hh" +#include "gpu-compute/shader.hh" #include "mem/abstract_mem.hh" #include "mem/packet.hh" #include "mem/packet_access.hh" @@ -179,17 +180,16 @@ AMDGPUDevice::readFrame(PacketPtr pkt, Addr offset) { DPRINTF(AMDGPUDevice, "Read framebuffer address %#lx\n", offset); - /* Try MMIO trace for frame writes first. */ - mmioReader.readFromTrace(pkt, FRAMEBUFFER_BAR, offset); - - /* If the driver wrote something, use that value over the trace. 
*/ - if (frame_regs.find(offset) != frame_regs.end()) { - pkt->setUintX(frame_regs[offset], ByteOrder::little); - } - - /* Handle special counter addresses in framebuffer. */ + /* + * Return data for frame reads in priority order: (1) Special addresses + * first, ignoring any writes from driver. (2) Any other address from + * device backing store / abstract memory class functionally. + */ if (offset == 0xa28000) { - /* Counter addresses expect the read to return previous value + 1. */ + /* + * Handle special counter addresses in framebuffer. These counter + * addresses expect the read to return previous value + 1. + */ if (regs.find(pkt->getAddr()) == regs.end()) { regs[pkt->getAddr()] = 1; } else { @@ -197,6 +197,22 @@ AMDGPUDevice::readFrame(PacketPtr pkt, Addr offset) } pkt->setUintX(regs[pkt->getAddr()], ByteOrder::little); + } else { + /* + * Read the value from device memory. This must be done functionally + * because this method is called by the PCIDevice::read method which + * is a non-timing read. 
+ */ + RequestPtr req = std::make_shared(offset, pkt->getSize(), 0, + vramRequestorId()); + PacketPtr readPkt = Packet::createRead(req); + uint8_t *dataPtr = new uint8_t[pkt->getSize()]; + readPkt->dataDynamic(dataPtr); + + auto system = cp->shader()->gpuCmdProc.system(); + system->getDeviceMemory(readPkt)->access(readPkt); + + pkt->setUintX(readPkt->getUintX(ByteOrder::little), ByteOrder::little); } } @@ -253,12 +269,10 @@ AMDGPUDevice::writeFrame(PacketPtr pkt, Addr offset) Addr aperture_offset = offset - aperture; // Record the value - frame_regs[offset] = pkt->getUintX(ByteOrder::little); if (aperture == gpuvm.gartBase()) { - frame_regs[aperture_offset] = pkt->getLE(); + gpuvm.gartTable[aperture_offset] = pkt->getUintX(ByteOrder::little); DPRINTF(AMDGPUDevice, "GART translation %p -> %p\n", aperture_offset, - bits(frame_regs[aperture_offset], 48, 12)); - gpuvm.gartTable[aperture_offset] = pkt->getLE(); + gpuvm.gartTable[aperture_offset]); } } @@ -297,8 +311,8 @@ AMDGPUDevice::writeDoorbell(PacketPtr pkt, Addr offset) deviceIH->updateRptr(pkt->getLE()); break; case RLC: { - panic("RLC queues not yet supported. 
Run with the environment " - "variable HSA_ENABLE_SDMA set to False"); + SDMAEngine *sdmaEng = getSDMAEngine(offset); + sdmaEng->processRLC(offset, pkt->getLE()); } break; default: panic("Write to unkown queue type!"); @@ -379,7 +393,7 @@ AMDGPUDevice::write(PacketPtr pkt) switch (barnum) { case FRAMEBUFFER_BAR: gpuMemMgr->writeRequest(offset, pkt->getPtr(), - pkt->getSize()); + pkt->getSize(), 0, nullptr); writeFrame(pkt, offset); break; case DOORBELL_BAR: @@ -623,6 +637,9 @@ AMDGPUDevice::deallocateAllQueues() { idMap.erase(idMap.begin(), idMap.end()); usedVMIDs.erase(usedVMIDs.begin(), usedVMIDs.end()); + + sdma0->deallocateRLCQueues(); + sdma1->deallocateRLCQueues(); } void diff --git a/src/dev/amdgpu/amdgpu_device.hh b/src/dev/amdgpu/amdgpu_device.hh index fbb0d1c6de..ac31b95fd2 100644 --- a/src/dev/amdgpu/amdgpu_device.hh +++ b/src/dev/amdgpu/amdgpu_device.hh @@ -85,7 +85,6 @@ class AMDGPUDevice : public PciDevice * Structures to hold registers, doorbells, and some frame memory */ using GPURegMap = std::unordered_map; - GPURegMap frame_regs; GPURegMap regs; std::unordered_map doorbells; diff --git a/src/dev/amdgpu/amdgpu_vm.cc b/src/dev/amdgpu/amdgpu_vm.cc index c0c920977e..7a30917b21 100644 --- a/src/dev/amdgpu/amdgpu_vm.cc +++ b/src/dev/amdgpu/amdgpu_vm.cc @@ -331,11 +331,11 @@ AMDGPUVM::UserTranslationGen::translate(Range &range) const DPRINTF(AMDGPUDevice, "User tl base %#lx start %#lx walker %p\n", base, start, walker); - bool dummy; + bool system_bit; unsigned logBytes; Addr paddr = range.vaddr; Fault fault = walker->startFunctional(base, paddr, logBytes, - BaseMMU::Mode::Read, dummy); + BaseMMU::Mode::Read, system_bit); if (fault != NoFault) { fatal("User translation fault"); } @@ -343,9 +343,17 @@ AMDGPUVM::UserTranslationGen::translate(Range &range) const // GPU page size is variable. Use logBytes to determine size. 
const Addr page_size = 1 << logBytes; Addr next = roundUp(range.vaddr, page_size); - if (next == range.vaddr) + if (next == range.vaddr) { // We don't know the size of the next page, use default. next += AMDGPU_USER_PAGE_SIZE; + } + + // If we are not in system/host memory, change the address to the MMHUB + // aperture. This is mapped to the same backing memory as device memory. + if (!system_bit) { + paddr += vm->getMMHUBBase(); + assert(vm->inMMHUB(paddr)); + } range.size = std::min(range.size, next - range.vaddr); range.paddr = paddr; diff --git a/src/dev/amdgpu/amdgpu_vm.hh b/src/dev/amdgpu/amdgpu_vm.hh index 8df169b79a..212a688716 100644 --- a/src/dev/amdgpu/amdgpu_vm.hh +++ b/src/dev/amdgpu/amdgpu_vm.hh @@ -165,7 +165,7 @@ class AMDGPUVM : public Serializable * Copy of GART table. Typically resides in device memory, however we use * a copy in gem5 to simplify the interface. */ - std::unordered_map gartTable; + std::unordered_map gartTable; void readMMIO(PacketPtr pkt, Addr offset); void writeMMIO(PacketPtr pkt, Addr offset); diff --git a/src/dev/amdgpu/hwreg_defines.hh b/src/dev/amdgpu/hwreg_defines.hh new file mode 100644 index 0000000000..f5097c8994 --- /dev/null +++ b/src/dev/amdgpu/hwreg_defines.hh @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2022 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __DEV_GPU_HWREG_DEFINES_H__ +#define __DEV_GPU_HWREG_DEFINES_H__ +/* + * This enum is adapted from the offsets seen by LLVM: + * + * https://github.com/llvm/llvm-project/blob/release/14.x/llvm/lib/ + * Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp#L58 + */ + +namespace gem5 +{ + +/* + * Further descriptions can be found in the "Hardware Register Values" table + * in any of the GCN3, Vega, CDNA1, CNDA2, or RDNA ISA manuals. 
+ */ +enum amdgpu_hwreg +{ + HW_REG_MODE = 0x1, + HW_REG_STATUS = 0x2, + HW_REG_TRAPSTS = 0x3, + HW_REG_HW_ID = 0x4, + HW_REG_GPR_ALLOC = 0x5, + HW_REG_LDS_ALLOC = 0x6, + HW_REG_IB_STS = 0x7, + HW_REG_SH_MEM_BASES = 0xf, + HW_REG_TBA_LO = 0x10, + HW_REG_TBA_HI = 0x11, + HW_REG_TMA_LO = 0x12, + HW_REG_TMA_HI = 0x13, + HW_REG_FLAT_SCR_LO = 0x14, + HW_REG_FLAT_SCR_HI = 0x15, + HW_REG_XNACK_MASK = 0x16, + HW_REG_HW_ID1 = 0x17, + HW_REG_HW_ID2 = 0x18, + HW_REG_POPS_PACKER = 0x19, + HW_REG_SHADER_CYCLES = 0x1d, +}; + +} // namespace gem5 + +#endif // __DEV_GPU_HWREG_DEFINES_H__ diff --git a/src/dev/amdgpu/interrupt_handler.cc b/src/dev/amdgpu/interrupt_handler.cc index 36c9b04ee2..a771976d98 100644 --- a/src/dev/amdgpu/interrupt_handler.cc +++ b/src/dev/amdgpu/interrupt_handler.cc @@ -77,6 +77,12 @@ AMDGPUInterruptHandler::prepareInterruptCookie(ContextID cntxt_id, uint32_t client_id, uint32_t source_id) { + assert(client_id == SOC15_IH_CLIENTID_RLC || + client_id == SOC15_IH_CLIENTID_SDMA0 || + client_id == SOC15_IH_CLIENTID_SDMA1 || + client_id == SOC15_IH_CLIENTID_GRBM_CP); + assert(source_id == CP_EOP || source_id == TRAP_ID); + /** * Setup the fields in the interrupt cookie (see header file for more * detail on the fields). The timestamp here is a bogus value. It seems @@ -88,6 +94,14 @@ AMDGPUInterruptHandler::prepareInterruptCookie(ContextID cntxt_id, */ AMDGPUInterruptCookie *cookie = new AMDGPUInterruptCookie(); memset(cookie, 0, sizeof(AMDGPUInterruptCookie)); + + // Currently only one process is supported and the first pasid from driver + // is always 0x8000. In the future this can be obtained from the PM4 + // MAP_PROCESS packet and may need to be passed to this function. + // + // On a related note, leave vmid fields alone as they are only used for + // memory exceptions. Memory exceptions are not supported on gfx900. 
+ cookie->pasid = 0x8000; cookie->timestamp_Lo = 0x40; cookie->clientId = client_id; cookie->sourceId = source_id; @@ -188,15 +202,14 @@ AMDGPUInterruptHandler::setCntl(const uint32_t &data) void AMDGPUInterruptHandler::setBase(const uint32_t &data) { - regs.IH_Base = data << 8; - regs.baseAddr |= regs.IH_Base; + regs.baseAddr = data; + regs.baseAddr <<= 8; } void AMDGPUInterruptHandler::setBaseHi(const uint32_t &data) { - regs.IH_Base_Hi = data; - regs.baseAddr |= ((uint64_t)regs.IH_Base_Hi) << 32; + regs.baseAddr |= static_cast(data) << 40; } void diff --git a/src/dev/amdgpu/interrupt_handler.hh b/src/dev/amdgpu/interrupt_handler.hh index 5e5175fbbe..ab8a853074 100644 --- a/src/dev/amdgpu/interrupt_handler.hh +++ b/src/dev/amdgpu/interrupt_handler.hh @@ -57,11 +57,13 @@ enum soc15_ih_clientid { SOC15_IH_CLIENTID_RLC = 0x07, SOC15_IH_CLIENTID_SDMA0 = 0x08, - SOC15_IH_CLIENTID_SDMA1 = 0x09 + SOC15_IH_CLIENTID_SDMA1 = 0x09, + SOC15_IH_CLIENTID_GRBM_CP = 0x14 }; enum ihSourceId { + CP_EOP = 181, TRAP_ID = 224 }; diff --git a/src/dev/amdgpu/memory_manager.cc b/src/dev/amdgpu/memory_manager.cc index 5698a70973..7b671b0611 100644 --- a/src/dev/amdgpu/memory_manager.cc +++ b/src/dev/amdgpu/memory_manager.cc @@ -43,7 +43,7 @@ namespace gem5 { AMDGPUMemoryManager::AMDGPUMemoryManager(const AMDGPUMemoryManagerParams &p) - : ClockedObject(p), _gpuMemPort(csprintf("%s-port", name()), this), + : ClockedObject(p), _gpuMemPort(csprintf("%s-port", name()), *this), cacheLineSize(p.system->cacheLineSize()), _requestorId(p.system->getRequestorId(this)) { @@ -55,6 +55,14 @@ AMDGPUMemoryManager::writeRequest(Addr addr, uint8_t *data, int size, { assert(data); + // Requests may return out of order, so we should track how many chunks + // are outstanding and if the last chunk was sent. 
Give each status struct + // a unique ID so that DMAs to the same address may occur at the same time + requestStatus.emplace(std::piecewise_construct, + std::forward_as_tuple(requestId), std::tuple<>{}); + + DPRINTF(AMDGPUMem, "Created status for write request %ld\n", requestId); + ChunkGenerator gen(addr, size, cacheLineSize); for (; !gen.done(); gen.next()) { RequestPtr req = std::make_shared(gen.addr(), gen.size(), @@ -66,11 +74,11 @@ AMDGPUMemoryManager::writeRequest(Addr addr, uint8_t *data, int size, gen.size()); pkt->dataDynamic(dataPtr); - // We only want to issue the callback on the last request completing. + pkt->pushSenderState( + new GPUMemPort::SenderState(callback, addr, requestId)); + requestStatus.at(requestId).outstandingChunks++; if (gen.last()) { - pkt->pushSenderState(new GPUMemPort::SenderState(callback, addr)); - } else { - pkt->pushSenderState(new GPUMemPort::SenderState(nullptr, addr)); + requestStatus.at(requestId).sentLastChunk = true; } if (!_gpuMemPort.sendTimingReq(pkt)) { @@ -80,6 +88,50 @@ AMDGPUMemoryManager::writeRequest(Addr addr, uint8_t *data, int size, DPRINTF(AMDGPUMem, "Write request to %#lx sent\n", gen.addr()); } } + + requestId++; +} + +void +AMDGPUMemoryManager::readRequest(Addr addr, uint8_t *data, int size, + Request::Flags flag, Event *callback) +{ + assert(data); + uint8_t *dataPtr = data; + + // Requests may return out of order, so we should track how many chunks + // are outstanding and if the last chunk was sent. 
Give each status struct + // a unique ID so that DMAs to the same address may occur at the same time + requestStatus.emplace(std::piecewise_construct, + std::forward_as_tuple(requestId), std::tuple<>{}); + + DPRINTF(AMDGPUMem, "Created status for read request %ld\n", requestId); + + ChunkGenerator gen(addr, size, cacheLineSize); + for (; !gen.done(); gen.next()) { + RequestPtr req = std::make_shared(gen.addr(), gen.size(), + flag, _requestorId); + + PacketPtr pkt = Packet::createRead(req); + pkt->dataStatic(dataPtr); + dataPtr += gen.size(); + + pkt->pushSenderState( + new GPUMemPort::SenderState(callback, addr, requestId)); + requestStatus.at(requestId).outstandingChunks++; + if (gen.last()) { + requestStatus.at(requestId).sentLastChunk = true; + } + + if (!_gpuMemPort.sendTimingReq(pkt)) { + DPRINTF(AMDGPUMem, "Request to %#lx needs retry\n", gen.addr()); + _gpuMemPort.retries.push_back(pkt); + } else { + DPRINTF(AMDGPUMem, "Read request to %#lx sent\n", gen.addr()); + } + } + + requestId++; } bool @@ -89,12 +141,29 @@ AMDGPUMemoryManager::GPUMemPort::recvTimingResp(PacketPtr pkt) [[maybe_unused]] SenderState *sender_state = safe_cast(pkt->senderState); - DPRINTF(AMDGPUMem, "Recveived Response for %#x\n", sender_state->_addr); + // Check if all chunks have completed, the last chunk was sent, and there + // is a callback, call the callback now. + assert(gpu_mem.requestStatus.count(sender_state->_requestId)); + auto& status = gpu_mem.requestStatus.at(sender_state->_requestId); - // Check if there is a callback event and if so call it - if (sender_state->_callback) { - sender_state->_callback->process(); - delete sender_state->_callback; + assert(status.outstandingChunks != 0); + status.outstandingChunks--; + DPRINTF(AMDGPUMem, "Received Response for %#x. 
%d chunks remain, sent " + "last = %d, requestId = %ld\n", sender_state->_addr, + status.outstandingChunks, status.sentLastChunk, + sender_state->_requestId); + + if (!status.outstandingChunks && status.sentLastChunk) { + // Call and free the callback if there is one + if (sender_state->_callback) { + DPRINTF(AMDGPUMem, "Calling callback for request %ld\n", + sender_state->_requestId); + sender_state->_callback->process(); + delete sender_state->_callback; + } + DPRINTF(AMDGPUMem, "Deleting status for request %ld\n", + sender_state->_requestId); + gpu_mem.requestStatus.erase(sender_state->_requestId); } delete pkt->senderState; diff --git a/src/dev/amdgpu/memory_manager.hh b/src/dev/amdgpu/memory_manager.hh index 8fb237b8cb..e18ec643a6 100644 --- a/src/dev/amdgpu/memory_manager.hh +++ b/src/dev/amdgpu/memory_manager.hh @@ -33,6 +33,7 @@ #define __DEV_AMDGPU_MEMORY_MANAGER_HH__ #include +#include #include "base/callback.hh" #include "mem/port.hh" @@ -46,9 +47,9 @@ class AMDGPUMemoryManager : public ClockedObject { class GPUMemPort : public MasterPort { - public: - GPUMemPort(const std::string &_name, AMDGPUMemoryManager *_gpuMemMgr) - : MasterPort(_name, _gpuMemMgr) + public: + GPUMemPort(const std::string &_name, AMDGPUMemoryManager &_gpuMemMgr) + : MasterPort(_name, &_gpuMemMgr), gpu_mem(_gpuMemMgr) { } @@ -57,21 +58,35 @@ class AMDGPUMemoryManager : public ClockedObject struct SenderState : public Packet::SenderState { - SenderState(Event *callback, Addr addr) - : _callback(callback), _addr(addr) + SenderState(Event *callback, Addr addr, uint64_t requestId) + : _callback(callback), _addr(addr), _requestId(requestId) {} Event *_callback; Addr _addr; + uint64_t _requestId; }; std::deque retries; + AMDGPUMemoryManager &gpu_mem; }; GPUMemPort _gpuMemPort; const int cacheLineSize; const RequestorID _requestorId; + struct RequestStatus + { + RequestStatus() : outstandingChunks(0), sentLastChunk(false) + { } + + uint64_t outstandingChunks; + bool sentLastChunk; + }; + + 
uint64_t requestId = 0; + std::unordered_map requestStatus; + public: AMDGPUMemoryManager(const AMDGPUMemoryManagerParams &p); ~AMDGPUMemoryManager() {}; @@ -87,7 +102,20 @@ class AMDGPUMemoryManager : public ClockedObject * @param callback Event callback to call after all bytes are written. */ void writeRequest(Addr addr, uint8_t *data, int size, - Request::Flags flag = 0, Event *callback = nullptr); + Request::Flags flag, Event *callback); + + /** + * Read size amount of data from device memory at addr using flags and + * callback. + * + * @param addr Device address to read. + * @param data Pointer to data to read into. + * @param size Number of bytes to read. + * @param flag Additional request flags for read packets. + * @param callback Event callback to call after all bytes are read. + */ + void readRequest(Addr addr, uint8_t *data, int size, + Request::Flags flag, Event *callback); /** * Get the requestorID for the memory manager. This ID is used for all diff --git a/src/dev/amdgpu/pm4_defines.hh b/src/dev/amdgpu/pm4_defines.hh index b690e54906..42832d50bf 100644 --- a/src/dev/amdgpu/pm4_defines.hh +++ b/src/dev/amdgpu/pm4_defines.hh @@ -124,9 +124,9 @@ typedef struct GEM5_PACKED uint32_t reserved2 : 2; uint32_t vmid : 4; uint32_t reserved3 : 1; - uint32_t me : 1; - uint32_t pipe : 2; uint32_t queueSlot : 3; + uint32_t pipe : 2; + uint32_t me : 1; uint32_t reserved6 : 2; uint32_t queueType : 3; uint32_t allocFormat : 2; diff --git a/src/dev/amdgpu/pm4_mmio.hh b/src/dev/amdgpu/pm4_mmio.hh index a3ce5f14e5..3801223175 100644 --- a/src/dev/amdgpu/pm4_mmio.hh +++ b/src/dev/amdgpu/pm4_mmio.hh @@ -60,6 +60,7 @@ namespace gem5 #define mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x1251 #define mmCP_HQD_PQ_WPTR_POLL_ADDR 0x1252 #define mmCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x1253 +#define mmCP_HQD_PQ_CONTROL 0x1256 #define mmCP_HQD_IB_CONTROL 0x125a #define mmCP_HQD_PQ_WPTR_LO 0x127b #define mmCP_HQD_PQ_WPTR_HI 0x127c diff --git a/src/dev/amdgpu/pm4_packet_processor.cc 
b/src/dev/amdgpu/pm4_packet_processor.cc index c70f2f26ad..f78f8333a6 100644 --- a/src/dev/amdgpu/pm4_packet_processor.cc +++ b/src/dev/amdgpu/pm4_packet_processor.cc @@ -34,12 +34,14 @@ #include "debug/PM4PacketProcessor.hh" #include "dev/amdgpu/amdgpu_device.hh" +#include "dev/amdgpu/hwreg_defines.hh" #include "dev/amdgpu/interrupt_handler.hh" #include "dev/amdgpu/pm4_mmio.hh" #include "dev/amdgpu/sdma_engine.hh" #include "dev/hsa/hw_scheduler.hh" #include "enums/GfxVersion.hh" #include "gpu-compute/gpu_command_processor.hh" +#include "gpu-compute/shader.hh" #include "mem/packet.hh" #include "mem/packet_access.hh" @@ -114,14 +116,14 @@ void PM4PacketProcessor::mapKiq(Addr offset) { DPRINTF(PM4PacketProcessor, "Mapping KIQ\n"); - newQueue((QueueDesc *)&kiq, offset); + newQueue((QueueDesc *)&kiq, offset, &kiq_pkt); } void PM4PacketProcessor::mapPq(Addr offset) { DPRINTF(PM4PacketProcessor, "Mapping PQ\n"); - newQueue((QueueDesc *)&pq, offset); + newQueue((QueueDesc *)&pq, offset, &pq_pkt); } void @@ -144,8 +146,9 @@ PM4PacketProcessor::newQueue(QueueDesc *mqd, Addr offset, : QueueType::Compute; gpuDevice->setDoorbellType(offset, qt); - DPRINTF(PM4PacketProcessor, "New PM4 queue %d, base: %p offset: %p\n", - id, q->base(), q->offset()); + DPRINTF(PM4PacketProcessor, "New PM4 queue %d, base: %p offset: %p, me: " + "%d, pipe %d queue: %d size: %d\n", id, q->base(), q->offset(), + q->me(), q->pipe(), q->queue(), q->size()); } void @@ -200,9 +203,7 @@ PM4PacketProcessor::decodeHeader(PM4Queue *q, PM4Header header) case IT_NOP: { DPRINTF(PM4PacketProcessor, "PM4 nop, count %p\n", header.count); DPRINTF(PM4PacketProcessor, "rptr %p wptr %p\n", q->rptr(), q->wptr()); - if (header.count == 0x3fff) { - q->fastforwardRptr(); - } else { + if (header.count != 0x3fff) { q->incRptr((header.count + 1) * sizeof(uint32_t)); } decodeNext(q); @@ -387,7 +388,8 @@ PM4PacketProcessor::mapQueues(PM4Queue *q, PM4MapQueues *pkt) SDMAQueueDesc *sdmaMQD = new SDMAQueueDesc(); memset(sdmaMQD, 
0, sizeof(SDMAQueueDesc)); - Addr addr = pkt->mqdAddr; + // For SDMA we read the full MQD, so there is no offset calculation. + Addr addr = getGARTAddr(pkt->mqdAddr); auto cb = new DmaVirtCallback( [ = ] (const uint32_t &) { @@ -439,12 +441,17 @@ void PM4PacketProcessor::processSDMAMQD(PM4MapQueues *pkt, PM4Queue *q, Addr addr, SDMAQueueDesc *mqd, uint16_t vmid) { + uint32_t rlc_size = 4UL << bits(mqd->sdmax_rlcx_rb_cntl, 6, 1); + Addr rptr_wb_addr = mqd->sdmax_rlcx_rb_rptr_addr_hi; + rptr_wb_addr <<= 32; + rptr_wb_addr |= mqd->sdmax_rlcx_rb_rptr_addr_lo; + DPRINTF(PM4PacketProcessor, "SDMAMQD: rb base: %#lx rptr: %#x/%#x wptr: " - "%#x/%#x ib: %#x/%#x size: %d ctrl: %#x\n", mqd->rb_base, - mqd->sdmax_rlcx_rb_rptr, mqd->sdmax_rlcx_rb_rptr_hi, + "%#x/%#x ib: %#x/%#x size: %d ctrl: %#x rptr wb addr: %#lx\n", + mqd->rb_base, mqd->sdmax_rlcx_rb_rptr, mqd->sdmax_rlcx_rb_rptr_hi, mqd->sdmax_rlcx_rb_wptr, mqd->sdmax_rlcx_rb_wptr_hi, mqd->sdmax_rlcx_ib_base_lo, mqd->sdmax_rlcx_ib_base_hi, - mqd->sdmax_rlcx_ib_size, mqd->sdmax_rlcx_rb_cntl); + rlc_size, mqd->sdmax_rlcx_rb_cntl, rptr_wb_addr); // Engine 2 points to SDMA0 while engine 3 points to SDMA1 assert(pkt->engineSel == 2 || pkt->engineSel == 3); @@ -452,7 +459,8 @@ PM4PacketProcessor::processSDMAMQD(PM4MapQueues *pkt, PM4Queue *q, Addr addr, // Register RLC queue with SDMA sdma_eng->registerRLCQueue(pkt->doorbellOffset << 2, - mqd->rb_base << 8); + mqd->rb_base << 8, rlc_size, + rptr_wb_addr); // Register doorbell with GPU device gpuDevice->setSDMAEngine(pkt->doorbellOffset << 2, sdma_eng); @@ -489,14 +497,16 @@ PM4PacketProcessor::releaseMemDone(PM4Queue *q, PM4ReleaseMem *pkt, Addr addr) DPRINTF(PM4PacketProcessor, "PM4 release_mem wrote %d to %p\n", pkt->dataLo, addr); if (pkt->intSelect == 2) { - DPRINTF(PM4PacketProcessor, "PM4 interrupt, ctx: %d, me: %d, pipe: " - "%d, queueSlot:%d\n", pkt->intCtxId, q->me(), q->pipe(), - q->queue()); - // Rearranging the queue field of PM4MapQueues as the interrupt RingId - // 
format specified in PM4ReleaseMem pkt. - uint32_t ringId = (q->me() << 6) | (q->pipe() << 4) | q->queue(); + DPRINTF(PM4PacketProcessor, "PM4 interrupt, id: %d ctx: %d, me: %d, " + "pipe: %d, queueSlot:%d\n", q->id(), pkt->intCtxId, q->me(), + q->pipe(), q->queue()); + + uint8_t ringId = 0; + if (q->id() != 0) { + ringId = (q->queue() << 4) | (q->me() << 2) | q->pipe(); + } gpuDevice->getIH()->prepareInterruptCookie(pkt->intCtxId, ringId, - SOC15_IH_CLIENTID_RLC, TRAP_ID); + SOC15_IH_CLIENTID_GRBM_CP, CP_EOP); gpuDevice->getIH()->submitInterruptCookie(); } @@ -615,6 +625,17 @@ PM4PacketProcessor::mapProcess(PM4Queue *q, PM4MapProcess *pkt) pkt->ptBase, pkt->completionSignal); gpuDevice->getVM().setPageTableBase(vmid, pkt->ptBase); + gpuDevice->CP()->shader()->setHwReg(HW_REG_SH_MEM_BASES, pkt->shMemBases); + + // Setup the apertures that gem5 uses. These values are bits [63:48]. + Addr lds_base = (Addr)bits(pkt->shMemBases, 31, 16) << 48; + Addr scratch_base = (Addr)bits(pkt->shMemBases, 15, 0) << 48; + + // There does not seem to be any register for the limit, but the driver + // assumes scratch and LDS have a 4GB aperture, so use that. 
+ gpuDevice->CP()->shader()->setLdsApe(lds_base, lds_base + 0xFFFFFFFF); + gpuDevice->CP()->shader()->setScratchApe(scratch_base, + scratch_base + 0xFFFFFFFF); delete pkt; decodeNext(q); @@ -769,6 +790,9 @@ PM4PacketProcessor::writeMMIO(PacketPtr pkt, Addr mmio_offset) case mmCP_HQD_PQ_WPTR_POLL_ADDR_HI: setHqdPqWptrPollAddrHi(pkt->getLE()); break; + case mmCP_HQD_PQ_CONTROL: + setHqdPqControl(pkt->getLE()); + break; case mmCP_HQD_IB_CONTROL: setHqdIbCtrl(pkt->getLE()); break; @@ -890,6 +914,12 @@ PM4PacketProcessor::setHqdPqWptrPollAddrHi(uint32_t data) kiq.hqd_pq_wptr_poll_addr_hi = data; } +void +PM4PacketProcessor::setHqdPqControl(uint32_t data) +{ + kiq.hqd_pq_control = data; +} + void PM4PacketProcessor::setHqdIbCtrl(uint32_t data) { diff --git a/src/dev/amdgpu/pm4_packet_processor.hh b/src/dev/amdgpu/pm4_packet_processor.hh index c77edd2651..4617a21a06 100644 --- a/src/dev/amdgpu/pm4_packet_processor.hh +++ b/src/dev/amdgpu/pm4_packet_processor.hh @@ -54,8 +54,10 @@ class PM4PacketProcessor : public DmaVirtDevice AMDGPUDevice *gpuDevice; /* First graphics queue */ PrimaryQueue pq; + PM4MapQueues pq_pkt; /* First compute queue */ QueueDesc kiq; + PM4MapQueues kiq_pkt; /* All PM4 queues, indexed by VMID */ std::unordered_map queues; @@ -169,6 +171,7 @@ class PM4PacketProcessor : public DmaVirtDevice void setHqdPqRptrReportAddrHi(uint32_t data); void setHqdPqWptrPollAddr(uint32_t data); void setHqdPqWptrPollAddrHi(uint32_t data); + void setHqdPqControl(uint32_t data); void setHqdIbCtrl(uint32_t data); void setRbVmid(uint32_t data); void setRbCntl(uint32_t data); diff --git a/src/dev/amdgpu/pm4_queues.hh b/src/dev/amdgpu/pm4_queues.hh index 4e8638b39e..8b6626d176 100644 --- a/src/dev/amdgpu/pm4_queues.hh +++ b/src/dev/amdgpu/pm4_queues.hh @@ -375,16 +375,16 @@ class PM4Queue Addr _offset; bool _processing; bool _ib; - PM4MapQueues *_pkt; + const PM4MapQueues _pkt; public: PM4Queue() : _id(0), q(nullptr), _wptr(0), _offset(0), _processing(false), - _ib(false), 
_pkt(nullptr) {} + _ib(false), _pkt() {} PM4Queue(int id, QueueDesc *queue, Addr offset) : _id(id), q(queue), _wptr(queue->rptr), _ibWptr(0), _offset(offset), - _processing(false), _ib(false), _pkt(nullptr) {} + _processing(false), _ib(false), _pkt() {} PM4Queue(int id, QueueDesc *queue, Addr offset, PM4MapQueues *pkt) : _id(id), q(queue), _wptr(queue->rptr), _ibWptr(0), _offset(offset), - _processing(false), _ib(false), _pkt(pkt) {} + _processing(false), _ib(false), _pkt(*pkt) {} QueueDesc *getMQD() { return q; } int id() { return _id; } @@ -396,14 +396,14 @@ class PM4Queue rptr() { if (ib()) return q->ibBase + q->ibRptr; - else return q->base + q->rptr; + else return q->base + (q->rptr % size()); } Addr wptr() { if (ib()) return q->ibBase + _ibWptr; - else return q->base + _wptr; + else return q->base + (_wptr % size()); } Addr @@ -466,10 +466,13 @@ class PM4Queue void offset(Addr value) { _offset = value; } void processing(bool value) { _processing = value; } void ib(bool value) { _ib = value; } - uint32_t me() { if (_pkt) return _pkt->me; else return 0; } - uint32_t pipe() { if (_pkt) return _pkt->pipe; else return 0; } - uint32_t queue() { if (_pkt) return _pkt->queueSlot; else return 0; } - bool privileged() { assert(_pkt); return _pkt->queueSel == 0 ? 1 : 0; } + uint32_t me() { return _pkt.me + 1; } + uint32_t pipe() { return _pkt.pipe; } + uint32_t queue() { return _pkt.queueSlot; } + bool privileged() { return _pkt.queueSel == 0 ? 1 : 0; } + + // Same computation as processMQD. See comment there for details. 
+ uint64_t size() { return 4UL << ((q->hqd_pq_control & 0x3f) + 1); } }; } // namespace gem5 diff --git a/src/dev/amdgpu/sdma_engine.cc b/src/dev/amdgpu/sdma_engine.cc index df08e32289..59c5027c85 100644 --- a/src/dev/amdgpu/sdma_engine.cc +++ b/src/dev/amdgpu/sdma_engine.cc @@ -33,6 +33,8 @@ #include "arch/amdgpu/vega/pagetable_walker.hh" #include "arch/generic/mmu.hh" +#include "debug/SDMAData.hh" +#include "debug/SDMAEngine.hh" #include "dev/amdgpu/interrupt_handler.hh" #include "dev/amdgpu/sdma_commands.hh" #include "dev/amdgpu/sdma_mmio.hh" @@ -96,6 +98,39 @@ SDMAEngine::getGARTAddr(Addr addr) const return addr; } +Addr +SDMAEngine::getDeviceAddress(Addr raw_addr) +{ + // SDMA packets can access both host and device memory as either a source + // or destination address. We don't know which until it is translated, so + // we do a dummy functional translation to determine if the address + // resides in system memory or not. + auto tgen = translate(raw_addr, 64); + auto addr_range = *(tgen->begin()); + Addr tmp_addr = addr_range.paddr; + DPRINTF(SDMAEngine, "getDeviceAddress raw_addr %#lx -> %#lx\n", + raw_addr, tmp_addr); + + // SDMA packets will access device memory through the MMHUB aperture in + // supervisor mode (vmid == 0) and in user mode (vmid > 0). In the case + // of vmid == 0 the address is already an MMHUB address in the packet, + // so simply subtract the MMHUB base. For vmid > 0 the address is a + // virtual address that must first be translated. The translation will + // return an MMHUB address, then we can similarly subtract the base to + // get the device address. Otherwise, for host, device address is 0. 
+ Addr device_addr = 0; + if ((gpuDevice->getVM().inMMHUB(raw_addr) && cur_vmid == 0) || + (gpuDevice->getVM().inMMHUB(tmp_addr) && cur_vmid != 0)) { + if (cur_vmid == 0) { + device_addr = raw_addr - gpuDevice->getVM().getMMHUBBase(); + } else { + device_addr = tmp_addr - gpuDevice->getVM().getMMHUBBase(); + } + } + + return device_addr; +} + /** * GPUController will perform DMA operations on VAs, and because * page faults are not currently supported for GPUController, we @@ -104,7 +139,12 @@ SDMAEngine::getGARTAddr(Addr addr) const TranslationGenPtr SDMAEngine::translate(Addr vaddr, Addr size) { - if (gpuDevice->getVM().inAGP(vaddr)) { + if (cur_vmid > 0) { + // Only user translation is available to user queues (vmid > 0) + return TranslationGenPtr(new AMDGPUVM::UserTranslationGen( + &gpuDevice->getVM(), walker, + cur_vmid, vaddr, size)); + } else if (gpuDevice->getVM().inAGP(vaddr)) { // Use AGP translation gen return TranslationGenPtr( new AMDGPUVM::AGPTranslationGen(&gpuDevice->getVM(), vaddr, size)); @@ -121,29 +161,30 @@ SDMAEngine::translate(Addr vaddr, Addr size) } void -SDMAEngine::registerRLCQueue(Addr doorbell, Addr rb_base) +SDMAEngine::registerRLCQueue(Addr doorbell, Addr rb_base, uint32_t size, + Addr rptr_wb_addr) { // Get first free RLC if (!rlc0.valid()) { DPRINTF(SDMAEngine, "Doorbell %lx mapped to RLC0\n", doorbell); - rlcMap.insert(std::make_pair(doorbell, 0)); + rlcInfo[0] = doorbell; rlc0.valid(true); rlc0.base(rb_base); rlc0.rptr(0); rlc0.wptr(0); + rlc0.rptrWbAddr(rptr_wb_addr); rlc0.processing(false); - // TODO: size - I think pull from MQD 2^rb_cntrl[6:1]-1 - rlc0.size(1024*1024); + rlc0.size(size); } else if (!rlc1.valid()) { DPRINTF(SDMAEngine, "Doorbell %lx mapped to RLC1\n", doorbell); - rlcMap.insert(std::make_pair(doorbell, 1)); + rlcInfo[1] = doorbell; rlc1.valid(true); rlc1.base(rb_base); - rlc1.rptr(1); - rlc1.wptr(1); + rlc1.rptr(0); + rlc1.wptr(0); + rlc1.rptrWbAddr(rptr_wb_addr); rlc1.processing(false); - // TODO: size - I 
think pull from MQD 2^rb_cntrl[6:1]-1 - rlc1.size(1024*1024); + rlc1.size(size); } else { panic("No free RLCs. Check they are properly unmapped."); } @@ -152,16 +193,23 @@ SDMAEngine::registerRLCQueue(Addr doorbell, Addr rb_base) void SDMAEngine::unregisterRLCQueue(Addr doorbell) { - assert(rlcMap.find(doorbell) != rlcMap.end()); - - if (rlcMap[doorbell] == 0) { + DPRINTF(SDMAEngine, "Unregistering RLC queue at %#lx\n", doorbell); + if (rlcInfo[0] == doorbell) { rlc0.valid(false); - rlcMap.erase(doorbell); - } else if (rlcMap[doorbell] == 1) { + rlcInfo[0] = 0; + } else if (rlcInfo[1] == doorbell) { rlc1.valid(false); - rlcMap.erase(doorbell); + rlcInfo[1] = 0; } else { - panic("Cannot unregister unknown RLC queue: %d\n", rlcMap[doorbell]); + panic("Cannot unregister: no RLC queue at %#lx\n", doorbell); + } +} + +void +SDMAEngine::deallocateRLCQueues() +{ + for (auto doorbell: rlcInfo) { + unregisterRLCQueue(doorbell); } } @@ -191,15 +239,12 @@ SDMAEngine::processPage(Addr wptrOffset) void SDMAEngine::processRLC(Addr doorbellOffset, Addr wptrOffset) { - assert(rlcMap.find(doorbellOffset) != rlcMap.end()); - - if (rlcMap[doorbellOffset] == 0) { + if (rlcInfo[0] == doorbellOffset) { processRLC0(wptrOffset); - } else if (rlcMap[doorbellOffset] == 1) { + } else if (rlcInfo[1] == doorbellOffset) { processRLC1(wptrOffset); } else { - panic("Cannot process unknown RLC queue: %d\n", - rlcMap[doorbellOffset]); + panic("Cannot process: no RLC queue at %#lx\n", doorbellOffset); } } @@ -247,6 +292,17 @@ SDMAEngine::decodeNext(SDMAQueue *q) { decodeHeader(q, header); }); dmaReadVirt(q->rptr(), sizeof(uint32_t), cb, &cb->dmaBuffer); } else { + // The driver expects the rptr to be written back to host memory + // periodically. In simulation, we writeback rptr after each burst of + // packets from a doorbell, rather than using the cycle count which + // is not accurate in all simulation settings (e.g., KVM). 
+ DPRINTF(SDMAEngine, "Writing rptr %#lx back to host addr %#lx\n", + q->globalRptr(), q->rptrWbAddr()); + if (q->rptrWbAddr()) { + auto cb = new DmaVirtCallback( + [ = ](const uint64_t &) { }, q->globalRptr()); + dmaWriteVirt(q->rptrWbAddr(), sizeof(Addr), cb, &cb->dmaBuffer); + } q->processing(false); if (q->parent()) { DPRINTF(SDMAEngine, "SDMA switching queues\n"); @@ -387,9 +443,14 @@ SDMAEngine::decodeHeader(SDMAQueue *q, uint32_t header) decodeNext(q); } break; case SDMA_OP_ATOMIC: { - q->incRptr(sizeof(sdmaAtomic)); - warn("SDMA_OP_ATOMIC not implemented"); - decodeNext(q); + DPRINTF(SDMAEngine, "SDMA Atomic packet\n"); + dmaBuffer = new sdmaAtomic(); + sdmaAtomicHeader *h = new sdmaAtomicHeader(); + *h = *(sdmaAtomicHeader *)&header; + cb = new DmaVirtCallback( + [ = ] (const uint64_t &) + { atomic(q, h, (sdmaAtomic *)dmaBuffer); }); + dmaReadVirt(q->rptr(), sizeof(sdmaAtomic), cb, dmaBuffer); } break; case SDMA_OP_CONST_FILL: { q->incRptr(sizeof(sdmaConstFill)); @@ -496,12 +557,10 @@ SDMAEngine::writeReadData(SDMAQueue *q, sdmaWrite *pkt, uint32_t *dmaBuffer) // lastly we write read data to the destination address if (gpuDevice->getVM().inMMHUB(pkt->dest)) { Addr mmhubAddr = pkt->dest - gpuDevice->getVM().getMMHUBBase(); + auto cb = new EventFunctionWrapper( + [ = ]{ writeDone(q, pkt, dmaBuffer); }, name()); gpuDevice->getMemMgr()->writeRequest(mmhubAddr, (uint8_t *)dmaBuffer, - bufferSize); - - delete []dmaBuffer; - delete pkt; - decodeNext(q); + bufferSize, 0, cb); } else { // TODO: getGARTAddr? 
pkt->dest = getGARTAddr(pkt->dest); @@ -535,11 +594,34 @@ SDMAEngine::copy(SDMAQueue *q, sdmaCopy *pkt) pkt->source = getGARTAddr(pkt->source); DPRINTF(SDMAEngine, "GART addr %lx\n", pkt->source); - // first we have to read needed data from the source address + // Read data from the source first, then call the copyReadData method uint8_t *dmaBuffer = new uint8_t[pkt->count]; - auto cb = new DmaVirtCallback( - [ = ] (const uint64_t &) { copyReadData(q, pkt, dmaBuffer); }); - dmaReadVirt(pkt->source, pkt->count, cb, (void *)dmaBuffer); + Addr device_addr = getDeviceAddress(pkt->source); + if (device_addr) { + DPRINTF(SDMAEngine, "Copying from device address %#lx\n", device_addr); + auto cb = new EventFunctionWrapper( + [ = ]{ copyReadData(q, pkt, dmaBuffer); }, name()); + + // Copy the minimum page size at a time in case the physical addresses + // are not contiguous. + ChunkGenerator gen(pkt->source, pkt->count, AMDGPU_MMHUB_PAGE_SIZE); + for (; !gen.done(); gen.next()) { + Addr chunk_addr = getDeviceAddress(gen.addr()); + assert(chunk_addr); + + DPRINTF(SDMAEngine, "Copying chunk of %d bytes from %#lx (%#lx)\n", + gen.size(), gen.addr(), chunk_addr); + + gpuDevice->getMemMgr()->readRequest(chunk_addr, dmaBuffer, + gen.size(), 0, + gen.last() ? cb : nullptr); + dmaBuffer += gen.size(); + } + } else { + auto cb = new DmaVirtCallback( + [ = ] (const uint64_t &) { copyReadData(q, pkt, dmaBuffer); }); + dmaReadVirt(pkt->source, pkt->count, cb, (void *)dmaBuffer); + } } /* Completion of data reading for a copy packet. 
*/ @@ -547,34 +629,40 @@ void SDMAEngine::copyReadData(SDMAQueue *q, sdmaCopy *pkt, uint8_t *dmaBuffer) { // lastly we write read data to the destination address - DPRINTF(SDMAEngine, "Copy packet data:\n"); - uint64_t *dmaBuffer64 = new uint64_t[pkt->count/8]; - memcpy(dmaBuffer64, dmaBuffer, pkt->count); + uint64_t *dmaBuffer64 = reinterpret_cast(dmaBuffer); + + DPRINTF(SDMAEngine, "Copy packet last/first qwords:\n"); + DPRINTF(SDMAEngine, "First: %016lx\n", dmaBuffer64[0]); + DPRINTF(SDMAEngine, "Last: %016lx\n", dmaBuffer64[(pkt->count/8)-1]); + + DPRINTF(SDMAData, "Copy packet data:\n"); for (int i = 0; i < pkt->count/8; ++i) { - DPRINTF(SDMAEngine, "%016lx\n", dmaBuffer64[i]); + DPRINTF(SDMAData, "%016lx\n", dmaBuffer64[i]); } - delete [] dmaBuffer64; - // Aperture is unknown until translating. Do a dummy translation. - auto tgen = translate(pkt->dest, 64); - auto addr_range = *(tgen->begin()); - Addr tmp_addr = addr_range.paddr; - DPRINTF(SDMAEngine, "Tmp addr %#lx -> %#lx\n", pkt->dest, tmp_addr); + Addr device_addr = getDeviceAddress(pkt->dest); + // Write read data to the destination address then call the copyDone method + if (device_addr) { + DPRINTF(SDMAEngine, "Copying to device address %#lx\n", device_addr); + auto cb = new EventFunctionWrapper( + [ = ]{ copyDone(q, pkt, dmaBuffer); }, name()); - // Writing generated data to the destination address. - if ((gpuDevice->getVM().inMMHUB(pkt->dest) && cur_vmid == 0) || - (gpuDevice->getVM().inMMHUB(tmp_addr) && cur_vmid != 0)) { - Addr mmhubAddr = 0; - if (cur_vmid == 0) { - mmhubAddr = pkt->dest - gpuDevice->getVM().getMMHUBBase(); - } else { - mmhubAddr = tmp_addr - gpuDevice->getVM().getMMHUBBase(); + // Copy the minimum page size at a time in case the physical addresses + // are not contiguous. 
+ ChunkGenerator gen(pkt->dest, pkt->count, AMDGPU_MMHUB_PAGE_SIZE); + for (; !gen.done(); gen.next()) { + Addr chunk_addr = getDeviceAddress(gen.addr()); + assert(chunk_addr); + + DPRINTF(SDMAEngine, "Copying chunk of %d bytes to %#lx (%#lx)\n", + gen.size(), gen.addr(), chunk_addr); + + gpuDevice->getMemMgr()->writeRequest(chunk_addr, dmaBuffer, + gen.size(), 0, + gen.last() ? cb : nullptr); + + dmaBuffer += gen.size(); } - DPRINTF(SDMAEngine, "Copying to MMHUB address %#lx\n", mmhubAddr); - gpuDevice->getMemMgr()->writeRequest(mmhubAddr, dmaBuffer, pkt->count); - - delete pkt; - decodeNext(q); } else { auto cb = new DmaVirtCallback( [ = ] (const uint64_t &) { copyDone(q, pkt, dmaBuffer); }); @@ -637,11 +725,16 @@ SDMAEngine::trap(SDMAQueue *q, sdmaTrap *pkt) { q->incRptr(sizeof(sdmaTrap)); - DPRINTF(SDMAEngine, "Trap contextId: %p rbRptr: %p ibOffset: %p\n", - pkt->contextId, pkt->rbRptr, pkt->ibOffset); + DPRINTF(SDMAEngine, "Trap contextId: %p\n", pkt->intrContext); - gpuDevice->getIH()->prepareInterruptCookie(pkt->contextId, 0, - getIHClientId(), TRAP_ID); + uint32_t ring_id = 0; + assert(page.processing() ^ gfx.processing()); + if (page.processing()) { + ring_id = 3; + } + + gpuDevice->getIH()->prepareInterruptCookie(pkt->intrContext, ring_id, + getIHClientId(), TRAP_ID); gpuDevice->getIH()->submitInterruptCookie(); delete pkt; @@ -799,10 +892,11 @@ SDMAEngine::ptePde(SDMAQueue *q, sdmaPtePde *pkt) // Writing generated data to the destination address. 
if (gpuDevice->getVM().inMMHUB(pkt->dest)) { Addr mmhubAddr = pkt->dest - gpuDevice->getVM().getMMHUBBase(); + auto cb = new EventFunctionWrapper( + [ = ]{ ptePdeDone(q, pkt, dmaBuffer); }, name()); gpuDevice->getMemMgr()->writeRequest(mmhubAddr, (uint8_t *)dmaBuffer, - sizeof(uint64_t) * pkt->count); - - decodeNext(q); + sizeof(uint64_t) * pkt->count, 0, + cb); } else { auto cb = new DmaVirtCallback( [ = ] (const uint64_t &) { ptePdeDone(q, pkt, dmaBuffer); }); @@ -823,6 +917,62 @@ SDMAEngine::ptePdeDone(SDMAQueue *q, sdmaPtePde *pkt, uint64_t *dmaBuffer) decodeNext(q); } +void +SDMAEngine::atomic(SDMAQueue *q, sdmaAtomicHeader *header, sdmaAtomic *pkt) +{ + q->incRptr(sizeof(sdmaAtomic)); + DPRINTF(SDMAEngine, "Atomic op %d on addr %#lx, src: %ld, cmp: %ld, loop?" + " %d loopInt: %d\n", header->opcode, pkt->addr, pkt->srcData, + pkt->cmpData, header->loop, pkt->loopInt); + + // Read the data at pkt->addr + uint64_t *dmaBuffer = new uint64_t; + auto cb = new DmaVirtCallback( + [ = ] (const uint64_t &) + { atomicData(q, header, pkt, dmaBuffer); }); + dmaReadVirt(pkt->addr, sizeof(uint64_t), cb, (void *)dmaBuffer); +} + +void +SDMAEngine::atomicData(SDMAQueue *q, sdmaAtomicHeader *header, sdmaAtomic *pkt, + uint64_t *dmaBuffer) +{ + DPRINTF(SDMAEngine, "Atomic op %d on addr %#lx got data %#lx\n", + header->opcode, pkt->addr, *dmaBuffer); + + if (header->opcode == SDMA_ATOMIC_ADD64) { + // Atomic add with return -- dst = dst + src + int64_t dst_data = *dmaBuffer; + int64_t src_data = pkt->srcData; + + DPRINTF(SDMAEngine, "Atomic ADD_RTN: %ld + %ld = %ld\n", dst_data, + src_data, dst_data + src_data); + + // Reuse the dmaBuffer allocated + *dmaBuffer = dst_data + src_data; + + auto cb = new DmaVirtCallback( + [ = ] (const uint64_t &) + { atomicDone(q, header, pkt, dmaBuffer); }); + dmaWriteVirt(pkt->addr, sizeof(uint64_t), cb, (void *)dmaBuffer); + } else { + panic("Unsupported SDMA atomic opcode: %d\n", header->opcode); + } +} + +void 
+SDMAEngine::atomicDone(SDMAQueue *q, sdmaAtomicHeader *header, sdmaAtomic *pkt, + uint64_t *dmaBuffer) +{ + DPRINTF(SDMAEngine, "Atomic op %d op addr %#lx complete (sent %lx)\n", + header->opcode, pkt->addr, *dmaBuffer); + + delete dmaBuffer; + delete header; + delete pkt; + decodeNext(q); +} + AddrRangeList SDMAEngine::getAddrRanges() const { @@ -1020,6 +1170,7 @@ SDMAEngine::setGfxRptrLo(uint32_t data) { gfxRptr = insertBits(gfxRptr, 31, 0, 0); gfxRptr |= data; + gfx.rptrWbAddr(getGARTAddr(gfxRptr)); } void @@ -1027,6 +1178,7 @@ SDMAEngine::setGfxRptrHi(uint32_t data) { gfxRptr = insertBits(gfxRptr, 63, 32, 0); gfxRptr |= ((uint64_t)data) << 32; + gfx.rptrWbAddr(getGARTAddr(gfxRptr)); } void @@ -1098,6 +1250,7 @@ SDMAEngine::setPageRptrLo(uint32_t data) { pageRptr = insertBits(pageRptr, 31, 0, 0); pageRptr |= data; + page.rptrWbAddr(getGARTAddr(pageRptr)); } void @@ -1105,6 +1258,7 @@ SDMAEngine::setPageRptrHi(uint32_t data) { pageRptr = insertBits(pageRptr, 63, 32, 0); pageRptr |= ((uint64_t)data) << 32; + page.rptrWbAddr(getGARTAddr(pageRptr)); } void diff --git a/src/dev/amdgpu/sdma_engine.hh b/src/dev/amdgpu/sdma_engine.hh index 90d8e5b0de..d0afaf7a4a 100644 --- a/src/dev/amdgpu/sdma_engine.hh +++ b/src/dev/amdgpu/sdma_engine.hh @@ -33,7 +33,6 @@ #define __DEV_AMDGPU_SDMA_ENGINE_HH__ #include "base/bitunion.hh" -#include "debug/SDMAEngine.hh" #include "dev/amdgpu/amdgpu_device.hh" #include "dev/amdgpu/sdma_packets.hh" #include "dev/dma_virt_device.hh" @@ -59,6 +58,8 @@ class SDMAEngine : public DmaVirtDevice Addr _rptr; Addr _wptr; Addr _size; + Addr _rptr_wb_addr = 0; + Addr _global_rptr = 0; bool _valid; bool _processing; SDMAQueue *_parent; @@ -73,6 +74,8 @@ class SDMAEngine : public DmaVirtDevice Addr wptr() { return _base + _wptr; } Addr getWptr() { return _wptr; } Addr size() { return _size; } + Addr rptrWbAddr() { return _rptr_wb_addr; } + Addr globalRptr() { return _global_rptr; } bool valid() { return _valid; } bool processing() { return 
_processing; } SDMAQueue* parent() { return _parent; } @@ -83,22 +86,27 @@ class SDMAEngine : public DmaVirtDevice void incRptr(uint32_t value) { - //assert((_rptr + value) <= (_size << 1)); _rptr = (_rptr + value) % _size; + _global_rptr += value; } - void rptr(Addr value) { _rptr = value; } + void + rptr(Addr value) + { + _rptr = value; + _global_rptr = value; + } void setWptr(Addr value) { - //assert(value <= (_size << 1)); _wptr = value % _size; } void wptr(Addr value) { _wptr = value; } void size(Addr value) { _size = value; } + void rptrWbAddr(Addr value) { _rptr_wb_addr = value; } void valid(bool v) { _valid = v; } void processing(bool value) { _processing = value; } void parent(SDMAQueue* q) { _parent = q; } @@ -134,7 +142,7 @@ class SDMAEngine : public DmaVirtDevice VegaISA::Walker *walker; /* processRLC will select the correct queue for the doorbell */ - std::unordered_map rlcMap; + std::array rlcInfo{}; void processRLC0(Addr wptrOffset); void processRLC1(Addr wptrOffset); @@ -155,6 +163,13 @@ class SDMAEngine : public DmaVirtDevice Addr getGARTAddr(Addr addr) const; TranslationGenPtr translate(Addr vaddr, Addr size) override; + /** + * Translate an address in an SDMA packet. Return the device address if + * address in the packet is on the device and 0 if the the address in the + * packet is on the host/system memory. + */ + Addr getDeviceAddress(Addr raw_addr); + /** * Inherited methods. 
*/ @@ -211,6 +226,11 @@ class SDMAEngine : public DmaVirtDevice bool pollRegMemFunc(uint32_t value, uint32_t reference, uint32_t func); void ptePde(SDMAQueue *q, sdmaPtePde *pkt); void ptePdeDone(SDMAQueue *q, sdmaPtePde *pkt, uint64_t *dmaBuffer); + void atomic(SDMAQueue *q, sdmaAtomicHeader *header, sdmaAtomic *pkt); + void atomicData(SDMAQueue *q, sdmaAtomicHeader *header, sdmaAtomic *pkt, + uint64_t *dmaBuffer); + void atomicDone(SDMAQueue *q, sdmaAtomicHeader *header, sdmaAtomic *pkt, + uint64_t *dmaBuffer); /** * Methods for getting the values of SDMA MMIO registers. @@ -257,8 +277,10 @@ class SDMAEngine : public DmaVirtDevice /** * Methods for RLC queues */ - void registerRLCQueue(Addr doorbell, Addr rb_base); + void registerRLCQueue(Addr doorbell, Addr rb_base, uint32_t size, + Addr rptr_wb_addr); void unregisterRLCQueue(Addr doorbell); + void deallocateRLCQueues(); int cur_vmid = 0; }; diff --git a/src/dev/amdgpu/sdma_packets.hh b/src/dev/amdgpu/sdma_packets.hh index c4907561cc..52a47d3a2d 100644 --- a/src/dev/amdgpu/sdma_packets.hh +++ b/src/dev/amdgpu/sdma_packets.hh @@ -283,6 +283,17 @@ typedef struct GEM5_PACKED } sdmaAtomic; static_assert(sizeof(sdmaAtomic) == 28); +typedef struct GEM5_PACKED +{ + int unused2 : 16; + int loop : 1; + int unused1 : 8; + int opcode : 7; +} sdmaAtomicHeader; +static_assert(sizeof(sdmaAtomicHeader) == 4); + +constexpr unsigned int SDMA_ATOMIC_ADD64 = 47; + typedef struct GEM5_PACKED { uint64_t dest; diff --git a/src/dev/arm/AbstractNVM.py b/src/dev/arm/AbstractNVM.py index 7cfcc3eb95..20a8a80c46 100644 --- a/src/dev/arm/AbstractNVM.py +++ b/src/dev/arm/AbstractNVM.py @@ -37,8 +37,9 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class AbstractNVM(SimObject): - type = 'AbstractNVM' + type = "AbstractNVM" abstract = True cxx_header = "dev/arm/abstract_nvm.hh" - cxx_class = 'gem5::AbstractNVM' + cxx_class = "gem5::AbstractNVM" diff --git a/src/dev/arm/Display.py b/src/dev/arm/Display.py 
index 035d971aeb..f28d9a84c9 100644 --- a/src/dev/arm/Display.py +++ b/src/dev/arm/Display.py @@ -37,10 +37,11 @@ from m5.params import * from m5.SimObject import SimObject from m5.util.fdthelper import * + class Display(SimObject): - type = 'Display' + type = "Display" cxx_header = "dev/arm/display.hh" - cxx_class = 'gem5::Display' + cxx_class = "gem5::Display" clock_frequency = Param.Unsigned("clock-frequency property") hactive = Param.Unsigned("hactive property") vactive = Param.Unsigned("vactive property") @@ -64,37 +65,34 @@ class Display(SimObject): # timing node timing_node = FdtNode(self.timingNode()) - timing_node.append(FdtPropertyWords( - "clock-frequency", [self.clock_frequency])) - timing_node.append(FdtPropertyWords( - "hactive", [self.hactive])) - timing_node.append(FdtPropertyWords( - "vactive", [self.vactive])) - timing_node.append(FdtPropertyWords( - "hfront-porch", [self.hfront_porch])) - timing_node.append(FdtPropertyWords( - "hback-porch", [self.hback_porch])) - timing_node.append(FdtPropertyWords( - "hsync-len", [self.hsync_len])) - timing_node.append(FdtPropertyWords( - "vfront-porch", [self.vfront_porch])) - timing_node.append(FdtPropertyWords( - "vback-porch", [self.vback_porch])) - timing_node.append(FdtPropertyWords( - "vsync-len", [self.vsync_len])) + timing_node.append( + FdtPropertyWords("clock-frequency", [self.clock_frequency]) + ) + timing_node.append(FdtPropertyWords("hactive", [self.hactive])) + timing_node.append(FdtPropertyWords("vactive", [self.vactive])) + timing_node.append( + FdtPropertyWords("hfront-porch", [self.hfront_porch]) + ) + timing_node.append(FdtPropertyWords("hback-porch", [self.hback_porch])) + timing_node.append(FdtPropertyWords("hsync-len", [self.hsync_len])) + timing_node.append( + FdtPropertyWords("vfront-porch", [self.vfront_porch]) + ) + timing_node.append(FdtPropertyWords("vback-porch", [self.vback_porch])) + timing_node.append(FdtPropertyWords("vsync-len", [self.vsync_len])) 
timing_node.appendPhandle(self.timingNode()) # display timing node dispt_node = FdtNode("display-timings") - dispt_node.append(FdtPropertyWords("native-mode", - state.phandle(self.timingNode()))) + dispt_node.append( + FdtPropertyWords("native-mode", state.phandle(self.timingNode())) + ) dispt_node.append(timing_node) # endpoint node endpoint_node = FdtNode("endpoint") - endpoint_node.appendPhandle( - self.endpointPhandle()) + endpoint_node.appendPhandle(self.endpointPhandle()) # Assign node so that it can be retrieved self._endpoint_node = endpoint_node @@ -111,6 +109,7 @@ class Display(SimObject): yield node + class Display1080p(Display): clock_frequency = 148500000 hactive = 1920 diff --git a/src/dev/arm/Doorbell.py b/src/dev/arm/Doorbell.py index 9120452158..106a184902 100644 --- a/src/dev/arm/Doorbell.py +++ b/src/dev/arm/Doorbell.py @@ -36,10 +36,11 @@ from m5.SimObject import SimObject from m5.params import * + class Doorbell(SimObject): - type = 'Doorbell' + type = "Doorbell" abstract = True cxx_header = "dev/arm/doorbell.hh" - cxx_class = 'gem5::Doorbell' + cxx_class = "gem5::Doorbell" set_address = Param.Addr("Doorbell set address") clear_address = Param.Addr("Doorbell clear address") diff --git a/src/dev/arm/EnergyCtrl.py b/src/dev/arm/EnergyCtrl.py index 2dcef4f410..91296143c8 100644 --- a/src/dev/arm/EnergyCtrl.py +++ b/src/dev/arm/EnergyCtrl.py @@ -39,15 +39,17 @@ from m5.objects.Device import BasicPioDevice from m5.proxy import * from m5.util.fdthelper import * + class EnergyCtrl(BasicPioDevice): - type = 'EnergyCtrl' + type = "EnergyCtrl" cxx_header = "dev/arm/energy_ctrl.hh" - cxx_class = 'gem5::EnergyCtrl' + cxx_class = "gem5::EnergyCtrl" dvfs_handler = Param.DVFSHandler(Parent.dvfs_handler, "DVFS handler") def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, 'gem5_energy_ctrl', - self.pio_addr, 0x1000) + node = self.generateBasicPioDeviceNode( + state, "gem5_energy_ctrl", self.pio_addr, 0x1000 + ) 
node.appendCompatible("arm,gem5-energy-ctrl") diff --git a/src/dev/arm/FlashDevice.py b/src/dev/arm/FlashDevice.py index cf06bdabb8..d5069d94ac 100644 --- a/src/dev/arm/FlashDevice.py +++ b/src/dev/arm/FlashDevice.py @@ -38,16 +38,18 @@ from m5.proxy import * from m5.objects.AbstractNVM import * -#Distribution of the data. -#sequential: sequential (address n+1 is likely to be on the same plane as n) -#Random: @TODO Not yet implemented -#stripe: striping over all the planes -class DataDistribution(Enum): vals = ['sequential', 'stripe'] +# Distribution of the data. +# sequential: sequential (address n+1 is likely to be on the same plane as n) +# Random: @TODO Not yet implemented +# stripe: striping over all the planes +class DataDistribution(Enum): + vals = ["sequential", "stripe"] + class FlashDevice(AbstractNVM): - type = 'FlashDevice' + type = "FlashDevice" cxx_header = "dev/arm/flash_device.hh" - cxx_class = 'gem5::FlashDevice' + cxx_class = "gem5::FlashDevice" # default blocksize is 128 KiB.This seems to be the most common size in # mobile devices (not the image blocksize) blk_size = Param.MemorySize("128KiB", "Size of one disk block") @@ -56,8 +58,11 @@ class FlashDevice(AbstractNVM): page_size = Param.MemorySize("2KiB", "Size of one disk page") # There are many GC flavors. It is impossible to cover them all; this # parameter enables the approximation of different GC algorithms - GC_active = Param.Percent(50, "Percentage of the time (in whole numbers) \ - that the GC is activated if a block is full") + GC_active = Param.Percent( + 50, + "Percentage of the time (in whole numbers) \ + that the GC is activated if a block is full", + ) # Access latencies. Different devices will have different latencies, but # the latencies will be around the default values. read_lat = Param.Latency("25us", "Read Latency") @@ -67,7 +72,9 @@ class FlashDevice(AbstractNVM): num_planes = Param.UInt32(1, "Number of planes per die") # Data distribution. Default is none. 
It is adviced to switch to stripe # when more than one plane is used. - data_distribution = Param.DataDistribution('sequential', "Distribution \ + data_distribution = Param.DataDistribution( + "sequential", + "Distribution \ of the data in the adress table; Stripe needed for multiple\ - planes; otherwise use: sequential") - + planes; otherwise use: sequential", + ) diff --git a/src/dev/arm/GenericTimer.py b/src/dev/arm/GenericTimer.py index af6d5ec453..a44cd6fd7e 100644 --- a/src/dev/arm/GenericTimer.py +++ b/src/dev/arm/GenericTimer.py @@ -40,45 +40,50 @@ from m5.proxy import Parent from m5.util import fatal from m5.util.fdthelper import FdtNode, FdtProperty, FdtPropertyWords, FdtState + class SystemCounter(SimObject): """ -Shared by both PE-implementations and memory-mapped timers. It provides a -uniform view of system time through its counter value. + Shared by both PE-implementations and memory-mapped timers. It provides a + uniform view of system time through its counter value. -Reference: - Arm ARM (ARM DDI 0487E.a) - D11.1.2 - The system counter + Reference: + Arm ARM (ARM DDI 0487E.a) + D11.1.2 - The system counter """ - type = 'SystemCounter' + type = "SystemCounter" cxx_header = "dev/arm/generic_timer.hh" - cxx_class = 'gem5::SystemCounter' + cxx_class = "gem5::SystemCounter" # Maximum of 1004 frequency entries, including end marker - freqs = VectorParam.UInt32([0x01800000], "Frequencies available for the " + freqs = VectorParam.UInt32( + [0x01800000], + "Frequencies available for the " "system counter (in Hz). First element is the base frequency, " - "following are alternative lower ones which must be exact divisors") + "following are alternative lower ones which must be exact divisors", + ) def generateDtb(self): if not self.freqs: fatal("No counter frequency to expose in DTB") return FdtPropertyWords("clock-frequency", [self.freqs[0]]) + class GenericTimer(SimObject): """ -Architected timers per PE in the system. 
Each of them provides a physical -counter, a virtual counter and several timers accessible from different -exception levels and security states. + Architected timers per PE in the system. Each of them provides a physical + counter, a virtual counter and several timers accessible from different + exception levels and security states. -Reference: - Arm ARM (ARM DDI 0487E.a) - D11.2 - The AArch64 view of the Generic Timer - G6.2 - The AArch32 view of the Generic Timer + Reference: + Arm ARM (ARM DDI 0487E.a) + D11.2 - The AArch64 view of the Generic Timer + G6.2 - The AArch32 view of the Generic Timer """ - type = 'GenericTimer' + type = "GenericTimer" cxx_header = "dev/arm/generic_timer.hh" - cxx_class = 'gem5::GenericTimer' + cxx_class = "gem5::GenericTimer" _freq_in_dtb = False @@ -106,36 +111,41 @@ Reference: def generateDeviceTree(self, state): node = FdtNode("timer") - node.appendCompatible(["arm,cortex-a15-timer", - "arm,armv7-timer", - "arm,armv8-timer"]) + node.appendCompatible( + ["arm,cortex-a15-timer", "arm,armv7-timer", "arm,armv8-timer"] + ) gic = self._parent.unproxy(self).gic - node.append(FdtPropertyWords("interrupts", - self.int_el3_phys.generateFdtProperty(gic) + - self.int_el1_phys.generateFdtProperty(gic) + - self.int_el1_virt.generateFdtProperty(gic) + - self.int_el2_ns_phys.generateFdtProperty(gic) + - self.int_el2_ns_virt.generateFdtProperty(gic))) + node.append( + FdtPropertyWords( + "interrupts", + self.int_el3_phys.generateFdtProperty(gic) + + self.int_el1_phys.generateFdtProperty(gic) + + self.int_el1_virt.generateFdtProperty(gic) + + self.int_el2_ns_phys.generateFdtProperty(gic) + + self.int_el2_ns_virt.generateFdtProperty(gic), + ) + ) if self._freq_in_dtb: node.append(self.counter.unproxy(self).generateDtb()) yield node + class GenericTimerFrame(PioDevice): """ -Memory-mapped timer frame implementation. Controlled from GenericTimerMem, -may be used by peripherals without a system register interface. 
+ Memory-mapped timer frame implementation. Controlled from GenericTimerMem, + may be used by peripherals without a system register interface. -Reference: - Arm ARM (ARM DDI 0487E.a) - I2.3.2 - The CNTBaseN and CNTEL0BaseN frames + Reference: + Arm ARM (ARM DDI 0487E.a) + I2.3.2 - The CNTBaseN and CNTEL0BaseN frames """ - type = 'GenericTimerFrame' + type = "GenericTimerFrame" cxx_header = "dev/arm/generic_timer.hh" - cxx_class = 'gem5::GenericTimerFrame' + cxx_class = "gem5::GenericTimerFrame" _frame_num = 0 @@ -158,29 +168,31 @@ Reference: reg = state.addrCells(self.cnt_base) + state.sizeCells(0x1000) if self.cnt_el0_base.value != MaxAddr: - reg.extend(state.addrCells(self.cnt_el0_base) - + state.sizeCells(0x1000)) + reg.extend( + state.addrCells(self.cnt_el0_base) + state.sizeCells(0x1000) + ) node.append(FdtPropertyWords("reg", reg)) return node + class GenericTimerMem(PioDevice): """ -System level implementation. It provides three main components: -- Memory-mapped counter module: controls the system timer through the - CNTControlBase frame, and provides its value through the CNTReadBase frame -- Memory-mapped timer control module: controls the memory-mapped timers -- Memory-mapped timers: implementations of the GenericTimer for system - peripherals + System level implementation. 
It provides three main components: + - Memory-mapped counter module: controls the system timer through the + CNTControlBase frame, and provides its value through the CNTReadBase frame + - Memory-mapped timer control module: controls the memory-mapped timers + - Memory-mapped timers: implementations of the GenericTimer for system + peripherals -Reference: - Arm ARM (ARM DDI 0487E.a) - I2 - System Level Implementation of the Generic Timer + Reference: + Arm ARM (ARM DDI 0487E.a) + I2 - System Level Implementation of the Generic Timer """ - type = 'GenericTimerMem' + type = "GenericTimerMem" cxx_header = "dev/arm/generic_timer.hh" - cxx_class = 'gem5::GenericTimerMem' + cxx_class = "gem5::GenericTimerMem" _freq_in_dtb = False @@ -194,8 +206,9 @@ Reference: frames = VectorParam.GenericTimerFrame([], "Memory-mapped timer frames") def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, "timer", - self.cnt_ctl_base, 0x1000) + node = self.generateBasicPioDeviceNode( + state, "timer", self.cnt_ctl_base, 0x1000 + ) node.appendCompatible(["arm,armv7-timer-mem"]) node.append(state.addrCellsProperty()) node.append(state.sizeCellsProperty()) diff --git a/src/dev/arm/Gic.py b/src/dev/arm/Gic.py index ffbdbac3eb..41d602b86a 100644 --- a/src/dev/arm/Gic.py +++ b/src/dev/arm/Gic.py @@ -42,25 +42,24 @@ from m5.objects.Device import PioDevice, BasicPioDevice from m5.objects.Platform import Platform from m5.objects.IntPin import IntSourcePin + class BaseGic(PioDevice): - type = 'BaseGic' + type = "BaseGic" abstract = True cxx_header = "dev/arm/base_gic.hh" - cxx_class = 'gem5::BaseGic' + cxx_class = "gem5::BaseGic" # Used for DTB autogeneration _state = FdtState(addr_cells=0, interrupt_cells=3) platform = Param.Platform(Parent.any, "Platform this device is part of.") - gicd_iidr = Param.UInt32(0, - "Distributor Implementer Identification Register") - gicd_pidr = Param.UInt32(0, - "Peripheral Identification Register") - gicc_iidr = Param.UInt32(0, - "CPU 
Interface Identification Register") - gicv_iidr = Param.UInt32(0, - "VM CPU Interface Identification Register") + gicd_iidr = Param.UInt32( + 0, "Distributor Implementer Identification Register" + ) + gicd_pidr = Param.UInt32(0, "Peripheral Identification Register") + gicc_iidr = Param.UInt32(0, "CPU Interface Identification Register") + gicv_iidr = Param.UInt32(0, "VM CPU Interface Identification Register") def interruptCells(self, int_type, int_num, int_trigger, partition=None): """ @@ -75,11 +74,12 @@ class BaseGic(PioDevice): # partitioning, set the affinity to target all CPUs # (affinity = 0xf00) if partition is None and int_type == ArmPPI._LINUX_ID: - affinity = 0xf00 + affinity = 0xF00 else: affinity = 0 - return [ int_type, int_num, affinity | int_trigger ] + return [int_type, int_num, affinity | int_trigger] + class ArmInterruptType(ScopedEnum): """ @@ -87,26 +87,30 @@ class ArmInterruptType(ScopedEnum): defined in include/linux/irq.h. They are mainly meant to be used for DTB autogen """ + map = { - 'IRQ_TYPE_EDGE_RISING' : 0x1, - 'IRQ_TYPE_EDGE_FALLING' : 0x2, - 'IRQ_TYPE_LEVEL_HIGH' : 0x4, - 'IRQ_TYPE_LEVEL_LOW' : 0x8 + "IRQ_TYPE_EDGE_RISING": 0x1, + "IRQ_TYPE_EDGE_FALLING": 0x2, + "IRQ_TYPE_LEVEL_HIGH": 0x4, + "IRQ_TYPE_LEVEL_LOW": 0x8, } + class ArmInterruptPin(SimObject): - type = 'ArmInterruptPin' + type = "ArmInterruptPin" cxx_header = "dev/arm/base_gic.hh" cxx_class = "gem5::ArmInterruptPinGen" abstract = True platform = Param.Platform(Parent.any, "Platform with interrupt controller") num = Param.UInt32("Interrupt number in GIC") - int_type = Param.ArmInterruptType('IRQ_TYPE_LEVEL_HIGH', - "Interrupt type (level/edge triggered)") + int_type = Param.ArmInterruptType( + "IRQ_TYPE_LEVEL_HIGH", "Interrupt type (level/edge triggered)" + ) + class ArmSPI(ArmInterruptPin): - type = 'ArmSPI' + type = "ArmSPI" cxx_header = "dev/arm/base_gic.hh" cxx_class = "gem5::ArmSPIGen" @@ -120,10 +124,12 @@ class ArmSPI(ArmInterruptPin): gem5 uses the internal GIC 
numbering (SPIs start at 32) """ return gic.interruptCells( - self._LINUX_ID, self.num - 32, int(self.int_type.getValue())) + self._LINUX_ID, self.num - 32, int(self.int_type.getValue()) + ) + class ArmPPI(ArmInterruptPin): - type = 'ArmPPI' + type = "ArmPPI" cxx_header = "dev/arm/base_gic.hh" cxx_class = "gem5::ArmPPIGen" @@ -137,107 +143,124 @@ class ArmPPI(ArmInterruptPin): gem5 uses the internal GIC numbering (PPIs start at 16) """ return gic.interruptCells( - self._LINUX_ID, self.num - 16, int(self.int_type.getValue())) + self._LINUX_ID, self.num - 16, int(self.int_type.getValue()) + ) + class ArmSigInterruptPin(ArmInterruptPin): - type = 'ArmSigInterruptPin' + type = "ArmSigInterruptPin" cxx_header = "dev/arm/base_gic.hh" cxx_class = "gem5::ArmSigInterruptPinGen" - irq = IntSourcePin('Interrupt pin') + irq = IntSourcePin("Interrupt pin") + class GicV2(BaseGic): - type = 'GicV2' + type = "GicV2" cxx_header = "dev/arm/gic_v2.hh" - cxx_class = 'gem5::GicV2' + cxx_class = "gem5::GicV2" dist_addr = Param.Addr("Address for distributor") cpu_addr = Param.Addr("Address for cpu") cpu_size = Param.Addr(0x2000, "Size of cpu register bank") - dist_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to distributor") - cpu_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to cpu interface") - int_latency = Param.Latency('10ns', "Delay for interrupt to get to CPU") - it_lines = Param.UInt32(128, "Number of interrupt lines supported (max = 1020)") + dist_pio_delay = Param.Latency("10ns", "Delay for PIO r/w to distributor") + cpu_pio_delay = Param.Latency("10ns", "Delay for PIO r/w to cpu interface") + int_latency = Param.Latency("10ns", "Delay for interrupt to get to CPU") + it_lines = Param.UInt32( + 128, "Number of interrupt lines supported (max = 1020)" + ) gem5_extensions = Param.Bool(False, "Enable gem5 extensions") + class Gic400(GicV2): """ As defined in: "ARM Generic Interrupt Controller Architecture" version 2.0 "CoreLink GIC-400 Generic Interrupt Controller" 
revision r0p1 """ - gicd_pidr = 0x002bb490 + + gicd_pidr = 0x002BB490 gicd_iidr = 0x0200143B gicc_iidr = 0x0202143B # gicv_iidr same as gicc_idr gicv_iidr = gicc_iidr + class Gicv2mFrame(SimObject): - type = 'Gicv2mFrame' + type = "Gicv2mFrame" cxx_header = "dev/arm/gic_v2m.hh" - cxx_class = 'gem5::Gicv2mFrame' - spi_base = Param.UInt32(0x0, "Frame SPI base number"); - spi_len = Param.UInt32(0x0, "Frame SPI total number"); + cxx_class = "gem5::Gicv2mFrame" + spi_base = Param.UInt32(0x0, "Frame SPI base number") + spi_len = Param.UInt32(0x0, "Frame SPI total number") addr = Param.Addr("Address for frame PIO") -class Gicv2m(PioDevice): - type = 'Gicv2m' - cxx_header = "dev/arm/gic_v2m.hh" - cxx_class = 'gem5::Gicv2m' - pio_delay = Param.Latency('10ns', "Delay for PIO r/w") +class Gicv2m(PioDevice): + type = "Gicv2m" + cxx_header = "dev/arm/gic_v2m.hh" + cxx_class = "gem5::Gicv2m" + + pio_delay = Param.Latency("10ns", "Delay for PIO r/w") gic = Param.BaseGic(Parent.any, "Gic on which to trigger interrupts") frames = VectorParam.Gicv2mFrame([], "Power of two number of frames") + class VGic(PioDevice): - type = 'VGic' + type = "VGic" cxx_header = "dev/arm/vgic.hh" - cxx_class = 'gem5::VGic' + cxx_class = "gem5::VGic" gic = Param.BaseGic(Parent.any, "Gic to use for interrupting") platform = Param.Platform(Parent.any, "Platform this device is part of.") vcpu_addr = Param.Addr(0, "Address for vcpu interfaces") hv_addr = Param.Addr(0, "Address for hv control") - pio_delay = Param.Latency('10ns', "Delay for PIO r/w") - # The number of list registers is not currently configurable at runtime. + pio_delay = Param.Latency("10ns", "Delay for PIO r/w") + # The number of list registers is not currently configurable at runtime. 
maint_int = Param.UInt32("HV maintenance interrupt number") # gicv_iidr same as gicc_idr - gicv_iidr = Param.UInt32(Self.gic.gicc_iidr, - "VM CPU Interface Identification Register") + gicv_iidr = Param.UInt32( + Self.gic.gicc_iidr, "VM CPU Interface Identification Register" + ) def generateDeviceTree(self, state): gic = self.gic.unproxy(self) node = FdtNode("interrupt-controller") - node.appendCompatible(["gem5,gic", "arm,cortex-a15-gic", - "arm,cortex-a9-gic"]) + node.appendCompatible( + ["gem5,gic", "arm,cortex-a15-gic", "arm,cortex-a9-gic"] + ) node.append(gic._state.interruptCellsProperty()) node.append(gic._state.addrCellsProperty()) node.append(FdtProperty("interrupt-controller")) regs = ( - state.addrCells(gic.dist_addr) + - state.sizeCells(0x1000) + - state.addrCells(gic.cpu_addr) + - state.sizeCells(0x1000) + - state.addrCells(self.hv_addr) + - state.sizeCells(0x2000) + - state.addrCells(self.vcpu_addr) + - state.sizeCells(0x2000) ) + state.addrCells(gic.dist_addr) + + state.sizeCells(0x1000) + + state.addrCells(gic.cpu_addr) + + state.sizeCells(0x1000) + + state.addrCells(self.hv_addr) + + state.sizeCells(0x2000) + + state.addrCells(self.vcpu_addr) + + state.sizeCells(0x2000) + ) node.append(FdtPropertyWords("reg", regs)) - node.append(FdtPropertyWords("interrupts", - [1, int(self.maint_int)-16, 0xf04])) + node.append( + FdtPropertyWords( + "interrupts", [1, int(self.maint_int) - 16, 0xF04] + ) + ) node.appendPhandle(gic) yield node + class Gicv3Its(BasicPioDevice): - type = 'Gicv3Its' + type = "Gicv3Its" cxx_header = "dev/arm/gic_v3_its.hh" - cxx_class = 'gem5::Gicv3Its' + cxx_class = "gem5::Gicv3Its" dma = RequestPort("DMA port") pio_size = Param.Unsigned(0x20000, "Gicv3Its pio size") @@ -248,18 +271,20 @@ class Gicv3Its(BasicPioDevice): gits_typer = Param.UInt64(0x30023F01, "GITS_TYPER RO value") def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, "gic-its", self.pio_addr, - self.pio_size) + node = 
self.generateBasicPioDeviceNode( + state, "gic-its", self.pio_addr, self.pio_size + ) node.appendCompatible(["arm,gic-v3-its"]) node.append(FdtProperty("msi-controller")) node.append(FdtPropertyWords("#msi-cells", [1])) return node + class Gicv3(BaseGic): - type = 'Gicv3' + type = "Gicv3" cxx_header = "dev/arm/gic_v3.hh" - cxx_class = 'gem5::Gicv3' + cxx_class = "gem5::Gicv3" # Used for DTB autogeneration _state = FdtState(addr_cells=2, size_cells=2, interrupt_cells=3) @@ -267,21 +292,26 @@ class Gicv3(BaseGic): its = Param.Gicv3Its(NULL, "GICv3 Interrupt Translation Service") dist_addr = Param.Addr("Address for distributor") - dist_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to distributor") + dist_pio_delay = Param.Latency("10ns", "Delay for PIO r/w to distributor") redist_addr = Param.Addr("Address for redistributors") - redist_pio_delay = Param.Latency('10ns', - "Delay for PIO r/w to redistributors") - it_lines = Param.UInt32(1020, - "Number of interrupt lines supported (max = 1020)") + redist_pio_delay = Param.Latency( + "10ns", "Delay for PIO r/w to redistributors" + ) + it_lines = Param.UInt32( + 1020, "Number of interrupt lines supported (max = 1020)" + ) maint_int = Param.ArmInterruptPin( "HV maintenance interrupt." "ARM strongly recommends that maintenance interrupts " - "are configured to use INTID 25 (PPI Interrupt).") + "are configured to use INTID 25 (PPI Interrupt)." + ) - cpu_max = Param.Unsigned(256, + cpu_max = Param.Unsigned( + 256, "Maximum number of PE. 
This is affecting the maximum number of " - "redistributors") + "redistributors", + ) gicv4 = Param.Bool(False, "GIC is GICv4 compatible") @@ -309,18 +339,26 @@ class Gicv3(BaseGic): node.append(FdtProperty("interrupt-controller")) redist_stride = 0x40000 if self.gicv4 else 0x20000 - node.append(FdtPropertyWords("redistributor-stride", - state.sizeCells(redist_stride))) + node.append( + FdtPropertyWords( + "redistributor-stride", state.sizeCells(redist_stride) + ) + ) regs = ( - state.addrCells(self.dist_addr) + - state.sizeCells(0x10000) + - state.addrCells(self.redist_addr) + - state.sizeCells(0x2000000) ) + state.addrCells(self.dist_addr) + + state.sizeCells(0x10000) + + state.addrCells(self.redist_addr) + + state.sizeCells(0x2000000) + ) node.append(FdtPropertyWords("reg", regs)) - node.append(FdtPropertyWords("interrupts", - self.interruptCells(1, int(self.maint_int.num)-16, 0x4))) + node.append( + FdtPropertyWords( + "interrupts", + self.interruptCells(1, int(self.maint_int.num) - 16, 0x4), + ) + ) node.appendPhandle(self) diff --git a/src/dev/arm/NoMali.py b/src/dev/arm/NoMali.py index 6a933d69ac..c7d0d4259c 100644 --- a/src/dev/arm/NoMali.py +++ b/src/dev/arm/NoMali.py @@ -38,16 +38,15 @@ from m5.params import * from m5.objects.Device import BasicPioDevice from m5.objects.Gic import * -class NoMaliGpuType(Enum): vals = [ - 'T60x', - 'T62x', - 'T760', - ] + +class NoMaliGpuType(Enum): + vals = ["T60x", "T62x", "T760"] + class NoMaliGpu(PioDevice): - type = 'NoMaliGpu' + type = "NoMaliGpu" cxx_header = "dev/arm/gpu_nomali.hh" - cxx_class = 'gem5::NoMaliGpu' + cxx_class = "gem5::NoMaliGpu" pio_addr = Param.Addr("Device base address") @@ -62,15 +61,16 @@ class NoMaliGpu(PioDevice): int_job = Param.UInt32("Interrupt number for JOB interrupts") int_mmu = Param.UInt32("Interrupt number for MMU interrupts") + class CustomNoMaliGpu(NoMaliGpu): """Base class for custom NoMali implementation that need to override configuration registers. 
See CustomNoMaliT760 for a usage example. """ - type = 'CustomNoMaliGpu' + type = "CustomNoMaliGpu" cxx_header = "dev/arm/gpu_nomali.hh" - cxx_class = 'gem5::CustomNoMaliGpu' + cxx_class = "gem5::CustomNoMaliGpu" gpu_id = Param.UInt32("") l2_features = Param.UInt32("") @@ -92,6 +92,7 @@ class CustomNoMaliGpu(NoMaliGpu): tiler_present = Param.UInt64("") l2_present = Param.UInt64("") + class CustomNoMaliT760(CustomNoMaliGpu): """Example NoMali T760 r0p0-0 configuration using the defaults from the NoMali library. @@ -104,25 +105,17 @@ class CustomNoMaliT760(CustomNoMaliGpu): tiler_features = 0x00000809 mem_features = 0x00000001 mmu_features = 0x00002830 - as_present = 0x000000ff + as_present = 0x000000FF js_present = 0x00000007 thread_max_threads = 0x00000100 thread_max_workgroup_size = 0x00000100 thread_max_barrier_size = 0x00000100 - thread_features = 0x0a040400 + thread_features = 0x0A040400 - texture_features = [ - 0x00fe001e, - 0x0000ffff, - 0x9f81ffff, - ] - js_features = [ - 0x0000020e, - 0x000001fe, - 0x0000007e, - ] + texture_features = [0x00FE001E, 0x0000FFFF, 0x9F81FFFF] + js_features = [0x0000020E, 0x000001FE, 0x0000007E] - shader_present = 0x0000000f + shader_present = 0x0000000F tiler_present = 0x00000001 l2_present = 0x00000001 diff --git a/src/dev/arm/RealView.py b/src/dev/arm/RealView.py index 6645b39b69..41e1bcfbb4 100644 --- a/src/dev/arm/RealView.py +++ b/src/dev/arm/RealView.py @@ -43,8 +43,13 @@ from m5.util.fdthelper import * from m5.objects.ArmSystem import ArmExtension from m5.objects.ClockDomain import ClockDomain, SrcClockDomain from m5.objects.VoltageDomain import VoltageDomain -from m5.objects.Device import \ - BasicPioDevice, PioDevice, IsaFake, BadAddr, DmaDevice +from m5.objects.Device import ( + BasicPioDevice, + PioDevice, + IsaFake, + BadAddr, + DmaDevice, +) from m5.objects.PciHost import * from m5.objects.Ethernet import NSGigE, IGbE_igb, IGbE_e1000 from m5.objects.Ide import * @@ -74,6 +79,7 @@ from m5.objects.CfiMemory import 
CfiMemory # gem5's GIC model and KVM's GIC model if KVM is available. try: from m5.objects.KvmGic import MuxingKvmGicV2, MuxingKvmGicV3 + kvm_gicv2_class = MuxingKvmGicV2 kvm_gicv3_class = MuxingKvmGicV3 except ImportError: @@ -83,52 +89,60 @@ except ImportError: kvm_gicv3_class = Gicv3 pass + class AmbaPioDevice(BasicPioDevice): - type = 'AmbaPioDevice' + type = "AmbaPioDevice" abstract = True cxx_header = "dev/arm/amba_device.hh" - cxx_class = 'gem5::AmbaPioDevice' + cxx_class = "gem5::AmbaPioDevice" amba_id = Param.UInt32("ID of AMBA device for kernel detection") + class AmbaIntDevice(AmbaPioDevice): - type = 'AmbaIntDevice' + type = "AmbaIntDevice" abstract = True cxx_header = "dev/arm/amba_device.hh" - cxx_class = 'gem5::AmbaIntDevice' + cxx_class = "gem5::AmbaIntDevice" interrupt = Param.ArmInterruptPin("Interrupt that connects to GIC") - int_delay = Param.Latency("100ns", - "Time between action and interrupt generation by device") + int_delay = Param.Latency( + "100ns", "Time between action and interrupt generation by device" + ) + class AmbaDmaDevice(DmaDevice): - type = 'AmbaDmaDevice' + type = "AmbaDmaDevice" abstract = True cxx_header = "dev/arm/amba_device.hh" - cxx_class = 'gem5::AmbaDmaDevice' + cxx_class = "gem5::AmbaDmaDevice" pio_addr = Param.Addr("Address for AMBA responder interface") - pio_latency = Param.Latency("10ns", "Time between action and write/read" - "result by AMBA DMA Device") + pio_latency = Param.Latency( + "10ns", + "Time between action and write/read" "result by AMBA DMA Device", + ) interrupt = Param.ArmInterruptPin("Interrupt that connects to GIC") amba_id = Param.UInt32("ID of AMBA device for kernel detection") -class A9SCU(BasicPioDevice): - type = 'A9SCU' - cxx_header = "dev/arm/a9scu.hh" - cxx_class = 'gem5::A9SCU' -class ArmPciIntRouting(Enum): vals = [ - 'ARM_PCI_INT_STATIC', - 'ARM_PCI_INT_DEV', - 'ARM_PCI_INT_PIN', - ] +class A9SCU(BasicPioDevice): + type = "A9SCU" + cxx_header = "dev/arm/a9scu.hh" + cxx_class = 
"gem5::A9SCU" + + +class ArmPciIntRouting(Enum): + vals = ["ARM_PCI_INT_STATIC", "ARM_PCI_INT_DEV", "ARM_PCI_INT_PIN"] + class GenericArmPciHost(GenericPciHost): - type = 'GenericArmPciHost' + type = "GenericArmPciHost" cxx_header = "dev/arm/pci_host.hh" - cxx_class = 'gem5::GenericArmPciHost' + cxx_class = "gem5::GenericArmPciHost" int_policy = Param.ArmPciIntRouting("PCI interrupt routing policy") int_base = Param.Unsigned("PCI interrupt base") - int_count = Param.Unsigned("Maximum number of interrupts used by this host") + int_count = Param.Unsigned( + "Maximum number of interrupts used by this host" + ) # This python parameter can be used in configuration scripts to turn # on/off the fdt dma-coherent flag when doing dtb autogeneration @@ -136,8 +150,8 @@ class GenericArmPciHost(GenericPciHost): def generateDeviceTree(self, state): local_state = FdtState( - addr_cells=3, size_cells=2, - cpu_cells=1, interrupt_cells=1) + addr_cells=3, size_cells=2, cpu_cells=1, interrupt_cells=1 + ) node = FdtNode("pci") @@ -155,9 +169,13 @@ class GenericArmPciHost(GenericPciHost): node.append(local_state.sizeCellsProperty()) node.append(local_state.interruptCellsProperty()) # PCI address for CPU - node.append(FdtPropertyWords("reg", - state.addrCells(self.conf_base) + - state.sizeCells(self.conf_size) )) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(self.conf_base) + + state.sizeCells(self.conf_size), + ) + ) # Ranges mapping # For now some of this is hard coded, because the PCI module does not @@ -173,10 +191,10 @@ class GenericArmPciHost(GenericPciHost): # AXI memory address range ranges += self.pciFdtAddr(space=2, addr=0) ranges += state.addrCells(self.pci_mem_base) - ranges += local_state.sizeCells(0x40000000) # Fixed size + ranges += local_state.sizeCells(0x40000000) # Fixed size node.append(FdtPropertyWords("ranges", ranges)) - if str(self.int_policy) == 'ARM_PCI_INT_DEV': + if str(self.int_policy) == "ARM_PCI_INT_DEV": gic = self._parent.unproxy(self).gic 
int_phandle = state.phandle(gic) # Interrupt mapping @@ -189,12 +207,17 @@ class GenericArmPciHost(GenericPciHost): parent_addr = gic._state.addrCells(0x0) for i in range(int(self.int_count)): - parent_interrupt = gic.interruptCells(0, - int(self.int_base) - 32 + i, 1) + parent_interrupt = gic.interruptCells( + 0, int(self.int_base) - 32 + i, 1 + ) - interrupts += self.pciFdtAddr(device=i, addr=0) + \ - child_interrupt + [int_phandle] + parent_addr + \ - parent_interrupt + interrupts += ( + self.pciFdtAddr(device=i, addr=0) + + child_interrupt + + [int_phandle] + + parent_addr + + parent_interrupt + ) node.append(FdtPropertyWords("interrupt-map", interrupts)) @@ -205,18 +228,21 @@ class GenericArmPciHost(GenericPciHost): intmask = self.pciFdtAddr(device=int_count - 1, addr=0) + [0x0] node.append(FdtPropertyWords("interrupt-map-mask", intmask)) else: - m5.fatal("Unsupported PCI interrupt policy " + - "for Device Tree generation") + m5.fatal( + "Unsupported PCI interrupt policy " + + "for Device Tree generation" + ) if self._dma_coherent: node.append(FdtProperty("dma-coherent")) yield node + class RealViewCtrl(BasicPioDevice): - type = 'RealViewCtrl' + type = "RealViewCtrl" cxx_header = "dev/arm/rv_ctrl.hh" - cxx_class = 'gem5::RealViewCtrl' + cxx_class = "gem5::RealViewCtrl" proc_id0 = Param.UInt32(0x0C000000, "Processor ID, SYS_PROCID") proc_id1 = Param.UInt32(0x0C000222, "Processor ID, SYS_PROCID1") idreg = Param.UInt32(0x00000000, "ID Register, SYS_ID") @@ -224,27 +250,31 @@ class RealViewCtrl(BasicPioDevice): def generateDeviceTree(self, state): node = FdtNode("sysreg@%x" % int(self.pio_addr)) node.appendCompatible("arm,vexpress-sysreg") - node.append(FdtPropertyWords("reg", - state.addrCells(self.pio_addr) + - state.sizeCells(0x1000) )) + node.append( + FdtPropertyWords( + "reg", state.addrCells(self.pio_addr) + state.sizeCells(0x1000) + ) + ) node.append(FdtProperty("gpio-controller")) node.append(FdtPropertyWords("#gpio-cells", [2])) node.appendPhandle(self) 
yield node + class RealViewOsc(ClockDomain): - type = 'RealViewOsc' + type = "RealViewOsc" cxx_header = "dev/arm/rv_ctrl.hh" - cxx_class = 'gem5::RealViewOsc' + cxx_class = "gem5::RealViewOsc" parent = Param.RealViewCtrl(Parent.any, "RealView controller") # TODO: We currently don't have the notion of a clock source, # which means we have to associate oscillators with a voltage # source. - voltage_domain = Param.VoltageDomain(Parent.voltage_domain, - "Voltage domain") + voltage_domain = Param.VoltageDomain( + Parent.voltage_domain, "Voltage domain" + ) # See ARM DUI 0447J (ARM Motherboard Express uATX -- V2M-P1) and # the individual core/logic tile reference manuals for details @@ -262,10 +292,13 @@ class RealViewOsc(ClockDomain): def generateDeviceTree(self, state): phandle = state.phandle(self) - node = FdtNode("osc@" + format(int(phandle), 'x')) + node = FdtNode("osc@" + format(int(phandle), "x")) node.appendCompatible("arm,vexpress-osc") - node.append(FdtPropertyWords("arm,vexpress-sysreg,func", - [0x1, int(self.device)])) + node.append( + FdtPropertyWords( + "arm,vexpress-sysreg,func", [0x1, int(self.device)] + ) + ) node.append(FdtPropertyWords("#clock-cells", [0])) minf = self.min_freq if self.min_freq.value else self.freq @@ -275,17 +308,20 @@ class RealViewOsc(ClockDomain): def to_freq(prop): return int(1.0 / prop.value) - node.append(FdtPropertyWords("freq-range", - [to_freq(minf), to_freq(maxf)])) - node.append(FdtPropertyStrings("clock-output-names", - ["oscclk" + str(phandle)])) + node.append( + FdtPropertyWords("freq-range", [to_freq(minf), to_freq(maxf)]) + ) + node.append( + FdtPropertyStrings("clock-output-names", ["oscclk" + str(phandle)]) + ) node.appendPhandle(self) yield node + class RealViewTemperatureSensor(SimObject): - type = 'RealViewTemperatureSensor' + type = "RealViewTemperatureSensor" cxx_header = "dev/arm/rv_ctrl.hh" - cxx_class = 'gem5::RealViewTemperatureSensor' + cxx_class = "gem5::RealViewTemperatureSensor" parent = 
Param.RealViewCtrl(Parent.any, "RealView controller") @@ -299,12 +335,13 @@ class RealViewTemperatureSensor(SimObject): dcc = Param.UInt8("Daughterboard Configuration Controller") device = Param.UInt8("Device ID") + class VExpressMCC(SubSystem): """ARM V2M-P1 Motherboard Configuration Controller -This subsystem describes a subset of the devices that sit behind the -motherboard configuration controller on the the ARM Motherboard -Express (V2M-P1) motherboard. See ARM DUI 0447J for details. + This subsystem describes a subset of the devices that sit behind the + motherboard configuration controller on the the ARM Motherboard + Express (V2M-P1) motherboard. See ARM DUI 0447J for details. """ class Osc(RealViewOsc): @@ -314,11 +351,13 @@ Express (V2M-P1) motherboard. See ARM DUI 0447J for details. site, position, dcc = (0, 0, 0) osc_mcc = Osc(device=0, min_freq="25MHz", max_freq="60MHz", freq="50MHz") - osc_clcd = Osc(device=1, min_freq="23.75MHz", max_freq="63.5MHz", - freq="23.75MHz") + osc_clcd = Osc( + device=1, min_freq="23.75MHz", max_freq="63.5MHz", freq="23.75MHz" + ) osc_peripheral = Osc(device=2, freq="24MHz") - osc_system_bus = Osc(device=4, min_freq="2MHz", max_freq="230MHz", - freq="24MHz") + osc_system_bus = Osc( + device=4, min_freq="2MHz", max_freq="230MHz", freq="24MHz" + ) # See Table 4.19 in ARM DUI 0447J (Motherboard Express uATX TRM). temp_crtl = Temperature(device=0) @@ -337,12 +376,13 @@ Express (V2M-P1) motherboard. See ARM DUI 0447J for details. yield node + class CoreTile2A15DCC(SubSystem): """ARM CoreTile Express A15x2 Daughterboard Configuration Controller -This subsystem describes a subset of the devices that sit behind the -daughterboard configuration controller on a CoreTile Express A15x2. See -ARM DUI 0604E for details. + This subsystem describes a subset of the devices that sit behind the + daughterboard configuration controller on a CoreTile Express A15x2. See + ARM DUI 0604E for details. 
""" class Osc(RealViewOsc): @@ -351,8 +391,9 @@ ARM DUI 0604E for details. # See Table 2.8 in ARM DUI 0604E (CoreTile Express A15x2 TRM) osc_cpu = Osc(device=0, min_freq="20MHz", max_freq="60MHz", freq="60MHz") osc_hsbm = Osc(device=4, min_freq="20MHz", max_freq="40MHz", freq="40MHz") - osc_pxl = Osc(device=5, min_freq="23.76MHz", max_freq="165MHz", - freq="23.75MHz") + osc_pxl = Osc( + device=5, min_freq="23.76MHz", max_freq="165MHz", freq="23.75MHz" + ) osc_smb = Osc(device=6, min_freq="20MHz", max_freq="50MHz", freq="50MHz") osc_sys = Osc(device=7, min_freq="20MHz", max_freq="60MHz", freq="60MHz") osc_ddr = Osc(device=8, freq="40MHz") @@ -370,13 +411,42 @@ ARM DUI 0604E for details. yield node + +class SysSecCtrl(BasicPioDevice): + """ + System Security Control registers. Taken from: + Arm Neoverse N1 System Development Platform - TRM - Version 0.0 + Document ID: 101489_0000_02_en + """ + + type = "SysSecCtrl" + cxx_header = "dev/arm/ssc.hh" + cxx_class = "gem5::SysSecCtrl" + + ssc_dbgcfg_stat = Param.Unsigned( + 0x00010000, "Debug authentication configuration status" + ) + ssc_version = Param.Unsigned(0x100417B0, "Version register") + ssc_pid0 = Param.Unsigned(0x44, "Peripheral ID0 register") + ssc_pid1 = Param.Unsigned(0xB8, "Peripheral ID1 register") + ssc_pid2 = Param.Unsigned(0xB, "Peripheral ID2 register") + ssc_pid4 = Param.Unsigned(0x4, "Peripheral ID4 register") + + compid0 = Param.Unsigned(0x0D, "Component ID0 register") + compid1 = Param.Unsigned(0xF0, "Component ID1 register") + compid2 = Param.Unsigned(0x5, "Component ID2 register") + compid3 = Param.Unsigned(0xB1, "Component ID3 register") + + class AmbaFake(AmbaPioDevice): - type = 'AmbaFake' + type = "AmbaFake" cxx_header = "dev/arm/amba_fake.hh" - cxx_class = 'gem5::AmbaFake' - ignore_access = Param.Bool(False, - "Ignore reads/writes to this device, (e.g. 
IsaFake + AMBA)") - amba_id = 0; + cxx_class = "gem5::AmbaFake" + ignore_access = Param.Bool( + False, "Ignore reads/writes to this device, (e.g. IsaFake + AMBA)" + ) + amba_id = 0 + # Simple fixed-rate clock source. Intended to be instantiated in Platform # instances for definition of clock bindings on DTB auto-generation @@ -387,114 +457,135 @@ class FixedClock(SrcClockDomain): def generateDeviceTree(self, state): if len(self.clock) > 1: - fatal('FixedClock configured with multiple frequencies') - node = FdtNode('clock{}'.format(FixedClock._index)) - node.appendCompatible('fixed-clock') - node.append(FdtPropertyWords('#clock-cells', 0)) - node.append(FdtPropertyWords('clock-frequency', - self.clock[0].frequency)) + fatal("FixedClock configured with multiple frequencies") + node = FdtNode("clock{}".format(FixedClock._index)) + node.appendCompatible("fixed-clock") + node.append(FdtPropertyWords("#clock-cells", 0)) + node.append( + FdtPropertyWords("clock-frequency", self.clock[0].frequency) + ) node.appendPhandle(self) FixedClock._index += 1 yield node + class Pl011(Uart): - type = 'Pl011' + type = "Pl011" cxx_header = "dev/arm/pl011.hh" - cxx_class = 'gem5::Pl011' + cxx_class = "gem5::Pl011" interrupt = Param.ArmInterruptPin("Interrupt that connects to GIC") - end_on_eot = Param.Bool(False, - "End the simulation when a EOT is received on the UART") - int_delay = Param.Latency("100ns", - "Time between action and interrupt generation by UART") + end_on_eot = Param.Bool( + False, "End the simulation when a EOT is received on the UART" + ) + int_delay = Param.Latency( + "100ns", "Time between action and interrupt generation by UART" + ) def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, 'uart', self.pio_addr, - 0x1000, [ self.interrupt ]) + node = self.generateBasicPioDeviceNode( + state, "uart", self.pio_addr, 0x1000, [self.interrupt] + ) node.appendCompatible(["arm,pl011", "arm,primecell"]) # Hardcoded reference to the realview 
platform clocks, because the # clk_domain can only store one clock (i.e. it is not a VectorParam) realview = self._parent.unproxy(self) - node.append(FdtPropertyWords("clocks", - [state.phandle(realview.mcc.osc_peripheral), - state.phandle(realview.dcc.osc_smb)])) + node.append( + FdtPropertyWords( + "clocks", + [ + state.phandle(realview.mcc.osc_peripheral), + state.phandle(realview.dcc.osc_smb), + ], + ) + ) node.append(FdtPropertyStrings("clock-names", ["uartclk", "apb_pclk"])) yield node + class Sp804(AmbaPioDevice): - type = 'Sp804' + type = "Sp804" cxx_header = "dev/arm/timer_sp804.hh" - cxx_class = 'gem5::Sp804' + cxx_class = "gem5::Sp804" int0 = Param.ArmSPI("Interrupt that connects to GIC") - clock0 = Param.Clock('1MHz', "Clock speed of the input") + clock0 = Param.Clock("1MHz", "Clock speed of the input") int1 = Param.ArmSPI("Interrupt that connects to GIC") - clock1 = Param.Clock('1MHz', "Clock speed of the input") + clock1 = Param.Clock("1MHz", "Clock speed of the input") amba_id = 0x00141804 + class Sp805(AmbaIntDevice): """ -Arm Watchdog Module (SP805) -Reference: - Arm Watchdog Module (SP805) - Technical Reference Manual - rev. r1p0 - Doc. ID: ARM DDI 0270B + Arm Watchdog Module (SP805) + Reference: + Arm Watchdog Module (SP805) - Technical Reference Manual - rev. r1p0 + Doc. 
ID: ARM DDI 0270B """ - type = 'Sp805' - cxx_header = 'dev/arm/watchdog_sp805.hh' - cxx_class = 'gem5::Sp805' + type = "Sp805" + cxx_header = "dev/arm/watchdog_sp805.hh" + cxx_class = "gem5::Sp805" amba_id = 0x00141805 def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, 'watchdog', - self.pio_addr, 0x1000, [ self.interrupt ]) - node.appendCompatible(['arm,sp805', 'arm,primecell']) + node = self.generateBasicPioDeviceNode( + state, "watchdog", self.pio_addr, 0x1000, [self.interrupt] + ) + node.appendCompatible(["arm,sp805", "arm,primecell"]) clocks = [state.phandle(self.clk_domain.unproxy(self))] - clock_names = ['wdogclk'] + clock_names = ["wdogclk"] platform = self._parent.unproxy(self) if self in platform._off_chip_devices(): clocks.append(state.phandle(platform.dcc.osc_smb)) - clock_names.append('apb_pclk') - node.append(FdtPropertyWords('clocks', clocks)) - node.append(FdtPropertyStrings('clock-names', clock_names)) + clock_names.append("apb_pclk") + node.append(FdtPropertyWords("clocks", clocks)) + node.append(FdtPropertyStrings("clock-names", clock_names)) yield node + class GenericWatchdog(PioDevice): - type = 'GenericWatchdog' - cxx_header = 'dev/arm/watchdog_generic.hh' - cxx_class = 'gem5::GenericWatchdog' + type = "GenericWatchdog" + cxx_header = "dev/arm/watchdog_generic.hh" + cxx_class = "gem5::GenericWatchdog" refresh_start = Param.Addr("Start address for the refresh frame") control_start = Param.Addr("Start address for the control frame") - pio_latency = Param.Latency('10ns', "Delay for PIO r/w") + pio_latency = Param.Latency("10ns", "Delay for PIO r/w") ws0 = Param.ArmInterruptPin("WS0 Signal") ws1 = Param.ArmInterruptPin("WS1 Signal") - system_counter = Param.SystemCounter(Parent.any, + system_counter = Param.SystemCounter( + Parent.any, "The Watchdog uses the Generic Timer system counter as the timebase " - "against which the decision to trigger an interrupt is made.") + "against which the decision to trigger an 
interrupt is made.", + ) + class CpuLocalTimer(BasicPioDevice): - type = 'CpuLocalTimer' + type = "CpuLocalTimer" cxx_header = "dev/arm/timer_cpulocal.hh" - cxx_class = 'gem5::CpuLocalTimer' + cxx_class = "gem5::CpuLocalTimer" int_timer = Param.ArmPPI("Interrrupt used per-cpu to GIC") int_watchdog = Param.ArmPPI("Interrupt for per-cpu watchdog to GIC") + class PL031(AmbaIntDevice): - type = 'PL031' + type = "PL031" cxx_header = "dev/arm/rtc_pl031.hh" - cxx_class = 'gem5::PL031' - time = Param.Time('01/01/2009', - "System time to use ('Now' for actual time)") + cxx_class = "gem5::PL031" + time = Param.Time( + "01/01/2009", "System time to use ('Now' for actual time)" + ) amba_id = 0x00041031 def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, 'rtc', self.pio_addr, - 0x1000, [ self.interrupt ]) + node = self.generateBasicPioDeviceNode( + state, "rtc", self.pio_addr, 0x1000, [self.interrupt] + ) node.appendCompatible(["arm,pl031", "arm,primecell"]) clock = state.phandle(self.clk_domain.unproxy(self)) @@ -503,17 +594,19 @@ class PL031(AmbaIntDevice): yield node + class Pl050(AmbaIntDevice): - type = 'Pl050' + type = "Pl050" cxx_header = "dev/arm/kmi.hh" - cxx_class = 'gem5::Pl050' + cxx_class = "gem5::Pl050" amba_id = 0x00141050 ps2 = Param.PS2Device("PS/2 device") def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, 'kmi', self.pio_addr, - 0x1000, [ self.interrupt ]) + node = self.generateBasicPioDeviceNode( + state, "kmi", self.pio_addr, 0x1000, [self.interrupt] + ) node.appendCompatible(["arm,pl050", "arm,primecell"]) clock = state.phandle(self.clk_domain.unproxy(self)) @@ -521,39 +614,49 @@ class Pl050(AmbaIntDevice): yield node + class Pl111(AmbaDmaDevice): - type = 'Pl111' + type = "Pl111" cxx_header = "dev/arm/pl111.hh" - cxx_class = 'gem5::Pl111' - pixel_clock = Param.Clock('24MHz', "Pixel clock") - vnc = Param.VncInput(Parent.any, - "Vnc server for remote frame buffer display") + cxx_class = 
"gem5::Pl111" + pixel_clock = Param.Clock("24MHz", "Pixel clock") + vnc = Param.VncInput( + Parent.any, "Vnc server for remote frame buffer display" + ) amba_id = 0x00141111 - enable_capture = Param.Bool(True, - "capture frame to system.framebuffer.bmp") + enable_capture = Param.Bool( + True, "capture frame to system.framebuffer.bmp" + ) + class HDLcd(AmbaDmaDevice): - type = 'HDLcd' + type = "HDLcd" cxx_header = "dev/arm/hdlcd.hh" - cxx_class = 'gem5::HDLcd' - vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer " - "display") + cxx_class = "gem5::HDLcd" + vnc = Param.VncInput( + Parent.any, "Vnc server for remote frame buffer " "display" + ) amba_id = 0x00141000 - workaround_swap_rb = Param.Bool(False, "Workaround incorrect color " - "selector order in some kernels") - workaround_dma_line_count = Param.Bool(True, "Workaround incorrect " - "DMA line count (off by 1)") - enable_capture = Param.Bool(True, "capture frame to " - "system.framebuffer.{extension}") - frame_format = Param.ImageFormat("Auto", - "image format of the captured frame") + workaround_swap_rb = Param.Bool( + False, "Workaround incorrect color " "selector order in some kernels" + ) + workaround_dma_line_count = Param.Bool( + True, "Workaround incorrect " "DMA line count (off by 1)" + ) + enable_capture = Param.Bool( + True, "capture frame to " "system.framebuffer.{extension}" + ) + frame_format = Param.ImageFormat( + "Auto", "image format of the captured frame" + ) pixel_buffer_size = Param.MemorySize32("2KiB", "Size of address range") pxl_clk = Param.ClockDomain("Pixel clock source") pixel_chunk = Param.Unsigned(32, "Number of pixels to handle in one batch") - virt_refresh_rate = Param.Frequency("20Hz", "Frame refresh rate " - "in KVM mode") + virt_refresh_rate = Param.Frequency( + "20Hz", "Frame refresh rate " "in KVM mode" + ) _status = "ok" encoder = Param.Display(Display1080p(), "Display encoder") @@ -569,24 +672,32 @@ class HDLcd(AmbaDmaDevice): encoder_endpoint = 
self.encoder.endpointNode() # Endpoint subnode - endpoint_node.append(FdtPropertyWords("remote-endpoint", - [ state.phandle(self.encoder.endpointPhandle()) ])) - encoder_endpoint.append(FdtPropertyWords("remote-endpoint", - [ state.phandle(self.endpointPhandle()) ])) + endpoint_node.append( + FdtPropertyWords( + "remote-endpoint", + [state.phandle(self.encoder.endpointPhandle())], + ) + ) + encoder_endpoint.append( + FdtPropertyWords( + "remote-endpoint", [state.phandle(self.endpointPhandle())] + ) + ) yield encoder_node port_node = FdtNode("port") port_node.append(endpoint_node) - node = self.generateBasicPioDeviceNode(state, 'hdlcd', - self.pio_addr, 0x1000, [ self.interrupt ]) + node = self.generateBasicPioDeviceNode( + state, "hdlcd", self.pio_addr, 0x1000, [self.interrupt] + ) node.appendCompatible(["arm,hdlcd"]) node.append(FdtPropertyWords("clocks", state.phandle(self.pxl_clk))) node.append(FdtPropertyStrings("clock-names", ["pxlclk"])) - node.append(FdtPropertyStrings("status", [ self._status ])) + node.append(FdtPropertyStrings("status", [self._status])) self.addIommuProperty(state, node) @@ -594,6 +705,7 @@ class HDLcd(AmbaDmaDevice): yield node + class ParentMem(SimpleMemory): """ This is a base abstract class for child node generation @@ -605,6 +717,7 @@ class ParentMem(SimpleMemory): to the application model, which is registering the generator helper via the ParentMem interface. 
""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._generators = [] @@ -624,6 +737,7 @@ class ParentMem(SimpleMemory): for subnode_gen in self._generators: node.append(subnode_gen(state)) + class MmioSRAM(ParentMem): def __init__(self, *args, **kwargs): super().__init__(**kwargs) @@ -631,56 +745,76 @@ class MmioSRAM(ParentMem): def generateDeviceTree(self, state): node = FdtNode("sram@%x" % int(self.range.start)) node.appendCompatible(["mmio-sram"]) - node.append(FdtPropertyWords("reg", - state.addrCells(self.range.start) + - state.sizeCells(self.range.size()) )) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(self.range.start) + + state.sizeCells(self.range.size()), + ) + ) local_state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1) node.append(local_state.addrCellsProperty()) node.append(local_state.sizeCellsProperty()) - node.append(FdtPropertyWords("ranges", - local_state.addrCells(0) + - state.addrCells(self.range.start) + - state.sizeCells(self.range.size()) )) + node.append( + FdtPropertyWords( + "ranges", + local_state.addrCells(0) + + state.addrCells(self.range.start) + + state.sizeCells(self.range.size()), + ) + ) self.generateSubnodes(node, state) yield node + class FVPBasePwrCtrl(BasicPioDevice): """ -Based on Fast Models Base_PowerController v11.8 -Reference: - Fast Models Reference Manual - Section 7.7.2 - Version 11.8 - Document ID: 100964_1180_00_en + Based on Fast Models Base_PowerController v11.8 + Reference: + Fast Models Reference Manual - Section 7.7.2 - Version 11.8 + Document ID: 100964_1180_00_en """ - type = 'FVPBasePwrCtrl' - cxx_header = 'dev/arm/fvp_base_pwr_ctrl.hh' - cxx_class = 'gem5::FVPBasePwrCtrl' + type = "FVPBasePwrCtrl" + cxx_header = "dev/arm/fvp_base_pwr_ctrl.hh" + cxx_class = "gem5::FVPBasePwrCtrl" + class GenericMHU(MHU): lowp_scp2ap = Scp2ApDoorbell( - set_address=0x10020008, clear_address=0x10020010, - interrupt=ArmSPI(num=68)) + set_address=0x10020008, + 
clear_address=0x10020010, + interrupt=ArmSPI(num=68), + ) highp_scp2ap = Scp2ApDoorbell( - set_address=0x10020028, clear_address=0x10020030, - interrupt=ArmSPI(num=67)) + set_address=0x10020028, + clear_address=0x10020030, + interrupt=ArmSPI(num=67), + ) sec_scp2ap = Scp2ApDoorbell( - set_address=0x10020208, clear_address=0x10020210, - interrupt=ArmSPI(num=69)) + set_address=0x10020208, + clear_address=0x10020210, + interrupt=ArmSPI(num=69), + ) lowp_ap2scp = Ap2ScpDoorbell( - set_address=0x10020108, clear_address=0x10020110) + set_address=0x10020108, clear_address=0x10020110 + ) highp_ap2scp = Ap2ScpDoorbell( - set_address=0x10020128, clear_address=0x10020130) + set_address=0x10020128, clear_address=0x10020130 + ) sec_ap2scp = Ap2ScpDoorbell( - set_address=0x10020308, clear_address=0x10020310) + set_address=0x10020308, clear_address=0x10020310 + ) + class RealView(Platform): - type = 'RealView' + type = "RealView" cxx_header = "dev/arm/realview.hh" - cxx_class = 'gem5::RealView' - _mem_regions = [ AddrRange(0, size='256MiB') ] + cxx_class = "gem5::RealView" + _mem_regions = [AddrRange(0, size="256MiB")] _num_pci_dev = 0 def _on_chip_devices(self): @@ -757,9 +891,10 @@ class RealView(Platform): cur_sys.workload.cpu_release_addr = cur_sys.workload.dtb_addr - 8 def generateDeviceTree(self, state): - node = FdtNode("/") # Things in this module need to end up in the root - node.append(FdtPropertyWords("interrupt-parent", - state.phandle(self.gic))) + node = FdtNode("/") # Things in this module need to end up in the root + node.append( + FdtPropertyWords("interrupt-parent", state.phandle(self.gic)) + ) for subnode in self.recurseDeviceTree(state): node.append(subnode) @@ -769,108 +904,142 @@ class RealView(Platform): def annotateCpuDeviceNode(self, cpu, state): system = self.system.unproxy(self) if system._have_psci: - cpu.append(FdtPropertyStrings('enable-method', 'psci')) + cpu.append(FdtPropertyStrings("enable-method", "psci")) else: 
cpu.append(FdtPropertyStrings("enable-method", "spin-table")) # The kernel writes the entry addres of secondary CPUs to this # address before waking up secondary CPUs. # The gem5 bootloader then makes secondary CPUs jump to it. - cpu.append(FdtPropertyWords("cpu-release-addr", \ - state.addrCells(system.workload.cpu_release_addr))) + cpu.append( + FdtPropertyWords( + "cpu-release-addr", + state.addrCells(system.workload.cpu_release_addr), + ) + ) + class VExpress_EMM(RealView): - _mem_regions = [ AddrRange('2GiB', size='2GiB') ] + _mem_regions = [AddrRange("2GiB", size="2GiB")] # Ranges based on excluding what is part of on-chip I/O (gic, # a9scu) - _off_chip_ranges = [AddrRange(0x2F000000, size='16MiB'), - AddrRange(0x30000000, size='256MiB'), - AddrRange(0x40000000, size='512MiB'), - AddrRange(0x18000000, size='64MiB'), - AddrRange(0x1C000000, size='64MiB')] + _off_chip_ranges = [ + AddrRange(0x2F000000, size="16MiB"), + AddrRange(0x30000000, size="256MiB"), + AddrRange(0x40000000, size="512MiB"), + AddrRange(0x18000000, size="64MiB"), + AddrRange(0x1C000000, size="64MiB"), + ] # Platform control device (off-chip) - realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000, - idreg=0x02250000, pio_addr=0x1C010000) + realview_io = RealViewCtrl( + proc_id0=0x14000000, + proc_id1=0x14000000, + idreg=0x02250000, + pio_addr=0x1C010000, + ) mcc = VExpressMCC() dcc = CoreTile2A15DCC() ### On-chip devices ### gic = Gic400(dist_addr=0x2C001000, cpu_addr=0x2C002000) - vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, maint_int=25) + vgic = VGic(vcpu_addr=0x2C006000, hv_addr=0x2C004000, maint_int=25) - local_cpu_timer = CpuLocalTimer(int_timer=ArmPPI(num=29), - int_watchdog=ArmPPI(num=30), - pio_addr=0x2C080000) + local_cpu_timer = CpuLocalTimer( + int_timer=ArmPPI(num=29), + int_watchdog=ArmPPI(num=30), + pio_addr=0x2C080000, + ) - hdlcd = HDLcd(pxl_clk=dcc.osc_pxl, - pio_addr=0x2b000000, interrupt=ArmSPI(num=117), - workaround_swap_rb=True) + hdlcd = HDLcd( 
+ pxl_clk=dcc.osc_pxl, + pio_addr=0x2B000000, + interrupt=ArmSPI(num=117), + workaround_swap_rb=True, + ) def _on_chip_devices(self): - devices = [ - self.gic, self.vgic, - self.local_cpu_timer - ] + devices = [self.gic, self.vgic, self.local_cpu_timer] if hasattr(self, "gicv2m"): devices.append(self.gicv2m) devices.append(self.hdlcd) return devices def _on_chip_memory(self): - memories = [ - self.bootmem, - ] + memories = [self.bootmem] return memories ### Off-chip devices ### - uart = Pl011(pio_addr=0x1c090000, interrupt=ArmSPI(num=37)) + uart = Pl011(pio_addr=0x1C090000, interrupt=ArmSPI(num=37)) pci_host = GenericPciHost( - conf_base=0x30000000, conf_size='256MiB', conf_device_bits=16, - pci_pio_base=0) + conf_base=0x30000000, + conf_size="256MiB", + conf_device_bits=16, + pci_pio_base=0, + ) sys_counter = SystemCounter() generic_timer = GenericTimer( - int_el3_phys=ArmPPI(num=29, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el1_phys=ArmPPI(num=30, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el1_virt=ArmPPI(num=27, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_ns_phys=ArmPPI(num=26, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_ns_virt=ArmPPI(num=28, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_s_phys=ArmPPI(num=20, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_s_virt=ArmPPI(num=19, int_type='IRQ_TYPE_LEVEL_LOW')) + int_el3_phys=ArmPPI(num=29, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el1_phys=ArmPPI(num=30, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el1_virt=ArmPPI(num=27, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_ns_phys=ArmPPI(num=26, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_ns_virt=ArmPPI(num=28, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_s_phys=ArmPPI(num=20, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_s_virt=ArmPPI(num=19, int_type="IRQ_TYPE_LEVEL_LOW"), + ) - timer0 = Sp804(int0=ArmSPI(num=34), int1=ArmSPI(num=34), - pio_addr=0x1C110000, clock0='1MHz', clock1='1MHz') - timer1 = Sp804(int0=ArmSPI(num=35), int1=ArmSPI(num=35), - pio_addr=0x1C120000, clock0='1MHz', clock1='1MHz') - clcd = 
Pl111(pio_addr=0x1c1f0000, interrupt=ArmSPI(num=46)) - kmi0 = Pl050(pio_addr=0x1c060000, interrupt=ArmSPI(num=44), - ps2=PS2Keyboard()) - kmi1 = Pl050(pio_addr=0x1c070000, interrupt=ArmSPI(num=45), - ps2=PS2TouchKit()) - cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=0, pci_bus=2, - io_shift = 2, ctrl_offset = 2, Command = 0x1) - cf_ctrl.BAR0 = PciLegacyIoBar(addr='0x1C1A0000', size='256B') - cf_ctrl.BAR1 = PciLegacyIoBar(addr='0x1C1A0100', size='4096B') + timer0 = Sp804( + int0=ArmSPI(num=34), + int1=ArmSPI(num=34), + pio_addr=0x1C110000, + clock0="1MHz", + clock1="1MHz", + ) + timer1 = Sp804( + int0=ArmSPI(num=35), + int1=ArmSPI(num=35), + pio_addr=0x1C120000, + clock0="1MHz", + clock1="1MHz", + ) + clcd = Pl111(pio_addr=0x1C1F0000, interrupt=ArmSPI(num=46)) + kmi0 = Pl050( + pio_addr=0x1C060000, interrupt=ArmSPI(num=44), ps2=PS2Keyboard() + ) + kmi1 = Pl050( + pio_addr=0x1C070000, interrupt=ArmSPI(num=45), ps2=PS2TouchKit() + ) + cf_ctrl = IdeController( + disks=[], + pci_func=0, + pci_dev=0, + pci_bus=2, + io_shift=2, + ctrl_offset=2, + Command=0x1, + ) + cf_ctrl.BAR0 = PciLegacyIoBar(addr="0x1C1A0000", size="256B") + cf_ctrl.BAR1 = PciLegacyIoBar(addr="0x1C1A0100", size="4096B") - bootmem = SimpleMemory(range = AddrRange('64MiB'), - conf_table_reported = False) - vram = SimpleMemory(range = AddrRange(0x18000000, size='32MiB'), - conf_table_reported = False) - rtc = PL031(pio_addr=0x1C170000, interrupt=ArmSPI(num=36)) + bootmem = SimpleMemory(range=AddrRange("64MiB"), conf_table_reported=False) + vram = SimpleMemory( + range=AddrRange(0x18000000, size="32MiB"), conf_table_reported=False + ) + rtc = PL031(pio_addr=0x1C170000, interrupt=ArmSPI(num=36)) - l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xfff) - uart1_fake = AmbaFake(pio_addr=0x1C0A0000) - uart2_fake = AmbaFake(pio_addr=0x1C0B0000) - uart3_fake = AmbaFake(pio_addr=0x1C0C0000) - sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True) - watchdog_fake = AmbaFake(pio_addr=0x1C0F0000) - 
aaci_fake = AmbaFake(pio_addr=0x1C040000) - lan_fake = IsaFake(pio_addr=0x1A000000, pio_size=0xffff) - usb_fake = IsaFake(pio_addr=0x1B000000, pio_size=0x1ffff) - mmc_fake = AmbaFake(pio_addr=0x1c050000) - energy_ctrl = EnergyCtrl(pio_addr=0x1c080000) + l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xFFF) + uart1_fake = AmbaFake(pio_addr=0x1C0A0000) + uart2_fake = AmbaFake(pio_addr=0x1C0B0000) + uart3_fake = AmbaFake(pio_addr=0x1C0C0000) + sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True) + watchdog_fake = AmbaFake(pio_addr=0x1C0F0000) + aaci_fake = AmbaFake(pio_addr=0x1C040000) + lan_fake = IsaFake(pio_addr=0x1A000000, pio_size=0xFFFF) + usb_fake = IsaFake(pio_addr=0x1B000000, pio_size=0x1FFFF) + mmc_fake = AmbaFake(pio_addr=0x1C050000) + energy_ctrl = EnergyCtrl(pio_addr=0x1C080000) def _off_chip_devices(self): devices = [ @@ -906,255 +1075,293 @@ class VExpress_EMM(RealView): # Attach any PCI devices that are supported def attachPciDevices(self): - self.ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0, - InterruptLine=1, InterruptPin=1) - self.ide = IdeController(disks = [], pci_bus=0, pci_dev=1, pci_func=0, - InterruptLine=2, InterruptPin=2) + self.ethernet = IGbE_e1000( + pci_bus=0, pci_dev=0, pci_func=0, InterruptLine=1, InterruptPin=1 + ) + self.ide = IdeController( + disks=[], + pci_bus=0, + pci_dev=1, + pci_func=0, + InterruptLine=2, + InterruptPin=2, + ) def enableMSIX(self): - self.gic = Gic400(dist_addr=0x2C001000, cpu_addr=0x2C002000, - it_lines=512) + self.gic = Gic400( + dist_addr=0x2C001000, cpu_addr=0x2C002000, it_lines=512 + ) self.gicv2m = Gicv2m() - self.gicv2m.frames = [Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2C1C0000)] + self.gicv2m.frames = [ + Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2C1C0000) + ] def setupBootLoader(self, cur_sys, loc, boot_loader=None): if boot_loader is None: - boot_loader = loc('boot_emm.arm') + boot_loader = loc("boot_emm.arm") super().setupBootLoader(cur_sys, boot_loader, 0x8000000, 
0x80000000) + class VExpress_EMM64(VExpress_EMM): # Three memory regions are specified totalling 512GiB - _mem_regions = [ AddrRange('2GiB', size='2GiB'), - AddrRange('34GiB', size='30GiB'), - AddrRange('512GiB', size='480GiB') ] + _mem_regions = [ + AddrRange("2GiB", size="2GiB"), + AddrRange("34GiB", size="30GiB"), + AddrRange("512GiB", size="480GiB"), + ] pci_host = GenericPciHost( - conf_base=0x30000000, conf_size='256MiB', conf_device_bits=12, - pci_pio_base=0x2f000000) + conf_base=0x30000000, + conf_size="256MiB", + conf_device_bits=12, + pci_pio_base=0x2F000000, + ) def setupBootLoader(self, cur_sys, loc, boot_loader=None): if boot_loader is None: - boot_loader = loc('boot_emm.arm64') - RealView.setupBootLoader(self, cur_sys, boot_loader, - 0x8000000, 0x80000000) + boot_loader = loc("boot_emm.arm64") + RealView.setupBootLoader( + self, cur_sys, boot_loader, 0x8000000, 0x80000000 + ) + class VExpress_GEM5_Base(RealView): """ -The VExpress gem5 memory map is loosely based on a modified -Versatile Express RS1 memory map. + The VExpress gem5 memory map is loosely based on a modified + Versatile Express RS1 memory map. -The gem5 platform has been designed to implement a subset of the -original Versatile Express RS1 memory map. Off-chip peripherals should, -when possible, adhere to the Versatile Express memory map. Non-PCI -off-chip devices that are gem5-specific should live in the CS5 memory -space to avoid conflicts with existing devices that we might want to -model in the future. Such devices should normally have interrupts in -the gem5-specific SPI range. + The gem5 platform has been designed to implement a subset of the + original Versatile Express RS1 memory map. Off-chip peripherals should, + when possible, adhere to the Versatile Express memory map. Non-PCI + off-chip devices that are gem5-specific should live in the CS5 memory + space to avoid conflicts with existing devices that we might want to + model in the future. 
Such devices should normally have interrupts in + the gem5-specific SPI range. -On-chip peripherals are loosely modeled after the ARM CoreTile Express -A15x2 memory and interrupt map. In particular, the GIC and -Generic Timer have the same interrupt lines and base addresses. Other -on-chip devices are gem5 specific. + On-chip peripherals are loosely modeled after the ARM CoreTile Express + A15x2 memory and interrupt map. In particular, the GIC and + Generic Timer have the same interrupt lines and base addresses. Other + on-chip devices are gem5 specific. -Unlike the original Versatile Express RS2 extended platform, gem5 implements a -large contigious DRAM space, without aliases or holes, starting at the -2GiB boundary. This means that PCI memory is limited to 1GiB. + Unlike the original Versatile Express RS2 extended platform, gem5 implements a + large contigious DRAM space, without aliases or holes, starting at the + 2GiB boundary. This means that PCI memory is limited to 1GiB. -References: + References: - Technical Reference Manuals: - Arm Motherboard Express uATX (V2M-P1) - ARM DUI 0447J - Arm CoreTile Express A15x2 (V2P-CA15) - ARM DUI 0604E + Technical Reference Manuals: + Arm Motherboard Express uATX (V2M-P1) - ARM DUI 0447J + Arm CoreTile Express A15x2 (V2P-CA15) - ARM DUI 0604E - Official Linux device tree specifications: - V2M-P1 - arch/arm/boot/dts/vexpress-v2m-rs1.dtsi - V2P-CA15 - arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts + Official Linux device tree specifications: + V2M-P1 - arch/arm/boot/dts/vexpress-v2m-rs1.dtsi + V2P-CA15 - arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts + + Memory map: + Arm CoreTile Express A15x2 (V2P-CA15) - ARM DUI 0604E + Daughterboard (global) + Section 3.2.1 - Table 3-1 - Daughterboard memory map + On-chip + Section 3.2.3 - Table 3-2 - Cortex-A15 MPCore on-chip peripheral + memory map + + Interrupts: + Armv8-A Foundation Platform - User Guide - Version 11.8 + Document ID: 100961_1180_00_en Memory map: - Arm CoreTile Express 
A15x2 (V2P-CA15) - ARM DUI 0604E - Daughterboard (global) - Section 3.2.1 - Table 3-1 - Daughterboard memory map - On-chip - Section 3.2.3 - Table 3-2 - Cortex-A15 MPCore on-chip peripheral - memory map + 0x00000000-0x03ffffff: Boot memory (CS0) + 0x04000000-0x07ffffff: Trusted Memory/Reserved + 0x04000000-0x0403FFFF: 256kB Trusted SRAM + 0x06000000-0x07ffffff: 32MB Trusted DRAM + 0x08000000-0x0bffffff: NOR FLASH0 (CS0 alias) + 0x0c000000-0x0fffffff: NOR FLASH1 (Off-chip, CS4) + 0x10000000-0x13ffffff: gem5-specific peripherals (Off-chip, CS5) + 0x10000000-0x1000ffff: gem5 energy controller + 0x10010000-0x1001ffff: gem5 pseudo-ops + 0x10020000-0x1002ffff: gem5 MHU + + 0x14000000-0x17ffffff: Reserved (Off-chip, PSRAM, CS1) + + 0x18000000-0x1bffffff: Off-chip, Peripherals, CS2 + 0x18000000-0x19ffffff: VRAM + 0x1a000000-0x1bffffff: Reserved + + 0x1c000000-0x1fffffff: Peripheral block 1 (Off-chip, CS3): + 0x1c010000-0x1c01ffff: realview_io (VE system control regs.) + 0x1c060000-0x1c06ffff: KMI0 (keyboard) + 0x1c070000-0x1c07ffff: KMI1 (mouse) + 0x1c090000-0x1c09ffff: UART0 + 0x1c0a0000-0x1c0affff: UART1 + 0x1c0b0000-0x1c0bffff: UART2 + 0x1c0c0000-0x1c0cffff: UART3 + 0x1c0f0000-0x1c0fffff: Watchdog (SP805) + 0x1c130000-0x1c13ffff: VirtIO (gem5/FM extension) + 0x1c140000-0x1c14ffff: VirtIO (gem5/FM extension) + 0x1c170000-0x1c17ffff: RTC + + 0x20000000-0x3fffffff: On-chip peripherals: + 0x2a430000-0x2a43ffff: System Counter (control) + 0x2a490000-0x2a49ffff: Trusted Watchdog (SP805) + 0x2a800000-0x2a800fff: System Counter (read) + 0x2a810000-0x2a810fff: System Timer (control) + + 0x2a820000-0x2a820fff: System Timer (frame 0) + 0x2a830000-0x2a830fff: System Timer (frame 1) + + 0x2b000000-0x2b00ffff: HDLCD + + 0x2b060000-0x2b060fff: System Watchdog (SP805) + + 0x2b400000-0x2b41ffff: SMMUv3 + + 0x2c001000-0x2c001fff: GIC (distributor) + 0x2c002000-0x2c003fff: GIC (CPU interface) + 0x2c004000-0x2c005fff: vGIC (HV) + 0x2c006000-0x2c007fff: vGIC (VCPU) + 0x2c1c0000-0x2c1cffff: 
GICv2m MSI frame 0 + + 0x2d000000-0x2d00ffff: GPU (reserved) + + 0x2e000000-0x2e007fff: Non-trusted SRAM + + 0x2f000000-0x2fffffff: PCI IO space + 0x30000000-0x3fffffff: PCI config space + + 0x40000000-0x7fffffff: Ext. AXI: Used as PCI memory + + 0x80000000-X: DRAM Interrupts: - Armv8-A Foundation Platform - User Guide - Version 11.8 - Document ID: 100961_1180_00_en - -Memory map: - 0x00000000-0x03ffffff: Boot memory (CS0) - 0x04000000-0x07ffffff: Trusted Memory/Reserved - 0x04000000-0x0403FFFF: 256kB Trusted SRAM - 0x06000000-0x07ffffff: 32MB Trusted DRAM - 0x08000000-0x0bffffff: NOR FLASH0 (CS0 alias) - 0x0c000000-0x0fffffff: NOR FLASH1 (Off-chip, CS4) - 0x10000000-0x13ffffff: gem5-specific peripherals (Off-chip, CS5) - 0x10000000-0x1000ffff: gem5 energy controller - 0x10010000-0x1001ffff: gem5 pseudo-ops - 0x10020000-0x1002ffff: gem5 MHU - - 0x14000000-0x17ffffff: Reserved (Off-chip, PSRAM, CS1) - - 0x18000000-0x1bffffff: Off-chip, Peripherals, CS2 - 0x18000000-0x19ffffff: VRAM - 0x1a000000-0x1bffffff: Reserved - - 0x1c000000-0x1fffffff: Peripheral block 1 (Off-chip, CS3): - 0x1c010000-0x1c01ffff: realview_io (VE system control regs.) 
- 0x1c060000-0x1c06ffff: KMI0 (keyboard) - 0x1c070000-0x1c07ffff: KMI1 (mouse) - 0x1c090000-0x1c09ffff: UART0 - 0x1c0a0000-0x1c0affff: UART1 - 0x1c0b0000-0x1c0bffff: UART2 - 0x1c0c0000-0x1c0cffff: UART3 - 0x1c0f0000-0x1c0fffff: Watchdog (SP805) - 0x1c130000-0x1c13ffff: VirtIO (gem5/FM extension) - 0x1c140000-0x1c14ffff: VirtIO (gem5/FM extension) - 0x1c170000-0x1c17ffff: RTC - - 0x20000000-0x3fffffff: On-chip peripherals: - 0x2a430000-0x2a43ffff: System Counter (control) - 0x2a490000-0x2a49ffff: Trusted Watchdog (SP805) - 0x2a800000-0x2a800fff: System Counter (read) - 0x2a810000-0x2a810fff: System Timer (control) - - 0x2a820000-0x2a820fff: System Timer (frame 0) - 0x2a830000-0x2a830fff: System Timer (frame 1) - - 0x2b000000-0x2b00ffff: HDLCD - - 0x2b060000-0x2b060fff: System Watchdog (SP805) - - 0x2b400000-0x2b41ffff: SMMUv3 - - 0x2c001000-0x2c001fff: GIC (distributor) - 0x2c002000-0x2c003fff: GIC (CPU interface) - 0x2c004000-0x2c005fff: vGIC (HV) - 0x2c006000-0x2c007fff: vGIC (VCPU) - 0x2c1c0000-0x2c1cffff: GICv2m MSI frame 0 - - 0x2d000000-0x2d00ffff: GPU (reserved) - - 0x2e000000-0x2e007fff: Non-trusted SRAM - - 0x2f000000-0x2fffffff: PCI IO space - 0x30000000-0x3fffffff: PCI config space - - 0x40000000-0x7fffffff: Ext. 
AXI: Used as PCI memory - - 0x80000000-X: DRAM - -Interrupts: - 0- 15: Software generated interrupts (SGIs) - 16- 31: On-chip private peripherals (PPIs) - 19 : generic_timer (virt sec EL2) - 20 : generic_timer (phys sec EL2) - 25 : vgic - 26 : generic_timer (phys non-sec EL2) - 27 : generic_timer (virt EL1) - 28 : generic_timer (virt non-sec EL2) - 29 : generic_timer (phys EL3) - 30 : generic_timer (phys EL1) - 31 : Reserved (Legacy IRQ) - 32- 95: Mother board peripherals (SPIs) - 32 : Watchdog (SP805) - 33 : Reserved (IOFPGA SW int) - 34-35: Reserved (SP804) - 36 : RTC - 37-40: uart0-uart3 - 41-42: Reserved (PL180) - 43 : Reserved (AACI) - 44-45: kmi0-kmi1 - 46 : Reserved (CLCD) - 47 : Reserved (Ethernet) - 48 : Reserved (USB) - 56 : Trusted Watchdog (SP805) - 57 : System timer0 (phys) - 58 : System timer1 (phys) - 95-255: On-chip interrupt sources (we use these for - gem5-specific devices, SPIs) - 74 : VirtIO (gem5/FM extension) - 75 : VirtIO (gem5/FM extension) - 95 : HDLCD - 96- 98: GPU (reserved) - 100-103: PCI - 130 : System Watchdog (SP805) - 256-319: MSI frame 0 (gem5-specific, SPIs) - 320-511: Unused + 0- 15: Software generated interrupts (SGIs) + 16- 31: On-chip private peripherals (PPIs) + 19 : generic_timer (virt sec EL2) + 20 : generic_timer (phys sec EL2) + 25 : vgic + 26 : generic_timer (phys non-sec EL2) + 27 : generic_timer (virt EL1) + 28 : generic_timer (virt non-sec EL2) + 29 : generic_timer (phys EL3) + 30 : generic_timer (phys EL1) + 31 : Reserved (Legacy IRQ) + 32- 95: Mother board peripherals (SPIs) + 32 : Watchdog (SP805) + 33 : Reserved (IOFPGA SW int) + 34-35: Reserved (SP804) + 36 : RTC + 37-40: uart0-uart3 + 41-42: Reserved (PL180) + 43 : Reserved (AACI) + 44-45: kmi0-kmi1 + 46 : Reserved (CLCD) + 47 : Reserved (Ethernet) + 48 : Reserved (USB) + 56 : Trusted Watchdog (SP805) + 57 : System timer0 (phys) + 58 : System timer1 (phys) + 95-255: On-chip interrupt sources (we use these for + gem5-specific devices, SPIs) + 74 : VirtIO (gem5/FM 
extension) + 75 : VirtIO (gem5/FM extension) + 95 : HDLCD + 96- 98: GPU (reserved) + 100-103: PCI + 130 : System Watchdog (SP805) + 256-319: MSI frame 0 (gem5-specific, SPIs) + 320-511: Unused """ # Everything above 2GiB is memory - _mem_regions = [ AddrRange('2GiB', size='510GiB') ] + _mem_regions = [AddrRange("2GiB", size="510GiB")] _off_chip_ranges = [ # CS1-CS5 - AddrRange(0x0c000000, 0x20000000), + AddrRange(0x0C000000, 0x20000000), # External AXI interface (PCI) - AddrRange(0x2f000000, 0x80000000), + AddrRange(0x2F000000, 0x80000000), ] - bootmem = SimpleMemory(range=AddrRange(0, size='64MiB'), - conf_table_reported=False) + bootmem = SimpleMemory( + range=AddrRange(0, size="64MiB"), conf_table_reported=False + ) # NOR flash, flash0 - flash0 = SimpleMemory(range=AddrRange(0x08000000, size='64MiB'), - conf_table_reported=False) + flash0 = SimpleMemory( + range=AddrRange(0x08000000, size="64MiB"), conf_table_reported=False + ) # Trusted SRAM - trusted_sram = SimpleMemory(range=AddrRange(0x04000000, size='256KiB'), - conf_table_reported=False) + trusted_sram = SimpleMemory( + range=AddrRange(0x04000000, size="256KiB"), conf_table_reported=False + ) # Trusted DRAM # TODO: preventing access from unsecure world to the trusted RAM - trusted_dram=SimpleMemory(range=AddrRange(0x06000000, size='32MB'), - conf_table_reported=False) + trusted_dram = SimpleMemory( + range=AddrRange(0x06000000, size="32MB"), conf_table_reported=False + ) # Non-Trusted SRAM - non_trusted_sram = MmioSRAM(range=AddrRange(0x2e000000, size=0x8000), - conf_table_reported=False) + non_trusted_sram = MmioSRAM( + range=AddrRange(0x2E000000, size=0x8000), conf_table_reported=False + ) # Platform control device (off-chip) - realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000, - idreg=0x30101100, pio_addr=0x1c010000) + realview_io = RealViewCtrl( + proc_id0=0x14000000, + proc_id1=0x14000000, + idreg=0x30101100, + pio_addr=0x1C010000, + ) mcc = VExpressMCC() dcc = CoreTile2A15DCC() ### 
On-chip devices ### el2_watchdog = GenericWatchdog( - control_start=0x2a440000, - refresh_start=0x2a450000, - ws0=ArmSPI(num=59), ws1=ArmSPI(num=60)) + control_start=0x2A440000, + refresh_start=0x2A450000, + ws0=ArmSPI(num=59), + ws1=ArmSPI(num=60), + ) # Trusted Watchdog, SP805 - trusted_watchdog = Sp805(pio_addr=0x2a490000, interrupt=ArmSPI(num=56)) + trusted_watchdog = Sp805(pio_addr=0x2A490000, interrupt=ArmSPI(num=56)) sys_counter = SystemCounter() generic_timer = GenericTimer( - int_el3_phys=ArmPPI(num=29, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el1_phys=ArmPPI(num=30, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el1_virt=ArmPPI(num=27, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_ns_phys=ArmPPI(num=26, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_ns_virt=ArmPPI(num=28, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_s_phys=ArmPPI(num=20, int_type='IRQ_TYPE_LEVEL_LOW'), - int_el2_s_virt=ArmPPI(num=19, int_type='IRQ_TYPE_LEVEL_LOW')) - generic_timer_mem = GenericTimerMem(cnt_control_base=0x2a430000, - cnt_read_base=0x2a800000, - cnt_ctl_base=0x2a810000, - frames=[ - GenericTimerFrame(cnt_base=0x2a820000, - int_phys=ArmSPI(num=57), int_virt=ArmSPI(num=133)), - GenericTimerFrame(cnt_base=0x2a830000, - int_phys=ArmSPI(num=58), int_virt=ArmSPI(num=134)) - ]) + int_el3_phys=ArmPPI(num=29, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el1_phys=ArmPPI(num=30, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el1_virt=ArmPPI(num=27, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_ns_phys=ArmPPI(num=26, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_ns_virt=ArmPPI(num=28, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_s_phys=ArmPPI(num=20, int_type="IRQ_TYPE_LEVEL_LOW"), + int_el2_s_virt=ArmPPI(num=19, int_type="IRQ_TYPE_LEVEL_LOW"), + ) + generic_timer_mem = GenericTimerMem( + cnt_control_base=0x2A430000, + cnt_read_base=0x2A800000, + cnt_ctl_base=0x2A810000, + frames=[ + GenericTimerFrame( + cnt_base=0x2A820000, + int_phys=ArmSPI(num=57), + int_virt=ArmSPI(num=133), + ), + GenericTimerFrame( + cnt_base=0x2A830000, + 
int_phys=ArmSPI(num=58), + int_virt=ArmSPI(num=134), + ), + ], + ) - system_watchdog = Sp805(pio_addr=0x2b060000, interrupt=ArmSPI(num=130)) + system_watchdog = Sp805(pio_addr=0x2B060000, interrupt=ArmSPI(num=130)) def _on_chip_devices(self): return [ self.generic_timer_mem, self.el2_watchdog, self.trusted_watchdog, - self.system_watchdog + self.system_watchdog, ] + self.generic_timer_mem.frames def _on_chip_memory(self): @@ -1173,50 +1380,63 @@ Interrupts: clock24MHz = SrcClockDomain(clock="24MHz") uart = [ - Pl011(pio_addr=0x1c090000, - interrupt=ArmSPI(num=37)), - Pl011(pio_addr=0x1c0a0000, - interrupt=ArmSPI(num=38), device=Terminal()), - Pl011(pio_addr=0x1c0b0000, - interrupt=ArmSPI(num=39), device=Terminal()), - Pl011(pio_addr=0x1c0c0000, - interrupt=ArmSPI(num=40), device=Terminal()) + Pl011(pio_addr=0x1C090000, interrupt=ArmSPI(num=37)), + Pl011( + pio_addr=0x1C0A0000, interrupt=ArmSPI(num=38), device=Terminal() + ), + Pl011( + pio_addr=0x1C0B0000, interrupt=ArmSPI(num=39), device=Terminal() + ), + Pl011( + pio_addr=0x1C0C0000, interrupt=ArmSPI(num=40), device=Terminal() + ), ] - kmi0 = Pl050(pio_addr=0x1c060000, interrupt=ArmSPI(num=44), - ps2=PS2Keyboard()) - kmi1 = Pl050(pio_addr=0x1c070000, interrupt=ArmSPI(num=45), - ps2=PS2TouchKit()) + kmi0 = Pl050( + pio_addr=0x1C060000, interrupt=ArmSPI(num=44), ps2=PS2Keyboard() + ) + kmi1 = Pl050( + pio_addr=0x1C070000, interrupt=ArmSPI(num=45), ps2=PS2TouchKit() + ) - watchdog = Sp805(pio_addr=0x1c0f0000, interrupt=ArmSPI(num=32)) + watchdog = Sp805(pio_addr=0x1C0F0000, interrupt=ArmSPI(num=32)) rtc = PL031(pio_addr=0x1C170000, interrupt=ArmSPI(num=36)) ### gem5-specific off-chip devices ### pci_host = GenericArmPciHost( - conf_base=0x30000000, conf_size='256MiB', conf_device_bits=12, - pci_pio_base=0x2f000000, + conf_base=0x30000000, + conf_size="256MiB", + conf_device_bits=12, + pci_pio_base=0x2F000000, pci_mem_base=0x40000000, - int_policy="ARM_PCI_INT_DEV", int_base=100, int_count=4) + 
int_policy="ARM_PCI_INT_DEV", + int_base=100, + int_count=4, + ) energy_ctrl = EnergyCtrl(pio_addr=0x10000000) - pwr_ctrl = FVPBasePwrCtrl(pio_addr=0x1c100000) + pwr_ctrl = FVPBasePwrCtrl(pio_addr=0x1C100000) vio = [ - MmioVirtIO(pio_addr=0x1c130000, pio_size=0x1000, - interrupt=ArmSPI(num=74)), - MmioVirtIO(pio_addr=0x1c140000, pio_size=0x1000, - interrupt=ArmSPI(num=75)), + MmioVirtIO( + pio_addr=0x1C130000, pio_size=0x1000, interrupt=ArmSPI(num=74) + ), + MmioVirtIO( + pio_addr=0x1C140000, pio_size=0x1000, interrupt=ArmSPI(num=75) + ), ] # NOR flash, flash1 - flash1 = CfiMemory(range=AddrRange(0x0c000000, 0x10000000), - conf_table_reported=False) + flash1 = CfiMemory( + range=AddrRange(0x0C000000, 0x10000000), conf_table_reported=False + ) # VRAM - vram = SimpleMemory(range=AddrRange(0x18000000, size='32MB'), - conf_table_reported=False) + vram = SimpleMemory( + range=AddrRange(0x18000000, size="32MB"), conf_table_reported=False + ) def _off_chip_devices(self): return [ @@ -1235,10 +1455,7 @@ Interrupts: ] + self.uart def _off_chip_memory(self): - return [ - self.flash1, - self.vram, - ] + return [self.flash1, self.vram] def __init__(self, **kwargs): super().__init__(**kwargs) @@ -1270,10 +1487,10 @@ Interrupts: receive memory requests from the SMMU, and its request port will forward accesses to the memory mapped devices """ - if hasattr(self, 'smmu'): + if hasattr(self, "smmu"): m5.fatal("A SMMU has already been instantiated\n") - self.smmu = SMMUv3(reg_map=AddrRange(0x2b400000, size=0x00020000)) + self.smmu = SMMUv3(reg_map=AddrRange(0x2B400000, size=0x00020000)) self.smmu.request = bus.cpu_side_ports self.smmu.control = bus.mem_side_ports @@ -1298,16 +1515,19 @@ Interrupts: # Generate and attach the SCMI platform _scmi_comm = ScmiCommunication( - agent_channel = ScmiAgentChannel( + agent_channel=ScmiAgentChannel( shmem=self.non_trusted_sram, - shmem_range=AddrRange(0x2e000000, size=0x200), - doorbell=self.mailbox.highp_ap2scp), - platform_channel = 
ScmiPlatformChannel( + shmem_range=AddrRange(0x2E000000, size=0x200), + doorbell=self.mailbox.highp_ap2scp, + ), + platform_channel=ScmiPlatformChannel( shmem=self.non_trusted_sram, - shmem_range=AddrRange(0x2e000000, size=0x200), - doorbell=self.mailbox.highp_scp2ap)) + shmem_range=AddrRange(0x2E000000, size=0x200), + doorbell=self.mailbox.highp_scp2ap, + ), + ) - self.scmi = ScmiPlatform(comms=[ _scmi_comm ]) + self.scmi = ScmiPlatform(comms=[_scmi_comm]) self._attach_device(self.scmi, bus) def generateDeviceTree(self, state): @@ -1320,66 +1540,75 @@ Interrupts: node.appendCompatible(["arm,vexpress"]) node.append(FdtPropertyStrings("model", ["V2P-CA15"])) node.append(FdtPropertyWords("arm,hbi", [0x0])) - node.append(FdtPropertyWords("arm,vexpress,site", [0xf])) + node.append(FdtPropertyWords("arm,vexpress,site", [0xF])) system = self.system.unproxy(self) if system._have_psci: # PSCI functions exposed to the kernel - if not system.release.has(ArmExtension('SECURITY')): + if not system.release.has(ArmExtension("SECURITY")): raise AssertionError("PSCI requires EL3 (have_security)") - psci_node = FdtNode('psci') - psci_node.appendCompatible(['arm,psci-1.0', 'arm,psci-0.2', - 'arm,psci']) - method = 'smc' - psci_node.append(FdtPropertyStrings('method', method)) - psci_node.append(FdtPropertyWords('cpu_suspend', 0xc4000001)) - psci_node.append(FdtPropertyWords('cpu_off', 0x84000002)) - psci_node.append(FdtPropertyWords('cpu_on', 0xc4000003)) - psci_node.append(FdtPropertyWords('sys_poweroff', 0x84000008)) - psci_node.append(FdtPropertyWords('sys_reset', 0x84000009)) + psci_node = FdtNode("psci") + psci_node.appendCompatible( + ["arm,psci-1.0", "arm,psci-0.2", "arm,psci"] + ) + method = "smc" + psci_node.append(FdtPropertyStrings("method", method)) + psci_node.append(FdtPropertyWords("cpu_suspend", 0xC4000001)) + psci_node.append(FdtPropertyWords("cpu_off", 0x84000002)) + psci_node.append(FdtPropertyWords("cpu_on", 0xC4000003)) + 
psci_node.append(FdtPropertyWords("sys_poweroff", 0x84000008)) + psci_node.append(FdtPropertyWords("sys_reset", 0x84000009)) node.append(psci_node) yield node + class VExpress_GEM5_V1_Base(VExpress_GEM5_Base): - gic = kvm_gicv2_class(dist_addr=0x2c001000, cpu_addr=0x2c002000, - it_lines=512) - vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, maint_int=25) + gic = kvm_gicv2_class( + dist_addr=0x2C001000, cpu_addr=0x2C002000, it_lines=512 + ) + vgic = VGic(vcpu_addr=0x2C006000, hv_addr=0x2C004000, maint_int=25) gicv2m = Gicv2m() - gicv2m.frames = [ - Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2c1c0000), - ] + gicv2m.frames = [Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2C1C0000)] def setupBootLoader(self, cur_sys, loc, boot_loader=None): if boot_loader is None: - boot_loader = [ loc('boot.arm64'), loc('boot.arm') ] + boot_loader = [loc("boot.arm64"), loc("boot.arm")] super().setupBootLoader(cur_sys, boot_loader) def _on_chip_devices(self): - return super()._on_chip_devices() + [ - self.gic, self.vgic, self.gicv2m, - ] + return super()._on_chip_devices() + [self.gic, self.vgic, self.gicv2m] + class VExpress_GEM5_V1(VExpress_GEM5_V1_Base): """ We subclass VExpress_GEM5_V1_Base in order to alias it to VExpress_GEM5_V1, which is what gem5 scripts are currently using """ + pass + class VExpress_GEM5_V1_HDLcd(VExpress_GEM5_V1_Base): - hdlcd = HDLcd(pxl_clk=VExpress_GEM5_V1_Base.dcc.osc_pxl, - pio_addr=0x2b000000, interrupt=ArmSPI(num=95)) + hdlcd = HDLcd( + pxl_clk=VExpress_GEM5_V1_Base.dcc.osc_pxl, + pio_addr=0x2B000000, + interrupt=ArmSPI(num=95), + ) def _on_chip_devices(self): - return super()._on_chip_devices() + [self.hdlcd,] + return super()._on_chip_devices() + [self.hdlcd] + class VExpress_GEM5_V2_Base(VExpress_GEM5_Base): - gic = Gicv3(dist_addr=0x2c000000, redist_addr=0x2c010000, - maint_int=ArmPPI(num=25), - gicv4=True, - its=Gicv3Its(pio_addr=0x2e010000)) + gic = Gicv3( + dist_addr=0x2C000000, + redist_addr=0x2C010000, + maint_int=ArmPPI(num=25), + 
gicv4=True, + its=Gicv3Its(pio_addr=0x2E010000), + ) # Limiting to 128 since it will otherwise overlap with PCI space gic.cpu_max = 128 @@ -1389,22 +1618,29 @@ class VExpress_GEM5_V2_Base(VExpress_GEM5_Base): def setupBootLoader(self, cur_sys, loc, boot_loader=None): if boot_loader is None: - boot_loader = [ loc('boot_v2.arm64') ] + boot_loader = [loc("boot_v2.arm64")] super().setupBootLoader(cur_sys, boot_loader) + class VExpress_GEM5_V2(VExpress_GEM5_V2_Base): """ We subclass VExpress_GEM5_V2_Base in order to alias it to VExpress_GEM5_V2, which is what gem5 scripts are currently using """ + pass + class VExpress_GEM5_V2_HDLcd(VExpress_GEM5_V2_Base): - hdlcd = HDLcd(pxl_clk=VExpress_GEM5_V2_Base.dcc.osc_pxl, - pio_addr=0x2b000000, interrupt=ArmSPI(num=95)) + hdlcd = HDLcd( + pxl_clk=VExpress_GEM5_V2_Base.dcc.osc_pxl, + pio_addr=0x2B000000, + interrupt=ArmSPI(num=95), + ) def _on_chip_devices(self): - return super()._on_chip_devices() + [self.hdlcd,] + return super()._on_chip_devices() + [self.hdlcd] + class VExpress_GEM5_Foundation(VExpress_GEM5_Base): """ @@ -1419,35 +1655,45 @@ class VExpress_GEM5_Foundation(VExpress_GEM5_Base): Fast Models - Reference Manual - Version 11.8 Document ID: 100964_1108_00_en """ + _off_chip_ranges = [ # CS1-CS5 - AddrRange(0x0c000000, 0x20000000), + AddrRange(0x0C000000, 0x20000000), # External AXI interface (PCI) AddrRange(0x40000000, 0x80000000), - AddrRange(0x4000000000, 0x800000000), + AddrRange(0x4000000000, 0x8000000000), ] sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True) - clcd = Pl111(pio_addr=0x1c1f0000, interrupt=ArmSPI(num=46)) + clcd = Pl111(pio_addr=0x1C1F0000, interrupt=ArmSPI(num=46)) - gic = kvm_gicv3_class(dist_addr=0x2f000000, redist_addr=0x2f100000, - maint_int=ArmPPI(num=25), gicv4=False, - its=NULL) + gic = kvm_gicv3_class( + dist_addr=0x2F000000, + redist_addr=0x2F100000, + maint_int=ArmPPI(num=25), + gicv4=False, + its=NULL, + ) pci_host = GenericArmPciHost( - conf_base=0x40000000, 
conf_size='256MiB', conf_device_bits=12, + conf_base=0x40000000, + conf_size="256MiB", + conf_device_bits=12, pci_pio_base=0x50000000, pci_mem_base=0x4000000000, - int_policy="ARM_PCI_INT_DEV", int_base=100, int_count=4) + int_policy="ARM_PCI_INT_DEV", + int_base=100, + int_count=4, + ) def _on_chip_devices(self): return super()._on_chip_devices() + [self.gic] def _off_chip_devices(self): - return super()._off_chip_devices() + [self.clcd, self.sp810_fake,] + return super()._off_chip_devices() + [self.clcd, self.sp810_fake] def setupBootLoader(self, cur_sys, loc, boot_loader=None): if boot_loader is None: - boot_loader = [ loc('boot_foundation.arm64') ] + boot_loader = [loc("boot_foundation.arm64")] super().setupBootLoader(cur_sys, boot_loader) diff --git a/src/dev/arm/SConscript b/src/dev/arm/SConscript index f7e9c763b6..0a68e480f3 100644 --- a/src/dev/arm/SConscript +++ b/src/dev/arm/SConscript @@ -54,7 +54,7 @@ SimObject('RealView.py', sim_objects=[ 'GenericArmPciHost', 'RealViewCtrl', 'RealViewOsc', 'RealViewTemperatureSensor', 'AmbaFake', 'Pl011', 'Sp804', 'Sp805', 'GenericWatchdog', 'CpuLocalTimer', 'PL031', 'Pl050', 'Pl111', 'HDLcd', - 'FVPBasePwrCtrl', 'RealView'], + 'FVPBasePwrCtrl', 'RealView', 'SysSecCtrl'], enums=['ArmPciIntRouting'], tags='arm isa') SimObject('SMMUv3.py', sim_objects=['SMMUv3DeviceInterface', 'SMMUv3'], tags='arm isa') @@ -92,6 +92,7 @@ Source('smmu_v3_ports.cc', tags='arm isa'); Source('smmu_v3_proc.cc', tags='arm isa'); Source('smmu_v3_deviceifc.cc', tags='arm isa'); Source('smmu_v3_transl.cc', tags='arm isa'); +Source('ssc.cc', tags='arm isa'); Source('timer_sp804.cc', tags='arm isa') Source('watchdog_generic.cc', tags='arm isa') Source('watchdog_sp805.cc', tags='arm isa') diff --git a/src/dev/arm/SMMUv3.py b/src/dev/arm/SMMUv3.py index f14d985c9e..415eccd742 100644 --- a/src/dev/arm/SMMUv3.py +++ b/src/dev/arm/SMMUv3.py @@ -39,120 +39,134 @@ from m5.util.fdthelper import * from m5.SimObject import * from m5.objects.ClockedObject 
import ClockedObject + class SMMUv3DeviceInterface(ClockedObject): - type = 'SMMUv3DeviceInterface' - cxx_header = 'dev/arm/smmu_v3_deviceifc.hh' - cxx_class = 'gem5::SMMUv3DeviceInterface' + type = "SMMUv3DeviceInterface" + cxx_header = "dev/arm/smmu_v3_deviceifc.hh" + cxx_class = "gem5::SMMUv3DeviceInterface" - device_port = ResponsePort('Device port') - slave = DeprecatedParam(device_port, - '`slave` is now called `device_port`') - ats_mem_side_port = RequestPort('ATS mem side port,' - 'sends requests and receives responses') - ats_master = DeprecatedParam(ats_mem_side_port, - '`ats_master` is now called `ats_mem_side_port`') - ats_dev_side_port = ResponsePort('ATS dev_side_port,' - 'sends responses and receives requests') - ats_slave = DeprecatedParam(ats_dev_side_port, - '`ats_slave` is now called `ats_dev_side_port`') + device_port = ResponsePort("Device port") + slave = DeprecatedParam(device_port, "`slave` is now called `device_port`") + ats_mem_side_port = RequestPort( + "ATS mem side port," "sends requests and receives responses" + ) + ats_master = DeprecatedParam( + ats_mem_side_port, "`ats_master` is now called `ats_mem_side_port`" + ) + ats_dev_side_port = ResponsePort( + "ATS dev_side_port," "sends responses and receives requests" + ) + ats_slave = DeprecatedParam( + ats_dev_side_port, "`ats_slave` is now called `ats_dev_side_port`" + ) - port_width = Param.Unsigned(16, 'Port width in bytes (= 1 beat)') - wrbuf_slots = Param.Unsigned(16, 'Write buffer size (in beats)') - xlate_slots = Param.Unsigned(16, 'Translation slots') + port_width = Param.Unsigned(16, "Port width in bytes (= 1 beat)") + wrbuf_slots = Param.Unsigned(16, "Write buffer size (in beats)") + xlate_slots = Param.Unsigned(16, "Translation slots") - utlb_entries = Param.Unsigned(32, 'Micro TLB size (entries)') - utlb_assoc = Param.Unsigned(0, 'Micro TLB associativity (0=full)') - utlb_policy = Param.String('rr', 'Micro TLB replacement policy') - utlb_enable = Param.Bool(True, 'Micro TLB 
enable') - utlb_lat = Param.Cycles(1, 'Micro TLB lookup latency') - utlb_slots = Param.Cycles(1, 'Micro TLB lookup slots') + utlb_entries = Param.Unsigned(32, "Micro TLB size (entries)") + utlb_assoc = Param.Unsigned(0, "Micro TLB associativity (0=full)") + utlb_policy = Param.String("rr", "Micro TLB replacement policy") + utlb_enable = Param.Bool(True, "Micro TLB enable") + utlb_lat = Param.Cycles(1, "Micro TLB lookup latency") + utlb_slots = Param.Cycles(1, "Micro TLB lookup slots") - tlb_entries = Param.Unsigned(2048, 'Main TLB size (entries)') - tlb_assoc = Param.Unsigned(4, 'Main TLB associativity (0=full)') - tlb_policy = Param.String('rr', 'Main TLB replacement policy') - tlb_enable = Param.Bool(True, 'Main TLB enable') - tlb_lat = Param.Cycles(3, 'Main TLB lookup latency') - tlb_slots = Param.Cycles(3, 'Main TLB lookup slots') + tlb_entries = Param.Unsigned(2048, "Main TLB size (entries)") + tlb_assoc = Param.Unsigned(4, "Main TLB associativity (0=full)") + tlb_policy = Param.String("rr", "Main TLB replacement policy") + tlb_enable = Param.Bool(True, "Main TLB enable") + tlb_lat = Param.Cycles(3, "Main TLB lookup latency") + tlb_slots = Param.Cycles(3, "Main TLB lookup slots") + + prefetch_enable = Param.Bool(False, "Enable prefetch") + prefetch_reserve_last_way = Param.Bool( + True, "Reserve last way of the main TLB for prefetched entries" + ) - prefetch_enable = Param.Bool(False, - 'Enable prefetch') - prefetch_reserve_last_way = Param.Bool(True, - 'Reserve last way of the main TLB for prefetched entries') class SMMUv3(ClockedObject): - type = 'SMMUv3' - cxx_header = 'dev/arm/smmu_v3.hh' - cxx_class = 'gem5::SMMUv3' + type = "SMMUv3" + cxx_header = "dev/arm/smmu_v3.hh" + cxx_class = "gem5::SMMUv3" - request = RequestPort('Request port') + request = RequestPort("Request port") walker = RequestPort( - 'Request port for SMMU initiated HWTW requests (optional)') + "Request port for SMMU initiated HWTW requests (optional)" + ) control = ResponsePort( - 
'Control port for accessing memory-mapped registers') - sample_period = Param.Clock('10us', 'Stats sample period') - reg_map = Param.AddrRange('Address range for control registers') + "Control port for accessing memory-mapped registers" + ) + sample_period = Param.Clock("10us", "Stats sample period") + reg_map = Param.AddrRange("Address range for control registers") system = Param.System(Parent.any, "System this device is part of") - irq_interface_enable = Param.Bool(False, - "This flag enables software to program SMMU_IRQ_CTRL and " - "SMMU_IRQ_CTRLACK as if the model implemented architectural " - "interrupt sources") + irq_interface_enable = Param.Bool( + False, + "This flag enables software to program SMMU_IRQ_CTRL and " + "SMMU_IRQ_CTRLACK as if the model implemented architectural " + "interrupt sources", + ) - device_interfaces = VectorParam.SMMUv3DeviceInterface([], - "Responder interfaces") + device_interfaces = VectorParam.SMMUv3DeviceInterface( + [], "Responder interfaces" + ) # RESPONDER INTERFACE<->SMMU link parameters - ifc_smmu_lat = Param.Cycles(8, 'IFC to SMMU communication latency') - smmu_ifc_lat = Param.Cycles(8, 'SMMU to IFC communication latency') + ifc_smmu_lat = Param.Cycles(8, "IFC to SMMU communication latency") + smmu_ifc_lat = Param.Cycles(8, "SMMU to IFC communication latency") # SMMU parameters - xlate_slots = Param.Unsigned(64, 'SMMU translation slots') - ptw_slots = Param.Unsigned(16, 'SMMU page table walk slots') + xlate_slots = Param.Unsigned(64, "SMMU translation slots") + ptw_slots = Param.Unsigned(16, "SMMU page table walk slots") - request_port_width = Param.Unsigned(16, - 'Request port width in bytes (= 1 beat)') + request_port_width = Param.Unsigned( + 16, "Request port width in bytes (= 1 beat)" + ) - tlb_entries = Param.Unsigned(2048, 'TLB size (entries)') - tlb_assoc = Param.Unsigned(4, 'TLB associativity (0=full)') - tlb_policy = Param.String('rr', 'TLB replacement policy') - tlb_enable = Param.Bool(False, 'TLB enable') - 
tlb_lat = Param.Cycles(3, 'TLB lookup latency') - tlb_slots = Param.Cycles(3, 'TLB lookup slots') + tlb_entries = Param.Unsigned(2048, "TLB size (entries)") + tlb_assoc = Param.Unsigned(4, "TLB associativity (0=full)") + tlb_policy = Param.String("rr", "TLB replacement policy") + tlb_enable = Param.Bool(False, "TLB enable") + tlb_lat = Param.Cycles(3, "TLB lookup latency") + tlb_slots = Param.Cycles(3, "TLB lookup slots") - cfg_entries = Param.Unsigned(64, 'Config cache size (entries)') - cfg_assoc = Param.Unsigned(4, 'Config cache associativity (0=full)') - cfg_policy = Param.String('rr', 'Config cache replacement policy') - cfg_enable = Param.Bool(True, 'Config cache enable') - cfg_lat = Param.Cycles(3, 'Config cache lookup latency') - cfg_slots = Param.Cycles(3, 'Config cache lookup slots') + cfg_entries = Param.Unsigned(64, "Config cache size (entries)") + cfg_assoc = Param.Unsigned(4, "Config cache associativity (0=full)") + cfg_policy = Param.String("rr", "Config cache replacement policy") + cfg_enable = Param.Bool(True, "Config cache enable") + cfg_lat = Param.Cycles(3, "Config cache lookup latency") + cfg_slots = Param.Cycles(3, "Config cache lookup slots") - ipa_entries = Param.Unsigned(128, 'IPA cache size (entries)') - ipa_assoc = Param.Unsigned(4, 'IPA cache associativity (0=full)') - ipa_policy = Param.String('rr', 'IPA cache replacement policy') - ipa_enable = Param.Bool(False, 'IPA cache enable') - ipa_lat = Param.Cycles(3, 'IPA cache lookup lantency') - ipa_slots = Param.Cycles(3, 'IPA cache lookup slots') + ipa_entries = Param.Unsigned(128, "IPA cache size (entries)") + ipa_assoc = Param.Unsigned(4, "IPA cache associativity (0=full)") + ipa_policy = Param.String("rr", "IPA cache replacement policy") + ipa_enable = Param.Bool(False, "IPA cache enable") + ipa_lat = Param.Cycles(3, "IPA cache lookup lantency") + ipa_slots = Param.Cycles(3, "IPA cache lookup slots") - walk_S1L0 = Param.Unsigned(4, 'Walk cache S1L0 size (entries)') - walk_S1L1 = 
Param.Unsigned(28, 'Walk cache S1L1 size (entries)') - walk_S1L2 = Param.Unsigned(348, 'Walk cache S1L2 size (entries)') - walk_S1L3 = Param.Unsigned(4, 'Walk cache S1L3 size (entries)') - walk_S2L0 = Param.Unsigned(4, 'Walk cache S2L0 size (entries)') - walk_S2L1 = Param.Unsigned(28, 'Walk cache S2L1 size (entries)') - walk_S2L2 = Param.Unsigned(92, 'Walk cache S2L2 size (entries)') - walk_S2L3 = Param.Unsigned(4, 'Walk cache S2L3 size (entries)') - walk_assoc = Param.Unsigned(4, 'Walk cache associativity (0=full)') - walk_policy = Param.String('rr', 'Walk cache replacement policy') - walk_enable = Param.Bool(True, 'Walk cache enable') - wc_nonfinal_enable = Param.Bool(False, - 'Nonfinal translations use walk cache') - wc_s1_levels = Param.Unsigned(7, - 'S1 PT levels cached in walk cache (bit 0 is L0, bit 1 is L1, etc)') - wc_s2_levels = Param.Unsigned(7, - 'S2 PT levels cached in walk cache (bit 0 is L0, bit 1 is L1, etc)') + walk_S1L0 = Param.Unsigned(4, "Walk cache S1L0 size (entries)") + walk_S1L1 = Param.Unsigned(28, "Walk cache S1L1 size (entries)") + walk_S1L2 = Param.Unsigned(348, "Walk cache S1L2 size (entries)") + walk_S1L3 = Param.Unsigned(4, "Walk cache S1L3 size (entries)") + walk_S2L0 = Param.Unsigned(4, "Walk cache S2L0 size (entries)") + walk_S2L1 = Param.Unsigned(28, "Walk cache S2L1 size (entries)") + walk_S2L2 = Param.Unsigned(92, "Walk cache S2L2 size (entries)") + walk_S2L3 = Param.Unsigned(4, "Walk cache S2L3 size (entries)") + walk_assoc = Param.Unsigned(4, "Walk cache associativity (0=full)") + walk_policy = Param.String("rr", "Walk cache replacement policy") + walk_enable = Param.Bool(True, "Walk cache enable") + wc_nonfinal_enable = Param.Bool( + False, "Nonfinal translations use walk cache" + ) + wc_s1_levels = Param.Unsigned( + 7, "S1 PT levels cached in walk cache (bit 0 is L0, bit 1 is L1, etc)" + ) + wc_s2_levels = Param.Unsigned( + 7, "S2 PT levels cached in walk cache (bit 0 is L0, bit 1 is L1, etc)" + ) - walk_lat = 
Param.Cycles(4, 'Walk cache lookup latency') - walk_slots = Param.Cycles(4, 'Walk cache lookup slots') + walk_lat = Param.Cycles(4, "Walk cache lookup latency") + walk_slots = Param.Cycles(4, "Walk cache lookup slots") # [28:27] ST_LEVEL = 0b01, 2-level Stream Table supported in addition # to Linear Stream table. @@ -167,33 +181,35 @@ class SMMUv3(ClockedObject): # (0b10 = AArch64). # [1] S1P = 0b1, Stage 1 translation supported. # [0] S2P = 0b1, Stage 2 translation supported. - smmu_idr0 = Param.UInt32(0x094C100F, "SMMU_IDR0 register"); + smmu_idr0 = Param.UInt32(0x094C100F, "SMMU_IDR0 register") # [25:21] CMDQS = 0b00111, Maximum number of Command queue entries # as log 2 (entries) (0b00111 = 128 entries). - smmu_idr1 = Param.UInt32(0x00E00000, "SMMU_IDR1 register"); + smmu_idr1 = Param.UInt32(0x00E00000, "SMMU_IDR1 register") - smmu_idr2 = Param.UInt32(0, "SMMU_IDR2 register"); - smmu_idr3 = Param.UInt32(0, "SMMU_IDR3 register"); - smmu_idr4 = Param.UInt32(0, "SMMU_IDR4 register"); + smmu_idr2 = Param.UInt32(0, "SMMU_IDR2 register") + smmu_idr3 = Param.UInt32(0, "SMMU_IDR3 register") + smmu_idr4 = Param.UInt32(0, "SMMU_IDR4 register") # [6] GRAN64K = 0b1, 64KB translation granule supported. # [4] GRAN4K = 0b1, 4KB translation granule supported. # [2:0] OAS = 0b101, Output Address Size (0b101 = 48-bit). 
- smmu_idr5 = Param.UInt32(0x55, "SMMU_IDR5 register"); - smmu_iidr = Param.UInt32(0, "SMMU_IIDR register"); + smmu_idr5 = Param.UInt32(0x55, "SMMU_IDR5 register") + smmu_iidr = Param.UInt32(0, "SMMU_IIDR register") # [7:0] (0 = SMMUv3.0) (1 = SMMUv3.1) - smmu_aidr = Param.UInt32(0, "SMMU_AIDR register"); + smmu_aidr = Param.UInt32(0, "SMMU_AIDR register") def generateDeviceTree(self, state): reg_addr = self.reg_map.start reg_size = self.reg_map.size() node = FdtNode("smmuv3@%x" % int(reg_addr)) node.appendCompatible("arm,smmu-v3") - node.append(FdtPropertyWords("reg", - state.addrCells(reg_addr) + - state.sizeCells(reg_size))) + node.append( + FdtPropertyWords( + "reg", state.addrCells(reg_addr) + state.sizeCells(reg_size) + ) + ) node.append(FdtPropertyWords("#iommu-cells", [1])) node.appendPhandle(self) diff --git a/src/dev/arm/UFSHostDevice.py b/src/dev/arm/UFSHostDevice.py index bae7124f9e..46ed7ddd24 100644 --- a/src/dev/arm/UFSHostDevice.py +++ b/src/dev/arm/UFSHostDevice.py @@ -39,13 +39,17 @@ from m5.proxy import * from m5.objects.Device import DmaDevice from m5.objects.AbstractNVM import * + class UFSHostDevice(DmaDevice): - type = 'UFSHostDevice' + type = "UFSHostDevice" cxx_header = "dev/arm/ufs_device.hh" - cxx_class = 'gem5::UFSHostDevice' + cxx_class = "gem5::UFSHostDevice" pio_addr = Param.Addr("Address for SCSI configuration responder interface") - pio_latency = Param.Latency("10ns", "Time between action and write/read \ - result by AMBA DMA Device") + pio_latency = Param.Latency( + "10ns", + "Time between action and write/read \ + result by AMBA DMA Device", + ) gic = Param.BaseGic(Parent.any, "Gic to use for interrupting") int_num = Param.UInt32("Interrupt number that connects to GIC") img_blk_size = Param.UInt32(512, "Size of one image block in bytes") @@ -60,6 +64,8 @@ class UFSHostDevice(DmaDevice): # its own flash dimensions; to allow the system to define a hetrogeneous # storage system. 
internalflash = VectorParam.AbstractNVM("Describes the internal flash") - ufs_slots = Param.UInt32(32, "Number of commands that can be queued in \ - the Host controller (min: 1, max: 32)") - + ufs_slots = Param.UInt32( + 32, + "Number of commands that can be queued in \ + the Host controller (min: 1, max: 32)", + ) diff --git a/src/dev/arm/VExpressFastmodel.py b/src/dev/arm/VExpressFastmodel.py index 6e5ade4150..0dfddb7735 100644 --- a/src/dev/arm/VExpressFastmodel.py +++ b/src/dev/arm/VExpressFastmodel.py @@ -28,17 +28,23 @@ from m5.objects.Gic import ArmSPI from m5.objects.RealView import VExpress_GEM5_Base, HDLcd from m5.objects.SubSystem import SubSystem + class VExpressFastmodel(VExpress_GEM5_Base): gic = FastModelGIC( sc_gic=SCFastModelGIC( - reg_base=0x2c000000, + its_count=1, + its0_base=0x2E010000, + reg_base=0x2C000000, reg_base_per_redistributor="0.0.0.0=0x2c010000", spi_count=988, - )) + ) + ) hdlcd = HDLcd( - pxl_clk=VExpress_GEM5_Base.dcc.osc_pxl, pio_addr=0x2b000000, - interrupt=ArmSPI(num=95)) + pxl_clk=VExpress_GEM5_Base.dcc.osc_pxl, + pio_addr=0x2B000000, + interrupt=ArmSPI(num=95), + ) # Remove original timer to prevent from possible conflict with Fastmodel # timer. 
@@ -57,5 +63,5 @@ class VExpressFastmodel(VExpress_GEM5_Base): def setupBootLoader(self, cur_sys, loc, boot_loader=None): if boot_loader is None: - boot_loader = [ loc('boot_v2.arm64') ] + boot_loader = [loc("boot_v2.arm64")] super().setupBootLoader(cur_sys, boot_loader) diff --git a/src/dev/arm/VirtIOMMIO.py b/src/dev/arm/VirtIOMMIO.py index 2661db2fd2..eecd703e3a 100644 --- a/src/dev/arm/VirtIOMMIO.py +++ b/src/dev/arm/VirtIOMMIO.py @@ -43,10 +43,11 @@ from m5.objects.Device import BasicPioDevice from m5.objects.Gic import ArmInterruptPin from m5.objects.VirtIO import VirtIODeviceBase, VirtIODummyDevice + class MmioVirtIO(BasicPioDevice): - type = 'MmioVirtIO' - cxx_header = 'dev/arm/vio_mmio.hh' - cxx_class = 'gem5::MmioVirtIO' + type = "MmioVirtIO" + cxx_header = "dev/arm/vio_mmio.hh" + cxx_class = "gem5::MmioVirtIO" pio_size = Param.Addr(4096, "IO range") interrupt = Param.ArmInterruptPin("Interrupt to use for this device") @@ -54,7 +55,12 @@ class MmioVirtIO(BasicPioDevice): vio = Param.VirtIODeviceBase(VirtIODummyDevice(), "VirtIO device") def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, 'virtio', self.pio_addr, - int(self.pio_size), [ self.interrupt ]) + node = self.generateBasicPioDeviceNode( + state, + "virtio", + self.pio_addr, + int(self.pio_size), + [self.interrupt], + ) node.appendCompatible(["virtio,mmio"]) yield node diff --git a/src/dev/arm/a9scu.hh b/src/dev/arm/a9scu.hh index 4f0ecefccb..5a8a6dad7d 100644 --- a/src/dev/arm/a9scu.hh +++ b/src/dev/arm/a9scu.hh @@ -84,4 +84,3 @@ class A9SCU : public BasicPioDevice } // namespace gem5 #endif // __DEV_ARM_A9SCU_HH__ - diff --git a/src/dev/arm/css/MHU.py b/src/dev/arm/css/MHU.py index 179173965d..6a930f423c 100644 --- a/src/dev/arm/css/MHU.py +++ b/src/dev/arm/css/MHU.py @@ -39,77 +39,108 @@ from m5.params import * from m5.proxy import * from m5.util.fdthelper import * + class MhuDoorbell(Doorbell): - type = 'MhuDoorbell' + type = "MhuDoorbell" abstract = True 
cxx_header = "dev/arm/css/mhu.hh" - cxx_class = 'gem5::MhuDoorbell' + cxx_class = "gem5::MhuDoorbell" + class Scp2ApDoorbell(MhuDoorbell): - type = 'Scp2ApDoorbell' + type = "Scp2ApDoorbell" cxx_header = "dev/arm/css/mhu.hh" - cxx_class = 'gem5::Scp2ApDoorbell' + cxx_class = "gem5::Scp2ApDoorbell" interrupt = Param.ArmInterruptPin("Interrupt Pin") + class Ap2ScpDoorbell(MhuDoorbell): - type = 'Ap2ScpDoorbell' + type = "Ap2ScpDoorbell" cxx_header = "dev/arm/css/mhu.hh" - cxx_class = 'gem5::Ap2ScpDoorbell' + cxx_class = "gem5::Ap2ScpDoorbell" + # Message Handling Unit class MHU(BasicPioDevice): - type = 'MHU' + type = "MHU" cxx_header = "dev/arm/css/mhu.hh" - cxx_class = 'gem5::MHU' + cxx_class = "gem5::MHU" pio_size = Param.Unsigned(0x1000, "MHU pio size") lowp_scp2ap = Param.Scp2ApDoorbell( "Low Priority doorbell channel for communications " "from the System Control Processor (SCP) to the " - "Application Processor (AP)") + "Application Processor (AP)" + ) highp_scp2ap = Param.Scp2ApDoorbell( "High Priority doorbell channel for communications " "from the System Control Processor (SCP) to the " - "Application Processor (AP)") + "Application Processor (AP)" + ) sec_scp2ap = Param.Scp2ApDoorbell( "Secure doorbell channel for communications " "from the System Control Processor (SCP) to the " - "Application Processor (AP)") + "Application Processor (AP)" + ) lowp_ap2scp = Param.Ap2ScpDoorbell( "Low Priority doorbell channel for communications " "from the Application Processor (AP) to the " - "System Control Processor (SCP)") + "System Control Processor (SCP)" + ) highp_ap2scp = Param.Ap2ScpDoorbell( "High Priority doorbell channel for communications " "from the Application Processor (AP) to the " - "System Control Processor (SCP)") + "System Control Processor (SCP)" + ) sec_ap2scp = Param.Ap2ScpDoorbell( "Secure doorbell channel for communications " "from the Application Processor (AP) to the " - "System Control Processor (SCP)") + "System Control Processor (SCP)" + ) 
scp = Param.Scp(Parent.any, "System Control Processor") def generateDeviceTree(self, state): node = FdtNode("mailbox@%x" % int(self.pio_addr)) node.appendCompatible(["arm,mhu", "arm,primecell"]) - node.append(FdtPropertyWords("reg", - state.addrCells(self.pio_addr) + - state.sizeCells(self.pio_size) )) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(self.pio_addr) + + state.sizeCells(self.pio_size), + ) + ) node.append(FdtPropertyWords("#mbox-cells", 1)) - node.append(FdtPropertyWords("interrupts", [ - 0, int(self.lowp_scp2ap.interrupt.num) - 32, 1, - 0, int(self.highp_scp2ap.interrupt.num) - 32, 1, - 0, int(self.sec_scp2ap.interrupt.num) - 32, 1, - ])) + node.append( + FdtPropertyWords( + "interrupts", + [ + 0, + int(self.lowp_scp2ap.interrupt.num) - 32, + 1, + 0, + int(self.highp_scp2ap.interrupt.num) - 32, + 1, + 0, + int(self.sec_scp2ap.interrupt.num) - 32, + 1, + ], + ) + ) realview = self._parent.unproxy(self) - node.append(FdtPropertyWords("clocks", - [state.phandle(realview.mcc.osc_peripheral), - state.phandle(realview.dcc.osc_smb)])) + node.append( + FdtPropertyWords( + "clocks", + [ + state.phandle(realview.mcc.osc_peripheral), + state.phandle(realview.dcc.osc_smb), + ], + ) + ) node.append(FdtPropertyStrings("clock-names", ["apb_pclk"])) node.appendPhandle(self) diff --git a/src/dev/arm/css/Scmi.py b/src/dev/arm/css/Scmi.py index 4b51fa8593..1246e69343 100644 --- a/src/dev/arm/css/Scmi.py +++ b/src/dev/arm/css/Scmi.py @@ -40,21 +40,25 @@ from m5.objects.Doorbell import Doorbell from m5.util.fdthelper import * from m5.SimObject import SimObject + class ScmiChannel(SimObject): """ Unidirectional channel """ - type = 'ScmiChannel' + + type = "ScmiChannel" cxx_header = "dev/arm/css/scmi_platform.hh" cxx_class = "gem5::scmi::VirtualChannel" shmem_range = Param.AddrRange( - "Virtual channel's shared memory address range") - phys_id = Param.Unsigned(4, - "Physical slot of the channel") - virt_id = Param.Unsigned(0, - "Virtual slot of the channel 
(within the physical)") + "Virtual channel's shared memory address range" + ) + phys_id = Param.Unsigned(4, "Physical slot of the channel") + virt_id = Param.Unsigned( + 0, "Virtual slot of the channel (within the physical)" + ) doorbell = Param.Doorbell( - "This is the doorbell used to notify the SCMI platform") + "This is the doorbell used to notify the SCMI platform" + ) def __init__(self, shmem, *args, **kwargs): super().__init__(**kwargs) @@ -62,21 +66,25 @@ class ScmiChannel(SimObject): def shmemGenerator(state): shmem_node = FdtNode("scp-shmem@%x" % 0) shmem_node.appendCompatible(["arm,scmi-shmem"]) - shmem_node.append(FdtPropertyWords("reg", - state.addrCells(0) + - state.sizeCells(0x200)) ) - #shmem_node.appendPhandle(self._parent.unproxy(self).channel) + shmem_node.append( + FdtPropertyWords( + "reg", state.addrCells(0) + state.sizeCells(0x200) + ) + ) + # shmem_node.appendPhandle(self._parent.unproxy(self).channel) shmem_node.appendPhandle("scmi_virt" + str(self.virt_id)) return shmem_node self._shmem = shmem self._shmem.addSubnodeGenerator(shmemGenerator) + class ScmiAgentChannel(ScmiChannel): """ This is a Agent to Platform channel (The agent is the initiator) """ - type = 'ScmiAgentChannel' + + type = "ScmiAgentChannel" cxx_header = "dev/arm/css/scmi_platform.hh" cxx_class = "gem5::scmi::AgentChannel" @@ -85,10 +93,12 @@ class ScmiPlatformChannel(ScmiChannel): """ This is a Platform to Agent channel (The platform is the initiator) """ - type = 'ScmiPlatformChannel' + + type = "ScmiPlatformChannel" cxx_header = "dev/arm/css/scmi_platform.hh" cxx_class = "gem5::scmi::PlatformChannel" + class ScmiCommunication(SimObject): """ The SCMI Communication class models a bidirectional @@ -96,36 +106,41 @@ class ScmiCommunication(SimObject): As such it has a ScmiAgentChannel and a ScmiPlatformChannel object as members. 
""" - type = 'ScmiCommunication' + + type = "ScmiCommunication" cxx_header = "dev/arm/css/scmi_platform.hh" cxx_class = "gem5::scmi::Communication" - agent_channel = Param.ScmiAgentChannel( - "Agent to Platform channel") - platform_channel = Param.ScmiPlatformChannel( - "Platform to Agent channel") + agent_channel = Param.ScmiAgentChannel("Agent to Platform channel") + platform_channel = Param.ScmiPlatformChannel("Platform to Agent channel") + class ScmiPlatform(Scp): - type = 'ScmiPlatform' + type = "ScmiPlatform" cxx_header = "dev/arm/css/scmi_platform.hh" cxx_class = "gem5::scmi::Platform" - comms = VectorParam.ScmiCommunication([], - "SCMI Communications") - agents = VectorParam.String([ "OSPM" ], - "Vector of SCMI agents (names) in the system") + comms = VectorParam.ScmiCommunication([], "SCMI Communications") + agents = VectorParam.String( + ["OSPM"], "Vector of SCMI agents (names) in the system" + ) sys = Param.System(Parent.any, "System object parameter") dma = MasterPort("DMA port") # Protocol params - base_vendor = Param.String("arm", - "Return string for the Base protocol DISCOVER_VENDOR command") - base_subvendor = Param.String("gem5", - "Return string for the Base protocol DISCOVER_SUBVENDOR command") - base_impl_version = Param.Unsigned(0, + base_vendor = Param.String( + "arm", "Return string for the Base protocol DISCOVER_VENDOR command" + ) + base_subvendor = Param.String( + "gem5", + "Return string for the Base protocol DISCOVER_SUBVENDOR command", + ) + base_impl_version = Param.Unsigned( + 0, "Return value for the Base protocol " - "DISCOVER_IMPLEMENTATION_VERSION command") + "DISCOVER_IMPLEMENTATION_VERSION command", + ) def generateDeviceTree(self, state): scmi_node = self.generateScmiNode(state) @@ -141,12 +156,14 @@ class ScmiPlatform(Scp): mbox_phandle = state.phandle(self._parent.unproxy(self).mailbox) shmem_phandles = [] for comm in self.unproxy(self).comms: - shmem_phandles.append(state.phandle( - "scmi_virt" + 
str(comm.agent_channel.virt_id))) - shmem_phandles.append(state.phandle( - "scmi_virt" + str(comm.platform_channel.virt_id))) + shmem_phandles.append( + state.phandle("scmi_virt" + str(comm.agent_channel.virt_id)) + ) + shmem_phandles.append( + state.phandle("scmi_virt" + str(comm.platform_channel.virt_id)) + ) - phys_channel = 1 # HP-NonSecure - node.append(FdtPropertyWords("mboxes", [ mbox_phandle, phys_channel ])) + phys_channel = 1 # HP-NonSecure + node.append(FdtPropertyWords("mboxes", [mbox_phandle, phys_channel])) node.append(FdtPropertyWords("shmem", shmem_phandles)) return node diff --git a/src/dev/arm/css/Scp.py b/src/dev/arm/css/Scp.py index c8348118af..3efa59fad3 100644 --- a/src/dev/arm/css/Scp.py +++ b/src/dev/arm/css/Scp.py @@ -36,8 +36,9 @@ from m5.objects.ClockedObject import ClockedObject from m5.params import * + class Scp(ClockedObject): - type = 'Scp' + type = "Scp" abstract = True cxx_header = "dev/arm/css/scp.hh" - cxx_class = 'gem5::Scp' + cxx_class = "gem5::Scp" diff --git a/src/dev/arm/gic_v3.cc b/src/dev/arm/gic_v3.cc index dde3818b07..e14d1f2bef 100644 --- a/src/dev/arm/gic_v3.cc +++ b/src/dev/arm/gic_v3.cc @@ -147,7 +147,7 @@ Gicv3::init() for (int i = 0; i < threads; i++) { redistributors[i] = new Gicv3Redistributor(this, i); - cpuInterfaces[i] = new Gicv3CPUInterface(this, i); + cpuInterfaces[i] = new Gicv3CPUInterface(this, sys->threads[i]); } distRange = RangeSize(params().dist_addr, diff --git a/src/dev/arm/gic_v3_cpu_interface.cc b/src/dev/arm/gic_v3_cpu_interface.cc index b089ba0bda..28a173943d 100644 --- a/src/dev/arm/gic_v3_cpu_interface.cc +++ b/src/dev/arm/gic_v3_cpu_interface.cc @@ -55,15 +55,19 @@ using namespace ArmISA; const uint8_t Gicv3CPUInterface::GIC_MIN_BPR; const uint8_t Gicv3CPUInterface::GIC_MIN_BPR_NS; -Gicv3CPUInterface::Gicv3CPUInterface(Gicv3 * gic, uint32_t cpu_id) +Gicv3CPUInterface::Gicv3CPUInterface(Gicv3 * gic, ThreadContext *_tc) : BaseISADevice(), gic(gic), redistributor(nullptr), distributor(nullptr), 
- cpuId(cpu_id) + tc(_tc), + maintenanceInterrupt(gic->params().maint_int->get(tc)), + cpuId(tc->contextId()) { hppi.prio = 0xff; hppi.intid = Gicv3::INTID_SPURIOUS; + + setISA(static_cast(tc->getIsaPtr())); } void @@ -81,8 +85,9 @@ Gicv3CPUInterface::resetHppi(uint32_t intid) } void -Gicv3CPUInterface::setThreadContext(ThreadContext *tc) +Gicv3CPUInterface::setThreadContext(ThreadContext *_tc) { + tc = _tc; maintenanceInterrupt = gic->params().maint_int->get(tc); fatal_if(maintenanceInterrupt->num() >= redistributor->irqPending.size(), "Invalid maintenance interrupt number\n"); @@ -91,7 +96,7 @@ Gicv3CPUInterface::setThreadContext(ThreadContext *tc) bool Gicv3CPUInterface::getHCREL2FMO() const { - HCR hcr = isa->readMiscRegNoEffect(MISCREG_HCR_EL2); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); if (hcr.tge && hcr.e2h) { return false; @@ -105,7 +110,7 @@ Gicv3CPUInterface::getHCREL2FMO() const bool Gicv3CPUInterface::getHCREL2IMO() const { - HCR hcr = isa->readMiscRegNoEffect(MISCREG_HCR_EL2); + HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR_EL2); if (hcr.tge && hcr.e2h) { return false; @@ -230,7 +235,7 @@ Gicv3CPUInterface::readMiscReg(int misc_reg) uint8_t rprio = highestActivePriority(); if (haveEL(EL3) && !inSecureState() && - (isa->readMiscRegNoEffect(MISCREG_SCR_EL3) & (1U << 2))) { + (tc->readMiscRegNoEffect(MISCREG_SCR_EL3) & (1U << 2))) { // Spec section 4.8.1 // For Non-secure access to ICC_RPR_EL1 when SCR_EL3.FIQ == 1 if ((rprio & 0x80) == 0) { @@ -366,7 +371,7 @@ Gicv3CPUInterface::readMiscReg(int misc_reg) } if (haveEL(EL3) && !inSecureState() && - (isa->readMiscRegNoEffect(MISCREG_SCR_EL3) & (1U << 2))) { + (tc->readMiscRegNoEffect(MISCREG_SCR_EL3) & (1U << 2))) { // Spec section 4.8.1 // For Non-secure access to ICC_PMR_EL1 when SCR_EL3.FIQ == 1: if ((value & 0x80) == 0) { @@ -968,7 +973,7 @@ Gicv3CPUInterface::setMiscReg(int misc_reg, RegVal val) bool irq_is_grp0 = group == Gicv3::G0S; bool single_sec_state = distributor->DS; bool 
irq_is_secure = !single_sec_state && (group != Gicv3::G1NS); - SCR scr_el3 = isa->readMiscRegNoEffect(MISCREG_SCR_EL3); + SCR scr_el3 = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); bool route_fiq_to_el3 = scr_el3.fiq; bool route_irq_to_el3 = scr_el3.irq; bool route_fiq_to_el2 = hcr_fmo; @@ -1290,7 +1295,7 @@ Gicv3CPUInterface::setMiscReg(int misc_reg, RegVal val) } val &= 0xff; - SCR scr_el3 = isa->readMiscRegNoEffect(MISCREG_SCR_EL3); + SCR scr_el3 = tc->readMiscRegNoEffect(MISCREG_SCR_EL3); if (haveEL(EL3) && !inSecureState() && (scr_el3.fiq)) { // Spec section 4.8.1 @@ -2339,13 +2344,13 @@ Gicv3CPUInterface::groupEnabled(Gicv3::GroupId group) const bool Gicv3CPUInterface::inSecureState() const { - return isa->inSecureState(); + return ArmISA::isSecure(tc); } ExceptionLevel Gicv3CPUInterface::currEL() const { - return isa->currEL(); + return ArmISA::currEL(tc); } bool @@ -2371,32 +2376,19 @@ Gicv3CPUInterface::haveEL(ExceptionLevel el) const bool Gicv3CPUInterface::isSecureBelowEL3() const { - SCR scr = isa->readMiscRegNoEffect(MISCREG_SCR_EL3); - return haveEL(EL3) && scr.ns == 0; + return ArmISA::isSecureBelowEL3(tc); } bool Gicv3CPUInterface::isAA64() const { - CPSR cpsr = isa->readMiscRegNoEffect(MISCREG_CPSR); - return opModeIs64((OperatingMode)(uint8_t) cpsr.mode); + return ArmISA::inAArch64(tc); } bool Gicv3CPUInterface::isEL3OrMon() const { - if (haveEL(EL3)) { - CPSR cpsr = isa->readMiscRegNoEffect(MISCREG_CPSR); - bool is_64 = opModeIs64((OperatingMode)(uint8_t) cpsr.mode); - - if (is_64 && (cpsr.el == EL3)) { - return true; - } else if (!is_64 && (cpsr.mode == MODE_MON)) { - return true; - } - } - - return false; + return currEL() == EL3; } // Computes ICH_EISR_EL2 diff --git a/src/dev/arm/gic_v3_cpu_interface.hh b/src/dev/arm/gic_v3_cpu_interface.hh index 5bcfba51ff..ff476bc3c6 100644 --- a/src/dev/arm/gic_v3_cpu_interface.hh +++ b/src/dev/arm/gic_v3_cpu_interface.hh @@ -68,9 +68,10 @@ class Gicv3CPUInterface : public ArmISA::BaseISADevice, public 
Serializable Gicv3 * gic; Gicv3Redistributor * redistributor; Gicv3Distributor * distributor; - uint32_t cpuId; + ThreadContext *tc; ArmInterruptPin *maintenanceInterrupt; + uint32_t cpuId; BitUnion64(ICC_CTLR_EL1) Bitfield<63, 20> res0_3; @@ -204,6 +205,7 @@ class Gicv3CPUInterface : public ArmISA::BaseISADevice, public Serializable static const AddrRange GICH_APR; static const AddrRange GICH_LR; + public: BitUnion64(ICH_HCR_EL2) Bitfield<63, 32> res0_2; Bitfield<31, 27> EOIcount; @@ -224,6 +226,7 @@ class Gicv3CPUInterface : public ArmISA::BaseISADevice, public Serializable Bitfield<0> En; EndBitUnion(ICH_HCR_EL2) + protected: BitUnion64(ICH_LR_EL2) Bitfield<63, 62> State; Bitfield<61> HW; @@ -356,7 +359,7 @@ class Gicv3CPUInterface : public ArmISA::BaseISADevice, public Serializable void setBankedMiscReg(ArmISA::MiscRegIndex misc_reg, RegVal val) const; public: - Gicv3CPUInterface(Gicv3 * gic, uint32_t cpu_id); + Gicv3CPUInterface(Gicv3 * gic, ThreadContext *tc); void init(); diff --git a/src/dev/arm/gic_v3_distributor.cc b/src/dev/arm/gic_v3_distributor.cc index 820f8bcbb2..1cb485c5f5 100644 --- a/src/dev/arm/gic_v3_distributor.cc +++ b/src/dev/arm/gic_v3_distributor.cc @@ -60,7 +60,7 @@ const AddrRange Gicv3Distributor::GICD_ICPENDR (0x0280, 0x0300); const AddrRange Gicv3Distributor::GICD_ISACTIVER (0x0300, 0x0380); const AddrRange Gicv3Distributor::GICD_ICACTIVER (0x0380, 0x0400); const AddrRange Gicv3Distributor::GICD_IPRIORITYR(0x0400, 0x0800); -const AddrRange Gicv3Distributor::GICD_ITARGETSR (0x0800, 0x0900); +const AddrRange Gicv3Distributor::GICD_ITARGETSR (0x0800, 0x0c00); const AddrRange Gicv3Distributor::GICD_ICFGR (0x0c00, 0x0d00); const AddrRange Gicv3Distributor::GICD_IGRPMODR (0x0d00, 0x0d80); const AddrRange Gicv3Distributor::GICD_NSACR (0x0e00, 0x0f00); diff --git a/src/dev/arm/ssc.cc b/src/dev/arm/ssc.cc new file mode 100644 index 0000000000..4a576470a9 --- /dev/null +++ b/src/dev/arm/ssc.cc @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2022 Arm 
Limited + * All rights reserved + * + * The license below extends only to copyright in the software and shall + * not be construed as granting a license to any other intellectual + * property including but not limited to intellectual property relating + * to a hardware implementation of the functionality of the software + * licensed hereunder. You may use the software subject to the license + * terms below provided that you ensure that this notice is replicated + * unmodified and in its entirety in all distributions of the software, + * modified or unmodified, in source code or in binary form. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "dev/arm/ssc.hh" + +namespace gem5 +{ + +SysSecCtrl::SysSecCtrl(const Params &p) + : BasicPioDevice(p, 0x1000), + sscDbgcfgStat("ssc_dbgcfg_stat", p.ssc_dbgcfg_stat), + sscDbgcfgSet("ssc_dbgcfg_set"), + sscDbgcfgClr("ssc_dbgcfg_clr"), + space0("space0", 0x28 - 0x1c), + sscAuxDbgcfg("ssc_aux_dbgcfg"), + space1("space1", 0x4), + sscAuxGpretn("ssc_aux_gpretn"), + space2("space2", 0x40 - 0x34), + sscVersion("ssc_version", p.ssc_version), + space3("space3", 0x100 - 0x44), + sscSwScratch("ssc_sw_scratch"), + space4("space4", 0x200 - 0x180), + sscSwCap("ssc_sw_cap"), + sscSwCapCtrl("ssc_sw_capctrl"), + space5("space5", 0x500 - 0x304), + sscChipIdSt("ssc_chipid_st"), + space6("space6", 0xfd0 - 0x504), + sscPid4("ssc_pid4", p.ssc_pid4), + space7("space7", 0xfe0 - 0xfd4), + sscPid0("ssc_pid0", p.ssc_pid0), + sscPid1("ssc_pid1", p.ssc_pid1), + sscPid2("ssc_pid2", p.ssc_pid2), + space8("space8", 0xff0 - 0xfec), + compid0("compid0", p.compid0), + compid1("compid1", p.compid1), + compid2("compid2", p.compid2), + compid3("compid3", p.compid3), + regBank("ssc", 0x0010) +{ + // RO registers + sscDbgcfgStat.readonly(); + sscVersion.readonly(); + sscChipIdSt.readonly(); + sscPid0.readonly(); + sscPid1.readonly(); + sscPid2.readonly(); + sscPid4.readonly(); + compid0.readonly(); + compid1.readonly(); + compid2.readonly(); + compid3.readonly(); + + regBank.addRegisters({ + sscDbgcfgStat, sscDbgcfgSet, sscDbgcfgClr, + space0, + 
sscAuxDbgcfg, + space1, + sscAuxGpretn, + space2, + sscVersion, + space3, + sscSwScratch, + space4, + sscSwCap, sscSwCapCtrl, + space5, + sscChipIdSt, + space6, + sscPid4, + space7, + sscPid0, sscPid1, sscPid2, + space8, + compid0, compid1, compid2, compid3, + }); +} + +Tick +SysSecCtrl::read(PacketPtr pkt) +{ + assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize); + Addr daddr = pkt->getAddr() - pioAddr; + + regBank.read(daddr, pkt->getPtr(), pkt->getSize()); + + pkt->makeAtomicResponse(); + return pioDelay; +} + +Tick +SysSecCtrl::write(PacketPtr pkt) +{ + assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize); + Addr daddr = pkt->getAddr() - pioAddr; + + regBank.write(daddr, pkt->getPtr(), pkt->getSize()); + + pkt->makeAtomicResponse(); + return pioDelay; +} + +} diff --git a/src/arch/null/vecregs.hh b/src/dev/arm/ssc.hh similarity index 58% rename from src/arch/null/vecregs.hh rename to src/dev/arm/ssc.hh index 4ca2d2b94d..25475d9dbc 100644 --- a/src/arch/null/vecregs.hh +++ b/src/dev/arm/ssc.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 ARM Limited + * Copyright (c) 2022 Arm Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -35,23 +35,74 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __ARCH_NULL_VECREGS_HH__ -#define __ARCH_NULL_VECREGS_HH__ +#ifndef __DEV_ARM_SSC_H__ +#define __DEV_ARM_SSC_H__ -#include "arch/generic/vec_pred_reg.hh" -#include "arch/generic/vec_reg.hh" +#include "dev/io_device.hh" +#include "dev/reg_bank.hh" +#include "params/SysSecCtrl.hh" namespace gem5 { -namespace NullISA +/** System Security Control registers */ +class SysSecCtrl : public BasicPioDevice { + public: + PARAMS(SysSecCtrl); + SysSecCtrl(const Params &p); -// Not applicable to null -using VecRegContainer = ::gem5::DummyVecRegContainer; -using VecPredRegContainer = ::gem5::DummyVecPredRegContainer; + /** + * Handle a read to the device + * @param pkt The memory request. + * @param data Where to put the data. + */ + Tick read(PacketPtr pkt) override; + + /** + * All writes are simply ignored. + * @param pkt The memory request. + * @param data the data + */ + Tick write(PacketPtr pkt) override; + + protected: + using Register = RegisterBankLE::Register32LE; + using Space = RegisterBankLE::RegisterRaz; + template + using Block = RegisterBankLE::RegisterLBuf; + + Register sscDbgcfgStat; + Register sscDbgcfgSet; + Register sscDbgcfgClr; + Space space0; + Register sscAuxDbgcfg; + Space space1; + Register sscAuxGpretn; + Space space2; + Register sscVersion; + Space space3; + Block<0x80> sscSwScratch; + Space space4; + Block<0x100> sscSwCap; + Register sscSwCapCtrl; + Space space5; + Register sscChipIdSt; + Space space6; + Register sscPid4; + Space space7; + Register sscPid0; + Register sscPid1; + Register sscPid2; + Space space8; + Register compid0; + Register compid1; + Register compid2; + Register compid3; + + RegisterBankLE regBank; +}; -} // namespace NullISA } // namespace gem5 -#endif // __ARCH_NULL_VECREGS_HH__ +#endif diff --git a/src/dev/hsa/HSADevice.py b/src/dev/hsa/HSADevice.py index 73d5911922..b22269d5bb 100644 --- a/src/dev/hsa/HSADevice.py +++ b/src/dev/hsa/HSADevice.py @@ -33,10 +33,11 @@ from m5.proxy import * from m5.objects.Device 
import DmaVirtDevice from m5.objects.VegaGPUTLB import VegaPagetableWalker + class HSAPacketProcessor(DmaVirtDevice): - type = 'HSAPacketProcessor' - cxx_header = 'dev/hsa/hsa_packet_processor.hh' - cxx_class = 'gem5::HSAPacketProcessor' + type = "HSAPacketProcessor" + cxx_header = "dev/hsa/hsa_packet_processor.hh" + cxx_class = "gem5::HSAPacketProcessor" pioAddr = Param.Addr("doorbell physical address") numHWQueues = Param.Int("Number of HW queues") @@ -49,5 +50,6 @@ class HSAPacketProcessor(DmaVirtDevice): # See: https://github.com/RadeonOpenCompute/atmi/tree/master/examples/ # runtime/kps pktProcessDelay = Param.Tick(4400000, "Packet processing delay") - walker = Param.VegaPagetableWalker(VegaPagetableWalker(), - "Page table walker") + walker = Param.VegaPagetableWalker( + VegaPagetableWalker(), "Page table walker" + ) diff --git a/src/dev/hsa/HSADriver.py b/src/dev/hsa/HSADriver.py index 021bd607ff..c3e12df4d2 100644 --- a/src/dev/hsa/HSADriver.py +++ b/src/dev/hsa/HSADriver.py @@ -32,9 +32,10 @@ from m5.params import * from m5.proxy import * from m5.objects.Process import EmulatedDriver + class HSADriver(EmulatedDriver): - type = 'HSADriver' + type = "HSADriver" abstract = True - cxx_header = 'dev/hsa/hsa_driver.hh' - cxx_class = 'gem5::HSADriver' - device = Param.HSADevice('HSA device controlled by this driver') + cxx_header = "dev/hsa/hsa_driver.hh" + cxx_class = "gem5::HSADriver" + device = Param.HSADevice("HSA device controlled by this driver") diff --git a/src/dev/hsa/hsa.h b/src/dev/hsa/hsa.h index 09380d3a96..41117a4ecd 100644 --- a/src/dev/hsa/hsa.h +++ b/src/dev/hsa/hsa.h @@ -2,24 +2,24 @@ // // The University of Illinois/NCSA // Open Source License (NCSA) -// +// // Copyright (c) 2014-2015, Advanced Micro Devices, Inc. All rights reserved. -// +// // Developed by: -// +// // AMD Research and AMD HSA Software Development -// +// // Advanced Micro Devices, Inc. 
-// +// // www.amd.com -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal with the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: -// +// // - Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // - Redistributions in binary form must reproduce the above copyright @@ -29,7 +29,7 @@ // nor the names of its contributors may be used to endorse or promote // products derived from this Software without specific prior written // permission. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL @@ -632,7 +632,7 @@ hsa_status_t HSA_API hsa_system_major_extension_supported( uint16_t version_major, uint16_t *version_minor, bool* result); - + /** * @deprecated @@ -711,7 +711,7 @@ hsa_status_t HSA_API hsa_system_get_major_extension_table( uint16_t extension, uint16_t version_major, size_t table_length, - void *table); + void *table); /** * @brief Struct containing an opaque handle to an agent, a device that participates in @@ -1291,7 +1291,7 @@ hsa_status_t HSA_API hsa_agent_major_extension_supported( uint16_t version_major, uint16_t *version_minor, bool* result); - + /** @} */ diff --git a/src/dev/hsa/hsa_packet_processor.cc b/src/dev/hsa/hsa_packet_processor.cc index 1236256f71..d0afcf816f 100644 --- a/src/dev/hsa/hsa_packet_processor.cc +++ b/src/dev/hsa/hsa_packet_processor.cc @@ -608,14 +608,18 @@ void AQLRingBuffer::setRdIdx(uint64_t value) { _rdIdx = value; +} - // Mark entries below the previous doorbell value as complete. This will - // cause the next call to freeEntry on the queue to increment the read - // index to the next value which will be written to the doorbell. 
- for (int i = 0; i <= value; ++i) { - _aqlComplete[i] = true; - DPRINTF(HSAPacketProcessor, "Marking _aqlComplete[%d] true\n", i); - } +void +AQLRingBuffer::setWrIdx(uint64_t value) +{ + _wrIdx = value; +} + +void +AQLRingBuffer::setDispIdx(uint64_t value) +{ + _dispIdx = value; } bool diff --git a/src/dev/hsa/hsa_packet_processor.hh b/src/dev/hsa/hsa_packet_processor.hh index 65d1b44a25..b72092538d 100644 --- a/src/dev/hsa/hsa_packet_processor.hh +++ b/src/dev/hsa/hsa_packet_processor.hh @@ -235,6 +235,8 @@ class AQLRingBuffer void incDispIdx(uint64_t value) { _dispIdx += value; } uint64_t compltnPending() { return (_dispIdx - _rdIdx); } void setRdIdx(uint64_t value); + void setWrIdx(uint64_t value); + void setDispIdx(uint64_t value); }; struct QCntxt diff --git a/src/dev/hsa/hw_scheduler.cc b/src/dev/hsa/hw_scheduler.cc index a0f1e87a25..5dfe19110f 100644 --- a/src/dev/hsa/hw_scheduler.cc +++ b/src/dev/hsa/hw_scheduler.cc @@ -116,6 +116,8 @@ HWScheduler::registerNewQueue(uint64_t hostReadIndexPointer, new AQLRingBuffer(NUM_DMA_BUFS, hsaPP->name()); if (rd_idx > 0) { aql_buf->setRdIdx(rd_idx); + aql_buf->setWrIdx(rd_idx); + aql_buf->setDispIdx(rd_idx); } DPRINTF(HSAPacketProcessor, "Setting read index for %#lx to %ld\n", offset, rd_idx); @@ -340,8 +342,9 @@ HWScheduler::write(Addr db_addr, uint64_t doorbell_reg) // processor gets commands from host, the correct entry is read after // remapping. 
activeList[al_idx].qDesc->readIndex = doorbell_reg - 1; - DPRINTF(HSAPacketProcessor, "queue %d qDesc->writeIndex %d\n", - al_idx, activeList[al_idx].qDesc->writeIndex); + DPRINTF(HSAPacketProcessor, "q %d readIndex %d writeIndex %d\n", + al_idx, activeList[al_idx].qDesc->readIndex, + activeList[al_idx].qDesc->writeIndex); // If this queue is mapped, then start DMA to fetch the // AQL packet if (regdListMap.find(al_idx) != regdListMap.end()) { @@ -354,6 +357,7 @@ HWScheduler::unregisterQueue(uint64_t queue_id, int doorbellSize) { assert(qidMap.count(queue_id)); Addr db_offset = qidMap[queue_id]; + qidMap.erase(queue_id); auto dbmap_iter = dbMap.find(db_offset); if (dbmap_iter == dbMap.end()) { panic("Destroying a non-existing queue (db_offset %x)", diff --git a/src/dev/i2c/I2C.py b/src/dev/i2c/I2C.py index aa9afead28..1d3de26d29 100644 --- a/src/dev/i2c/I2C.py +++ b/src/dev/i2c/I2C.py @@ -37,15 +37,17 @@ from m5.SimObject import SimObject from m5.params import * from m5.objects.Device import BasicPioDevice + class I2CDevice(SimObject): - type = 'I2CDevice' + type = "I2CDevice" cxx_header = "dev/i2c/device.hh" - cxx_class = 'gem5::I2CDevice' + cxx_class = "gem5::I2CDevice" abstract = True i2c_addr = Param.UInt8("Address of device on i2c bus") + class I2CBus(BasicPioDevice): - type = 'I2CBus' + type = "I2CBus" cxx_header = "dev/i2c/bus.hh" - cxx_class = 'gem5::I2CBus' + cxx_class = "gem5::I2CBus" devices = VectorParam.I2CDevice([], "Devices") diff --git a/src/dev/i2c/SConscript b/src/dev/i2c/SConscript index 808caa9ef9..9c767994d3 100644 --- a/src/dev/i2c/SConscript +++ b/src/dev/i2c/SConscript @@ -37,8 +37,5 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - SimObject('I2C.py', sim_objects=['I2CDevice', 'I2CBus']) Source('bus.cc') diff --git a/src/dev/intpin.cc b/src/dev/intpin.cc index 3268fdbba3..c378337d4a 100644 --- a/src/dev/intpin.cc +++ b/src/dev/intpin.cc @@ -55,11 +55,6 @@ IntSourcePinBase::bind(Port &peer) fatal_if(!sink, "Attempt to bind 
interrupt source pin %s to " "incompatible port %s.", name(), peer.name()); Port::bind(peer); - - if (_state) - raise(); - else - lower(); } void diff --git a/src/dev/intpin.hh b/src/dev/intpin.hh index d5f81a5a31..2704887f35 100644 --- a/src/dev/intpin.hh +++ b/src/dev/intpin.hh @@ -94,11 +94,10 @@ class IntSourcePinBase : public Port { private: IntSinkPinBase *sink = nullptr; - bool _state = false; public: - IntSourcePinBase(const std::string &_name, PortID _id, bool def_state) : - Port(_name, _id), _state(def_state) + IntSourcePinBase(const std::string &_name, PortID _id): + Port(_name, _id) {} void raise() { sink->raise(); } @@ -112,9 +111,8 @@ template class IntSourcePin : public IntSourcePinBase { public: - IntSourcePin(const std::string &_name, PortID _id, Device *owner, - bool def_state=false) : - IntSourcePinBase(_name, _id, def_state) + IntSourcePin(const std::string &_name, PortID _id, Device *owner) : + IntSourcePinBase(_name, _id) {} }; diff --git a/src/dev/lupio/LupioBLK.py b/src/dev/lupio/LupioBLK.py index fc3479f7e2..786c2ccbc5 100644 --- a/src/dev/lupio/LupioBLK.py +++ b/src/dev/lupio/LupioBLK.py @@ -29,15 +29,15 @@ from m5.objects.Device import DmaDevice from m5.params import Param from m5.proxy import Parent + class LupioBLK(DmaDevice): - type = 'LupioBLK' - cxx_class='gem5::LupioBLK' - cxx_header = 'dev/lupio/lupio_blk.hh' + type = "LupioBLK" + cxx_class = "gem5::LupioBLK" + cxx_header = "dev/lupio/lupio_blk.hh" pio_size = Param.Addr(0x1000, "PIO Size") image = Param.DiskImage("Disk image") pio_addr = Param.Addr("Device Address") - latency = Param.Latency('0ns', "DMA Device Latency") - platform = Param.Platform(Parent.any, - "Platform this device is part of.") + latency = Param.Latency("0ns", "DMA Device Latency") + platform = Param.Platform(Parent.any, "Platform this device is part of.") int_id = Param.Int("Interrupt ID for the PIC to use") diff --git a/src/dev/lupio/LupioIPI.py b/src/dev/lupio/LupioIPI.py index f74c5c6c67..12082afc2f 100644 
--- a/src/dev/lupio/LupioIPI.py +++ b/src/dev/lupio/LupioIPI.py @@ -27,10 +27,11 @@ from m5.objects.Device import BasicPioDevice from m5.params import Param + class LupioIPI(BasicPioDevice): - type = 'LupioIPI' - cxx_class='gem5::LupioIPI' - cxx_header = 'dev/lupio/lupio_ipi.hh' + type = "LupioIPI" + cxx_class = "gem5::LupioIPI" + cxx_header = "dev/lupio/lupio_ipi.hh" pio_size = Param.Addr(0x1000, "PIO Size") int_type = Param.Int("Type of interrupt") num_threads = Param.Int("Number of threads") diff --git a/src/dev/lupio/LupioPIC.py b/src/dev/lupio/LupioPIC.py index 992501b080..7afa727e4b 100644 --- a/src/dev/lupio/LupioPIC.py +++ b/src/dev/lupio/LupioPIC.py @@ -27,11 +27,12 @@ from m5.objects.Device import BasicPioDevice from m5.params import Param + class LupioPIC(BasicPioDevice): - type = 'LupioPIC' - cxx_class='gem5::LupioPIC' - cxx_header = 'dev/lupio/lupio_pic.hh' + type = "LupioPIC" + cxx_class = "gem5::LupioPIC" + cxx_header = "dev/lupio/lupio_pic.hh" pio_size = Param.Addr(0x1000, "PIO Size") n_src = Param.Int("Number of interrupt sources") num_threads = Param.Int("Number of threads") diff --git a/src/dev/lupio/LupioRNG.py b/src/dev/lupio/LupioRNG.py index c6d2c1787a..d6b7b8a199 100644 --- a/src/dev/lupio/LupioRNG.py +++ b/src/dev/lupio/LupioRNG.py @@ -28,13 +28,13 @@ from m5.objects.Device import BasicPioDevice from m5.params import Param from m5.proxy import Parent + class LupioRNG(BasicPioDevice): - type = 'LupioRNG' - cxx_class='gem5::LupioRNG' - cxx_header = 'dev/lupio/lupio_rng.hh' + type = "LupioRNG" + cxx_class = "gem5::LupioRNG" + cxx_header = "dev/lupio/lupio_rng.hh" pio_size = Param.Addr(0x1000, "PIO Size") seed = Param.Int(0, "Initial seed for the random number generator") - platform = Param.Platform(Parent.any, - "Platform this device is part of.") + platform = Param.Platform(Parent.any, "Platform this device is part of.") int_id = Param.Int("Interrupt ID to be used by the PIC") diff --git a/src/dev/lupio/LupioRTC.py b/src/dev/lupio/LupioRTC.py 
index 8392e65ea5..a569ebeac2 100644 --- a/src/dev/lupio/LupioRTC.py +++ b/src/dev/lupio/LupioRTC.py @@ -27,9 +27,10 @@ from m5.objects.Device import BasicPioDevice from m5.params import Param + class LupioRTC(BasicPioDevice): - type = 'LupioRTC' - cxx_class='gem5::LupioRTC' - cxx_header = 'dev/lupio/lupio_rtc.hh' - time = Param.Time('01/01/2020', "Initial system time to use") + type = "LupioRTC" + cxx_class = "gem5::LupioRTC" + cxx_header = "dev/lupio/lupio_rtc.hh" + time = Param.Time("01/01/2020", "Initial system time to use") pio_size = Param.Addr(0x1000, "PIO Size") diff --git a/src/dev/lupio/LupioSYS.py b/src/dev/lupio/LupioSYS.py index d300e910df..1575258393 100644 --- a/src/dev/lupio/LupioSYS.py +++ b/src/dev/lupio/LupioSYS.py @@ -27,8 +27,9 @@ from m5.objects.Device import BasicPioDevice from m5.params import Param + class LupioSYS(BasicPioDevice): - type = 'LupioSYS' - cxx_class='gem5::LupioSYS' - cxx_header = 'dev/lupio/lupio_sys.hh' + type = "LupioSYS" + cxx_class = "gem5::LupioSYS" + cxx_header = "dev/lupio/lupio_sys.hh" pio_size = Param.Addr(0x1000, "PIO Size") diff --git a/src/dev/lupio/LupioTMR.py b/src/dev/lupio/LupioTMR.py index 93340daeac..306f273d7e 100644 --- a/src/dev/lupio/LupioTMR.py +++ b/src/dev/lupio/LupioTMR.py @@ -27,10 +27,11 @@ from m5.objects.Device import BasicPioDevice from m5.params import Param + class LupioTMR(BasicPioDevice): - type = 'LupioTMR' - cxx_class='gem5::LupioTMR' - cxx_header = 'dev/lupio/lupio_tmr.hh' + type = "LupioTMR" + cxx_class = "gem5::LupioTMR" + cxx_header = "dev/lupio/lupio_tmr.hh" pio_size = Param.Addr(0x1000, "PIO Size") num_threads = Param.Int("Number of threads in the system.") int_type = Param.Int("Type of interrupt.") diff --git a/src/dev/lupio/LupioTTY.py b/src/dev/lupio/LupioTTY.py index 972311fea7..ff35004481 100644 --- a/src/dev/lupio/LupioTTY.py +++ b/src/dev/lupio/LupioTTY.py @@ -29,12 +29,12 @@ from m5.proxy import Parent from m5.objects.Device import BasicPioDevice + class 
LupioTTY(BasicPioDevice): - type = 'LupioTTY' - cxx_class = 'gem5::LupioTTY' + type = "LupioTTY" + cxx_class = "gem5::LupioTTY" cxx_header = "dev/lupio/lupio_tty.hh" terminal = Param.SerialDevice(Parent.any, "The terminal") pio_size = Param.Addr(0x1000, "PIO size") - platform = Param.Platform(Parent.any, - "Platform this device is part of.") + platform = Param.Platform(Parent.any, "Platform this device is part of.") int_id = Param.Int("Interrupt ID for the PIC to use") diff --git a/src/dev/lupio/lupio_ipi.cc b/src/dev/lupio/lupio_ipi.cc index 1504275330..892f4d4ee9 100644 --- a/src/dev/lupio/lupio_ipi.cc +++ b/src/dev/lupio/lupio_ipi.cc @@ -42,23 +42,49 @@ LupioIPI::LupioIPI(const Params ¶ms) : intType(params.int_type), nThread(params.num_threads) { - word.resize(nThread, 0); + mask.resize(nThread, 0); + pending.resize(nThread, 0); DPRINTF(LupioIPI, "LupioIPI initalized--number of CPUs: %d\n", nThread); } +void +LupioIPI::lupioIPIUpdateIRQ() +{ + for (int cpu = 0; cpu < nThread; cpu++) { + auto tc = system->threads[cpu]; + + if (mask[cpu] & pending[cpu]) { + tc->getCpuPtr()->postInterrupt(tc->threadId(), intType, 0); + } else { + tc->getCpuPtr()->clearInterrupt(tc->threadId(), intType, 0); + } + } +} + uint64_t LupioIPI::lupioIPIRead(uint8_t addr, int size) { - int cpu = addr >> 2; uint32_t r = 0; - // Reading automatically lowers corresponding IRQ - r = word[cpu]; - auto tc = system->threads[cpu]; - tc->getCpuPtr()->clearInterrupt(tc->threadId(), intType, 0); - // Also reset value after reading - word[cpu] = 0; + int cpu = addr / LUPIO_IPI_MAX; + int reg = addr % LUPIO_IPI_MAX; + + switch (reg) { + case LUPIO_IPI_MASK: + r = mask[cpu]; + DPRINTF(LupioIPI, "Read IPI_MASK[%d]: %#x\n", cpu, r); + break; + case LUPIO_IPI_PEND: + r = pending[cpu]; + DPRINTF(LupioIPI, "Read IPI_PEND[%d]: %#x\n", cpu, r); + break; + + default: + panic("Unexpected read to LupioIPI device at address %#llx!", + addr); + break; + } return r; } @@ -66,15 +92,28 @@ 
LupioIPI::lupioIPIRead(uint8_t addr, int size) void LupioIPI::lupioIPIWrite(uint8_t addr, uint64_t val64, int size) { - int cpu = addr >> 2;; uint32_t val = val64; - word[cpu] = val; + int cpu = addr / LUPIO_IPI_MAX; + int reg = addr % LUPIO_IPI_MAX; - // Raise IRQ - auto tc = system->threads[cpu]; + switch (reg) { + case LUPIO_IPI_MASK: + mask[cpu] = val; + DPRINTF(LupioIPI, "Write IPI_MASK[%d]: %#x\n", cpu, mask[cpu]); + lupioIPIUpdateIRQ(); + break; + case LUPIO_IPI_PEND: + pending[cpu] = val; + DPRINTF(LupioIPI, "Write IPI_PEND[%d]: %#x\n", cpu, pending[cpu]); + lupioIPIUpdateIRQ(); + break; - tc->getCpuPtr()->postInterrupt(tc->threadId(), intType, 0); + default: + panic("Unexpected write to LupioIPI device at address %#llx!", + addr); + break; + } } Tick @@ -109,4 +148,3 @@ LupioIPI::write(PacketPtr pkt) return pioDelay; } } // namespace gem5 - diff --git a/src/dev/lupio/lupio_ipi.hh b/src/dev/lupio/lupio_ipi.hh index 7133ff2a78..ab50661ddc 100644 --- a/src/dev/lupio/lupio_ipi.hh +++ b/src/dev/lupio/lupio_ipi.hh @@ -52,10 +52,11 @@ class LupioIPI : public BasicPioDevice // Register map enum { - LUPIO_IPI_WORD, + LUPIO_IPI_MASK = 0x0, + LUPIO_IPI_PEND = 0x4, // Max offset - LUPIO_IPI_MAX, + LUPIO_IPI_MAX = 0x8, }; uint32_t nThread; @@ -63,7 +64,8 @@ class LupioIPI : public BasicPioDevice * Set of registers corresponding to each CPU for sending * inter-processor interrupts */ - std::vector word; + std::vector mask; + std::vector pending; /** * Function to return the value in the word register of the corresponding @@ -75,6 +77,10 @@ class LupioIPI : public BasicPioDevice * raise the IRQ */ void lupioIPIWrite(const uint8_t addr, uint64_t val64, int size); + /** + * Function to post and clear interrupts + **/ + void lupioIPIUpdateIRQ(); public: PARAMS(LupioIPI); @@ -90,4 +96,3 @@ class LupioIPI : public BasicPioDevice } // namespace gem5 #endif // __DEV_LUPIO_LUPIO_IPI_HH - diff --git a/src/dev/lupio/lupio_pic.cc b/src/dev/lupio/lupio_pic.cc index 
d83ffcff85..6bab5e5525 100644 --- a/src/dev/lupio/lupio_pic.cc +++ b/src/dev/lupio/lupio_pic.cc @@ -90,8 +90,8 @@ LupioPIC::lupioPicRead(uint8_t addr) { uint32_t r = 0; - int cpu = addr >> LUPIO_PIC_MAX; - int reg = (addr >> 2) & (LUPIO_PIC_MAX - 1); + int cpu = addr / LUPIO_PIC_MAX; + int reg = addr % LUPIO_PIC_MAX; switch (reg) { case LUPIO_PIC_PRIO: @@ -124,8 +124,8 @@ LupioPIC::lupioPicWrite(uint8_t addr, uint64_t val64) { uint32_t val = val64; - int cpu = addr >> LUPIO_PIC_MAX; - int reg = (addr >> 2) & (LUPIO_PIC_MAX - 1); + int cpu = addr / LUPIO_PIC_MAX; + int reg = addr % LUPIO_PIC_MAX; switch (reg) { case LUPIO_PIC_MASK: diff --git a/src/dev/lupio/lupio_pic.hh b/src/dev/lupio/lupio_pic.hh index ce4815cf2e..c668336941 100644 --- a/src/dev/lupio/lupio_pic.hh +++ b/src/dev/lupio/lupio_pic.hh @@ -61,13 +61,13 @@ class LupioPIC : public BasicPioDevice private: enum { - LUPIO_PIC_PRIO, - LUPIO_PIC_MASK, - LUPIO_PIC_PEND, - LUPIO_PIC_ENAB, + LUPIO_PIC_PRIO = 0x0, + LUPIO_PIC_MASK = 0x4, + LUPIO_PIC_PEND = 0x8, + LUPIO_PIC_ENAB = 0xC, // Max offset - LUPIO_PIC_MAX, + LUPIO_PIC_MAX = 0x10, }; uint32_t pending = 0; diff --git a/src/dev/lupio/lupio_tmr.cc b/src/dev/lupio/lupio_tmr.cc index 63c002130e..58e2d11fa3 100644 --- a/src/dev/lupio/lupio_tmr.cc +++ b/src/dev/lupio/lupio_tmr.cc @@ -123,8 +123,8 @@ LupioTMR::lupioTMRRead(uint8_t addr, int size) { uint32_t r = 0; - size_t cpu = addr >> LUPIO_TMR_MAX; - size_t reg = (addr >> 2) & (LUPIO_TMR_MAX - 1); + size_t cpu = addr / LUPIO_TMR_MAX; + size_t reg = addr % LUPIO_TMR_MAX; switch (reg) { case LUPIO_TMR_TIME: @@ -159,8 +159,8 @@ LupioTMR::lupioTMRWrite(uint8_t addr, uint64_t val64, int size) { uint32_t val = val64; - size_t cpu = addr >> LUPIO_TMR_MAX; - size_t reg = (addr >> 2) & (LUPIO_TMR_MAX - 1); + size_t cpu = addr / LUPIO_TMR_MAX; + size_t reg = addr % LUPIO_TMR_MAX; switch (reg) { case LUPIO_TMR_LOAD: diff --git a/src/dev/lupio/lupio_tmr.hh b/src/dev/lupio/lupio_tmr.hh index 7a69fb7a7e..a966b299e0 100644 
--- a/src/dev/lupio/lupio_tmr.hh +++ b/src/dev/lupio/lupio_tmr.hh @@ -54,13 +54,13 @@ class LupioTMR : public BasicPioDevice // Register map enum { - LUPIO_TMR_TIME, - LUPIO_TMR_LOAD, - LUPIO_TMR_CTRL, - LUPIO_TMR_STAT, + LUPIO_TMR_TIME = 0x0, + LUPIO_TMR_LOAD = 0x4, + LUPIO_TMR_CTRL = 0x8, + LUPIO_TMR_STAT = 0xC, // Max offset - LUPIO_TMR_MAX, + LUPIO_TMR_MAX = 0x10, }; struct LupioTimer diff --git a/src/dev/mips/Malta.py b/src/dev/mips/Malta.py index f461256ed1..9199bd5b3f 100755 --- a/src/dev/mips/Malta.py +++ b/src/dev/mips/Malta.py @@ -32,29 +32,35 @@ from m5.objects.Device import BasicPioDevice from m5.objects.Platform import Platform from m5.objects.Uart import Uart8250 + class MaltaCChip(BasicPioDevice): - type = 'MaltaCChip' + type = "MaltaCChip" cxx_header = "dev/mips/malta_cchip.hh" - cxx_class = 'gem5::MaltaCChip' + cxx_class = "gem5::MaltaCChip" malta = Param.Malta(Parent.any, "Malta") + class MaltaIO(BasicPioDevice): - type = 'MaltaIO' + type = "MaltaIO" cxx_header = "dev/mips/malta_io.hh" - cxx_class = 'gem5::MaltaIO' - time = Param.Time('01/01/2009', - "System time to use (0 for actual time, default is 1/1/06)") - year_is_bcd = Param.Bool(False, - "The RTC should interpret the year as a BCD value") + cxx_class = "gem5::MaltaIO" + time = Param.Time( + "01/01/2009", + "System time to use (0 for actual time, default is 1/1/06)", + ) + year_is_bcd = Param.Bool( + False, "The RTC should interpret the year as a BCD value" + ) malta = Param.Malta(Parent.any, "Malta") - frequency = Param.Frequency('1024Hz', "frequency of interrupts") + frequency = Param.Frequency("1024Hz", "frequency of interrupts") + class Malta(Platform): - type = 'Malta' + type = "Malta" cxx_header = "dev/mips/malta.hh" - cxx_class = 'gem5::Malta' - cchip = MaltaCChip(pio_addr=0x801a0000000) - io = MaltaIO(pio_addr=0x801fc000000) + cxx_class = "gem5::Malta" + cchip = MaltaCChip(pio_addr=0x801A0000000) + io = MaltaIO(pio_addr=0x801FC000000) uart = Uart8250(pio_addr=0xBFD003F8) # Attach 
I/O devices to specified bus object. Can't do this diff --git a/src/dev/net/Ethernet.py b/src/dev/net/Ethernet.py index 1753d48867..97da54c118 100644 --- a/src/dev/net/Ethernet.py +++ b/src/dev/net/Ethernet.py @@ -42,130 +42,149 @@ from m5.params import * from m5.proxy import * from m5.objects.PciDevice import PciDevice, PciIoBar, PciMemBar -ETHERNET_ROLE = 'ETHERNET' +ETHERNET_ROLE = "ETHERNET" Port.compat(ETHERNET_ROLE, ETHERNET_ROLE) + class EtherInt(Port): def __init__(self, desc): super().__init__(ETHERNET_ROLE, desc) + class VectorEtherInt(VectorPort): def __init__(self, desc): super().__init__(ETHERNET_ROLE, desc) + class EtherLink(SimObject): - type = 'EtherLink' + type = "EtherLink" cxx_header = "dev/net/etherlink.hh" - cxx_class = 'gem5::EtherLink' + cxx_class = "gem5::EtherLink" int0 = EtherInt("interface 0") int1 = EtherInt("interface 1") - delay = Param.Latency('0us', "packet transmit delay") - delay_var = Param.Latency('0ns', "packet transmit delay variability") - speed = Param.NetworkBandwidth('1Gbps', "link speed") + delay = Param.Latency("0us", "packet transmit delay") + delay_var = Param.Latency("0ns", "packet transmit delay variability") + speed = Param.NetworkBandwidth("1Gbps", "link speed") dump = Param.EtherDump(NULL, "dump object") + class DistEtherLink(SimObject): - type = 'DistEtherLink' + type = "DistEtherLink" cxx_header = "dev/net/dist_etherlink.hh" - cxx_class = 'gem5::DistEtherLink' + cxx_class = "gem5::DistEtherLink" int0 = EtherInt("interface 0") - delay = Param.Latency('0us', "packet transmit delay") - delay_var = Param.Latency('0ns', "packet transmit delay variability") - speed = Param.NetworkBandwidth('1Gbps', "link speed") + delay = Param.Latency("0us", "packet transmit delay") + delay_var = Param.Latency("0ns", "packet transmit delay variability") + speed = Param.NetworkBandwidth("1Gbps", "link speed") dump = Param.EtherDump(NULL, "dump object") - dist_rank = Param.UInt32('0', "Rank of this gem5 process (dist run)") - dist_size 
= Param.UInt32('1', "Number of gem5 processes (dist run)") - sync_start = Param.Latency('5200000000000t', "first dist sync barrier") - sync_repeat = Param.Latency('10us', "dist sync barrier repeat") - server_name = Param.String('localhost', "Message server name") - server_port = Param.UInt32('2200', "Message server port") + dist_rank = Param.UInt32("0", "Rank of this gem5 process (dist run)") + dist_size = Param.UInt32("1", "Number of gem5 processes (dist run)") + sync_start = Param.Latency("5200000000000t", "first dist sync barrier") + sync_repeat = Param.Latency("10us", "dist sync barrier repeat") + server_name = Param.String("localhost", "Message server name") + server_port = Param.UInt32("2200", "Message server port") is_switch = Param.Bool(False, "true if this a link in etherswitch") dist_sync_on_pseudo_op = Param.Bool(False, "Start sync with pseudo_op") - num_nodes = Param.UInt32('2', "Number of simulate nodes") + num_nodes = Param.UInt32("2", "Number of simulate nodes") + class EtherBus(SimObject): - type = 'EtherBus' + type = "EtherBus" cxx_header = "dev/net/etherbus.hh" - cxx_class = 'gem5::EtherBus' + cxx_class = "gem5::EtherBus" loopback = Param.Bool(True, "send packet back to the sending interface") dump = Param.EtherDump(NULL, "dump object") - speed = Param.NetworkBandwidth('100Mbps', "bus speed in bits per second") + speed = Param.NetworkBandwidth("100Mbps", "bus speed in bits per second") + class EtherSwitch(SimObject): - type = 'EtherSwitch' + type = "EtherSwitch" cxx_header = "dev/net/etherswitch.hh" - cxx_class = 'gem5::EtherSwitch' + cxx_class = "gem5::EtherSwitch" dump = Param.EtherDump(NULL, "dump object") - fabric_speed = Param.NetworkBandwidth('10Gbps', "switch fabric speed in " - "bits per second") + fabric_speed = Param.NetworkBandwidth( + "10Gbps", "switch fabric speed in " "bits per second" + ) interface = VectorEtherInt("Ethernet Interface") - output_buffer_size = Param.MemorySize('1MiB', - "size of output port buffers") - delay = 
Param.Latency('0us', "packet transmit delay") - delay_var = Param.Latency('0ns', "packet transmit delay variability") - time_to_live = Param.Latency('10ms', "time to live of MAC address maping") + output_buffer_size = Param.MemorySize( + "1MiB", "size of output port buffers" + ) + delay = Param.Latency("0us", "packet transmit delay") + delay_var = Param.Latency("0ns", "packet transmit delay variability") + time_to_live = Param.Latency("10ms", "time to live of MAC address maping") + class EtherTapBase(SimObject): - type = 'EtherTapBase' + type = "EtherTapBase" abstract = True cxx_header = "dev/net/ethertap.hh" - cxx_class = 'gem5::EtherTapBase' + cxx_class = "gem5::EtherTapBase" bufsz = Param.Int(10000, "tap buffer size") dump = Param.EtherDump(NULL, "dump object") tap = EtherInt("Ethernet interface to connect to gem5's network") -if buildEnv['HAVE_TUNTAP']: - class EtherTap(EtherTapBase): - type = 'EtherTap' - cxx_header = "dev/net/ethertap.hh" - cxx_class = 'gem5::EtherTap' - tun_clone_device = Param.String('/dev/net/tun', - "Path to the tun clone device node") - tap_device_name = Param.String('gem5-tap', "Tap device name") +if buildEnv["HAVE_TUNTAP"]: + + class EtherTap(EtherTapBase): + type = "EtherTap" + cxx_header = "dev/net/ethertap.hh" + cxx_class = "gem5::EtherTap" + + tun_clone_device = Param.String( + "/dev/net/tun", "Path to the tun clone device node" + ) + tap_device_name = Param.String("gem5-tap", "Tap device name") + class EtherTapStub(EtherTapBase): - type = 'EtherTapStub' + type = "EtherTapStub" cxx_header = "dev/net/ethertap.hh" - cxx_class = 'gem5::EtherTapStub' + cxx_class = "gem5::EtherTapStub" port = Param.UInt16(3500, "Port helper should send packets to") + class EtherDump(SimObject): - type = 'EtherDump' + type = "EtherDump" cxx_header = "dev/net/etherdump.hh" - cxx_class = 'gem5::EtherDump' + cxx_class = "gem5::EtherDump" file = Param.String("dump file") maxlen = Param.Int(96, "max portion of packet data to dump") + class 
EtherDevice(PciDevice): - type = 'EtherDevice' + type = "EtherDevice" abstract = True cxx_header = "dev/net/etherdevice.hh" - cxx_class = 'gem5::EtherDevice' + cxx_class = "gem5::EtherDevice" interface = EtherInt("Ethernet Interface") + class IGbE(EtherDevice): # Base class for two IGbE adapters listed above - type = 'IGbE' + type = "IGbE" cxx_header = "dev/net/i8254xGBe.hh" - cxx_class = 'gem5::IGbE' + cxx_class = "gem5::IGbE" - hardware_address = Param.EthernetAddr(NextEthernetAddr, - "Ethernet Hardware Address") - rx_fifo_size = Param.MemorySize('384KiB', "Size of the rx FIFO") - tx_fifo_size = Param.MemorySize('384KiB', "Size of the tx FIFO") - rx_desc_cache_size = Param.Int(64, - "Number of enteries in the rx descriptor cache") - tx_desc_cache_size = Param.Int(64, - "Number of enteries in the rx descriptor cache") + hardware_address = Param.EthernetAddr( + NextEthernetAddr, "Ethernet Hardware Address" + ) + rx_fifo_size = Param.MemorySize("384KiB", "Size of the rx FIFO") + tx_fifo_size = Param.MemorySize("384KiB", "Size of the tx FIFO") + rx_desc_cache_size = Param.Int( + 64, "Number of enteries in the rx descriptor cache" + ) + tx_desc_cache_size = Param.Int( + 64, "Number of enteries in the rx descriptor cache" + ) VendorID = 0x8086 SubsystemID = 0x1008 SubsystemVendorID = 0x8086 @@ -173,20 +192,21 @@ class IGbE(EtherDevice): SubClassCode = 0x00 ClassCode = 0x02 ProgIF = 0x00 - BAR0 = PciMemBar(size='128KiB') + BAR0 = PciMemBar(size="128KiB") MaximumLatency = 0x00 - MinimumGrant = 0xff - InterruptLine = 0x1e + MinimumGrant = 0xFF + InterruptLine = 0x1E InterruptPin = 0x01 - wb_delay = Param.Latency('10ns', "delay before desc writeback occurs") - fetch_delay = Param.Latency('10ns', "delay before desc fetch occurs") - fetch_comp_delay = Param.Latency('10ns', "delay after desc fetch occurs") - wb_comp_delay = Param.Latency('10ns', "delay after desc wb occurs") - tx_read_delay = Param.Latency('0ns', "delay after tx dma read") - rx_write_delay = 
Param.Latency('0ns', "delay after rx dma read") + wb_delay = Param.Latency("10ns", "delay before desc writeback occurs") + fetch_delay = Param.Latency("10ns", "delay before desc fetch occurs") + fetch_comp_delay = Param.Latency("10ns", "delay after desc fetch occurs") + wb_comp_delay = Param.Latency("10ns", "delay after desc wb occurs") + tx_read_delay = Param.Latency("0ns", "delay after tx dma read") + rx_write_delay = Param.Latency("0ns", "delay after rx dma read") phy_pid = Param.UInt16("Phy PID that corresponds to device ID") phy_epid = Param.UInt16("Phy EPID that corresponds to device ID") + class IGbE_e1000(IGbE): # Older Intel 8254x based gigabit ethernet adapter # Uses Intel e1000 driver @@ -194,6 +214,7 @@ class IGbE_e1000(IGbE): phy_pid = 0x02A8 phy_epid = 0x0380 + class IGbE_igb(IGbE): # Newer Intel 8257x based gigabit ethernet adapter # Uses Intel igb driver and in theory supports packet splitting and LRO @@ -201,35 +222,38 @@ class IGbE_igb(IGbE): phy_pid = 0x0141 phy_epid = 0x0CC0 + class EtherDevBase(EtherDevice): - type = 'EtherDevBase' + type = "EtherDevBase" abstract = True cxx_header = "dev/net/etherdevice.hh" - cxx_class = 'gem5::EtherDevBase' + cxx_class = "gem5::EtherDevBase" - hardware_address = Param.EthernetAddr(NextEthernetAddr, - "Ethernet Hardware Address") + hardware_address = Param.EthernetAddr( + NextEthernetAddr, "Ethernet Hardware Address" + ) - dma_read_delay = Param.Latency('0us', "fixed delay for dma reads") - dma_read_factor = Param.Latency('0us', "multiplier for dma reads") - dma_write_delay = Param.Latency('0us', "fixed delay for dma writes") - dma_write_factor = Param.Latency('0us', "multiplier for dma writes") + dma_read_delay = Param.Latency("0us", "fixed delay for dma reads") + dma_read_factor = Param.Latency("0us", "multiplier for dma reads") + dma_write_delay = Param.Latency("0us", "fixed delay for dma writes") + dma_write_factor = Param.Latency("0us", "multiplier for dma writes") - rx_delay = Param.Latency('1us', 
"Receive Delay") - tx_delay = Param.Latency('1us', "Transmit Delay") - rx_fifo_size = Param.MemorySize('512KiB', "max size of rx fifo") - tx_fifo_size = Param.MemorySize('512KiB', "max size of tx fifo") + rx_delay = Param.Latency("1us", "Receive Delay") + tx_delay = Param.Latency("1us", "Transmit Delay") + rx_fifo_size = Param.MemorySize("512KiB", "max size of rx fifo") + tx_fifo_size = Param.MemorySize("512KiB", "max size of tx fifo") rx_filter = Param.Bool(True, "Enable Receive Filter") - intr_delay = Param.Latency('10us', "Interrupt propagation delay") + intr_delay = Param.Latency("10us", "Interrupt propagation delay") rx_thread = Param.Bool(False, "dedicated kernel thread for transmit") tx_thread = Param.Bool(False, "dedicated kernel threads for receive") rss = Param.Bool(False, "Receive Side Scaling") + class NSGigE(EtherDevBase): - type = 'NSGigE' + type = "NSGigE" cxx_header = "dev/net/ns_gige.hh" - cxx_class = 'gem5::NSGigE' + cxx_class = "gem5::NSGigE" dma_data_free = Param.Bool(False, "DMA of Data is free") dma_desc_free = Param.Bool(False, "DMA of Descriptors is free") @@ -241,30 +265,30 @@ class NSGigE(EtherDevBase): SubClassCode = 0x00 ClassCode = 0x02 ProgIF = 0x00 - BARs = (PciIoBar(size='256B'), PciMemBar(size='4KiB')) + BARs = (PciIoBar(size="256B"), PciMemBar(size="4KiB")) MaximumLatency = 0x34 - MinimumGrant = 0xb0 - InterruptLine = 0x1e + MinimumGrant = 0xB0 + InterruptLine = 0x1E InterruptPin = 0x01 - class Sinic(EtherDevBase): - type = 'Sinic' - cxx_class = 'gem5::sinic::Device' + type = "Sinic" + cxx_class = "gem5::sinic::Device" cxx_header = "dev/net/sinic.hh" - rx_max_copy = Param.MemorySize('1514B', "rx max copy") - tx_max_copy = Param.MemorySize('16KiB', "tx max copy") + rx_max_copy = Param.MemorySize("1514B", "rx max copy") + tx_max_copy = Param.MemorySize("16KiB", "tx max copy") rx_max_intr = Param.UInt32(10, "max rx packets per interrupt") - rx_fifo_threshold = Param.MemorySize('384KiB', "rx fifo high threshold") - rx_fifo_low_mark = 
Param.MemorySize('128KiB', "rx fifo low threshold") - tx_fifo_high_mark = Param.MemorySize('384KiB', "tx fifo high threshold") - tx_fifo_threshold = Param.MemorySize('128KiB', "tx fifo low threshold") + rx_fifo_threshold = Param.MemorySize("384KiB", "rx fifo high threshold") + rx_fifo_low_mark = Param.MemorySize("128KiB", "rx fifo low threshold") + tx_fifo_high_mark = Param.MemorySize("384KiB", "tx fifo high threshold") + tx_fifo_threshold = Param.MemorySize("128KiB", "tx fifo low threshold") virtual_count = Param.UInt32(1, "Virtualized SINIC") zero_copy_size = Param.UInt32(64, "Bytes to copy if below threshold") - zero_copy_threshold = Param.UInt32(256, - "Only zero copy above this threshold") + zero_copy_threshold = Param.UInt32( + 256, "Only zero copy above this threshold" + ) zero_copy = Param.Bool(False, "Zero copy receive") delay_copy = Param.Bool(False, "Delayed copy transmit") virtual_addr = Param.Bool(False, "Virtual addressing") @@ -275,8 +299,8 @@ class Sinic(EtherDevBase): SubClassCode = 0x00 ClassCode = 0x02 ProgIF = 0x00 - BARs = PciMemBar(size='64KiB') + BARs = PciMemBar(size="64KiB") MaximumLatency = 0x34 - MinimumGrant = 0xb0 - InterruptLine = 0x1e + MinimumGrant = 0xB0 + InterruptLine = 0x1E InterruptPin = 0x01 diff --git a/src/dev/net/dist_iface.cc b/src/dev/net/dist_iface.cc index d0cfd88cce..824139bc31 100644 --- a/src/dev/net/dist_iface.cc +++ b/src/dev/net/dist_iface.cc @@ -868,12 +868,10 @@ DistIface::toggleSync(ThreadContext *tc) // stop point. Suspend execution of all local thread contexts. // Dist-gem5 will reactivate all thread contexts when everyone has // reached the sync stop point. -#if !IS_NULL_ISA for (auto *tc: primary->sys->threads) { if (tc->status() == ThreadContext::Active) tc->quiesce(); } -#endif } else { inform("Request toggling syncronization on\n"); primary->syncEvent->start(); @@ -882,12 +880,10 @@ DistIface::toggleSync(ThreadContext *tc) // nodes to prevent causality errors. 
We can also schedule CPU // activation here, since we know exactly when the next sync will // occur. -#if !IS_NULL_ISA for (auto *tc: primary->sys->threads) { if (tc->status() == ThreadContext::Active) tc->quiesceTick(primary->syncEvent->when() + 1); } -#endif } } diff --git a/src/dev/pci/CopyEngine.py b/src/dev/pci/CopyEngine.py index a1888af0a8..821f7c5cf0 100644 --- a/src/dev/pci/CopyEngine.py +++ b/src/dev/pci/CopyEngine.py @@ -30,14 +30,15 @@ from m5.proxy import * from m5.objects.PciDevice import PciDevice, PciMemBar + class CopyEngine(PciDevice): - type = 'CopyEngine' + type = "CopyEngine" cxx_header = "dev/pci/copy_engine.hh" - cxx_class = 'gem5::CopyEngine' + cxx_class = "gem5::CopyEngine" dma = VectorRequestPort("Copy engine DMA port") VendorID = 0x8086 - DeviceID = 0x1a38 - Revision = 0xA2 # CM2 stepping (newest listed) + DeviceID = 0x1A38 + Revision = 0xA2 # CM2 stepping (newest listed) SubsystemID = 0 SubsystemVendorID = 0 Status = 0x0000 @@ -45,20 +46,22 @@ class CopyEngine(PciDevice): ClassCode = 0x80 ProgIF = 0x00 MaximumLatency = 0x00 - MinimumGrant = 0xff + MinimumGrant = 0xFF InterruptLine = 0x20 InterruptPin = 0x01 - BAR0 = PciMemBar(size='1KiB') + BAR0 = PciMemBar(size="1KiB") ChanCnt = Param.UInt8(4, "Number of DMA channels that exist on device") - XferCap = Param.MemorySize('4KiB', - "Number of bits of transfer size that are supported") - - latBeforeBegin = Param.Latency('20ns', - "Latency after a DMA command is seen before it's proccessed") - latAfterCompletion = Param.Latency('20ns', - "Latency after a DMA command is complete before " - "it's reported as such") - + XferCap = Param.MemorySize( + "4KiB", "Number of bits of transfer size that are supported" + ) + latBeforeBegin = Param.Latency( + "20ns", "Latency after a DMA command is seen before it's proccessed" + ) + latAfterCompletion = Param.Latency( + "20ns", + "Latency after a DMA command is complete before " + "it's reported as such", + ) diff --git a/src/dev/pci/PciDevice.py 
b/src/dev/pci/PciDevice.py index cc794d1640..8466101287 100644 --- a/src/dev/pci/PciDevice.py +++ b/src/dev/pci/PciDevice.py @@ -42,50 +42,57 @@ from m5.proxy import * from m5.objects.Device import DmaDevice from m5.objects.PciHost import PciHost + class PciBar(SimObject): - type = 'PciBar' - cxx_class = 'gem5::PciBar' + type = "PciBar" + cxx_class = "gem5::PciBar" cxx_header = "dev/pci/device.hh" abstract = True + class PciBarNone(PciBar): - type = 'PciBarNone' - cxx_class = 'gem5::PciBarNone' + type = "PciBarNone" + cxx_class = "gem5::PciBarNone" cxx_header = "dev/pci/device.hh" + class PciIoBar(PciBar): - type = 'PciIoBar' - cxx_class = 'gem5::PciIoBar' + type = "PciIoBar" + cxx_class = "gem5::PciIoBar" cxx_header = "dev/pci/device.hh" size = Param.MemorySize32("IO region size") + class PciLegacyIoBar(PciIoBar): - type = 'PciLegacyIoBar' - cxx_class = 'gem5::PciLegacyIoBar' + type = "PciLegacyIoBar" + cxx_class = "gem5::PciLegacyIoBar" cxx_header = "dev/pci/device.hh" addr = Param.UInt32("Legacy IO address") + # To set up a 64 bit memory BAR, put a PciMemUpperBar immediately after # a PciMemBar. The pair will take up the right number of BARs, and will be # recognized by the device and turned into a 64 bit BAR when the config is # consumed. 
class PciMemBar(PciBar): - type = 'PciMemBar' - cxx_class = 'gem5::PciMemBar' + type = "PciMemBar" + cxx_class = "gem5::PciMemBar" cxx_header = "dev/pci/device.hh" size = Param.MemorySize("Memory region size") + class PciMemUpperBar(PciBar): - type = 'PciMemUpperBar' - cxx_class = 'gem5::PciMemUpperBar' + type = "PciMemUpperBar" + cxx_class = "gem5::PciMemUpperBar" cxx_header = "dev/pci/device.hh" + class PciDevice(DmaDevice): - type = 'PciDevice' - cxx_class = 'gem5::PciDevice' + type = "PciDevice" + cxx_class = "gem5::PciDevice" cxx_header = "dev/pci/device.hh" abstract = True @@ -94,8 +101,8 @@ class PciDevice(DmaDevice): pci_dev = Param.Int("PCI device number") pci_func = Param.Int("PCI function code") - pio_latency = Param.Latency('30ns', "Programmed IO latency") - config_latency = Param.Latency('20ns', "Config read or write latency") + pio_latency = Param.Latency("30ns", "Programmed IO latency") + config_latency = Param.Latency("20ns", "Config read or write latency") VendorID = Param.UInt16("Vendor ID") DeviceID = Param.UInt16("Device ID") @@ -110,12 +117,12 @@ class PciDevice(DmaDevice): HeaderType = Param.UInt8(0, "PCI Header Type") BIST = Param.UInt8(0, "Built In Self Test") - BAR0 = Param.PciBar(PciBarNone(), "Base address register 0"); - BAR1 = Param.PciBar(PciBarNone(), "Base address register 1"); - BAR2 = Param.PciBar(PciBarNone(), "Base address register 2"); - BAR3 = Param.PciBar(PciBarNone(), "Base address register 3"); - BAR4 = Param.PciBar(PciBarNone(), "Base address register 4"); - BAR5 = Param.PciBar(PciBarNone(), "Base address register 5"); + BAR0 = Param.PciBar(PciBarNone(), "Base address register 0") + BAR1 = Param.PciBar(PciBarNone(), "Base address register 1") + BAR2 = Param.PciBar(PciBarNone(), "Base address register 2") + BAR3 = Param.PciBar(PciBarNone(), "Base address register 3") + BAR4 = Param.PciBar(PciBarNone(), "Base address register 4") + BAR5 = Param.PciBar(PciBarNone(), "Base address register 5") CardbusCIS = Param.UInt32(0x00, 
"Cardbus Card Information Structure") SubsystemID = Param.UInt16(0x00, "Subsystem ID") @@ -129,22 +136,27 @@ class PciDevice(DmaDevice): # Capabilities List structures for PCIe devices # PMCAP - PCI Power Management Capability - PMCAPBaseOffset = \ - Param.UInt8(0x00, "Base offset of PMCAP in PCI Config space") - PMCAPNextCapability = \ - Param.UInt8(0x00, "Pointer to next capability block") - PMCAPCapId = \ - Param.UInt8(0x00, "Specifies this is the Power Management capability") - PMCAPCapabilities = \ - Param.UInt16(0x0000, "PCI Power Management Capabilities Register") - PMCAPCtrlStatus = \ - Param.UInt16(0x0000, "PCI Power Management Control and Status") + PMCAPBaseOffset = Param.UInt8( + 0x00, "Base offset of PMCAP in PCI Config space" + ) + PMCAPNextCapability = Param.UInt8(0x00, "Pointer to next capability block") + PMCAPCapId = Param.UInt8( + 0x00, "Specifies this is the Power Management capability" + ) + PMCAPCapabilities = Param.UInt16( + 0x0000, "PCI Power Management Capabilities Register" + ) + PMCAPCtrlStatus = Param.UInt16( + 0x0000, "PCI Power Management Control and Status" + ) # MSICAP - Message Signaled Interrupt Capability - MSICAPBaseOffset = \ - Param.UInt8(0x00, "Base offset of MSICAP in PCI Config space") - MSICAPNextCapability = \ - Param.UInt8(0x00, "Pointer to next capability block") + MSICAPBaseOffset = Param.UInt8( + 0x00, "Base offset of MSICAP in PCI Config space" + ) + MSICAPNextCapability = Param.UInt8( + 0x00, "Pointer to next capability block" + ) MSICAPCapId = Param.UInt8(0x00, "Specifies this is the MSI Capability") MSICAPMsgCtrl = Param.UInt16(0x0000, "MSI Message Control") MSICAPMsgAddr = Param.UInt32(0x00000000, "MSI Message Address") @@ -154,19 +166,23 @@ class PciDevice(DmaDevice): MSICAPPendingBits = Param.UInt32(0x00000000, "MSI Pending Bits") # MSIXCAP - MSI-X Capability - MSIXCAPBaseOffset = \ - Param.UInt8(0x00, "Base offset of MSIXCAP in PCI Config space") - MSIXCAPNextCapability = \ - Param.UInt8(0x00, "Pointer to next 
capability block") + MSIXCAPBaseOffset = Param.UInt8( + 0x00, "Base offset of MSIXCAP in PCI Config space" + ) + MSIXCAPNextCapability = Param.UInt8( + 0x00, "Pointer to next capability block" + ) MSIXCAPCapId = Param.UInt8(0x00, "Specifices this the MSI-X Capability") MSIXMsgCtrl = Param.UInt16(0x0000, "MSI-X Message Control") - MSIXTableOffset = \ - Param.UInt32(0x00000000, "MSI-X Table Offset and Table BIR") + MSIXTableOffset = Param.UInt32( + 0x00000000, "MSI-X Table Offset and Table BIR" + ) MSIXPbaOffset = Param.UInt32(0x00000000, "MSI-X PBA Offset and PBA BIR") # PXCAP - PCI Express Capability - PXCAPBaseOffset = \ - Param.UInt8(0x00, "Base offset of PXCAP in PCI Config space") + PXCAPBaseOffset = Param.UInt8( + 0x00, "Base offset of PXCAP in PCI Config space" + ) PXCAPNextCapability = Param.UInt8(0x00, "Pointer to next capability block") PXCAPCapId = Param.UInt8(0x00, "Specifies this is the PCIe Capability") PXCAPCapabilities = Param.UInt16(0x0000, "PCIe Capabilities") diff --git a/src/dev/pci/PciHost.py b/src/dev/pci/PciHost.py index 36a88564e6..ef8a5ab1f5 100644 --- a/src/dev/pci/PciHost.py +++ b/src/dev/pci/PciHost.py @@ -39,55 +39,77 @@ from m5.proxy import * from m5.objects.Device import PioDevice from m5.objects.Platform import Platform + class PciHost(PioDevice): - type = 'PciHost' - cxx_class = 'gem5::PciHost' + type = "PciHost" + cxx_class = "gem5::PciHost" cxx_header = "dev/pci/host.hh" abstract = True + class GenericPciHost(PciHost): - type = 'GenericPciHost' - cxx_class = 'gem5::GenericPciHost' + type = "GenericPciHost" + cxx_class = "gem5::GenericPciHost" cxx_header = "dev/pci/host.hh" platform = Param.Platform(Parent.any, "Platform to use for interrupts") conf_base = Param.Addr("Config space base address") conf_size = Param.Addr("Config space base address") - conf_device_bits = Param.UInt8(8, "Number of bits used to as an " - "offset a devices address space") + conf_device_bits = Param.UInt8( + 8, "Number of bits used to as an " "offset a 
devices address space" + ) pci_pio_base = Param.Addr(0, "Base address for PCI IO accesses") pci_mem_base = Param.Addr(0, "Base address for PCI memory accesses") pci_dma_base = Param.Addr(0, "Base address for DMA memory accesses") - def pciFdtAddr(self, bus=0, device=0, function=0, register=0, space=0, - aliased=0, prefetchable=0, relocatable=0, addr=0): + def pciFdtAddr( + self, + bus=0, + device=0, + function=0, + register=0, + space=0, + aliased=0, + prefetchable=0, + relocatable=0, + addr=0, + ): - busf = bus & 0xff - devicef = device & 0x1f + busf = bus & 0xFF + devicef = device & 0x1F functionf = function & 0x7 - registerf = register & 0xff + registerf = register & 0xFF spacef = space & 0x3 aliasedf = aliased & 0x1 prefetchablef = prefetchable & 0x1 relocatablef = relocatable & 0x1 - if busf != bus or \ - devicef != device or \ - functionf != function or \ - registerf != register or \ - spacef != space or \ - aliasedf != aliased or \ - prefetchablef != prefetchable or \ - relocatablef != relocatable: + if ( + busf != bus + or devicef != device + or functionf != function + or registerf != register + or spacef != space + or aliasedf != aliased + or prefetchablef != prefetchable + or relocatablef != relocatable + ): fatal("One of the fields for the PCI address is out of bounds") - address = registerf | (functionf << 8) | (devicef << 11) | \ - (busf << 16) | (spacef << 24) | (aliasedf << 29) | \ - (prefetchablef << 30) | (relocatablef << 31) + address = ( + registerf + | (functionf << 8) + | (devicef << 11) + | (busf << 16) + | (spacef << 24) + | (aliasedf << 29) + | (prefetchablef << 30) + | (relocatablef << 31) + ) - low_addr = addr & 0xffffffff - high_addr = (addr >> 32) & 0xffffffff + low_addr = addr & 0xFFFFFFFF + high_addr = (addr >> 32) & 0xFFFFFFFF return [address, high_addr, low_addr] diff --git a/src/dev/ps2/PS2.py b/src/dev/ps2/PS2.py index b017025ba0..9a0b16495f 100644 --- a/src/dev/ps2/PS2.py +++ b/src/dev/ps2/PS2.py @@ -37,26 +37,30 @@ from 
m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class PS2Device(SimObject): - type = 'PS2Device' + type = "PS2Device" cxx_header = "dev/ps2/device.hh" cxx_class = "gem5::ps2::Device" abstract = True + class PS2Keyboard(PS2Device): - type = 'PS2Keyboard' + type = "PS2Keyboard" cxx_header = "dev/ps2/keyboard.hh" cxx_class = "gem5::ps2::PS2Keyboard" vnc = Param.VncInput(Parent.any, "VNC server providing keyboard input") + class PS2Mouse(PS2Device): - type = 'PS2Mouse' + type = "PS2Mouse" cxx_header = "dev/ps2/mouse.hh" cxx_class = "gem5::ps2::PS2Mouse" + class PS2TouchKit(PS2Device): - type = 'PS2TouchKit' + type = "PS2TouchKit" cxx_header = "dev/ps2/touchkit.hh" cxx_class = "gem5::ps2::TouchKit" diff --git a/src/dev/ps2/SConscript b/src/dev/ps2/SConscript index aa9d1fddaa..ac0f8d937f 100644 --- a/src/dev/ps2/SConscript +++ b/src/dev/ps2/SConscript @@ -37,9 +37,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - SimObject('PS2.py', sim_objects=[ 'PS2Device', 'PS2Keyboard', 'PS2Mouse', 'PS2TouchKit']) Source('device.cc') diff --git a/src/dev/ps2/mouse.hh b/src/dev/ps2/mouse.hh index 3304d950c0..9ec3f68b94 100644 --- a/src/dev/ps2/mouse.hh +++ b/src/dev/ps2/mouse.hh @@ -81,4 +81,3 @@ class PS2Mouse : public Device } // namespace gem5 #endif // __DEV_PS2_MOUSE_hH__ - diff --git a/src/dev/qemu/QemuFwCfg.py b/src/dev/qemu/QemuFwCfg.py index 0851ef0f40..bb237b9bfc 100644 --- a/src/dev/qemu/QemuFwCfg.py +++ b/src/dev/qemu/QemuFwCfg.py @@ -27,69 +27,78 @@ from m5.params import * from m5.objects.SimObject import SimObject from m5.objects.Device import PioDevice + class QemuFwCfgItem(SimObject): - type = 'QemuFwCfgItem' - cxx_class = 'gem5::qemu::FwCfgItemFactoryBase' - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfgItem" + cxx_class = "gem5::qemu::FwCfgItemFactoryBase" + cxx_header = "dev/qemu/fw_cfg.hh" abstract = True # The path this item will be listed under in the firmware config directory. 
- arch_specific = Param.Bool(False, 'if this item is archiecture specific') - index = Param.Unsigned(0, 'Fixed index, or 0 for automatic') - path = Param.String('Path to item in the firmware config directory') + arch_specific = Param.Bool(False, "if this item is archiecture specific") + index = Param.Unsigned(0, "Fixed index, or 0 for automatic") + path = Param.String("Path to item in the firmware config directory") + class QemuFwCfgItemFile(QemuFwCfgItem): - type = 'QemuFwCfgItemFile' - cxx_class = 'gem5::qemu::FwCfgItemFactory' - cxx_template_params = ['class ItemType'] - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfgItemFile" + cxx_class = "gem5::qemu::FwCfgItemFactory" + cxx_template_params = ["class ItemType"] + cxx_header = "dev/qemu/fw_cfg.hh" # The path to the file that will be used to populate this item. - file = Param.String('Path to file to export') + file = Param.String("Path to file to export") + class QemuFwCfgItemString(QemuFwCfgItem): - type = 'QemuFwCfgItemString' - cxx_class = 'gem5::qemu::FwCfgItemFactory' - cxx_template_params = ['class ItemType'] - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfgItemString" + cxx_class = "gem5::qemu::FwCfgItemFactory" + cxx_template_params = ["class ItemType"] + cxx_header = "dev/qemu/fw_cfg.hh" # The string which directly populates this item. 
- string = Param.String('String to export') + string = Param.String("String to export") + class QemuFwCfgItemBytes(QemuFwCfgItem): - type = 'QemuFwCfgItemBytes' - cxx_class = 'gem5::qemu::FwCfgItemFactory' - cxx_template_params = ['class ItemType'] - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfgItemBytes" + cxx_class = "gem5::qemu::FwCfgItemFactory" + cxx_template_params = ["class ItemType"] + cxx_header = "dev/qemu/fw_cfg.hh" + + data = VectorParam.UInt8("Bytes to export") - data = VectorParam.UInt8('Bytes to export') class QemuFwCfg(PioDevice): - type = 'QemuFwCfg' - cxx_class = 'gem5::qemu::FwCfg' - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfg" + cxx_class = "gem5::qemu::FwCfg" + cxx_header = "dev/qemu/fw_cfg.hh" abstract = True - items = VectorParam.QemuFwCfgItem([], - 'Items exported by the firmware config device') + items = VectorParam.QemuFwCfgItem( + [], "Items exported by the firmware config device" + ) + class QemuFwCfgIo(QemuFwCfg): - type = 'QemuFwCfgIo' - cxx_class = 'gem5::qemu::FwCfgIo' - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfgIo" + cxx_class = "gem5::qemu::FwCfgIo" + cxx_header = "dev/qemu/fw_cfg.hh" # The selector register is 16 bits wide, and little endian. The data # register must be one port ahead of the selector. - selector_addr = Param.Addr('IO port for the selector register') + selector_addr = Param.Addr("IO port for the selector register") + class QemuFwCfgMmio(QemuFwCfg): - type = 'QemuFwCfgMmio' - cxx_class = 'gem5::qemu::FwCfgMmio' - cxx_header = 'dev/qemu/fw_cfg.hh' + type = "QemuFwCfgMmio" + cxx_class = "gem5::qemu::FwCfgMmio" + cxx_header = "dev/qemu/fw_cfg.hh" # The selector register is 16 bits wide, and big endian. - selector_addr = Param.Addr('Memory address for the selector register') + selector_addr = Param.Addr("Memory address for the selector register") # The data register is 8, 16, 32 or 64 bits wide. 
- data_addr_range = \ - Param.AddrRange('Memory address range for the data register') + data_addr_range = Param.AddrRange( + "Memory address range for the data register" + ) diff --git a/src/dev/riscv/Clint.py b/src/dev/riscv/Clint.py index 70ec0ed4cf..afa3caa7b4 100644 --- a/src/dev/riscv/Clint.py +++ b/src/dev/riscv/Clint.py @@ -39,6 +39,7 @@ from m5.params import * from m5.proxy import * from m5.util.fdthelper import * + class Clint(BasicPioDevice): """ This implementation of CLINT is based on @@ -47,16 +48,18 @@ class Clint(BasicPioDevice): 0e07-48d0-9602-e437d5367806_sifive_U54MC_rtl_ full_20G1.03.00_manual.pdf """ - type = 'Clint' - cxx_header = 'dev/riscv/clint.hh' - cxx_class = 'gem5::Clint' - int_pin = IntSinkPin('Pin to receive RTC signal') + + type = "Clint" + cxx_header = "dev/riscv/clint.hh" + cxx_class = "gem5::Clint" + int_pin = IntSinkPin("Pin to receive RTC signal") pio_size = Param.Addr(0xC000, "PIO Size") num_threads = Param.Int("Number of threads in the system.") def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, "clint", self.pio_addr, - self.pio_size) + node = self.generateBasicPioDeviceNode( + state, "clint", self.pio_addr, self.pio_size + ) cpus = self.system.unproxy(self).cpu int_extended = list() diff --git a/src/dev/riscv/HiFive.py b/src/dev/riscv/HiFive.py index 08ef943579..466968602b 100755 --- a/src/dev/riscv/HiFive.py +++ b/src/dev/riscv/HiFive.py @@ -47,18 +47,22 @@ from m5.util.fdthelper import * from m5.objects.PciHost import GenericPciHost + class GenericRiscvPciHost(GenericPciHost): - type = 'GenericRiscvPciHost' + type = "GenericRiscvPciHost" cxx_header = "dev/riscv/pci_host.hh" - cxx_class = 'gem5::GenericRiscvPciHost' - int_base = Param.Int(0x10, - "Base number used as interrupt line and PLIC source.") - int_count = Param.Unsigned(4, - "Maximum number of interrupts used by this host") + cxx_class = "gem5::GenericRiscvPciHost" + int_base = Param.Int( + 0x10, "Base number used as interrupt line 
and PLIC source." + ) + int_count = Param.Unsigned( + 4, "Maximum number of interrupts used by this host" + ) # This python parameter can be used in configuration scripts to turn # on/off the fdt dma-coherent flag when doing dtb autogeneration _dma_coherent = True + class HiFive(Platform): """HiFive Platform @@ -110,38 +114,39 @@ class HiFive(Platform): each CPU (which allows them to differ). See fs_linux.py for setup example. """ - type = 'HiFive' + + type = "HiFive" cxx_header = "dev/riscv/hifive.hh" - cxx_class = 'gem5::HiFive' + cxx_class = "gem5::HiFive" # CLINT clint = Param.Clint(Clint(pio_addr=0x2000000), "CLINT") # PLIC - plic = Param.Plic(Plic(pio_addr=0xc000000), "PLIC") + plic = Param.Plic(Plic(pio_addr=0xC000000), "PLIC") - #PCI - pci_host = GenericRiscvPciHost(conf_base=0x30000000, conf_size='256MB', - conf_device_bits=12, pci_pio_base=0x2f000000, pci_mem_base=0x40000000) + # PCI + pci_host = GenericRiscvPciHost( + conf_base=0x30000000, + conf_size="256MB", + conf_device_bits=12, + pci_pio_base=0x2F000000, + pci_mem_base=0x40000000, + ) # Uart uart = RiscvUart8250(pio_addr=0x10000000) # Int source ID to redirect console interrupts to # Set to 0 if using a pci interrupt for Uart instead - uart_int_id = Param.Int(0xa, "PLIC Uart interrupt ID") + uart_int_id = Param.Int(0xA, "PLIC Uart interrupt ID") terminal = Terminal() def _on_chip_devices(self): - """Returns a list of on-chip peripherals - """ - return [ - self.clint, - self.plic - ] + """Returns a list of on-chip peripherals""" + return [self.clint, self.plic] def _off_chip_devices(self): - """Returns a list of off-chip peripherals - """ + """Returns a list of off-chip peripherals""" devices = [self.uart] if hasattr(self, "disk"): devices.append(self.disk) @@ -151,7 +156,7 @@ class HiFive(Platform): def _on_chip_ranges(self): """Returns a list of on-chip peripherals - address range + address range """ return [ AddrRange(dev.pio_addr, size=dev.pio_size) @@ -160,7 +165,7 @@ class HiFive(Platform): def 
_off_chip_ranges(self): """Returns a list of off-chip peripherals - address range + address range """ return [ AddrRange(dev.pio_addr, size=dev.pio_size) @@ -168,10 +173,11 @@ class HiFive(Platform): ] def attachPlic(self): - """Count number of PLIC interrupt sources - """ - plic_srcs = [self.uart_int_id, self.pci_host.int_base - + self.pci_host.int_count] + """Count number of PLIC interrupt sources""" + plic_srcs = [ + self.uart_int_id, + self.pci_host.int_base + self.pci_host.int_count, + ] for device in self._off_chip_devices(): if hasattr(device, "interrupt_id"): plic_srcs.append(device.interrupt_id) @@ -179,21 +185,21 @@ class HiFive(Platform): def attachOnChipIO(self, bus): """Attach on-chip IO devices, needs modification - to support DMA + to support DMA """ for device in self._on_chip_devices(): device.pio = bus.mem_side_ports def attachOffChipIO(self, bus): """Attach off-chip IO devices, needs modification - to support DMA + to support DMA """ for device in self._off_chip_devices(): device.pio = bus.mem_side_ports def setNumCores(self, num_cpu): - """ Sets the PLIC and CLINT to have the right number of threads and - contexts. Assumes that the cores have a single hardware thread. + """Sets the PLIC and CLINT to have the right number of threads and + contexts. Assumes that the cores have a single hardware thread. 
""" self.plic.n_contexts = num_cpu * 2 self.clint.num_threads = num_cpu @@ -217,10 +223,11 @@ class HiFive(Platform): # For generating devicetree _cpu_count = 0 + def annotateCpuDeviceNode(self, cpu, state): - cpu.append(FdtPropertyStrings('mmu-type', 'riscv,sv48')) - cpu.append(FdtPropertyStrings('status', 'okay')) - cpu.append(FdtPropertyStrings('riscv,isa', 'rv64imafdcsu')) + cpu.append(FdtPropertyStrings("mmu-type", "riscv,sv48")) + cpu.append(FdtPropertyStrings("status", "okay")) + cpu.append(FdtPropertyStrings("riscv,isa", "rv64imafdcsu")) cpu.appendCompatible(["riscv"]) int_node = FdtNode("interrupt-controller") diff --git a/src/dev/riscv/LupV.py b/src/dev/riscv/LupV.py index 12273a6dd3..4229661c1c 100644 --- a/src/dev/riscv/LupV.py +++ b/src/dev/riscv/LupV.py @@ -27,9 +27,10 @@ from m5.objects.Platform import Platform from m5.params import Param + class LupV(Platform): - type = 'LupV' + type = "LupV" cxx_header = "dev/riscv/lupv.hh" - cxx_class = 'gem5::LupV' + cxx_class = "gem5::LupV" pic = Param.LupioPIC("PIC") uart_int_id = Param.Int("Interrupt ID to be used if the PLIC is used here") diff --git a/src/dev/riscv/Plic.py b/src/dev/riscv/Plic.py index be0b629a76..33b6940c3f 100644 --- a/src/dev/riscv/Plic.py +++ b/src/dev/riscv/Plic.py @@ -38,6 +38,7 @@ from m5.params import * from m5.proxy import * from m5.util.fdthelper import * + class Plic(BasicPioDevice): """ This implementation of PLIC is based on @@ -46,17 +47,21 @@ class Plic(BasicPioDevice): 0e07-48d0-9602-e437d5367806_sifive_U54MC_rtl_ full_20G1.03.00_manual.pdf """ - type = 'Plic' - cxx_header = 'dev/riscv/plic.hh' - cxx_class = 'gem5::Plic' + + type = "Plic" + cxx_header = "dev/riscv/plic.hh" + cxx_class = "gem5::Plic" pio_size = Param.Addr(0x4000000, "PIO Size") n_src = Param.Int("Number of interrupt sources") - n_contexts = Param.Int("Number of interrupt contexts. Usually the number " - "of threads * 2. One for M mode, one for S mode") + n_contexts = Param.Int( + "Number of interrupt contexts. 
Usually the number " + "of threads * 2. One for M mode, one for S mode" + ) def generateDeviceTree(self, state): - node = self.generateBasicPioDeviceNode(state, "plic", self.pio_addr, - self.pio_size) + node = self.generateBasicPioDeviceNode( + state, "plic", self.pio_addr, self.pio_size + ) int_state = FdtState(addr_cells=0, interrupt_cells=1) node.append(int_state.addrCellsProperty()) @@ -71,7 +76,7 @@ class Plic(BasicPioDevice): for cpu in cpus: phandle = int_state.phandle(cpu) int_extended.append(phandle) - int_extended.append(0xb) + int_extended.append(0xB) int_extended.append(phandle) int_extended.append(0x9) diff --git a/src/dev/riscv/PlicDevice.py b/src/dev/riscv/PlicDevice.py index a124ee7ad6..249ca21546 100644 --- a/src/dev/riscv/PlicDevice.py +++ b/src/dev/riscv/PlicDevice.py @@ -38,10 +38,11 @@ from m5.params import * from m5.proxy import * from m5.util.fdthelper import * + class PlicIntDevice(BasicPioDevice): - type = 'PlicIntDevice' - cxx_header = 'dev/riscv/plic_device.hh' - cxx_class = 'gem5::PlicIntDevice' + type = "PlicIntDevice" + cxx_header = "dev/riscv/plic_device.hh" + cxx_class = "gem5::PlicIntDevice" abstract = True platform = Param.Platform(Parent.any, "Platform") @@ -49,8 +50,9 @@ class PlicIntDevice(BasicPioDevice): interrupt_id = Param.Int("PLIC Interrupt ID") def generatePlicDeviceNode(self, state, name): - node = self.generateBasicPioDeviceNode(state, name, - self.pio_addr, self.pio_size) + node = self.generateBasicPioDeviceNode( + state, name, self.pio_addr, self.pio_size + ) plic = self.platform.unproxy(self).plic diff --git a/src/dev/riscv/RTC.py b/src/dev/riscv/RTC.py index 52bdd4cad2..a6559eaf48 100644 --- a/src/dev/riscv/RTC.py +++ b/src/dev/riscv/RTC.py @@ -38,13 +38,13 @@ from m5.proxy import * from m5.SimObject import SimObject from m5.objects.IntPin import IntSourcePin + class RiscvRTC(SimObject): - type = 'RiscvRTC' - cxx_class='gem5::RiscvRTC' + type = "RiscvRTC" + cxx_class = "gem5::RiscvRTC" cxx_header = 
"dev/riscv/rtc.hh" - time = Param.Time('01/01/2012', - "System time to use") - int_pin = IntSourcePin('Pin to signal RTC interrupts to') + time = Param.Time("01/01/2012", "System time to use") + int_pin = IntSourcePin("Pin to signal RTC interrupts to") # The default 1MHz setting is taken from SiFive's U54MC # core complex. Set to other frequencies if necessary. frequency = Param.Frequency("1MHz", "RTC Frequency") diff --git a/src/dev/riscv/RiscvVirtIOMMIO.py b/src/dev/riscv/RiscvVirtIOMMIO.py index 6bdb9ddf45..17019502fa 100644 --- a/src/dev/riscv/RiscvVirtIOMMIO.py +++ b/src/dev/riscv/RiscvVirtIOMMIO.py @@ -42,10 +42,11 @@ from m5.util.fdthelper import * from m5.objects.PlicDevice import PlicIntDevice from m5.objects.VirtIO import VirtIODummyDevice + class RiscvMmioVirtIO(PlicIntDevice): - type = 'RiscvMmioVirtIO' - cxx_header = 'dev/riscv/vio_mmio.hh' - cxx_class = 'gem5::RiscvISA::MmioVirtIO' + type = "RiscvMmioVirtIO" + cxx_header = "dev/riscv/vio_mmio.hh" + cxx_class = "gem5::RiscvISA::MmioVirtIO" vio = Param.VirtIODeviceBase(VirtIODummyDevice(), "VirtIO device") def generateDeviceTree(self, state): diff --git a/src/dev/riscv/plic_device.cc b/src/dev/riscv/plic_device.cc index 34828cba9d..e5ade53674 100644 --- a/src/dev/riscv/plic_device.cc +++ b/src/dev/riscv/plic_device.cc @@ -40,8 +40,6 @@ namespace gem5 { -using namespace RiscvISA; - PlicIntDevice::PlicIntDevice(const Params ¶ms) : BasicPioDevice(params, params.pio_size), system(params.system), diff --git a/src/dev/riscv/plic_device.hh b/src/dev/riscv/plic_device.hh index a969b909e1..9b2a37997c 100644 --- a/src/dev/riscv/plic_device.hh +++ b/src/dev/riscv/plic_device.hh @@ -46,8 +46,6 @@ namespace gem5 { -using namespace RiscvISA; - class PlicIntDevice : public BasicPioDevice { protected: diff --git a/src/dev/rtcreg.h b/src/dev/rtcreg.h index 93fc1c7996..ea742590b4 100644 --- a/src/dev/rtcreg.h +++ b/src/dev/rtcreg.h @@ -52,4 +52,3 @@ static const int RTC_STAT_REGB = 0x0B; static const int RTC_STAT_REGC = 
0x0C; static const int RTC_STAT_REGD = 0x0D; - diff --git a/src/dev/serial/SConscript b/src/dev/serial/SConscript index a862def056..45b32d2731 100644 --- a/src/dev/serial/SConscript +++ b/src/dev/serial/SConscript @@ -40,9 +40,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - SimObject('Serial.py', sim_objects=['SerialDevice', 'SerialNullDevice']) SimObject('Terminal.py', sim_objects=['Terminal'], enums=['TerminalDump']) SimObject('Uart.py', sim_objects=['Uart', 'SimpleUart', 'Uart8250']) diff --git a/src/dev/serial/Serial.py b/src/dev/serial/Serial.py index 53af8ae070..3711525f47 100644 --- a/src/dev/serial/Serial.py +++ b/src/dev/serial/Serial.py @@ -36,13 +36,15 @@ from m5.params import * from m5.SimObject import SimObject + class SerialDevice(SimObject): - type = 'SerialDevice' + type = "SerialDevice" abstract = True cxx_header = "dev/serial/serial.hh" - cxx_class = 'gem5::SerialDevice' + cxx_class = "gem5::SerialDevice" + class SerialNullDevice(SerialDevice): - type = 'SerialNullDevice' + type = "SerialNullDevice" cxx_header = "dev/serial/serial.hh" - cxx_class = 'gem5::SerialNullDevice' + cxx_class = "gem5::SerialNullDevice" diff --git a/src/dev/serial/Terminal.py b/src/dev/serial/Terminal.py index 76045214b5..c77437906c 100644 --- a/src/dev/serial/Terminal.py +++ b/src/dev/serial/Terminal.py @@ -42,14 +42,17 @@ from m5.proxy import * from m5.objects.Serial import SerialDevice -class TerminalDump(ScopedEnum): vals = [ - "none", "stdoutput", "stderror", "file"] + +class TerminalDump(ScopedEnum): + vals = ["none", "stdoutput", "stderror", "file"] + class Terminal(SerialDevice): - type = 'Terminal' + type = "Terminal" cxx_header = "dev/serial/terminal.hh" - cxx_class = 'gem5::Terminal' + cxx_class = "gem5::Terminal" port = Param.TcpPort(3456, "listen port") number = Param.Int(0, "terminal number") - outfile = Param.TerminalDump("file", - "Selects if and where the terminal is dumping its output") + outfile = Param.TerminalDump( + "file", 
"Selects if and where the terminal is dumping its output" + ) diff --git a/src/dev/serial/Uart.py b/src/dev/serial/Uart.py index aea6fa6931..f3348d6775 100644 --- a/src/dev/serial/Uart.py +++ b/src/dev/serial/Uart.py @@ -44,40 +44,43 @@ from m5.defines import buildEnv from m5.objects.Device import BasicPioDevice from m5.objects.Serial import SerialDevice + class Uart(BasicPioDevice): - type = 'Uart' + type = "Uart" abstract = True cxx_header = "dev/serial/uart.hh" - cxx_class = 'gem5::Uart' + cxx_class = "gem5::Uart" platform = Param.Platform(Parent.any, "Platform this device is part of.") device = Param.SerialDevice(Parent.any, "The terminal") + class SimpleUart(Uart): - type = 'SimpleUart' + type = "SimpleUart" cxx_header = "dev/serial/simple.hh" - cxx_class = 'gem5::SimpleUart' + cxx_class = "gem5::SimpleUart" byte_order = Param.ByteOrder("little", "Device byte order") pio_size = Param.Addr(0x4, "Size of address range") - end_on_eot = Param.Bool(False, "End the simulation when a EOT is "\ - "received on the UART") + end_on_eot = Param.Bool( + False, "End the simulation when a EOT is " "received on the UART" + ) + class Uart8250(Uart): - type = 'Uart8250' + type = "Uart8250" cxx_header = "dev/serial/uart8250.hh" - cxx_class = 'gem5::Uart8250' + cxx_class = "gem5::Uart8250" pio_size = Param.Addr(0x8, "Size of address range") + class RiscvUart8250(Uart8250): def generateDeviceTree(self, state): node = self.generateBasicPioDeviceNode( - state, "uart", self.pio_addr, self.pio_size) + state, "uart", self.pio_addr, self.pio_size + ) platform = self.platform.unproxy(self) plic = platform.plic - node.append( - FdtPropertyWords("interrupts", [platform.uart_int_id])) - node.append( - FdtPropertyWords("clock-frequency", [0x384000])) - node.append( - FdtPropertyWords("interrupt-parent", state.phandle(plic))) + node.append(FdtPropertyWords("interrupts", [platform.uart_int_id])) + node.append(FdtPropertyWords("clock-frequency", [0x384000])) + 
node.append(FdtPropertyWords("interrupt-parent", state.phandle(plic))) node.appendCompatible(["ns8250"]) yield node diff --git a/src/dev/sparc/T1000.py b/src/dev/sparc/T1000.py index f2d87e7103..9e473a395d 100644 --- a/src/dev/sparc/T1000.py +++ b/src/dev/sparc/T1000.py @@ -34,83 +34,121 @@ from m5.objects.Uart import Uart8250 class MmDisk(BasicPioDevice): - type = 'MmDisk' + type = "MmDisk" cxx_header = "dev/sparc/mm_disk.hh" - cxx_class = 'gem5::MmDisk' + cxx_class = "gem5::MmDisk" image = Param.DiskImage("Disk Image") pio_addr = 0x1F40000000 + class DumbTOD(BasicPioDevice): - type = 'DumbTOD' + type = "DumbTOD" cxx_header = "dev/sparc/dtod.hh" - cxx_class = 'gem5::DumbTOD' - time = Param.Time('01/01/2009', "System time to use ('Now' for real time)") - pio_addr = 0xfff0c1fff8 + cxx_class = "gem5::DumbTOD" + time = Param.Time("01/01/2009", "System time to use ('Now' for real time)") + pio_addr = 0xFFF0C1FFF8 + class Iob(PioDevice): - type = 'Iob' + type = "Iob" cxx_header = "dev/sparc/iob.hh" - cxx_class = 'gem5::Iob' + cxx_class = "gem5::Iob" platform = Param.Platform(Parent.any, "Platform this device is part of.") - pio_latency = Param.Latency('1ns', "Programed IO latency") + pio_latency = Param.Latency("1ns", "Programed IO latency") class T1000(Platform): - type = 'T1000' + type = "T1000" cxx_header = "dev/sparc/t1000.hh" - cxx_class = 'gem5::T1000' + cxx_class = "gem5::T1000" fake_clk = IsaFake(pio_addr=0x9600000000, pio_size=0x100000000) - #warn_access="Accessing Clock Unit -- Unimplemented!") + # warn_access="Accessing Clock Unit -- Unimplemented!") - fake_membnks = IsaFake(pio_addr=0x9700000000, pio_size=16384, - ret_data64=0x0000000000000000, update_data=False) - #warn_access="Accessing Memory Banks -- Unimplemented!") + fake_membnks = IsaFake( + pio_addr=0x9700000000, + pio_size=16384, + ret_data64=0x0000000000000000, + update_data=False, + ) + # warn_access="Accessing Memory Banks -- Unimplemented!") fake_jbi = IsaFake(pio_addr=0x8000000000, 
pio_size=0x100000000) - #warn_access="Accessing JBI -- Unimplemented!") + # warn_access="Accessing JBI -- Unimplemented!") - fake_l2_1 = IsaFake(pio_addr=0xA900000000, pio_size=0x8, - ret_data64=0x0000000000000001, update_data=True) - #warn_access="Accessing L2 Cache Banks -- Unimplemented!") + fake_l2_1 = IsaFake( + pio_addr=0xA900000000, + pio_size=0x8, + ret_data64=0x0000000000000001, + update_data=True, + ) + # warn_access="Accessing L2 Cache Banks -- Unimplemented!") - fake_l2_2 = IsaFake(pio_addr=0xA900000040, pio_size=0x8, - ret_data64=0x0000000000000001, update_data=True) - #warn_access="Accessing L2 Cache Banks -- Unimplemented!") + fake_l2_2 = IsaFake( + pio_addr=0xA900000040, + pio_size=0x8, + ret_data64=0x0000000000000001, + update_data=True, + ) + # warn_access="Accessing L2 Cache Banks -- Unimplemented!") - fake_l2_3 = IsaFake(pio_addr=0xA900000080, pio_size=0x8, - ret_data64=0x0000000000000001, update_data=True) - #warn_access="Accessing L2 Cache Banks -- Unimplemented!") + fake_l2_3 = IsaFake( + pio_addr=0xA900000080, + pio_size=0x8, + ret_data64=0x0000000000000001, + update_data=True, + ) + # warn_access="Accessing L2 Cache Banks -- Unimplemented!") - fake_l2_4 = IsaFake(pio_addr=0xA9000000C0, pio_size=0x8, - ret_data64=0x0000000000000001, update_data=True) - #warn_access="Accessing L2 Cache Banks -- Unimplemented!") + fake_l2_4 = IsaFake( + pio_addr=0xA9000000C0, + pio_size=0x8, + ret_data64=0x0000000000000001, + update_data=True, + ) + # warn_access="Accessing L2 Cache Banks -- Unimplemented!") - fake_l2esr_1 = IsaFake(pio_addr=0xAB00000000, pio_size=0x8, - ret_data64=0x0000000000000000, update_data=True) - #warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") + fake_l2esr_1 = IsaFake( + pio_addr=0xAB00000000, + pio_size=0x8, + ret_data64=0x0000000000000000, + update_data=True, + ) + # warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") - fake_l2esr_2 = IsaFake(pio_addr=0xAB00000040, pio_size=0x8, - 
ret_data64=0x0000000000000000, update_data=True) - #warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") + fake_l2esr_2 = IsaFake( + pio_addr=0xAB00000040, + pio_size=0x8, + ret_data64=0x0000000000000000, + update_data=True, + ) + # warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") - fake_l2esr_3 = IsaFake(pio_addr=0xAB00000080, pio_size=0x8, - ret_data64=0x0000000000000000, update_data=True) - #warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") + fake_l2esr_3 = IsaFake( + pio_addr=0xAB00000080, + pio_size=0x8, + ret_data64=0x0000000000000000, + update_data=True, + ) + # warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") - fake_l2esr_4 = IsaFake(pio_addr=0xAB000000C0, pio_size=0x8, - ret_data64=0x0000000000000000, update_data=True) - #warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") + fake_l2esr_4 = IsaFake( + pio_addr=0xAB000000C0, + pio_size=0x8, + ret_data64=0x0000000000000000, + update_data=True, + ) + # warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!") - fake_ssi = IsaFake(pio_addr=0xff00000000, pio_size=0x10000000) - #warn_access="Accessing SSI -- Unimplemented!") + fake_ssi = IsaFake(pio_addr=0xFF00000000, pio_size=0x10000000) + # warn_access="Accessing SSI -- Unimplemented!") hterm = Terminal() - hvuart = Uart8250(pio_addr=0xfff0c2c000) + hvuart = Uart8250(pio_addr=0xFFF0C2C000) htod = DumbTOD() pterm = Terminal() - puart0 = Uart8250(pio_addr=0x1f10000000) + puart0 = Uart8250(pio_addr=0x1F10000000) iob = Iob() # Attach I/O devices that are on chip @@ -118,7 +156,6 @@ class T1000(Platform): self.iob.pio = bus.mem_side_ports self.htod.pio = bus.mem_side_ports - # Attach I/O devices to specified bus object. Can't do this # earlier, since the bus object itself is typically defined at the # System level. 
diff --git a/src/dev/storage/DiskImage.py b/src/dev/storage/DiskImage.py index d0942b8a76..e7657e556c 100644 --- a/src/dev/storage/DiskImage.py +++ b/src/dev/storage/DiskImage.py @@ -26,24 +26,27 @@ from m5.SimObject import SimObject from m5.params import * + + class DiskImage(SimObject): - type = 'DiskImage' + type = "DiskImage" abstract = True cxx_header = "dev/storage/disk_image.hh" - cxx_class = 'gem5::DiskImage' + cxx_class = "gem5::DiskImage" image_file = Param.String("disk image file") read_only = Param.Bool(False, "read only image") + class RawDiskImage(DiskImage): - type = 'RawDiskImage' + type = "RawDiskImage" cxx_header = "dev/storage/disk_image.hh" - cxx_class = 'gem5::RawDiskImage' + cxx_class = "gem5::RawDiskImage" + class CowDiskImage(DiskImage): - type = 'CowDiskImage' + type = "CowDiskImage" cxx_header = "dev/storage/disk_image.hh" - cxx_class = 'gem5::CowDiskImage' - child = Param.DiskImage(RawDiskImage(read_only=True), - "child image") + cxx_class = "gem5::CowDiskImage" + child = Param.DiskImage(RawDiskImage(read_only=True), "child image") table_size = Param.Int(65536, "initial table size") image_file = "" diff --git a/src/dev/storage/Ide.py b/src/dev/storage/Ide.py index 0b1890e661..7498a52ecb 100644 --- a/src/dev/storage/Ide.py +++ b/src/dev/storage/Ide.py @@ -28,20 +28,24 @@ from m5.SimObject import SimObject from m5.params import * from m5.objects.PciDevice import PciDevice, PciIoBar -class IdeID(Enum): vals = ['device0', 'device1'] + +class IdeID(Enum): + vals = ["device0", "device1"] + class IdeDisk(SimObject): - type = 'IdeDisk' + type = "IdeDisk" cxx_header = "dev/storage/ide_disk.hh" - cxx_class = 'gem5::IdeDisk' - delay = Param.Latency('1us', "Fixed disk delay in microseconds") - driveID = Param.IdeID('device0', "Drive ID") + cxx_class = "gem5::IdeDisk" + delay = Param.Latency("1us", "Fixed disk delay in microseconds") + driveID = Param.IdeID("device0", "Drive ID") image = Param.DiskImage("Disk image") + class IdeController(PciDevice): 
- type = 'IdeController' + type = "IdeController" cxx_header = "dev/storage/ide_ctrl.hh" - cxx_class = 'gem5::IdeController' + cxx_class = "gem5::IdeController" disks = VectorParam.IdeDisk("IDE disks attached to this controller") VendorID = 0x8086 @@ -52,17 +56,17 @@ class IdeController(PciDevice): ClassCode = 0x01 SubClassCode = 0x01 ProgIF = 0x85 - InterruptLine = 0x1f + InterruptLine = 0x1F InterruptPin = 0x01 # Primary - BAR0 = PciIoBar(size='8B') - BAR1 = PciIoBar(size='4B') + BAR0 = PciIoBar(size="8B") + BAR1 = PciIoBar(size="4B") # Secondary - BAR2 = PciIoBar(size='8B') - BAR3 = PciIoBar(size='4B') + BAR2 = PciIoBar(size="8B") + BAR3 = PciIoBar(size="4B") # DMA - BAR4 = PciIoBar(size='16B') + BAR4 = PciIoBar(size="16B") - io_shift = Param.UInt32(0x0, "IO port shift"); + io_shift = Param.UInt32(0x0, "IO port shift") ctrl_offset = Param.UInt32(0x0, "IDE disk control offset") diff --git a/src/dev/storage/SConscript b/src/dev/storage/SConscript index 615e1bf319..50135171e5 100644 --- a/src/dev/storage/SConscript +++ b/src/dev/storage/SConscript @@ -40,9 +40,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - # Controllers SimObject('Ide.py', sim_objects=['IdeDisk', 'IdeController'], enums=['IdeID']) diff --git a/src/dev/storage/SimpleDisk.py b/src/dev/storage/SimpleDisk.py index b59d5af44f..252ce38a42 100644 --- a/src/dev/storage/SimpleDisk.py +++ b/src/dev/storage/SimpleDisk.py @@ -28,9 +28,10 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class SimpleDisk(SimObject): - type = 'SimpleDisk' + type = "SimpleDisk" cxx_header = "dev/storage/simple_disk.hh" - cxx_class = 'gem5::SimpleDisk' + cxx_class = "gem5::SimpleDisk" disk = Param.DiskImage("Disk Image") system = Param.System(Parent.any, "System Pointer") diff --git a/src/dev/storage/ide_ctrl.cc b/src/dev/storage/ide_ctrl.cc index 45e6242409..3de0268aa4 100644 --- a/src/dev/storage/ide_ctrl.cc +++ b/src/dev/storage/ide_ctrl.cc @@ -76,7 +76,7 @@ 
IdeController::IdeController(const Params &p) secondary(name() + ".secondary", this, false), ioShift(p.io_shift), ctrlOffset(p.ctrl_offset) { - panic_if(params().disks.size() > 3, + panic_if(params().disks.size() > 4, "IDE controllers support a maximum of 4 devices attached!"); // Assign the disks to channels diff --git a/src/dev/virtio/SConscript b/src/dev/virtio/SConscript index e004da6254..6b71506881 100644 --- a/src/dev/virtio/SConscript +++ b/src/dev/virtio/SConscript @@ -37,9 +37,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - SimObject('VirtIO.py', sim_objects=[ 'VirtIODeviceBase', 'VirtIODummyDevice', 'PciVirtIO']) SimObject('VirtIOConsole.py', sim_objects=['VirtIOConsole']) diff --git a/src/dev/virtio/VirtIO.py b/src/dev/virtio/VirtIO.py index 93c31c6f21..1d652bca64 100644 --- a/src/dev/virtio/VirtIO.py +++ b/src/dev/virtio/VirtIO.py @@ -43,35 +43,37 @@ from m5.objects.PciDevice import PciDevice, PciIoBar class VirtIODeviceBase(SimObject): - type = 'VirtIODeviceBase' - cxx_header = 'dev/virtio/base.hh' - cxx_class = 'gem5::VirtIODeviceBase' + type = "VirtIODeviceBase" + cxx_header = "dev/virtio/base.hh" + cxx_class = "gem5::VirtIODeviceBase" abstract = True subsystem = Param.UInt8(0x00, "VirtIO subsystem ID") system = Param.System(Parent.any, "system object") - byte_order = Param.ByteOrder('little', "Device byte order") + byte_order = Param.ByteOrder("little", "Device byte order") + class VirtIODummyDevice(VirtIODeviceBase): - type = 'VirtIODummyDevice' - cxx_header = 'dev/virtio/base.hh' - cxx_class = 'gem5::VirtIODummyDevice' + type = "VirtIODummyDevice" + cxx_header = "dev/virtio/base.hh" + cxx_class = "gem5::VirtIODummyDevice" + class PciVirtIO(PciDevice): - type = 'PciVirtIO' - cxx_header = 'dev/virtio/pci.hh' - cxx_class = 'gem5::PciVirtIO' + type = "PciVirtIO" + cxx_header = "dev/virtio/pci.hh" + cxx_class = "gem5::PciVirtIO" vio = Param.VirtIODeviceBase(VirtIODummyDevice(), "VirtIO device") VendorID = 0x1AF4 - 
SubsystemVendorID = VendorID; + SubsystemVendorID = VendorID DeviceID = 0x1000 - ClassCode = 0xff # Misc device + ClassCode = 0xFF # Misc device # The size is overridden by the device model. - BAR0 = PciIoBar(size='4B') + BAR0 = PciIoBar(size="4B") - InterruptPin = 0x01 # Use #INTA + InterruptPin = 0x01 # Use #INTA diff --git a/src/dev/virtio/VirtIO9P.py b/src/dev/virtio/VirtIO9P.py index 84e1a7bef1..b6611713b7 100644 --- a/src/dev/virtio/VirtIO9P.py +++ b/src/dev/virtio/VirtIO9P.py @@ -39,35 +39,38 @@ from m5.params import * from m5.proxy import * from m5.objects.VirtIO import VirtIODeviceBase + class VirtIO9PBase(VirtIODeviceBase): - type = 'VirtIO9PBase' + type = "VirtIO9PBase" abstract = True - cxx_header = 'dev/virtio/fs9p.hh' - cxx_class = 'gem5::VirtIO9PBase' + cxx_header = "dev/virtio/fs9p.hh" + cxx_class = "gem5::VirtIO9PBase" queueSize = Param.Unsigned(32, "Output queue size (pages)") tag = Param.String("gem5", "Mount tag") class VirtIO9PProxy(VirtIO9PBase): - type = 'VirtIO9PProxy' + type = "VirtIO9PProxy" abstract = True - cxx_header = 'dev/virtio/fs9p.hh' - cxx_class = 'gem5::VirtIO9PProxy' + cxx_header = "dev/virtio/fs9p.hh" + cxx_class = "gem5::VirtIO9PProxy" + class VirtIO9PDiod(VirtIO9PProxy): - type = 'VirtIO9PDiod' - cxx_header = 'dev/virtio/fs9p.hh' - cxx_class = 'gem5::VirtIO9PDiod' + type = "VirtIO9PDiod" + cxx_header = "dev/virtio/fs9p.hh" + cxx_class = "gem5::VirtIO9PDiod" diod = Param.String("diod", "Path to diod, optionally in PATH") root = Param.String("Path to export through diod") socketPath = Param.String("Unused socket to diod") + class VirtIO9PSocket(VirtIO9PProxy): - type = 'VirtIO9PSocket' - cxx_header = 'dev/virtio/fs9p.hh' - cxx_class = 'gem5::VirtIO9PSocket' + type = "VirtIO9PSocket" + cxx_header = "dev/virtio/fs9p.hh" + cxx_class = "gem5::VirtIO9PSocket" server = Param.String("127.0.0.1", "9P server address or host name") port = Param.String("564", "9P server port") diff --git a/src/dev/virtio/VirtIOBlock.py 
b/src/dev/virtio/VirtIOBlock.py index 25afd243cd..6a75c00956 100644 --- a/src/dev/virtio/VirtIOBlock.py +++ b/src/dev/virtio/VirtIOBlock.py @@ -39,10 +39,11 @@ from m5.params import * from m5.proxy import * from m5.objects.VirtIO import VirtIODeviceBase + class VirtIOBlock(VirtIODeviceBase): - type = 'VirtIOBlock' - cxx_header = 'dev/virtio/block.hh' - cxx_class = 'gem5::VirtIOBlock' + type = "VirtIOBlock" + cxx_header = "dev/virtio/block.hh" + cxx_class = "gem5::VirtIOBlock" queueSize = Param.Unsigned(128, "Output queue size (pages)") diff --git a/src/dev/virtio/VirtIOConsole.py b/src/dev/virtio/VirtIOConsole.py index 41c419a24d..72826aa6fb 100644 --- a/src/dev/virtio/VirtIOConsole.py +++ b/src/dev/virtio/VirtIOConsole.py @@ -40,10 +40,11 @@ from m5.proxy import * from m5.objects.VirtIO import VirtIODeviceBase from m5.objects.Serial import SerialDevice + class VirtIOConsole(VirtIODeviceBase): - type = 'VirtIOConsole' - cxx_header = 'dev/virtio/console.hh' - cxx_class = 'gem5::VirtIOConsole' + type = "VirtIOConsole" + cxx_header = "dev/virtio/console.hh" + cxx_class = "gem5::VirtIOConsole" qRecvSize = Param.Unsigned(16, "Receive queue size (descriptors)") qTransSize = Param.Unsigned(16, "Transmit queue size (descriptors)") diff --git a/src/dev/virtio/VirtIORng 2.py b/src/dev/virtio/VirtIORng 2.py index 13df059fa0..925fccdabe 100644 --- a/src/dev/virtio/VirtIORng 2.py +++ b/src/dev/virtio/VirtIORng 2.py @@ -40,9 +40,10 @@ from m5.params import * from m5.proxy import * from m5.objects.VirtIO import VirtIODeviceBase + class VirtIORng(VirtIODeviceBase): - type = 'VirtIORng' - cxx_header = 'dev/virtio/rng.hh' - cxx_class = 'gem5::VirtIORng' + type = "VirtIORng" + cxx_header = "dev/virtio/rng.hh" + cxx_class = "gem5::VirtIORng" qSize = Param.Unsigned(16, "Request queue size") diff --git a/src/dev/virtio/VirtIORng.py b/src/dev/virtio/VirtIORng.py index 13df059fa0..925fccdabe 100644 --- a/src/dev/virtio/VirtIORng.py +++ b/src/dev/virtio/VirtIORng.py @@ -40,9 +40,10 @@ from 
m5.params import * from m5.proxy import * from m5.objects.VirtIO import VirtIODeviceBase + class VirtIORng(VirtIODeviceBase): - type = 'VirtIORng' - cxx_header = 'dev/virtio/rng.hh' - cxx_class = 'gem5::VirtIORng' + type = "VirtIORng" + cxx_header = "dev/virtio/rng.hh" + cxx_class = "gem5::VirtIORng" qSize = Param.Unsigned(16, "Request queue size") diff --git a/src/dev/x86/Cmos.py b/src/dev/x86/Cmos.py index 14f9e45360..ccc14de8c1 100644 --- a/src/dev/x86/Cmos.py +++ b/src/dev/x86/Cmos.py @@ -29,11 +29,13 @@ from m5.proxy import * from m5.objects.Device import BasicPioDevice from m5.objects.IntPin import IntSourcePin + class Cmos(BasicPioDevice): - type = 'Cmos' - cxx_class='gem5::X86ISA::Cmos' + type = "Cmos" + cxx_class = "gem5::X86ISA::Cmos" cxx_header = "dev/x86/cmos.hh" - time = Param.Time('01/01/2012', - "System time to use ('Now' for actual time)") - int_pin = IntSourcePin('Pin to signal RTC alarm interrupts to') + time = Param.Time( + "01/01/2012", "System time to use ('Now' for actual time)" + ) + int_pin = IntSourcePin("Pin to signal RTC alarm interrupts to") diff --git a/src/dev/x86/I8042.py b/src/dev/x86/I8042.py index 956a1bf8c9..0dae054588 100644 --- a/src/dev/x86/I8042.py +++ b/src/dev/x86/I8042.py @@ -30,16 +30,17 @@ from m5.objects.Device import PioDevice from m5.objects.IntPin import IntSourcePin from m5.objects.PS2 import * + class I8042(PioDevice): - type = 'I8042' - cxx_class = 'gem5::X86ISA::I8042' + type = "I8042" + cxx_class = "gem5::X86ISA::I8042" cxx_header = "dev/x86/i8042.hh" - pio_latency = Param.Latency('100ns', "Programmed IO latency") - data_port = Param.Addr('Data port address') - command_port = Param.Addr('Command/status port address') - mouse_int_pin = IntSourcePin('Pin to signal the mouse has data') - keyboard_int_pin = IntSourcePin('Pin to signal the keyboard has data') + pio_latency = Param.Latency("100ns", "Programmed IO latency") + data_port = Param.Addr("Data port address") + command_port = Param.Addr("Command/status port 
address") + mouse_int_pin = IntSourcePin("Pin to signal the mouse has data") + keyboard_int_pin = IntSourcePin("Pin to signal the keyboard has data") keyboard = Param.PS2Device(PS2Keyboard(vnc=NULL), "PS/2 keyboard device") mouse = Param.PS2Device(PS2Mouse(), "PS/2 mouse device") diff --git a/src/dev/x86/I82094AA.py b/src/dev/x86/I82094AA.py index 591c8d1d4f..228bc5a5eb 100644 --- a/src/dev/x86/I82094AA.py +++ b/src/dev/x86/I82094AA.py @@ -29,14 +29,16 @@ from m5.proxy import * from m5.objects.Device import BasicPioDevice from m5.objects.IntPin import VectorIntSinkPin + class I82094AA(BasicPioDevice): - type = 'I82094AA' - cxx_class = 'gem5::X86ISA::I82094AA' + type = "I82094AA" + cxx_class = "gem5::X86ISA::I82094AA" cxx_header = "dev/x86/i82094aa.hh" - apic_id = Param.Int(1, 'APIC id for this IO APIC') + apic_id = Param.Int(1, "APIC id for this IO APIC") int_requestor = RequestPort("Port for sending interrupt messages") - int_latency = Param.Latency('1ns', \ - "Latency for an interrupt to propagate through this device.") + int_latency = Param.Latency( + "1ns", "Latency for an interrupt to propagate through this device." 
+ ) - inputs = VectorIntSinkPin('The pins that drive this IO APIC') + inputs = VectorIntSinkPin("The pins that drive this IO APIC") diff --git a/src/dev/x86/I8237.py b/src/dev/x86/I8237.py index 327836753d..b8a8a8ce51 100644 --- a/src/dev/x86/I8237.py +++ b/src/dev/x86/I8237.py @@ -28,7 +28,8 @@ from m5.params import * from m5.proxy import * from m5.objects.Device import BasicPioDevice + class I8237(BasicPioDevice): - type = 'I8237' - cxx_class = 'gem5::X86ISA::I8237' + type = "I8237" + cxx_class = "gem5::X86ISA::I8237" cxx_header = "dev/x86/i8237.hh" diff --git a/src/dev/x86/I8254.py b/src/dev/x86/I8254.py index d98c2c09ca..545f13739a 100644 --- a/src/dev/x86/I8254.py +++ b/src/dev/x86/I8254.py @@ -29,9 +29,10 @@ from m5.proxy import * from m5.objects.Device import BasicPioDevice from m5.objects.IntPin import IntSourcePin + class I8254(BasicPioDevice): - type = 'I8254' - cxx_class = 'gem5::X86ISA::I8254' + type = "I8254" + cxx_class = "gem5::X86ISA::I8254" cxx_header = "dev/x86/i8254.hh" - int_pin = IntSourcePin('Pin to signal timer interrupts to') + int_pin = IntSourcePin("Pin to signal timer interrupts to") diff --git a/src/dev/x86/I8259.py b/src/dev/x86/I8259.py index d73bc85422..5fcef01f3c 100644 --- a/src/dev/x86/I8259.py +++ b/src/dev/x86/I8259.py @@ -29,18 +29,17 @@ from m5.proxy import * from m5.objects.Device import BasicPioDevice from m5.objects.IntPin import IntSourcePin, VectorIntSinkPin + class X86I8259CascadeMode(Enum): - map = {'I8259Master' : 0, - 'I8259Slave' : 1, - 'I8259Single' : 2 - } + map = {"I8259Master": 0, "I8259Slave": 1, "I8259Single": 2} + class I8259(BasicPioDevice): - type = 'I8259' - cxx_class='gem5::X86ISA::I8259' + type = "I8259" + cxx_class = "gem5::X86ISA::I8259" cxx_header = "dev/x86/i8259.hh" - output = IntSourcePin('The pin this I8259 drives') - inputs = VectorIntSinkPin('The pins that drive this I8259') - mode = Param.X86I8259CascadeMode('How this I8259 is cascaded') - slave = Param.I8259(NULL, 'Slave I8259, if any') + output 
= IntSourcePin("The pin this I8259 drives") + inputs = VectorIntSinkPin("The pins that drive this I8259") + mode = Param.X86I8259CascadeMode("How this I8259 is cascaded") + slave = Param.I8259(NULL, "Slave I8259, if any") diff --git a/src/dev/x86/Pc.py b/src/dev/x86/Pc.py index 42c35c13b6..0039d67230 100644 --- a/src/dev/x86/Pc.py +++ b/src/dev/x86/Pc.py @@ -35,9 +35,11 @@ from m5.objects.Uart import Uart8250 from m5.objects.PciHost import GenericPciHost from m5.objects.XBar import IOXBar + def x86IOAddress(port): IO_address_space_base = 0x8000000000000000 - return IO_address_space_base + port; + return IO_address_space_base + port + class PcPciHost(GenericPciHost): conf_base = 0xC000000000000000 @@ -45,10 +47,11 @@ class PcPciHost(GenericPciHost): pci_pio_base = 0x8000000000000000 + class Pc(Platform): - type = 'Pc' + type = "Pc" cxx_header = "dev/x86/pc.hh" - cxx_class = 'gem5::Pc' + cxx_class = "gem5::Pc" system = Param.System(Parent.any, "system") south_bridge = Param.SouthBridge(SouthBridge(), "Southbridge") @@ -56,29 +59,35 @@ class Pc(Platform): # Serial port and terminal com_1 = Uart8250() - com_1.pio_addr = x86IOAddress(0x3f8) + com_1.pio_addr = x86IOAddress(0x3F8) com_1.device = Terminal() # Devices to catch access to non-existant serial ports. - fake_com_2 = IsaFake(pio_addr=x86IOAddress(0x2f8), pio_size=8) - fake_com_3 = IsaFake(pio_addr=x86IOAddress(0x3e8), pio_size=8) - fake_com_4 = IsaFake(pio_addr=x86IOAddress(0x2e8), pio_size=8) + fake_com_2 = IsaFake(pio_addr=x86IOAddress(0x2F8), pio_size=8) + fake_com_3 = IsaFake(pio_addr=x86IOAddress(0x3E8), pio_size=8) + fake_com_4 = IsaFake(pio_addr=x86IOAddress(0x2E8), pio_size=8) # A device to catch accesses to the non-existant floppy controller. - fake_floppy = IsaFake(pio_addr=x86IOAddress(0x3f2), pio_size=2) + fake_floppy = IsaFake(pio_addr=x86IOAddress(0x3F2), pio_size=2) # A bus for accesses not claimed by a specific device. default_bus = IOXBar() # A device to handle accesses to unclaimed IO ports. 
- empty_isa = IsaFake(pio_addr=x86IOAddress(0), pio_size='64KiB', - ret_data8=0, ret_data16=0, ret_data32=0, ret_data64=0, - pio=default_bus.mem_side_ports) + empty_isa = IsaFake( + pio_addr=x86IOAddress(0), + pio_size="64KiB", + ret_data8=0, + ret_data16=0, + ret_data32=0, + ret_data64=0, + pio=default_bus.mem_side_ports, + ) # A device to handle any other type of unclaimed access. bad_addr = BadAddr(pio=default_bus.default) - def attachIO(self, bus, dma_ports = []): + def attachIO(self, bus, dma_ports=[]): self.south_bridge.attachIO(bus, dma_ports) self.com_1.pio = bus.mem_side_ports self.fake_com_2.pio = bus.mem_side_ports diff --git a/src/dev/x86/PcSpeaker.py b/src/dev/x86/PcSpeaker.py index 41c31d4488..3337b6a07b 100644 --- a/src/dev/x86/PcSpeaker.py +++ b/src/dev/x86/PcSpeaker.py @@ -28,8 +28,9 @@ from m5.params import * from m5.proxy import * from m5.objects.Device import BasicPioDevice + class PcSpeaker(BasicPioDevice): - type = 'PcSpeaker' - cxx_class = 'gem5::X86ISA::Speaker' + type = "PcSpeaker" + cxx_class = "gem5::X86ISA::Speaker" cxx_header = "dev/x86/speaker.hh" - i8254 = Param.I8254('Timer that drives the speaker') + i8254 = Param.I8254("Timer that drives the speaker") diff --git a/src/dev/x86/SouthBridge.py b/src/dev/x86/SouthBridge.py index 35866a763b..670f687175 100644 --- a/src/dev/x86/SouthBridge.py +++ b/src/dev/x86/SouthBridge.py @@ -37,29 +37,40 @@ from m5.objects.PcSpeaker import PcSpeaker from m5.objects.X86Ide import X86IdeController from m5.SimObject import SimObject + def x86IOAddress(port): IO_address_space_base = 0x8000000000000000 - return IO_address_space_base + port; + return IO_address_space_base + port + class SouthBridge(SimObject): - type = 'SouthBridge' + type = "SouthBridge" cxx_header = "dev/x86/south_bridge.hh" - cxx_class = 'gem5::SouthBridge' + cxx_class = "gem5::SouthBridge" - pic1 = Param.I8259(I8259(pio_addr=x86IOAddress(0x20), mode='I8259Master'), - "Master PIC") - pic2 = Param.I8259(I8259(pio_addr=x86IOAddress(0xA0), 
mode='I8259Slave'), - "Slave PIC") - cmos = Param.Cmos(Cmos(pio_addr=x86IOAddress(0x70)), - "CMOS memory and real time clock device") - dma1 = Param.I8237(I8237(pio_addr=x86IOAddress(0x0)), - "The first dma controller") - keyboard = Param.I8042(I8042(data_port=x86IOAddress(0x60), \ - command_port=x86IOAddress(0x64)), "The keyboard controller") - pit = Param.I8254(I8254(pio_addr=x86IOAddress(0x40)), - "Programmable interval timer") - speaker = Param.PcSpeaker(PcSpeaker(pio_addr=x86IOAddress(0x61)), - "PC speaker") + pic1 = Param.I8259( + I8259(pio_addr=x86IOAddress(0x20), mode="I8259Master"), "Master PIC" + ) + pic2 = Param.I8259( + I8259(pio_addr=x86IOAddress(0xA0), mode="I8259Slave"), "Slave PIC" + ) + cmos = Param.Cmos( + Cmos(pio_addr=x86IOAddress(0x70)), + "CMOS memory and real time clock device", + ) + dma1 = Param.I8237( + I8237(pio_addr=x86IOAddress(0x0)), "The first dma controller" + ) + keyboard = Param.I8042( + I8042(data_port=x86IOAddress(0x60), command_port=x86IOAddress(0x64)), + "The keyboard controller", + ) + pit = Param.I8254( + I8254(pio_addr=x86IOAddress(0x40)), "Programmable interval timer" + ) + speaker = Param.PcSpeaker( + PcSpeaker(pio_addr=x86IOAddress(0x61)), "PC speaker" + ) io_apic = Param.I82094AA(I82094AA(pio_addr=0xFEC00000), "I/O APIC") # IDE controller @@ -86,7 +97,7 @@ class SouthBridge(SimObject): self.dma1.pio = bus.mem_side_ports self.ide.pio = bus.mem_side_ports if dma_ports.count(self.ide.dma) == 0: - self.ide.dma = bus.cpu_side_ports + self.ide.dma = bus.cpu_side_ports self.keyboard.pio = bus.mem_side_ports self.pic1.pio = bus.mem_side_ports self.pic2.pio = bus.mem_side_ports diff --git a/src/dev/x86/X86Ide.py b/src/dev/x86/X86Ide.py index 99aa853b8a..9ae0704503 100644 --- a/src/dev/x86/X86Ide.py +++ b/src/dev/x86/X86Ide.py @@ -29,21 +29,22 @@ from m5.objects.Ide import IdeController from m5.objects.IntPin import IntSourcePin from m5.objects.PciDevice import PciLegacyIoBar + class X86IdeController(IdeController): - type = 
'X86IdeController' + type = "X86IdeController" cxx_header = "dev/x86/ide_ctrl.hh" - cxx_class = 'gem5::X86IdeController' + cxx_class = "gem5::X86IdeController" VendorID = 0x8086 DeviceID = 0x7111 ProgIF = 0x80 - InterruptLine = 0xff + InterruptLine = 0xFF InterruptPin = 0x01 - BAR0 = PciLegacyIoBar(addr=0x1f0, size='8B') - BAR1 = PciLegacyIoBar(addr=0x3f4, size='3B') - BAR2 = PciLegacyIoBar(addr=0x170, size='8B') - BAR3 = PciLegacyIoBar(addr=0x374, size='3B') + BAR0 = PciLegacyIoBar(addr=0x1F0, size="8B") + BAR1 = PciLegacyIoBar(addr=0x3F4, size="3B") + BAR2 = PciLegacyIoBar(addr=0x170, size="8B") + BAR3 = PciLegacyIoBar(addr=0x374, size="3B") - int_primary = IntSourcePin('Interrupt for the primary channel') - int_secondary = IntSourcePin('Interrupt for the secondary channel') + int_primary = IntSourcePin("Interrupt for the primary channel") + int_secondary = IntSourcePin("Interrupt for the secondary channel") diff --git a/src/dev/x86/X86QemuFwCfg.py b/src/dev/x86/X86QemuFwCfg.py index 6af2b796e6..4998f9a8b5 100644 --- a/src/dev/x86/X86QemuFwCfg.py +++ b/src/dev/x86/X86QemuFwCfg.py @@ -27,22 +27,25 @@ from m5.params import * from m5.objects.E820 import X86E820Entry from m5.objects.QemuFwCfg import QemuFwCfgIo, QemuFwCfgItem + def x86IOAddress(port): IO_address_space_base = 0x8000000000000000 - return IO_address_space_base + port; + return IO_address_space_base + port + class X86QemuFwCfg(QemuFwCfgIo): selector_addr = x86IOAddress(0x510) + class QemuFwCfgItemE820(QemuFwCfgItem): - type = 'QemuFwCfgItemE820' - cxx_class = 'gem5::qemu::FwCfgItemFactory' - cxx_template_params = ['class ItemType'] - cxx_header = 'dev/x86/qemu_fw_cfg.hh' + type = "QemuFwCfgItemE820" + cxx_class = "gem5::qemu::FwCfgItemFactory" + cxx_template_params = ["class ItemType"] + cxx_header = "dev/x86/qemu_fw_cfg.hh" # There is a fixed index for this file. 
index = 0x8003 arch_specific = True path = "etc/e820" - entries = VectorParam.X86E820Entry('entries for the e820 table') + entries = VectorParam.X86E820Entry("entries for the e820 table") diff --git a/src/gpu-compute/GPU.py b/src/gpu-compute/GPU.py index a0154a77dc..517d1801c0 100644 --- a/src/gpu-compute/GPU.py +++ b/src/gpu-compute/GPU.py @@ -39,137 +39,156 @@ from m5.objects.LdsState import LdsState from m5.objects.Process import EmulatedDriver from m5.objects.VegaGPUTLB import VegaPagetableWalker -class PrefetchType(Enum): vals = [ - 'PF_CU', - 'PF_PHASE', - 'PF_WF', - 'PF_STRIDE', - 'PF_END', - ] -class GfxVersion(ScopedEnum): vals = [ - 'gfx801', - 'gfx803', - 'gfx900', - 'gfx902', - ] +class PrefetchType(Enum): + vals = ["PF_CU", "PF_PHASE", "PF_WF", "PF_STRIDE", "PF_END"] + + +class GfxVersion(ScopedEnum): + vals = ["gfx801", "gfx803", "gfx900", "gfx902"] + class PoolManager(SimObject): - type = 'PoolManager' + type = "PoolManager" abstract = True - cxx_class = 'gem5::PoolManager' + cxx_class = "gem5::PoolManager" cxx_header = "gpu-compute/pool_manager.hh" - min_alloc = Param.Int(4, 'min number of VGPRs allocated per WF') - pool_size = Param.Int(2048, 'number of vector registers per SIMD') + min_alloc = Param.Int(4, "min number of VGPRs allocated per WF") + pool_size = Param.Int(2048, "number of vector registers per SIMD") + # The simple pool manage only allows one workgroup to # be executing on a CU at any given time. 
class SimplePoolManager(PoolManager): - type = 'SimplePoolManager' - cxx_class = 'gem5::SimplePoolManager' + type = "SimplePoolManager" + cxx_class = "gem5::SimplePoolManager" cxx_header = "gpu-compute/simple_pool_manager.hh" + ## This is for allowing multiple workgroups on one CU class DynPoolManager(PoolManager): - type = 'DynPoolManager' - cxx_class = 'gem5::DynPoolManager' + type = "DynPoolManager" + cxx_class = "gem5::DynPoolManager" cxx_header = "gpu-compute/dyn_pool_manager.hh" -class RegisterFile(SimObject): - type = 'RegisterFile' - cxx_class = 'gem5::RegisterFile' - cxx_header = 'gpu-compute/register_file.hh' - simd_id = Param.Int(-1, 'SIMD ID associated with this Register File') - num_regs = Param.Int(2048, 'number of registers in this RF') - wf_size = Param.Int(64, 'Wavefront size (in work items)') +class RegisterFile(SimObject): + type = "RegisterFile" + cxx_class = "gem5::RegisterFile" + cxx_header = "gpu-compute/register_file.hh" + + simd_id = Param.Int(-1, "SIMD ID associated with this Register File") + num_regs = Param.Int(2048, "number of registers in this RF") + wf_size = Param.Int(64, "Wavefront size (in work items)") + class ScalarRegisterFile(RegisterFile): - type = 'ScalarRegisterFile' - cxx_class = 'gem5::ScalarRegisterFile' - cxx_header = 'gpu-compute/scalar_register_file.hh' + type = "ScalarRegisterFile" + cxx_class = "gem5::ScalarRegisterFile" + cxx_header = "gpu-compute/scalar_register_file.hh" + class VectorRegisterFile(RegisterFile): - type = 'VectorRegisterFile' - cxx_class = 'gem5::VectorRegisterFile' - cxx_header = 'gpu-compute/vector_register_file.hh' + type = "VectorRegisterFile" + cxx_class = "gem5::VectorRegisterFile" + cxx_header = "gpu-compute/vector_register_file.hh" + class RegisterManager(SimObject): - type = 'RegisterManager' - cxx_class = 'gem5::RegisterManager' - cxx_header = 'gpu-compute/register_manager.hh' + type = "RegisterManager" + cxx_class = "gem5::RegisterManager" + cxx_header = "gpu-compute/register_manager.hh" 
policy = Param.String("static", "Register Manager Policy") - vrf_pool_managers = VectorParam.PoolManager('VRF Pool Managers') - srf_pool_managers = VectorParam.PoolManager('SRF Pool Managers') + vrf_pool_managers = VectorParam.PoolManager("VRF Pool Managers") + srf_pool_managers = VectorParam.PoolManager("SRF Pool Managers") + class Wavefront(SimObject): - type = 'Wavefront' - cxx_class = 'gem5::Wavefront' - cxx_header = 'gpu-compute/wavefront.hh' + type = "Wavefront" + cxx_class = "gem5::Wavefront" + cxx_header = "gpu-compute/wavefront.hh" + + simdId = Param.Int("SIMD id (0-ComputeUnit.num_SIMDs)") + wf_slot_id = Param.Int("wavefront id (0-ComputeUnit.max_wfs)") + wf_size = Param.Int(64, "Wavefront size (in work items)") + max_ib_size = Param.Int( + 13, + "Maximum size (in number of insts) of the " "instruction buffer (IB).", + ) - simdId = Param.Int('SIMD id (0-ComputeUnit.num_SIMDs)') - wf_slot_id = Param.Int('wavefront id (0-ComputeUnit.max_wfs)') - wf_size = Param.Int(64, 'Wavefront size (in work items)') - max_ib_size = Param.Int(13, 'Maximum size (in number of insts) of the ' - 'instruction buffer (IB).') # Most of the default values here are obtained from the # AMD Graphics Core Next (GCN) Architecture whitepaper. class ComputeUnit(ClockedObject): - type = 'ComputeUnit' - cxx_class = 'gem5::ComputeUnit' - cxx_header = 'gpu-compute/compute_unit.hh' + type = "ComputeUnit" + cxx_class = "gem5::ComputeUnit" + cxx_header = "gpu-compute/compute_unit.hh" - wavefronts = VectorParam.Wavefront('Number of wavefronts') + wavefronts = VectorParam.Wavefront("Number of wavefronts") # Wavefront size is 64. This is configurable, however changing # this value to anything other than 64 will likely cause errors. 
- wf_size = Param.Int(64, 'Wavefront size (in work items)') - num_barrier_slots = Param.Int(4, 'Number of barrier slots in a CU') - num_SIMDs = Param.Int(4, 'number of SIMD units per CU') - num_scalar_cores = Param.Int(1, 'number of Scalar cores per CU') - num_scalar_mem_pipes = Param.Int(1, 'number of Scalar memory pipelines '\ - 'per CU') - simd_width = Param.Int(16, 'width (number of lanes) per SIMD unit') + wf_size = Param.Int(64, "Wavefront size (in work items)") + num_barrier_slots = Param.Int(4, "Number of barrier slots in a CU") + num_SIMDs = Param.Int(4, "number of SIMD units per CU") + num_scalar_cores = Param.Int(1, "number of Scalar cores per CU") + num_scalar_mem_pipes = Param.Int( + 1, "number of Scalar memory pipelines " "per CU" + ) + simd_width = Param.Int(16, "width (number of lanes) per SIMD unit") - operand_network_length = Param.Int(1, 'number of pipe stages of operand '\ - 'network') + operand_network_length = Param.Int( + 1, "number of pipe stages of operand " "network" + ) - spbypass_pipe_length = Param.Int(4, 'vector ALU Single Precision bypass '\ - 'latency') + spbypass_pipe_length = Param.Int( + 4, "vector ALU Single Precision bypass " "latency" + ) - dpbypass_pipe_length = Param.Int(4, 'vector ALU Double Precision bypass '\ - 'latency') - scalar_pipe_length = Param.Int(1, 'number of pipe stages per scalar ALU') - issue_period = Param.Int(4, 'number of cycles per issue period') + dpbypass_pipe_length = Param.Int( + 4, "vector ALU Double Precision bypass " "latency" + ) + scalar_pipe_length = Param.Int(1, "number of pipe stages per scalar ALU") + issue_period = Param.Int(4, "number of cycles per issue period") - vrf_gm_bus_latency = Param.Int(1, 'number of cycles per use of VRF to '\ - 'GM bus') - srf_scm_bus_latency = Param.Int(1, 'number of cycles per use of SRF '\ - 'to Scalar Mem bus') - vrf_lm_bus_latency = Param.Int(1, 'number of cycles per use of VRF to '\ - 'LM bus') + vrf_gm_bus_latency = Param.Int( + 1, "number of cycles per use 
of VRF to " "GM bus" + ) + srf_scm_bus_latency = Param.Int( + 1, "number of cycles per use of SRF " "to Scalar Mem bus" + ) + vrf_lm_bus_latency = Param.Int( + 1, "number of cycles per use of VRF to " "LM bus" + ) - num_global_mem_pipes = Param.Int(1,'number of global memory pipes per CU') - num_shared_mem_pipes = Param.Int(1,'number of shared memory pipes per CU') - n_wf = Param.Int(10, 'Number of wavefront slots per SIMD') - mem_req_latency = Param.Int(50, "Latency for request from the cu to ruby. "\ - "Represents the pipeline to reach the TCP "\ - "and specified in GPU clock cycles") - mem_resp_latency = Param.Int(50, "Latency for responses from ruby to the "\ - "cu. Represents the pipeline between the "\ - "TCP and cu as well as TCP data array "\ - "access. Specified in GPU clock cycles") + num_global_mem_pipes = Param.Int(1, "number of global memory pipes per CU") + num_shared_mem_pipes = Param.Int(1, "number of shared memory pipes per CU") + n_wf = Param.Int(10, "Number of wavefront slots per SIMD") + mem_req_latency = Param.Int( + 50, + "Latency for request from the cu to ruby. " + "Represents the pipeline to reach the TCP " + "and specified in GPU clock cycles", + ) + mem_resp_latency = Param.Int( + 50, + "Latency for responses from ruby to the " + "cu. Represents the pipeline between the " + "TCP and cu as well as TCP data array " + "access. 
Specified in GPU clock cycles", + ) system = Param.System(Parent.any, "system object") - cu_id = Param.Int('CU id') - vrf_to_coalescer_bus_width = Param.Int(64, "VRF->Coalescer data bus "\ - "width in bytes") - coalescer_to_vrf_bus_width = Param.Int(64, "Coalescer->VRF data bus "\ - "width in bytes") + cu_id = Param.Int("CU id") + vrf_to_coalescer_bus_width = Param.Int( + 64, "VRF->Coalescer data bus " "width in bytes" + ) + coalescer_to_vrf_bus_width = Param.Int( + 64, "Coalescer->VRF data bus " "width in bytes" + ) memory_port = VectorRequestPort("Port to the memory system") - translation_port = VectorRequestPort('Port to the TLB hierarchy') + translation_port = VectorRequestPort("Port to the TLB hierarchy") sqc_port = RequestPort("Port to the SQC (I-cache") sqc_tlb_port = RequestPort("Port to the TLB for the SQC (I-cache)") scalar_port = RequestPort("Port to the scalar data cache") @@ -177,113 +196,146 @@ class ComputeUnit(ClockedObject): gmTokenPort = RequestPort("Port to the GPU coalesecer for sharing tokens") perLaneTLB = Param.Bool(False, "enable per-lane TLB") - prefetch_depth = Param.Int(0, "Number of prefetches triggered at a time"\ - "(0 turns off prefetching)") + prefetch_depth = Param.Int( + 0, + "Number of prefetches triggered at a time" "(0 turns off prefetching)", + ) prefetch_stride = Param.Int(1, "Fixed Prefetch Stride (1 means next-page)") - prefetch_prev_type = Param.PrefetchType('PF_PHASE', "Prefetch the stride "\ - "from last mem req in lane of "\ - "CU|Phase|Wavefront") - execPolicy = Param.String("OLDEST-FIRST", "WF execution selection policy"); + prefetch_prev_type = Param.PrefetchType( + "PF_PHASE", + "Prefetch the stride " + "from last mem req in lane of " + "CU|Phase|Wavefront", + ) + execPolicy = Param.String("OLDEST-FIRST", "WF execution selection policy") debugSegFault = Param.Bool(False, "enable debugging GPU seg faults") functionalTLB = Param.Bool(False, "Assume TLB causes no delay") - localMemBarrier = Param.Bool(False, "Assume 
Barriers do not wait on "\ - "kernel end") + localMemBarrier = Param.Bool( + False, "Assume Barriers do not wait on " "kernel end" + ) - countPages = Param.Bool(False, "Generate per-CU file of all pages "\ - "touched and how many times") - scalar_mem_queue_size = Param.Int(32, "Number of entries in scalar "\ - "memory pipeline's queues") - global_mem_queue_size = Param.Int(256, "Number of entries in the global " - "memory pipeline's queues") - local_mem_queue_size = Param.Int(256, "Number of entries in the local " - "memory pipeline's queues") - max_wave_requests = Param.Int(64, "number of pending vector memory "\ - "requests per wavefront") - max_cu_tokens = Param.Int(4, "Maximum number of tokens, i.e., the number"\ - " of instructions that can be sent to coalescer") - ldsBus = Bridge() # the bridge between the CU and its LDS + countPages = Param.Bool( + False, + "Generate per-CU file of all pages " "touched and how many times", + ) + scalar_mem_queue_size = Param.Int( + 32, "Number of entries in scalar " "memory pipeline's queues" + ) + global_mem_queue_size = Param.Int( + 256, "Number of entries in the global " "memory pipeline's queues" + ) + local_mem_queue_size = Param.Int( + 256, "Number of entries in the local " "memory pipeline's queues" + ) + max_wave_requests = Param.Int( + 64, "number of pending vector memory " "requests per wavefront" + ) + max_cu_tokens = Param.Int( + 4, + "Maximum number of tokens, i.e., the number" + " of instructions that can be sent to coalescer", + ) + ldsBus = Bridge() # the bridge between the CU and its LDS ldsPort = RequestPort("The port that goes to the LDS") localDataStore = Param.LdsState("the LDS for this CU") - vector_register_file = VectorParam.VectorRegisterFile("Vector register "\ - "file") + vector_register_file = VectorParam.VectorRegisterFile( + "Vector register " "file" + ) - scalar_register_file = VectorParam.ScalarRegisterFile("Scalar register "\ - "file") - out_of_order_data_delivery = Param.Bool(False, "enable 
OoO data delivery" - " in the GM pipeline") + scalar_register_file = VectorParam.ScalarRegisterFile( + "Scalar register " "file" + ) + out_of_order_data_delivery = Param.Bool( + False, "enable OoO data delivery" " in the GM pipeline" + ) register_manager = Param.RegisterManager("Register Manager") - fetch_depth = Param.Int(2, 'number of i-cache lines that may be ' - 'buffered in the fetch unit.') + fetch_depth = Param.Int( + 2, "number of i-cache lines that may be " "buffered in the fetch unit." + ) + class Shader(ClockedObject): - type = 'Shader' - cxx_class = 'gem5::Shader' - cxx_header = 'gpu-compute/shader.hh' - CUs = VectorParam.ComputeUnit('Number of compute units') - gpu_cmd_proc = Param.GPUCommandProcessor('Command processor for GPU') - dispatcher = Param.GPUDispatcher('GPU workgroup dispatcher') - system_hub = Param.AMDGPUSystemHub(NULL, 'GPU System Hub (FS Mode only)') - n_wf = Param.Int(10, 'Number of wavefront slots per SIMD') - impl_kern_launch_acq = Param.Bool(True, """Insert acq packet into - ruby at kernel launch""") - impl_kern_end_rel = Param.Bool(False, """Insert rel packet into - ruby at kernel end""") - globalmem = Param.MemorySize('64kB', 'Memory size') - timing = Param.Bool(False, 'timing memory accesses') + type = "Shader" + cxx_class = "gem5::Shader" + cxx_header = "gpu-compute/shader.hh" + CUs = VectorParam.ComputeUnit("Number of compute units") + gpu_cmd_proc = Param.GPUCommandProcessor("Command processor for GPU") + dispatcher = Param.GPUDispatcher("GPU workgroup dispatcher") + system_hub = Param.AMDGPUSystemHub(NULL, "GPU System Hub (FS Mode only)") + n_wf = Param.Int(10, "Number of wavefront slots per SIMD") + impl_kern_launch_acq = Param.Bool( + True, + """Insert acq packet into + ruby at kernel launch""", + ) + impl_kern_end_rel = Param.Bool( + False, + """Insert rel packet into + ruby at kernel end""", + ) + globalmem = Param.MemorySize("64kB", "Memory size") + timing = Param.Bool(False, "timing memory accesses") cpu_pointer = 
Param.BaseCPU(NULL, "pointer to base CPU") - translation = Param.Bool(False, "address translation"); - timer_period = Param.Clock('10us', "system timer period") + translation = Param.Bool(False, "address translation") + timer_period = Param.Clock("10us", "system timer period") idlecu_timeout = Param.Tick(0, "Idle CU watchdog timeout threshold") max_valu_insts = Param.Int(0, "Maximum vALU insts before exiting") + class GPUComputeDriver(EmulatedDriver): - type = 'GPUComputeDriver' - cxx_class = 'gem5::GPUComputeDriver' - cxx_header = 'gpu-compute/gpu_compute_driver.hh' - device = Param.GPUCommandProcessor('GPU controlled by this driver') - isdGPU = Param.Bool(False, 'Driver is for a dGPU') - gfxVersion = Param.GfxVersion('gfx801', 'ISA of gpu to model') - dGPUPoolID = Param.Int(0, 'Pool ID for dGPU.') + type = "GPUComputeDriver" + cxx_class = "gem5::GPUComputeDriver" + cxx_header = "gpu-compute/gpu_compute_driver.hh" + device = Param.GPUCommandProcessor("GPU controlled by this driver") + isdGPU = Param.Bool(False, "Driver is for a dGPU") + gfxVersion = Param.GfxVersion("gfx801", "ISA of gpu to model") + dGPUPoolID = Param.Int(0, "Pool ID for dGPU.") # Default Mtype for caches - #-- 1 1 1 C_RW_S (Cached-ReadWrite-Shared) - #-- 1 1 0 C_RW_US (Cached-ReadWrite-Unshared) - #-- 1 0 1 C_RO_S (Cached-ReadOnly-Shared) - #-- 1 0 0 C_RO_US (Cached-ReadOnly-Unshared) - #-- 0 1 x UC_L2 (Uncached_GL2) - #-- 0 0 x UC_All (Uncached_All_Load) + # -- 1 1 1 C_RW_S (Cached-ReadWrite-Shared) + # -- 1 1 0 C_RW_US (Cached-ReadWrite-Unshared) + # -- 1 0 1 C_RO_S (Cached-ReadOnly-Shared) + # -- 1 0 0 C_RO_US (Cached-ReadOnly-Unshared) + # -- 0 1 x UC_L2 (Uncached_GL2) + # -- 0 0 x UC_All (Uncached_All_Load) # default value: 5/C_RO_S (only allow caching in GL2 for read. Shared) - m_type = Param.Int("Default MTYPE for cache. Valid values between 0-7"); + m_type = Param.Int("Default MTYPE for cache. 
Valid values between 0-7") + class GPURenderDriver(EmulatedDriver): - type = 'GPURenderDriver' - cxx_class = 'gem5::GPURenderDriver' - cxx_header = 'gpu-compute/gpu_render_driver.hh' + type = "GPURenderDriver" + cxx_class = "gem5::GPURenderDriver" + cxx_header = "gpu-compute/gpu_render_driver.hh" + class GPUDispatcher(SimObject): - type = 'GPUDispatcher' - cxx_class = 'gem5::GPUDispatcher' - cxx_header = 'gpu-compute/dispatcher.hh' + type = "GPUDispatcher" + cxx_class = "gem5::GPUDispatcher" + cxx_header = "gpu-compute/dispatcher.hh" + class GPUCommandProcessor(DmaVirtDevice): - type = 'GPUCommandProcessor' - cxx_class = 'gem5::GPUCommandProcessor' - cxx_header = 'gpu-compute/gpu_command_processor.hh' - dispatcher = Param.GPUDispatcher('workgroup dispatcher for the GPU') + type = "GPUCommandProcessor" + cxx_class = "gem5::GPUCommandProcessor" + cxx_header = "gpu-compute/gpu_command_processor.hh" + dispatcher = Param.GPUDispatcher("workgroup dispatcher for the GPU") - hsapp = Param.HSAPacketProcessor('PP attached to this device') - walker = Param.VegaPagetableWalker(VegaPagetableWalker(), - "Page table walker") + hsapp = Param.HSAPacketProcessor("PP attached to this device") + walker = Param.VegaPagetableWalker( + VegaPagetableWalker(), "Page table walker" + ) -class StorageClassType(Enum): vals = [ - 'SC_SPILL', - 'SC_GLOBAL', - 'SC_GROUP', - 'SC_PRIVATE', - 'SC_READONLY', - 'SC_KERNARG', - 'SC_ARG', - 'SC_NONE', + +class StorageClassType(Enum): + vals = [ + "SC_SPILL", + "SC_GLOBAL", + "SC_GROUP", + "SC_PRIVATE", + "SC_READONLY", + "SC_KERNARG", + "SC_ARG", + "SC_NONE", ] diff --git a/src/gpu-compute/GPUStaticInstFlags.py b/src/gpu-compute/GPUStaticInstFlags.py index 75bd673c19..b75e2c6c92 100644 --- a/src/gpu-compute/GPUStaticInstFlags.py +++ b/src/gpu-compute/GPUStaticInstFlags.py @@ -29,86 +29,79 @@ from m5.params import * + class GPUStaticInstFlags(Enum): - wrapper_name = 'GPUStaticInstFlags' + wrapper_name = "GPUStaticInstFlags" wrapper_is_struct = True - 
enum_name = 'Flags' + enum_name = "Flags" vals = [ # Op types - 'ALU', # ALU op - 'Branch', # Branch instruction - 'CondBranch', # Conditinal Branch instruction - 'Nop', # No-op (no effect at all) - 'Return', # Subroutine return instruction - 'EndOfKernel', # Kernel termination instruction - 'KernelLaunch', # Kernel launch inst - 'UnconditionalJump', # - 'SpecialOp', # Special op - 'Waitcnt', # Is a waitcnt instruction - 'Sleep', # Is a sleep instruction - + "ALU", # ALU op + "Branch", # Branch instruction + "CondBranch", # Conditinal Branch instruction + "Nop", # No-op (no effect at all) + "Return", # Subroutine return instruction + "EndOfKernel", # Kernel termination instruction + "KernelLaunch", # Kernel launch inst + "UnconditionalJump", # + "SpecialOp", # Special op + "Waitcnt", # Is a waitcnt instruction + "Sleep", # Is a sleep instruction # Memory ops - 'MemBarrier', # Barrier instruction - 'MemSync', # Synchronizing instruction - 'MemoryRef', # References memory (load, store, or atomic) - 'Flat', # Flat memory op - 'FlatGlobal', # Global memory op - 'Load', # Reads from memory - 'Store', # Writes to memory - + "MemBarrier", # Barrier instruction + "MemSync", # Synchronizing instruction + "MemoryRef", # References memory (load, store, or atomic) + "Flat", # Flat memory op + "FlatGlobal", # Global memory op + "Load", # Reads from memory + "Store", # Writes to memory # Atomic ops - 'AtomicReturn', # Atomic instruction that returns data - 'AtomicNoReturn', # Atomic instruction that doesn't return data - + "AtomicReturn", # Atomic instruction that returns data + "AtomicNoReturn", # Atomic instruction that doesn't return data # Instruction attributes - 'Scalar', # A scalar (not vector) operation - 'ReadsSCC', # The instruction reads SCC - 'WritesSCC', # The instruction writes SCC - 'ReadsVCC', # The instruction reads VCC - 'WritesVCC', # The instruction writes VCC - 'ReadsEXEC', # The instruction reads Exec Mask - 'WritesEXEC', # The instruction writes Exec Mask 
- 'ReadsMode', # The instruction reads Mode register - 'WritesMode', # The instruction writes Mode register - 'IgnoreExec', # The instruction ignores the Exec Mask - 'IsSDWA', # The instruction is a SDWA instruction - 'IsDPP', # The instruction is a DPP instruction - + "Scalar", # A scalar (not vector) operation + "ReadsSCC", # The instruction reads SCC + "WritesSCC", # The instruction writes SCC + "ReadsVCC", # The instruction reads VCC + "WritesVCC", # The instruction writes VCC + "ReadsEXEC", # The instruction reads Exec Mask + "WritesEXEC", # The instruction writes Exec Mask + "ReadsMode", # The instruction reads Mode register + "WritesMode", # The instruction writes Mode register + "IgnoreExec", # The instruction ignores the Exec Mask + "IsSDWA", # The instruction is a SDWA instruction + "IsDPP", # The instruction is a DPP instruction # Atomic OP types - 'AtomicAnd', - 'AtomicOr', - 'AtomicXor', - 'AtomicCAS', - 'AtomicExch', - 'AtomicAdd', - 'AtomicSub', - 'AtomicInc', - 'AtomicDec', - 'AtomicMax', - 'AtomicMin', - + "AtomicAnd", + "AtomicOr", + "AtomicXor", + "AtomicCAS", + "AtomicExch", + "AtomicAdd", + "AtomicSub", + "AtomicInc", + "AtomicDec", + "AtomicMax", + "AtomicMin", # Segment access flags - 'ArgSegment', # Accesses the arg segment - 'GlobalSegment', # Accesses global memory - 'GroupSegment', # Accesses local memory (LDS), aka shared memory - 'KernArgSegment', # Accesses the kernel argument segment - 'PrivateSegment', # Accesses the private segment - 'ReadOnlySegment', # Accesses read only memory - 'SpillSegment', # Accesses the spill segment - 'NoSegment', # Does not have an associated segment - + "ArgSegment", # Accesses the arg segment + "GlobalSegment", # Accesses global memory + "GroupSegment", # Accesses local memory (LDS), aka shared memory + "KernArgSegment", # Accesses the kernel argument segment + "PrivateSegment", # Accesses the private segment + "ReadOnlySegment", # Accesses read only memory + "SpillSegment", # Accesses the spill segment 
+ "NoSegment", # Does not have an associated segment # Coherence flags - 'GloballyCoherent', # Coherent with other work-items on same device - 'SystemCoherent', # Coherent with a different device, or the host - + "GloballyCoherent", # Coherent with other work-items on same device + "SystemCoherent", # Coherent with a different device, or the host # Floating-point flags - 'F16', # F16 operation - 'F32', # F32 operation - 'F64', # F64 operation - + "F16", # F16 operation + "F32", # F32 operation + "F64", # F64 operation # MAC, MAD, FMA - 'FMA', # FMA - 'MAC', # MAC - 'MAD' # MAD - ] + "FMA", # FMA + "MAC", # MAC + "MAD", # MAD + ] diff --git a/src/gpu-compute/LdsState.py b/src/gpu-compute/LdsState.py index fb35d825bb..637cf11264 100644 --- a/src/gpu-compute/LdsState.py +++ b/src/gpu-compute/LdsState.py @@ -33,13 +33,15 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class LdsState(ClockedObject): - type = 'LdsState' - cxx_class = 'gem5::LdsState' - cxx_header = 'gpu-compute/lds_state.hh' - size = Param.Int(65536, 'the size of the LDS') - range = Param.AddrRange('64kB', "address space of the LDS") - bankConflictPenalty = Param.Int(1, 'penalty per LDS bank conflict when '\ - 'accessing data') - banks = Param.Int(32, 'Number of LDS banks') + type = "LdsState" + cxx_class = "gem5::LdsState" + cxx_header = "gpu-compute/lds_state.hh" + size = Param.Int(65536, "the size of the LDS") + range = Param.AddrRange("64kB", "address space of the LDS") + bankConflictPenalty = Param.Int( + 1, "penalty per LDS bank conflict when " "accessing data" + ) + banks = Param.Int(32, "Number of LDS banks") cuPort = ResponsePort("port that goes to the compute unit") diff --git a/src/gpu-compute/compute_unit.cc b/src/gpu-compute/compute_unit.cc index 1b20530a8a..8498ea475e 100644 --- a/src/gpu-compute/compute_unit.cc +++ b/src/gpu-compute/compute_unit.cc @@ -1183,9 +1183,10 @@ ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, PortID index, PacketPtr pkt) 
tlbPort[tlbPort_index].retries.push_back(pkt); } else { - DPRINTF(GPUTLB, - "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n", - cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, tmp_vaddr); + DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x from " + "instruction %s sent!\n", cu_id, gpuDynInst->simdId, + gpuDynInst->wfSlotId, tmp_vaddr, + gpuDynInst->disassemble().c_str()); } } else { if (pkt->cmd == MemCmd::MemSyncReq) { diff --git a/src/gpu-compute/gpu_command_processor.cc b/src/gpu-compute/gpu_command_processor.cc index d46ace624c..af59b7822b 100644 --- a/src/gpu-compute/gpu_command_processor.cc +++ b/src/gpu-compute/gpu_command_processor.cc @@ -118,6 +118,7 @@ GPUCommandProcessor::submitDispatchPkt(void *raw_pkt, uint32_t queue_id, { static int dynamic_task_id = 0; _hsa_dispatch_packet_t *disp_pkt = (_hsa_dispatch_packet_t*)raw_pkt; + assert(!(disp_pkt->kernel_object & (system()->cacheLineSize() - 1))); /** * we need to read a pointer in the application's address @@ -150,6 +151,10 @@ GPUCommandProcessor::submitDispatchPkt(void *raw_pkt, uint32_t queue_id, is_system_page); } + DPRINTF(GPUCommandProc, "kernobj vaddr %#lx paddr %#lx size %d s:%d\n", + disp_pkt->kernel_object, phys_addr, sizeof(AMDKernelCode), + is_system_page); + /** * The kernel_object is a pointer to the machine code, whose entry * point is an 'amd_kernel_code_t' type, which is included in the @@ -167,20 +172,27 @@ GPUCommandProcessor::submitDispatchPkt(void *raw_pkt, uint32_t queue_id, } else { assert(FullSystem); DPRINTF(GPUCommandProc, "kernel_object in device, using device mem\n"); - // Read from GPU memory manager - uint8_t raw_akc[sizeof(AMDKernelCode)]; - for (int i = 0; i < sizeof(AMDKernelCode) / sizeof(uint8_t); ++i) { - Addr mmhubAddr = phys_addr + i*sizeof(uint8_t); + + // Read from GPU memory manager one cache line at a time to prevent + // rare cases where the AKC spans two memory pages. 
+ ChunkGenerator gen(disp_pkt->kernel_object, sizeof(AMDKernelCode), + system()->cacheLineSize()); + for (; !gen.done(); gen.next()) { + Addr chunk_addr = gen.addr(); + int vmid = 1; + unsigned dummy; + walker->startFunctional(gpuDevice->getVM().getPageTableBase(vmid), + chunk_addr, dummy, BaseMMU::Mode::Read, + is_system_page); + Request::Flags flags = Request::PHYSICAL; - RequestPtr request = std::make_shared( - mmhubAddr, sizeof(uint8_t), flags, walker->getDevRequestor()); + RequestPtr request = std::make_shared(chunk_addr, + system()->cacheLineSize(), flags, walker->getDevRequestor()); Packet *readPkt = new Packet(request, MemCmd::ReadReq); - readPkt->allocate(); + readPkt->dataStatic((uint8_t *)&akc + gen.complete()); system()->getDeviceMemory(readPkt)->access(readPkt); - raw_akc[i] = readPkt->getLE(); delete readPkt; } - memcpy(&akc, &raw_akc, sizeof(AMDKernelCode)); } DPRINTF(GPUCommandProc, "GPU machine code is %lli bytes from start of the " diff --git a/src/gpu-compute/gpu_command_processor.hh b/src/gpu-compute/gpu_command_processor.hh index ba8b0072e8..bafe733ee1 100644 --- a/src/gpu-compute/gpu_command_processor.hh +++ b/src/gpu-compute/gpu_command_processor.hh @@ -220,10 +220,6 @@ class GPUCommandProcessor : public DmaVirtDevice task->amdQueue.compute_tmpring_size_wavesize * 1024, task->privMemPerItem()); - // Currently this is not supported in GPU full system - fatal_if(FullSystem, - "Runtime dynamic scratch allocation not supported"); - updateHsaSignal(task->amdQueue.queue_inactive_signal.handle, 1, [ = ] (const uint64_t &dma_buffer) { WaitScratchDmaEvent(task, dma_buffer); }); @@ -273,7 +269,15 @@ class GPUCommandProcessor : public DmaVirtDevice auto cb = new DmaVirtCallback( [ = ] (const uint64_t &dma_buffer) { WaitScratchDmaEvent(task, dma_buffer); } ); - dmaReadVirt(value_addr, sizeof(Addr), cb, &cb->dmaBuffer); + + /** + * Delay for a large amount of ticks to give the CPU time to + * setup the scratch space. 
The delay should be non-zero to since + * this method calls back itself and can cause an infinite loop + * in the event queue if the allocation is not completed by the + * first time this is called. + */ + dmaReadVirt(value_addr, sizeof(Addr), cb, &cb->dmaBuffer, 1e9); } } }; diff --git a/src/gpu-compute/gpu_compute_driver.cc b/src/gpu-compute/gpu_compute_driver.cc index 203d087c1a..6c843c654f 100644 --- a/src/gpu-compute/gpu_compute_driver.cc +++ b/src/gpu-compute/gpu_compute_driver.cc @@ -733,13 +733,13 @@ GPUComputeDriver::ioctl(ThreadContext *tc, unsigned req, Addr ioc_buf) args.copyIn(virt_proxy); assert(isdGPU || gfxVersion == GfxVersion::gfx902); - assert((args->va_addr % TheISA::PageBytes) == 0); + assert((args->va_addr % X86ISA::PageBytes) == 0); [[maybe_unused]] Addr mmap_offset = 0; Request::CacheCoherenceFlags mtype = defaultMtype; Addr pa_addr = 0; - int npages = divCeil(args->size, (int64_t)TheISA::PageBytes); + int npages = divCeil(args->size, (int64_t)X86ISA::PageBytes); bool cacheable = true; if (KFD_IOC_ALLOC_MEM_FLAGS_VRAM & args->flags) { diff --git a/src/gpu-compute/hsa_queue_entry.hh b/src/gpu-compute/hsa_queue_entry.hh index 4261f2c631..fbe0efef21 100644 --- a/src/gpu-compute/hsa_queue_entry.hh +++ b/src/gpu-compute/hsa_queue_entry.hh @@ -96,9 +96,22 @@ class HSAQueueEntry if (!numVgprs) numVgprs = (akc->granulated_workitem_vgpr_count + 1) * 4; - // TODO: Granularity changes for GFX9! 
- if (!numSgprs) - numSgprs = (akc->granulated_wavefront_sgpr_count + 1) * 8; + if (!numSgprs || numSgprs == + std::numeric_limitswavefront_sgpr_count)>::max()) { + // Supported major generation numbers: 0 (BLIT kernels), 8, and 9 + uint16_t version = akc->amd_machine_version_major; + assert((version == 0) || (version == 8) || (version == 9)); + // SGPR allocation granularies: + // - GFX8: 8 + // - GFX9: 16 + // Source: https://llvm.org/docs/AMDGPUUsage.html + if ((version == 0) || (version == 8)) { + // We assume that BLIT kernels use the same granularity as GFX8 + numSgprs = (akc->granulated_wavefront_sgpr_count + 1) * 8; + } else if (version == 9) { + numSgprs = ((akc->granulated_wavefront_sgpr_count + 1) * 16)/2; + } + } initialVgprState.reset(); initialSgprState.reset(); diff --git a/src/gpu-compute/lds_state.hh b/src/gpu-compute/lds_state.hh index 9dcaccb660..5fe259506b 100644 --- a/src/gpu-compute/lds_state.hh +++ b/src/gpu-compute/lds_state.hh @@ -101,6 +101,27 @@ class LdsChunk *p0 = value; } + /** + * an atomic operation + */ + template + T + atomic(const uint32_t index, AtomicOpFunctorPtr amoOp) + { + /** + * Atomics that are outside the bounds of the LDS + * chunk allocated to this WG are dropped. 
+ */ + if (index >= chunk.size()) { + return (T)0; + } + T *p0 = (T *) (&(chunk.at(index))); + T tmp = *p0; + + (*amoOp)((uint8_t *)p0); + return tmp; + } + /** * get the size of this chunk */ diff --git a/src/gpu-compute/scoreboard_check_stage.cc b/src/gpu-compute/scoreboard_check_stage.cc index fa16e54faa..3d18260822 100644 --- a/src/gpu-compute/scoreboard_check_stage.cc +++ b/src/gpu-compute/scoreboard_check_stage.cc @@ -154,7 +154,7 @@ ScoreboardCheckStage::ready(Wavefront *w, nonrdytype_e *rdyStatus, if (!(ii->isBarrier() || ii->isNop() || ii->isReturn() || ii->isBranch() || ii->isALU() || ii->isLoad() || ii->isStore() || ii->isAtomic() || ii->isEndOfKernel() || ii->isMemSync() || ii->isFlat() || - ii->isFlatGlobal() || ii->isSleep())) { + ii->isFlatGlobal() || ii->isSleep() || ii->isLocalMem())) { panic("next instruction: %s is of unknown type\n", ii->disassemble()); } diff --git a/src/gpu-compute/shader.hh b/src/gpu-compute/shader.hh index 0978acb376..08dfd24b76 100644 --- a/src/gpu-compute/shader.hh +++ b/src/gpu-compute/shader.hh @@ -88,6 +88,9 @@ class Shader : public ClockedObject ApertureRegister _scratchApe; Addr shHiddenPrivateBaseVmid; + // Hardware regs accessed by getreg/setreg instructions, set by queues + std::unordered_map hwRegs; + // Number of active Cus attached to this shader int _activeCus; @@ -109,6 +112,18 @@ class Shader : public ClockedObject ThreadContext *gpuTc; BaseCPU *cpuPointer; + void + setHwReg(int regIdx, uint32_t val) + { + hwRegs[regIdx] = val; + } + + uint32_t + getHwReg(int regIdx) + { + return hwRegs[regIdx]; + } + const ApertureRegister& gpuVmApe() const { @@ -121,12 +136,26 @@ class Shader : public ClockedObject return _ldsApe; } + void + setLdsApe(Addr base, Addr limit) + { + _ldsApe.base = base; + _ldsApe.limit = limit; + } + const ApertureRegister& scratchApe() const { return _scratchApe; } + void + setScratchApe(Addr base, Addr limit) + { + _scratchApe.base = base; + _scratchApe.limit = limit; + } + bool 
isGpuVmApe(Addr addr) const { diff --git a/src/gpu-compute/wavefront.cc b/src/gpu-compute/wavefront.cc index 8e1a066bbb..7e4b36f7e5 100644 --- a/src/gpu-compute/wavefront.cc +++ b/src/gpu-compute/wavefront.cc @@ -252,6 +252,18 @@ Wavefront::initRegState(HSAQueueEntry *task, int wgSizeInWorkItems) ++regInitIdx; break; + case DispatchId: + physSgprIdx + = computeUnit->registerManager->mapSgpr(this, regInitIdx); + computeUnit->srf[simdId]->write(physSgprIdx, + task->dispatchId()); + ++regInitIdx; + DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] " + "Setting DispatchId: s[%d] = %x\n", + computeUnit->cu_id, simdId, + wfSlotId, wfDynId, physSgprIdx, + task->dispatchId()); + break; case FlatScratchInit: physSgprIdx = computeUnit->registerManager->mapSgpr(this, regInitIdx); @@ -309,6 +321,18 @@ Wavefront::initRegState(HSAQueueEntry *task, int wgSizeInWorkItems) hidden_priv_base, task->amdQueue.scratch_backing_memory_location); break; + case PrivateSegSize: + physSgprIdx + = computeUnit->registerManager->mapSgpr(this, regInitIdx); + computeUnit->srf[simdId]->write(physSgprIdx, + task->privMemPerItem()); + ++regInitIdx; + DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] " + "Setting private segment size: s[%d] = %x\n", + computeUnit->cu_id, simdId, + wfSlotId, wfDynId, physSgprIdx, + task->privMemPerItem()); + break; case GridWorkgroupCountX: physSgprIdx = computeUnit->registerManager->mapSgpr(this, regInitIdx); diff --git a/src/kern/SConscript b/src/kern/SConscript index 7ccff2a913..88bb209d13 100644 --- a/src/kern/SConscript +++ b/src/kern/SConscript @@ -28,9 +28,6 @@ Import('*') -if env['CONF']['TARGET_ISA'] == 'null': - Return() - Source('linux/events.cc') Source('linux/linux.cc') Source('linux/helpers.cc') diff --git a/src/kern/linux/linux.hh b/src/kern/linux/linux.hh index 73b0404434..5b7a20ff4a 100644 --- a/src/kern/linux/linux.hh +++ b/src/kern/linux/linux.hh @@ -309,6 +309,7 @@ class Linux : public OperatingSystem static const unsigned TGT_CLONE_FS = 0x00000200; 
static const unsigned TGT_CLONE_FILES = 0x00000400; static const unsigned TGT_CLONE_SIGHAND = 0x00000800; + static const unsigned TGT_CLONE_PIDFD = 0x00001000; static const unsigned TGT_CLONE_PTRACE = 0x00002000; static const unsigned TGT_CLONE_VFORK = 0x00004000; static const unsigned TGT_CLONE_PARENT = 0x00008000; diff --git a/src/learning_gem5/part2/HelloObject.py b/src/learning_gem5/part2/HelloObject.py index c7daf10224..07ffd01c06 100644 --- a/src/learning_gem5/part2/HelloObject.py +++ b/src/learning_gem5/part2/HelloObject.py @@ -28,23 +28,28 @@ from m5.params import * from m5.SimObject import SimObject + class HelloObject(SimObject): - type = 'HelloObject' + type = "HelloObject" cxx_header = "learning_gem5/part2/hello_object.hh" - cxx_class = 'gem5::HelloObject' + cxx_class = "gem5::HelloObject" time_to_wait = Param.Latency("Time before firing the event") - number_of_fires = Param.Int(1, "Number of times to fire the event before " - "goodbye") + number_of_fires = Param.Int( + 1, "Number of times to fire the event before " "goodbye" + ) goodbye_object = Param.GoodbyeObject("A goodbye object") -class GoodbyeObject(SimObject): - type = 'GoodbyeObject' - cxx_header = "learning_gem5/part2/goodbye_object.hh" - cxx_class = 'gem5::GoodbyeObject' - buffer_size = Param.MemorySize('1kB', - "Size of buffer to fill with goodbye") - write_bandwidth = Param.MemoryBandwidth('100MB/s', "Bandwidth to fill " - "the buffer") +class GoodbyeObject(SimObject): + type = "GoodbyeObject" + cxx_header = "learning_gem5/part2/goodbye_object.hh" + cxx_class = "gem5::GoodbyeObject" + + buffer_size = Param.MemorySize( + "1kB", "Size of buffer to fill with goodbye" + ) + write_bandwidth = Param.MemoryBandwidth( + "100MB/s", "Bandwidth to fill " "the buffer" + ) diff --git a/src/learning_gem5/part2/SimpleCache.py b/src/learning_gem5/part2/SimpleCache.py index 40a075c85c..1295e543fd 100644 --- a/src/learning_gem5/part2/SimpleCache.py +++ b/src/learning_gem5/part2/SimpleCache.py @@ -29,10 
+29,11 @@ from m5.params import * from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class SimpleCache(ClockedObject): - type = 'SimpleCache' + type = "SimpleCache" cxx_header = "learning_gem5/part2/simple_cache.hh" - cxx_class = 'gem5::SimpleCache' + cxx_class = "gem5::SimpleCache" # Vector port example. Both the instruction and data ports connect to this # port which is automatically split out into two ports. @@ -41,6 +42,6 @@ class SimpleCache(ClockedObject): latency = Param.Cycles(1, "Cycles taken on a hit or to resolve a miss") - size = Param.MemorySize('16kB', "The size of the cache") + size = Param.MemorySize("16kB", "The size of the cache") system = Param.System(Parent.any, "The system this cache is part of") diff --git a/src/learning_gem5/part2/SimpleMemobj.py b/src/learning_gem5/part2/SimpleMemobj.py index 0231b1f623..2ab95ff76e 100644 --- a/src/learning_gem5/part2/SimpleMemobj.py +++ b/src/learning_gem5/part2/SimpleMemobj.py @@ -28,10 +28,11 @@ from m5.params import * from m5.SimObject import SimObject + class SimpleMemobj(SimObject): - type = 'SimpleMemobj' + type = "SimpleMemobj" cxx_header = "learning_gem5/part2/simple_memobj.hh" - cxx_class = 'gem5::SimpleMemobj' + cxx_class = "gem5::SimpleMemobj" inst_port = ResponsePort("CPU side port, receives requests") data_port = ResponsePort("CPU side port, receives requests") diff --git a/src/learning_gem5/part2/SimpleObject.py b/src/learning_gem5/part2/SimpleObject.py index 28555ddb6f..2acbc77759 100644 --- a/src/learning_gem5/part2/SimpleObject.py +++ b/src/learning_gem5/part2/SimpleObject.py @@ -28,7 +28,8 @@ from m5.params import * from m5.SimObject import SimObject + class SimpleObject(SimObject): - type = 'SimpleObject' + type = "SimpleObject" cxx_header = "learning_gem5/part2/simple_object.hh" - cxx_class = 'gem5::SimpleObject' + cxx_class = "gem5::SimpleObject" diff --git a/src/mem/AbstractMemory.py b/src/mem/AbstractMemory.py index ed2a02c806..ea88fd879c 100644 --- 
a/src/mem/AbstractMemory.py +++ b/src/mem/AbstractMemory.py @@ -39,16 +39,18 @@ from m5.params import * from m5.objects.ClockedObject import ClockedObject + class AbstractMemory(ClockedObject): - type = 'AbstractMemory' + type = "AbstractMemory" abstract = True cxx_header = "mem/abstract_mem.hh" - cxx_class = 'gem5::memory::AbstractMemory' + cxx_class = "gem5::memory::AbstractMemory" # A default memory size of 128 MiB (starting at 0) is used to # simplify the regressions - range = Param.AddrRange('128MiB', - "Address range (potentially interleaved)") + range = Param.AddrRange( + "128MiB", "Address range (potentially interleaved)" + ) null = Param.Bool(False, "Do not store data, always return zero") # All memories are passed to the global physical memory, and @@ -69,5 +71,6 @@ class AbstractMemory(ClockedObject): # Image file to load into this memory as its initial contents. This is # particularly useful for ROMs. - image_file = Param.String('', - "Image to load into memory as its initial contents") + image_file = Param.String( + "", "Image to load into memory as its initial contents" + ) diff --git a/src/mem/AddrMapper.py b/src/mem/AddrMapper.py index bd6b08eea0..1897236526 100644 --- a/src/mem/AddrMapper.py +++ b/src/mem/AddrMapper.py @@ -43,28 +43,33 @@ from m5.SimObject import SimObject # the request port (i.e. the memory side) to the response port are # currently not modified. 
class AddrMapper(SimObject): - type = 'AddrMapper' - cxx_header = 'mem/addr_mapper.hh' - cxx_class = 'gem5::AddrMapper' + type = "AddrMapper" + cxx_header = "mem/addr_mapper.hh" + cxx_class = "gem5::AddrMapper" abstract = True # one port in each direction - mem_side_port = RequestPort("This port sends requests and " - "receives responses") - master = DeprecatedParam(mem_side_port, - '`master` is now called `mem_side_port`') - cpu_side_port = ResponsePort("This port receives requests and " - "sends responses") - slave = DeprecatedParam(cpu_side_port, - '`slave` is now called `cpu_side_port`') + mem_side_port = RequestPort( + "This port sends requests and " "receives responses" + ) + master = DeprecatedParam( + mem_side_port, "`master` is now called `mem_side_port`" + ) + cpu_side_port = ResponsePort( + "This port receives requests and " "sends responses" + ) + slave = DeprecatedParam( + cpu_side_port, "`slave` is now called `cpu_side_port`" + ) + # Range address mapper that maps a set of original ranges to a set of # remapped ranges, where a specific range is of the same size # (original and remapped), only with an offset. class RangeAddrMapper(AddrMapper): - type = 'RangeAddrMapper' - cxx_header = 'mem/addr_mapper.hh' - cxx_class = 'gem5::RangeAddrMapper' + type = "RangeAddrMapper" + cxx_header = "mem/addr_mapper.hh" + cxx_class = "gem5::RangeAddrMapper" # These two vectors should be the exact same length and each range # should be the exact same size. Each range in original_ranges is @@ -72,6 +77,8 @@ class RangeAddrMapper(AddrMapper): # that the same range can occur multiple times in the remapped # ranges for address aliasing. 
original_ranges = VectorParam.AddrRange( - "Ranges of memory that should me remapped") + "Ranges of memory that should me remapped" + ) remapped_ranges = VectorParam.AddrRange( - "Ranges of memory that are being mapped to") + "Ranges of memory that are being mapped to" + ) diff --git a/src/mem/Bridge.py b/src/mem/Bridge.py index 691e703089..a82f410d56 100644 --- a/src/mem/Bridge.py +++ b/src/mem/Bridge.py @@ -39,22 +39,28 @@ from m5.params import * from m5.objects.ClockedObject import ClockedObject -class Bridge(ClockedObject): - type = 'Bridge' - cxx_header = "mem/bridge.hh" - cxx_class = 'gem5::Bridge' - mem_side_port = RequestPort("This port sends requests and " - "receives responses") - master = DeprecatedParam(mem_side_port, - '`master` is now called `mem_side_port`') - cpu_side_port = ResponsePort("This port receives requests and " - "sends responses") - slave = DeprecatedParam(cpu_side_port, - '`slave` is now called `cpu_side_port`') +class Bridge(ClockedObject): + type = "Bridge" + cxx_header = "mem/bridge.hh" + cxx_class = "gem5::Bridge" + + mem_side_port = RequestPort( + "This port sends requests and " "receives responses" + ) + master = DeprecatedParam( + mem_side_port, "`master` is now called `mem_side_port`" + ) + cpu_side_port = ResponsePort( + "This port receives requests and " "sends responses" + ) + slave = DeprecatedParam( + cpu_side_port, "`slave` is now called `cpu_side_port`" + ) req_size = Param.Unsigned(16, "The number of requests to buffer") resp_size = Param.Unsigned(16, "The number of responses to buffer") - delay = Param.Latency('0ns', "The latency of this bridge") - ranges = VectorParam.AddrRange([AllMemory], - "Address ranges to pass through the bridge") + delay = Param.Latency("0ns", "The latency of this bridge") + ranges = VectorParam.AddrRange( + [AllMemory], "Address ranges to pass through the bridge" + ) diff --git a/src/mem/CfiMemory.py b/src/mem/CfiMemory.py index aa6b18a45f..c8de9e511e 100644 --- a/src/mem/CfiMemory.py +++ 
b/src/mem/CfiMemory.py @@ -40,19 +40,21 @@ from m5.params import * from m5.objects.AbstractMemory import AbstractMemory from m5.util.fdthelper import FdtNode, FdtPropertyWords + class CfiMemory(AbstractMemory): - type = 'CfiMemory' + type = "CfiMemory" cxx_header = "mem/cfi_mem.hh" - cxx_class = 'gem5::memory::CfiMemory' + cxx_class = "gem5::memory::CfiMemory" port = ResponsePort("Response port") - latency = Param.Latency('30ns', "Request to response latency") - latency_var = Param.Latency('0ns', "Request to response latency variance") + latency = Param.Latency("30ns", "Request to response latency") + latency_var = Param.Latency("0ns", "Request to response latency variance") # The memory bandwidth limit default is set to 12.8GB/s which is # representative of a x64 DDR3-1600 channel. - bandwidth = Param.MemoryBandwidth('12.8GB/s', - "Combined read and write bandwidth") + bandwidth = Param.MemoryBandwidth( + "12.8GB/s", "Combined read and write bandwidth" + ) vendor_id = Param.UInt16(0, "vendor ID") device_id = Param.UInt16(0, "device ID") diff --git a/src/mem/CommMonitor.py b/src/mem/CommMonitor.py index ff02b61d1d..288aeb5a07 100644 --- a/src/mem/CommMonitor.py +++ b/src/mem/CommMonitor.py @@ -41,21 +41,25 @@ from m5.SimObject import SimObject # The communication monitor will most typically be used in combination # with periodic dumping and resetting of stats using schedStatEvent class CommMonitor(SimObject): - type = 'CommMonitor' + type = "CommMonitor" cxx_header = "mem/comm_monitor.hh" - cxx_class = 'gem5::CommMonitor' + cxx_class = "gem5::CommMonitor" system = Param.System(Parent.any, "System that the monitor belongs to.") # one port in each direction - mem_side_port = RequestPort("This port sends requests and " - "receives responses") - master = DeprecatedParam(mem_side_port, - '`master` is now called `mem_side_port`') - cpu_side_port = ResponsePort("This port receives requests and " - "sends responses") - slave = DeprecatedParam(cpu_side_port, - '`slave` is 
now called `cpu_side_port`') + mem_side_port = RequestPort( + "This port sends requests and " "receives responses" + ) + master = DeprecatedParam( + mem_side_port, "`master` is now called `mem_side_port`" + ) + cpu_side_port = ResponsePort( + "This port receives requests and " "sends responses" + ) + slave = DeprecatedParam( + cpu_side_port, "`slave` is now called `cpu_side_port`" + ) # control the sample period window length of this monitor sample_period = Param.Clock("1ms", "Sample period for histograms") @@ -65,39 +69,45 @@ class CommMonitor(SimObject): # parameters # histogram of burst length of packets (not using sample period) - burst_length_bins = Param.Unsigned('20', "# bins in burst length " \ - "histograms") - disable_burst_length_hists = Param.Bool(False, "Disable burst length " \ - "histograms") + burst_length_bins = Param.Unsigned( + "20", "# bins in burst length " "histograms" + ) + disable_burst_length_hists = Param.Bool( + False, "Disable burst length " "histograms" + ) # bandwidth per sample period - bandwidth_bins = Param.Unsigned('20', "# bins in bandwidth histograms") + bandwidth_bins = Param.Unsigned("20", "# bins in bandwidth histograms") disable_bandwidth_hists = Param.Bool(False, "Disable bandwidth histograms") # latency from request to response (not using sample period) - latency_bins = Param.Unsigned('20', "# bins in latency histograms") + latency_bins = Param.Unsigned("20", "# bins in latency histograms") disable_latency_hists = Param.Bool(False, "Disable latency histograms") # inter transaction time (ITT) distributions in uniformly sized # bins up to the maximum, independently for read-to-read, # write-to-write and the combined request-to-request that does not # separate read and write requests - itt_bins = Param.Unsigned('20', "# bins in ITT distributions") - itt_max_bin = Param.Latency('100ns', "Max bin of ITT distributions") + itt_bins = Param.Unsigned("20", "# bins in ITT distributions") + itt_max_bin = Param.Latency("100ns", "Max 
bin of ITT distributions") disable_itt_dists = Param.Bool(False, "Disable ITT distributions") # outstanding requests (that did not yet get a response) per # sample period - outstanding_bins = Param.Unsigned('20', "# bins in outstanding " \ - "requests histograms") - disable_outstanding_hists = Param.Bool(False, "Disable outstanding " \ - "requests histograms") + outstanding_bins = Param.Unsigned( + "20", "# bins in outstanding " "requests histograms" + ) + disable_outstanding_hists = Param.Bool( + False, "Disable outstanding " "requests histograms" + ) # transactions (requests) observed per sample period - transaction_bins = Param.Unsigned('20', "# bins in transaction " \ - "count histograms") - disable_transaction_hists = Param.Bool(False, "Disable transaction count " \ - "histograms") + transaction_bins = Param.Unsigned( + "20", "# bins in transaction " "count histograms" + ) + disable_transaction_hists = Param.Bool( + False, "Disable transaction count " "histograms" + ) # address distributions (heatmaps) with associated address masks # to selectively only look at certain bits of the address diff --git a/src/mem/DRAMInterface.py b/src/mem/DRAMInterface.py index 3d062c31df..87bc11b94f 100644 --- a/src/mem/DRAMInterface.py +++ b/src/mem/DRAMInterface.py @@ -43,20 +43,22 @@ from m5.objects.MemInterface import * # Enum for the page policy, either open, open_adaptive, close, or # close_adaptive. 
-class PageManage(Enum): vals = ['open', 'open_adaptive', 'close', - 'close_adaptive'] +class PageManage(Enum): + vals = ["open", "open_adaptive", "close", "close_adaptive"] + class DRAMInterface(MemInterface): - type = 'DRAMInterface' + type = "DRAMInterface" cxx_header = "mem/dram_interface.hh" - cxx_class = 'gem5::memory::DRAMInterface' + cxx_class = "gem5::memory::DRAMInterface" # scheduler page policy - page_policy = Param.PageManage('open_adaptive', "Page management policy") + page_policy = Param.PageManage("open_adaptive", "Page management policy") # enforce a limit on the number of accesses per row - max_accesses_per_row = Param.Unsigned(16, "Max accesses per row before " - "closing"); + max_accesses_per_row = Param.Unsigned( + 16, "Max accesses per row before " "closing" + ) # default to 0 bank groups per rank, indicating bank group architecture # is not used @@ -123,8 +125,9 @@ class DRAMInterface(MemInterface): # only utilized with bank group architectures; set to 0 for default case # This will be used to enable different same bank group delays # for writes versus reads - tCCD_L_WR = Param.Latency(Self.tCCD_L, - "Same bank group Write to Write delay") + tCCD_L_WR = Param.Latency( + Self.tCCD_L, "Same bank group Write to Write delay" + ) # time taken to complete one refresh cycle (N rows in all banks) tRFC = Param.Latency("Refresh cycle time") @@ -134,18 +137,22 @@ class DRAMInterface(MemInterface): tREFI = Param.Latency("Refresh command interval") # write-to-read, same rank turnaround penalty for same bank group - tWTR_L = Param.Latency(Self.tWTR, "Write to read, same rank switching " - "time, same bank group") + tWTR_L = Param.Latency( + Self.tWTR, + "Write to read, same rank switching " "time, same bank group", + ) # minimum precharge to precharge delay time tPPD = Param.Latency("0ns", "PRE to PRE delay") # maximum delay between two-cycle ACT command phases - tAAD = Param.Latency(Self.tCK, - "Maximum delay between two-cycle ACT commands") + tAAD = 
Param.Latency( + Self.tCK, "Maximum delay between two-cycle ACT commands" + ) - two_cycle_activate = Param.Bool(False, - "Two cycles required to send activate") + two_cycle_activate = Param.Bool( + False, "Two cycles required to send activate" + ) # minimum row activate to row activate delay time tRRD = Param.Latency("ACT to ACT delay") @@ -272,12 +279,13 @@ class DRAMInterface(MemInterface): controller.dram = self return controller + # A single DDR3-1600 x64 channel (one command and address bus), with # timings based on a DDR3-1600 4 Gbit datasheet (Micron MT41J512M8) in # an 8x8 configuration. class DDR3_1600_8x8(DRAMInterface): # size of device in bytes - device_size = '512MiB' + device_size = "512MiB" # 8x8 configuration, 8 devices each with an 8-bit interface device_bus_width = 8 @@ -286,7 +294,7 @@ class DDR3_1600_8x8(DRAMInterface): burst_length = 8 # Each device has a page (row buffer) size of 1 Kbyte (1K columns x8) - device_rowbuffer_size = '1KiB' + device_rowbuffer_size = "1KiB" # 8x8 configuration, so 8 devices devices_per_rank = 8 @@ -298,55 +306,56 @@ class DDR3_1600_8x8(DRAMInterface): banks_per_rank = 8 # 800 MHz - tCK = '1.25ns' + tCK = "1.25ns" # 8 beats across an x64 interface translates to 4 clocks @ 800 MHz - tBURST = '5ns' + tBURST = "5ns" # DDR3-1600 11-11-11 - tRCD = '13.75ns' - tCL = '13.75ns' - tRP = '13.75ns' - tRAS = '35ns' - tRRD = '6ns' - tXAW = '30ns' + tRCD = "13.75ns" + tCL = "13.75ns" + tRP = "13.75ns" + tRAS = "35ns" + tRRD = "6ns" + tXAW = "30ns" activation_limit = 4 - tRFC = '260ns' + tRFC = "260ns" - tWR = '15ns' + tWR = "15ns" # Greater of 4 CK or 7.5 ns - tWTR = '7.5ns' + tWTR = "7.5ns" # Greater of 4 CK or 7.5 ns - tRTP = '7.5ns' + tRTP = "7.5ns" # Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns - tRTW = '2.5ns' + tRTW = "2.5ns" # Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns - tCS = '2.5ns' + tCS = "2.5ns" # <=85C, half for >85C - tREFI = '7.8us' + tREFI = "7.8us" # active powerdown and 
precharge powerdown exit time - tXP = '6ns' + tXP = "6ns" # self refresh exit time - tXS = '270ns' + tXS = "270ns" # Current values from datasheet Die Rev E,J - IDD0 = '55mA' - IDD2N = '32mA' - IDD3N = '38mA' - IDD4W = '125mA' - IDD4R = '157mA' - IDD5 = '235mA' - IDD3P1 = '38mA' - IDD2P1 = '32mA' - IDD6 = '20mA' - VDD = '1.5V' + IDD0 = "55mA" + IDD2N = "32mA" + IDD3N = "38mA" + IDD4W = "125mA" + IDD4R = "157mA" + IDD5 = "235mA" + IDD3P1 = "38mA" + IDD2P1 = "32mA" + IDD6 = "20mA" + VDD = "1.5V" + # A single HMC-2500 x32 model based on: # [1] DRAMSpec: a high-level DRAM bank modelling tool @@ -373,7 +382,7 @@ class DDR3_1600_8x8(DRAMInterface): class HMC_2500_1x32(DDR3_1600_8x8): # size of device # two banks per device with each bank 4MiB [2] - device_size = '8MiB' + device_size = "8MiB" # 1x32 configuration, 1 device with 32 TSVs [2] device_bus_width = 32 @@ -382,7 +391,7 @@ class HMC_2500_1x32(DDR3_1600_8x8): burst_length = 8 # Each device has a page (row buffer) size of 256 bytes [2] - device_rowbuffer_size = '256B' + device_rowbuffer_size = "256B" # 1x32 configuration, so 1 device [2] devices_per_rank = 1 @@ -396,45 +405,45 @@ class HMC_2500_1x32(DDR3_1600_8x8): banks_per_rank = 2 # 1250 MHz [2] - tCK = '0.8ns' + tCK = "0.8ns" # 8 beats across an x32 interface translates to 4 clocks @ 1250 MHz - tBURST = '3.2ns' + tBURST = "3.2ns" # Values using DRAMSpec HMC model [1] - tRCD = '10.2ns' - tCL = '9.9ns' - tRP = '7.7ns' - tRAS = '21.6ns' + tRCD = "10.2ns" + tCL = "9.9ns" + tRP = "7.7ns" + tRAS = "21.6ns" # tRRD depends on the power supply network for each vendor. # We assume a tRRD of a double bank approach to be equal to 4 clock # cycles (Assumption) - tRRD = '3.2ns' + tRRD = "3.2ns" # activation limit is set to 0 since there are only 2 banks per vault # layer. 
activation_limit = 0 # Values using DRAMSpec HMC model [1] - tRFC = '59ns' - tWR = '8ns' - tRTP = '4.9ns' + tRFC = "59ns" + tWR = "8ns" + tRTP = "4.9ns" # Default different rank bus delay assumed to 1 CK for TSVs, @1250 MHz = # 0.8 ns (Assumption) - tCS = '0.8ns' + tCS = "0.8ns" # Value using DRAMSpec HMC model [1] - tREFI = '3.9us' + tREFI = "3.9us" # The default page policy in the vault controllers is simple closed page # [2] nevertheless 'close' policy opens and closes the row multiple times # for bursts largers than 32Bytes. For this reason we use 'close_adaptive' - page_policy = 'close_adaptive' + page_policy = "close_adaptive" # RoCoRaBaCh resembles the default address mapping in HMC - addr_mapping = 'RoCoRaBaCh' + addr_mapping = "RoCoRaBaCh" # These parameters do not directly correlate with buffer_size in real # hardware. Nevertheless, their value has been tuned to achieve a @@ -447,42 +456,46 @@ class HMC_2500_1x32(DDR3_1600_8x8): Instantiate the memory controller and bind it to the current interface. """ - controller = MemCtrl(min_writes_per_switch = 8, - static_backend_latency = '4ns', - static_frontend_latency = '4ns') + controller = MemCtrl( + min_writes_per_switch=8, + static_backend_latency="4ns", + static_frontend_latency="4ns", + ) controller.dram = self return controller + # A single DDR3-2133 x64 channel refining a selected subset of the # options for the DDR-1600 configuration, based on the same DDR3-1600 # 4 Gbit datasheet (Micron MT41J512M8). Most parameters are kept # consistent across the two configurations. 
class DDR3_2133_8x8(DDR3_1600_8x8): # 1066 MHz - tCK = '0.938ns' + tCK = "0.938ns" # 8 beats across an x64 interface translates to 4 clocks @ 1066 MHz - tBURST = '3.752ns' + tBURST = "3.752ns" # DDR3-2133 14-14-14 - tRCD = '13.09ns' - tCL = '13.09ns' - tRP = '13.09ns' - tRAS = '33ns' - tRRD = '5ns' - tXAW = '25ns' + tRCD = "13.09ns" + tCL = "13.09ns" + tRP = "13.09ns" + tRAS = "33ns" + tRRD = "5ns" + tXAW = "25ns" # Current values from datasheet - IDD0 = '70mA' - IDD2N = '37mA' - IDD3N = '44mA' - IDD4W = '157mA' - IDD4R = '191mA' - IDD5 = '250mA' - IDD3P1 = '44mA' - IDD2P1 = '43mA' - IDD6 ='20mA' - VDD = '1.5V' + IDD0 = "70mA" + IDD2N = "37mA" + IDD3N = "44mA" + IDD4W = "157mA" + IDD4R = "191mA" + IDD5 = "250mA" + IDD3P1 = "44mA" + IDD2P1 = "43mA" + IDD6 = "20mA" + VDD = "1.5V" + # A single DDR4-2400 x64 channel (one command and address bus), with # timings based on a DDR4-2400 8 Gbit datasheet (Micron MT40A2G4) @@ -491,7 +504,7 @@ class DDR3_2133_8x8(DDR3_1600_8x8): # 16 devices/rank * 2 ranks/channel * 1GiB/device = 32GiB/channel class DDR4_2400_16x4(DRAMInterface): # size of device - device_size = '1GiB' + device_size = "1GiB" # 16x4 configuration, 16 devices each with a 4-bit interface device_bus_width = 4 @@ -500,7 +513,7 @@ class DDR4_2400_16x4(DRAMInterface): burst_length = 8 # Each device has a page (row buffer) size of 512 byte (1K columns x4) - device_rowbuffer_size = '512B' + device_rowbuffer_size = "512B" # 16x4 configuration, so 16 devices devices_per_rank = 16 @@ -523,77 +536,78 @@ class DDR4_2400_16x4(DRAMInterface): read_buffer_size = 64 # 1200 MHz - tCK = '0.833ns' + tCK = "0.833ns" # 8 beats across an x64 interface translates to 4 clocks @ 1200 MHz # tBURST is equivalent to the CAS-to-CAS delay (tCCD) # With bank group architectures, tBURST represents the CAS-to-CAS # delay for bursts to different bank groups (tCCD_S) - tBURST = '3.332ns' + tBURST = "3.332ns" # @2400 data rate, tCCD_L is 6 CK # CAS-to-CAS delay for bursts to the same bank group # 
tBURST is equivalent to tCCD_S; no explicit parameter required # for CAS-to-CAS delay for bursts to different bank groups - tCCD_L = '5ns'; + tCCD_L = "5ns" # DDR4-2400 17-17-17 - tRCD = '14.16ns' - tCL = '14.16ns' - tRP = '14.16ns' - tRAS = '32ns' + tRCD = "14.16ns" + tCL = "14.16ns" + tRP = "14.16ns" + tRAS = "32ns" # RRD_S (different bank group) for 512B page is MAX(4 CK, 3.3ns) - tRRD = '3.332ns' + tRRD = "3.332ns" # RRD_L (same bank group) for 512B page is MAX(4 CK, 4.9ns) - tRRD_L = '4.9ns'; + tRRD_L = "4.9ns" # tFAW for 512B page is MAX(16 CK, 13ns) - tXAW = '13.328ns' + tXAW = "13.328ns" activation_limit = 4 # tRFC is 350ns - tRFC = '350ns' + tRFC = "350ns" - tWR = '15ns' + tWR = "15ns" # Here using the average of WTR_S and WTR_L - tWTR = '5ns' + tWTR = "5ns" # Greater of 4 CK or 7.5 ns - tRTP = '7.5ns' + tRTP = "7.5ns" # Default same rank rd-to-wr bus turnaround to 2 CK, @1200 MHz = 1.666 ns - tRTW = '1.666ns' + tRTW = "1.666ns" # Default different rank bus delay to 2 CK, @1200 MHz = 1.666 ns - tCS = '1.666ns' + tCS = "1.666ns" # <=85C, half for >85C - tREFI = '7.8us' + tREFI = "7.8us" # active powerdown and precharge powerdown exit time - tXP = '6ns' + tXP = "6ns" # self refresh exit time # exit delay to ACT, PRE, PREALL, REF, SREF Enter, and PD Enter is: # tRFC + 10ns = 340ns - tXS = '340ns' + tXS = "340ns" # Current values from datasheet - IDD0 = '43mA' - IDD02 = '3mA' - IDD2N = '34mA' - IDD3N = '38mA' - IDD3N2 = '3mA' - IDD4W = '103mA' - IDD4R = '110mA' - IDD5 = '250mA' - IDD3P1 = '32mA' - IDD2P1 = '25mA' - IDD6 = '30mA' - VDD = '1.2V' - VDD2 = '2.5V' + IDD0 = "43mA" + IDD02 = "3mA" + IDD2N = "34mA" + IDD3N = "38mA" + IDD3N2 = "3mA" + IDD4W = "103mA" + IDD4R = "110mA" + IDD5 = "250mA" + IDD3P1 = "32mA" + IDD2P1 = "25mA" + IDD6 = "30mA" + VDD = "1.2V" + VDD2 = "2.5V" + # A single DDR4-2400 x64 channel (one command and address bus), with # timings based on a DDR4-2400 8 Gbit datasheet (Micron MT40A1G8) @@ -605,22 +619,23 @@ class 
DDR4_2400_8x8(DDR4_2400_16x4): device_bus_width = 8 # Each device has a page (row buffer) size of 1 Kbyte (1K columns x8) - device_rowbuffer_size = '1KiB' + device_rowbuffer_size = "1KiB" # 8x8 configuration, so 8 devices devices_per_rank = 8 # RRD_L (same bank group) for 1K page is MAX(4 CK, 4.9ns) - tRRD_L = '4.9ns'; + tRRD_L = "4.9ns" - tXAW = '21ns' + tXAW = "21ns" # Current values from datasheet - IDD0 = '48mA' - IDD3N = '43mA' - IDD4W = '123mA' - IDD4R = '135mA' - IDD3P1 = '37mA' + IDD0 = "48mA" + IDD3N = "43mA" + IDD4W = "123mA" + IDD4R = "135mA" + IDD3P1 = "37mA" + # A single DDR4-2400 x64 channel (one command and address bus), with # timings based on a DDR4-2400 8 Gbit datasheet (Micron MT40A512M16) @@ -632,7 +647,7 @@ class DDR4_2400_4x16(DDR4_2400_16x4): device_bus_width = 16 # Each device has a page (row buffer) size of 2 Kbyte (1K columns x16) - device_rowbuffer_size = '2KiB' + device_rowbuffer_size = "2KiB" # 4x16 configuration, so 4 devices devices_per_rank = 4 @@ -650,22 +665,23 @@ class DDR4_2400_4x16(DDR4_2400_16x4): banks_per_rank = 8 # RRD_S (different bank group) for 2K page is MAX(4 CK, 5.3ns) - tRRD = '5.3ns' + tRRD = "5.3ns" # RRD_L (same bank group) for 2K page is MAX(4 CK, 6.4ns) - tRRD_L = '6.4ns'; + tRRD_L = "6.4ns" - tXAW = '30ns' + tXAW = "30ns" # Current values from datasheet - IDD0 = '80mA' - IDD02 = '4mA' - IDD2N = '34mA' - IDD3N = '47mA' - IDD4W = '228mA' - IDD4R = '243mA' - IDD5 = '280mA' - IDD3P1 = '41mA' + IDD0 = "80mA" + IDD02 = "4mA" + IDD2N = "34mA" + IDD3N = "47mA" + IDD4W = "228mA" + IDD4R = "243mA" + IDD5 = "280mA" + IDD3P1 = "41mA" + # A single LPDDR2-S4 x32 interface (one command/address bus), with # default timings based on a LPDDR2-1066 4 Gbit part (Micron MT42L128M32D1) @@ -675,7 +691,7 @@ class LPDDR2_S4_1066_1x32(DRAMInterface): dll = False # size of device - device_size = '512MiB' + device_size = "512MiB" # 1x32 configuration, 1 device with a 32-bit interface device_bus_width = 32 @@ -685,7 +701,7 @@ class 
LPDDR2_S4_1066_1x32(DRAMInterface): # Each device has a page (row buffer) size of 1KB # (this depends on the memory density) - device_rowbuffer_size = '1KiB' + device_rowbuffer_size = "1KiB" # 1x32 configuration, so 1 device devices_per_rank = 1 @@ -697,75 +713,76 @@ class LPDDR2_S4_1066_1x32(DRAMInterface): banks_per_rank = 8 # 533 MHz - tCK = '1.876ns' + tCK = "1.876ns" # Fixed at 15 ns - tRCD = '15ns' + tRCD = "15ns" # 8 CK read latency, 4 CK write latency @ 533 MHz, 1.876 ns cycle time - tCL = '15ns' + tCL = "15ns" # Pre-charge one bank 15 ns (all banks 18 ns) - tRP = '15ns' + tRP = "15ns" - tRAS = '42ns' - tWR = '15ns' + tRAS = "42ns" + tWR = "15ns" - tRTP = '7.5ns' + tRTP = "7.5ns" # 8 beats across an x32 DDR interface translates to 4 clocks @ 533 MHz. # Note this is a BL8 DDR device. # Requests larger than 32 bytes are broken down into multiple requests # in the controller - tBURST = '7.5ns' + tBURST = "7.5ns" # LPDDR2-S4, 4 Gbit - tRFC = '130ns' - tREFI = '3.9us' + tRFC = "130ns" + tREFI = "3.9us" # active powerdown and precharge powerdown exit time - tXP = '7.5ns' + tXP = "7.5ns" # self refresh exit time - tXS = '140ns' + tXS = "140ns" # Irrespective of speed grade, tWTR is 7.5 ns - tWTR = '7.5ns' + tWTR = "7.5ns" # Default same rank rd-to-wr bus turnaround to 2 CK, @533 MHz = 3.75 ns - tRTW = '3.75ns' + tRTW = "3.75ns" # Default different rank bus delay to 2 CK, @533 MHz = 3.75 ns - tCS = '3.75ns' + tCS = "3.75ns" # Activate to activate irrespective of density and speed grade - tRRD = '10.0ns' + tRRD = "10.0ns" # Irrespective of density, tFAW is 50 ns - tXAW = '50ns' + tXAW = "50ns" activation_limit = 4 # Current values from datasheet - IDD0 = '15mA' - IDD02 = '70mA' - IDD2N = '2mA' - IDD2N2 = '30mA' - IDD3N = '2.5mA' - IDD3N2 = '30mA' - IDD4W = '10mA' - IDD4W2 = '190mA' - IDD4R = '3mA' - IDD4R2 = '220mA' - IDD5 = '40mA' - IDD52 = '150mA' - IDD3P1 = '1.2mA' - IDD3P12 = '8mA' - IDD2P1 = '0.6mA' - IDD2P12 = '0.8mA' - IDD6 = '1mA' - IDD62 = '3.2mA' - VDD = 
'1.8V' - VDD2 = '1.2V' + IDD0 = "15mA" + IDD02 = "70mA" + IDD2N = "2mA" + IDD2N2 = "30mA" + IDD3N = "2.5mA" + IDD3N2 = "30mA" + IDD4W = "10mA" + IDD4W2 = "190mA" + IDD4R = "3mA" + IDD4R2 = "220mA" + IDD5 = "40mA" + IDD52 = "150mA" + IDD3P1 = "1.2mA" + IDD3P12 = "8mA" + IDD2P1 = "0.6mA" + IDD2P12 = "0.8mA" + IDD6 = "1mA" + IDD62 = "3.2mA" + VDD = "1.8V" + VDD2 = "1.2V" + # A single WideIO x128 interface (one command and address bus), with # default timings based on an estimated WIO-200 8 Gbit part. @@ -774,7 +791,7 @@ class WideIO_200_1x128(DRAMInterface): dll = False # size of device - device_size = '1024MiB' + device_size = "1024MiB" # 1x128 configuration, 1 device with a 128-bit interface device_bus_width = 128 @@ -784,7 +801,7 @@ class WideIO_200_1x128(DRAMInterface): # Each device has a page (row buffer) size of 4KB # (this depends on the memory density) - device_rowbuffer_size = '4KiB' + device_rowbuffer_size = "4KiB" # 1x128 configuration, so 1 device devices_per_rank = 1 @@ -796,45 +813,46 @@ class WideIO_200_1x128(DRAMInterface): banks_per_rank = 4 # 200 MHz - tCK = '5ns' + tCK = "5ns" # WIO-200 - tRCD = '18ns' - tCL = '18ns' - tRP = '18ns' - tRAS = '42ns' - tWR = '15ns' + tRCD = "18ns" + tCL = "18ns" + tRP = "18ns" + tRAS = "42ns" + tWR = "15ns" # Read to precharge is same as the burst - tRTP = '20ns' + tRTP = "20ns" # 4 beats across an x128 SDR interface translates to 4 clocks @ 200 MHz. # Note this is a BL4 SDR device. 
- tBURST = '20ns' + tBURST = "20ns" # WIO 8 Gb - tRFC = '210ns' + tRFC = "210ns" # WIO 8 Gb, <=85C, half for >85C - tREFI = '3.9us' + tREFI = "3.9us" # Greater of 2 CK or 15 ns, 2 CK @ 200 MHz = 10 ns - tWTR = '15ns' + tWTR = "15ns" # Default same rank rd-to-wr bus turnaround to 2 CK, @200 MHz = 10 ns - tRTW = '10ns' + tRTW = "10ns" # Default different rank bus delay to 2 CK, @200 MHz = 10 ns - tCS = '10ns' + tCS = "10ns" # Activate to activate irrespective of density and speed grade - tRRD = '10.0ns' + tRRD = "10.0ns" # Two instead of four activation window - tXAW = '50ns' + tXAW = "50ns" activation_limit = 2 # The WideIO specification does not provide current information + # A single LPDDR3 x32 interface (one command/address bus), with # default timings based on a LPDDR3-1600 4 Gbit part (Micron # EDF8132A1MC) in a 1x32 configuration. @@ -843,7 +861,7 @@ class LPDDR3_1600_1x32(DRAMInterface): dll = False # size of device - device_size = '512MiB' + device_size = "512MiB" # 1x32 configuration, 1 device with a 32-bit interface device_bus_width = 32 @@ -852,7 +870,7 @@ class LPDDR3_1600_1x32(DRAMInterface): burst_length = 8 # Each device has a page (row buffer) size of 4KB - device_rowbuffer_size = '4KiB' + device_rowbuffer_size = "4KiB" # 1x32 configuration, so 1 device devices_per_rank = 1 @@ -865,82 +883,83 @@ class LPDDR3_1600_1x32(DRAMInterface): banks_per_rank = 8 # 800 MHz - tCK = '1.25ns' + tCK = "1.25ns" - tRCD = '18ns' + tRCD = "18ns" # 12 CK read latency, 6 CK write latency @ 800 MHz, 1.25 ns cycle time - tCL = '15ns' + tCL = "15ns" - tRAS = '42ns' - tWR = '15ns' + tRAS = "42ns" + tWR = "15ns" # Greater of 4 CK or 7.5 ns, 4 CK @ 800 MHz = 5 ns - tRTP = '7.5ns' + tRTP = "7.5ns" # Pre-charge one bank 18 ns (all banks 21 ns) - tRP = '18ns' + tRP = "18ns" # 8 beats across a x32 DDR interface translates to 4 clocks @ 800 MHz. # Note this is a BL8 DDR device. 
# Requests larger than 32 bytes are broken down into multiple requests # in the controller - tBURST = '5ns' + tBURST = "5ns" # LPDDR3, 4 Gb - tRFC = '130ns' - tREFI = '3.9us' + tRFC = "130ns" + tREFI = "3.9us" # active powerdown and precharge powerdown exit time - tXP = '7.5ns' + tXP = "7.5ns" # self refresh exit time - tXS = '140ns' + tXS = "140ns" # Irrespective of speed grade, tWTR is 7.5 ns - tWTR = '7.5ns' + tWTR = "7.5ns" # Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns - tRTW = '2.5ns' + tRTW = "2.5ns" # Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns - tCS = '2.5ns' + tCS = "2.5ns" # Activate to activate irrespective of density and speed grade - tRRD = '10.0ns' + tRRD = "10.0ns" # Irrespective of size, tFAW is 50 ns - tXAW = '50ns' + tXAW = "50ns" activation_limit = 4 # Current values from datasheet - IDD0 = '8mA' - IDD02 = '60mA' - IDD2N = '0.8mA' - IDD2N2 = '26mA' - IDD3N = '2mA' - IDD3N2 = '34mA' - IDD4W = '2mA' - IDD4W2 = '190mA' - IDD4R = '2mA' - IDD4R2 = '230mA' - IDD5 = '28mA' - IDD52 = '150mA' - IDD3P1 = '1.4mA' - IDD3P12 = '11mA' - IDD2P1 = '0.8mA' - IDD2P12 = '1.8mA' - IDD6 = '0.5mA' - IDD62 = '1.8mA' - VDD = '1.8V' - VDD2 = '1.2V' + IDD0 = "8mA" + IDD02 = "60mA" + IDD2N = "0.8mA" + IDD2N2 = "26mA" + IDD3N = "2mA" + IDD3N2 = "34mA" + IDD4W = "2mA" + IDD4W2 = "190mA" + IDD4R = "2mA" + IDD4R2 = "230mA" + IDD5 = "28mA" + IDD52 = "150mA" + IDD3P1 = "1.4mA" + IDD3P12 = "11mA" + IDD2P1 = "0.8mA" + IDD2P12 = "1.8mA" + IDD6 = "0.5mA" + IDD62 = "1.8mA" + VDD = "1.8V" + VDD2 = "1.2V" + # A single GDDR5 x64 interface, with # default timings based on a GDDR5-4000 1 Gbit part (SK Hynix # H5GQ1H24AFR) in a 2x32 configuration. 
class GDDR5_4000_2x32(DRAMInterface): # size of device - device_size = '128MiB' + device_size = "128MiB" # 2x32 configuration, 1 device with a 32-bit interface device_bus_width = 32 @@ -949,7 +968,7 @@ class GDDR5_4000_2x32(DRAMInterface): burst_length = 8 # Each device has a page (row buffer) size of 2Kbits (256Bytes) - device_rowbuffer_size = '256B' + device_rowbuffer_size = "256B" # 2x32 configuration, so 2 devices devices_per_rank = 2 @@ -964,7 +983,7 @@ class GDDR5_4000_2x32(DRAMInterface): banks_per_rank = 16 # 1000 MHz - tCK = '1ns' + tCK = "1ns" # 8 beats across an x64 interface translates to 2 clocks @ 1000 MHz # Data bus runs @2000 Mhz => DDR ( data runs at 4000 MHz ) @@ -972,49 +991,50 @@ class GDDR5_4000_2x32(DRAMInterface): # tBURST is equivalent to the CAS-to-CAS delay (tCCD) # With bank group architectures, tBURST represents the CAS-to-CAS # delay for bursts to different bank groups (tCCD_S) - tBURST = '2ns' + tBURST = "2ns" # @1000MHz data rate, tCCD_L is 3 CK # CAS-to-CAS delay for bursts to the same bank group # tBURST is equivalent to tCCD_S; no explicit parameter required # for CAS-to-CAS delay for bursts to different bank groups - tCCD_L = '3ns'; + tCCD_L = "3ns" - tRCD = '12ns' + tRCD = "12ns" # tCL is not directly found in datasheet and assumed equal tRCD - tCL = '12ns' + tCL = "12ns" - tRP = '12ns' - tRAS = '28ns' + tRP = "12ns" + tRAS = "28ns" # RRD_S (different bank group) # RRD_S is 5.5 ns in datasheet. # rounded to the next multiple of tCK - tRRD = '6ns' + tRRD = "6ns" # RRD_L (same bank group) # RRD_L is 5.5 ns in datasheet. # rounded to the next multiple of tCK - tRRD_L = '6ns' + tRRD_L = "6ns" - tXAW = '23ns' + tXAW = "23ns" # tXAW < 4 x tRRD. 
# Therefore, activation limit is set to 0 activation_limit = 0 - tRFC = '65ns' - tWR = '12ns' + tRFC = "65ns" + tWR = "12ns" # Here using the average of WTR_S and WTR_L - tWTR = '5ns' + tWTR = "5ns" # Read-to-Precharge 2 CK - tRTP = '2ns' + tRTP = "2ns" # Assume 2 cycles - tRTW = '2ns' + tRTW = "2ns" + # A single HBM x128 interface (one command and address bus), with # default timings based on data publically released @@ -1037,9 +1057,9 @@ class HBM_1000_4H_1x128(DRAMInterface): # size of channel in bytes, 4H stack of 2Gb dies is 1GiB per stack; # with 8 channels, 128MiB per channel - device_size = '128MiB' + device_size = "128MiB" - device_rowbuffer_size = '2KiB' + device_rowbuffer_size = "2KiB" # 1x128 configuration devices_per_rank = 1 @@ -1059,50 +1079,51 @@ class HBM_1000_4H_1x128(DRAMInterface): bank_groups_per_rank = 0 # 500 MHz for 1Gbps DDR data rate - tCK = '2ns' + tCK = "2ns" # use values from IDD measurement in JEDEC spec # use tRP value for tRCD and tCL similar to other classes - tRP = '15ns' - tRCD = '15ns' - tCL = '15ns' - tRAS = '33ns' + tRP = "15ns" + tRCD = "15ns" + tCL = "15ns" + tRAS = "33ns" # BL2 and BL4 supported, default to BL4 # DDR @ 500 MHz means 4 * 2ns / 2 = 4ns - tBURST = '4ns' + tBURST = "4ns" # value for 2Gb device from JEDEC spec - tRFC = '160ns' + tRFC = "160ns" # value for 2Gb device from JEDEC spec - tREFI = '3.9us' + tREFI = "3.9us" # extrapolate the following from LPDDR configs, using ns values # to minimize burst length, prefetch differences - tWR = '18ns' - tRTP = '7.5ns' - tWTR = '10ns' + tWR = "18ns" + tRTP = "7.5ns" + tWTR = "10ns" # start with 2 cycles turnaround, similar to other memory classes # could be more with variations across the stack - tRTW = '4ns' + tRTW = "4ns" # single rank device, set to 0 - tCS = '0ns' + tCS = "0ns" # from MemCon example, tRRD is 4ns with 2ns tCK - tRRD = '4ns' + tRRD = "4ns" # from MemCon example, tFAW is 30ns with 2ns tCK - tXAW = '30ns' + tXAW = "30ns" activation_limit = 4 # 4tCK - tXP = 
'8ns' + tXP = "8ns" # start with tRFC + tXP -> 160ns + 8ns = 168ns - tXS = '168ns' + tXS = "168ns" + # A single HBM x64 interface (one command and address bus), with # default timings based on HBM gen1 and data publically released @@ -1126,11 +1147,11 @@ class HBM_1000_4H_1x64(HBM_1000_4H_1x128): # size of channel in bytes, 4H stack of 8Gb dies is 4GiB per stack; # with 16 channels, 256MiB per channel - device_size = '256MiB' + device_size = "256MiB" # page size is halved with pseudo-channel; maintaining the same same number # of rows per pseudo-channel with 2X banks across 2 channels - device_rowbuffer_size = '1KiB' + device_rowbuffer_size = "1KiB" # HBM has 8 or 16 banks depending on capacity # Starting with 4Gb dies, 16 banks are defined @@ -1138,19 +1159,20 @@ class HBM_1000_4H_1x64(HBM_1000_4H_1x128): # reset tRFC for larger, 8Gb device # use HBM1 4Gb value as a starting point - tRFC = '260ns' + tRFC = "260ns" # start with tRFC + tXP -> 160ns + 8ns = 168ns - tXS = '268ns' + tXS = "268ns" # Default different rank bus delay to 2 CK, @1000 MHz = 2 ns - tCS = '2ns' - tREFI = '3.9us' + tCS = "2ns" + tREFI = "3.9us" # active powerdown and precharge powerdown exit time - tXP = '10ns' + tXP = "10ns" # self refresh exit time - tXS = '65ns' + tXS = "65ns" + # A single HBM2 x64 interface (tested with HBMCtrl in gem5) # to be used as a single pseudo channel. 
The timings are based @@ -1207,7 +1229,7 @@ class HBM_2000_4H_1x64(DRAMInterface): tWTR_L = "9ns" tRTW = "18ns" - #tAAD from RBus + # tAAD from RBus tAAD = "1ns" # single rank device, set to 0 @@ -1226,13 +1248,14 @@ class HBM_2000_4H_1x64(DRAMInterface): # start with tRFC + tXP -> 160ns + 8ns = 168ns tXS = "216ns" - page_policy = 'close_adaptive' + page_policy = "close_adaptive" read_buffer_size = 64 write_buffer_size = 64 two_cycle_activate = True + # A single LPDDR5 x16 interface (one command/address bus) # for a single x16 channel with default timings based on # initial JEDEC specification @@ -1245,7 +1268,7 @@ class LPDDR5_5500_1x16_BG_BL32(DRAMInterface): read_buffer_size = 64 # Set page policy to better suit DMC Huxley - page_policy = 'close_adaptive' + page_policy = "close_adaptive" # 16-bit channel interface device_bus_width = 16 @@ -1256,10 +1279,10 @@ class LPDDR5_5500_1x16_BG_BL32(DRAMInterface): burst_length = 32 # size of device in bytes - device_size = '1GiB' + device_size = "1GiB" # 2KiB page with BG mode - device_rowbuffer_size = '2KiB' + device_rowbuffer_size = "2KiB" # Use a 1x16 configuration devices_per_rank = 1 @@ -1277,75 +1300,75 @@ class LPDDR5_5500_1x16_BG_BL32(DRAMInterface): bank_groups_per_rank = 4 # 5.5Gb/s DDR with 4:1 WCK:CK ratio for 687.5 MHz CK - tCK = '1.455ns' + tCK = "1.455ns" # Greater of 2 CK or 18ns - tRCD = '18ns' + tRCD = "18ns" # Base RL is 16 CK @ 687.5 MHz = 23.28ns - tCL = '23.280ns' + tCL = "23.280ns" # Greater of 2 CK or 18ns - tRP = '18ns' + tRP = "18ns" # Greater of 3 CK or 42ns - tRAS = '42ns' + tRAS = "42ns" # Greater of 3 CK or 34ns - tWR = '34ns' + tWR = "34ns" # active powerdown and precharge powerdown exit time # Greater of 3 CK or 7ns - tXP = '7ns' + tXP = "7ns" # self refresh exit time (tRFCab + 7.5ns) - tXS = '217.5ns' + tXS = "217.5ns" # Greater of 2 CK or 7.5 ns minus 2 CK - tRTP = '4.59ns' + tRTP = "4.59ns" # With BG architecture, burst of 32 transferred in two 16-beat # sub-bursts, with a 16-beat gap 
in between. # Each 16-beat sub-burst is 8 WCK @2.75 GHz or 2 CK @ 687.5 MHz # tBURST is the delay to transfer the Bstof32 = 6 CK @ 687.5 MHz - tBURST = '8.73ns' + tBURST = "8.73ns" # can interleave a Bstof32 from another bank group at tBURST_MIN # 16-beats is 8 WCK @2.75 GHz or 2 CK @ 687.5 MHz - tBURST_MIN = '2.91ns' + tBURST_MIN = "2.91ns" # tBURST_MAX is the maximum burst delay for same bank group timing # this is 8 CK @ 687.5 MHz - tBURST_MAX = '11.64ns' + tBURST_MAX = "11.64ns" # 8 CK @ 687.5 MHz tCCD_L = "11.64ns" # LPDDR5, 8 Gbit/channel for 280ns tRFCab - tRFC = '210ns' - tREFI = '3.9us' + tRFC = "210ns" + tREFI = "3.9us" # Greater of 4 CK or 6.25 ns - tWTR = '6.25ns' + tWTR = "6.25ns" # Greater of 4 CK or 12 ns - tWTR_L = '12ns' + tWTR_L = "12ns" # Required RD-to-WR timing is RL+ BL/n + tWCKDQ0/tCK - WL # tWCKDQ0/tCK will be 1 CK for most cases # For gem5 RL = WL and BL/n is already accounted for with tBURST # Result is and additional 1 CK is required - tRTW = '1.455ns' + tRTW = "1.455ns" # Default different rank bus delay to 2 CK, @687.5 MHz = 2.91 ns - tCS = '2.91ns' + tCS = "2.91ns" # 2 CK - tPPD = '2.91ns' + tPPD = "2.91ns" # Greater of 2 CK or 5 ns - tRRD = '5ns' - tRRD_L = '5ns' + tRRD = "5ns" + tRRD_L = "5ns" # With Bank Group Arch mode tFAW is 20 ns - tXAW = '20ns' + tXAW = "20ns" activation_limit = 4 # at 5Gbps, 4:1 WCK to CK ratio required @@ -1356,10 +1379,11 @@ class LPDDR5_5500_1x16_BG_BL32(DRAMInterface): # 2 command phases can be sent back-to-back or # with a gap up to tAAD = 8 CK two_cycle_activate = True - tAAD = '11.640ns' + tAAD = "11.640ns" data_clock_sync = True + # A single LPDDR5 x16 interface (one command/address bus) # for a single x16 channel with default timings based on # initial JEDEC specification @@ -1373,10 +1397,10 @@ class LPDDR5_5500_1x16_BG_BL16(LPDDR5_5500_1x16_BG_BL32): burst_length = 16 # For Bstof16 with BG arch, 2 CK @ 687.5 MHz with 4:1 clock ratio - tBURST = '2.91ns' - tBURST_MIN = '2.91ns' + tBURST = "2.91ns" + 
tBURST_MIN = "2.91ns" # For Bstof16 with BG arch, 4 CK @ 687.5 MHz with 4:1 clock ratio - tBURST_MAX = '5.82ns' + tBURST_MAX = "5.82ns" # 4 CK @ 687.5 MHz tCCD_L = "5.82ns" @@ -1390,7 +1414,7 @@ class LPDDR5_5500_1x16_BG_BL16(LPDDR5_5500_1x16_BG_BL32): class LPDDR5_5500_1x16_8B_BL32(LPDDR5_5500_1x16_BG_BL32): # 4KiB page with 8B mode - device_rowbuffer_size = '4KiB' + device_rowbuffer_size = "4KiB" # LPDDR5 supports configurable bank options # 8B : BL32, all frequencies @@ -1401,18 +1425,18 @@ class LPDDR5_5500_1x16_8B_BL32(LPDDR5_5500_1x16_BG_BL32): bank_groups_per_rank = 0 # For Bstof32 with 8B mode, 4 CK @ 687.5 MHz with 4:1 clock ratio - tBURST = '5.82ns' - tBURST_MIN = '5.82ns' - tBURST_MAX = '5.82ns' + tBURST = "5.82ns" + tBURST_MIN = "5.82ns" + tBURST_MAX = "5.82ns" # Greater of 4 CK or 12 ns - tWTR = '12ns' + tWTR = "12ns" # Greater of 2 CK or 10 ns - tRRD = '10ns' + tRRD = "10ns" # With 8B mode tFAW is 40 ns - tXAW = '40ns' + tXAW = "40ns" activation_limit = 4 # Reset BG arch timing for 8B mode @@ -1420,6 +1444,7 @@ class LPDDR5_5500_1x16_8B_BL32(LPDDR5_5500_1x16_BG_BL32): tRRD_L = "0ns" tWTR_L = "0ns" + # A single LPDDR5 x16 interface (one command/address bus) # for a single x16 channel with default timings based on # initial JEDEC specification @@ -1429,22 +1454,22 @@ class LPDDR5_5500_1x16_8B_BL32(LPDDR5_5500_1x16_BG_BL32): class LPDDR5_6400_1x16_BG_BL32(LPDDR5_5500_1x16_BG_BL32): # 5.5Gb/s DDR with 4:1 WCK:CK ratio for 687.5 MHz CK - tCK = '1.25ns' + tCK = "1.25ns" # Base RL is 17 CK @ 800 MHz = 21.25ns - tCL = '21.25ns' + tCL = "21.25ns" # With BG architecture, burst of 32 transferred in two 16-beat # sub-bursts, with a 16-beat gap in between. 
# Each 16-beat sub-burst is 8 WCK @3.2 GHz or 2 CK @ 800 MHz # tBURST is the delay to transfer the Bstof32 = 6 CK @ 800 MHz - tBURST = '7.5ns' + tBURST = "7.5ns" # can interleave a Bstof32 from another bank group at tBURST_MIN # 16-beats is 8 WCK @2.3 GHz or 2 CK @ 800 MHz - tBURST_MIN = '2.5ns' + tBURST_MIN = "2.5ns" # tBURST_MAX is the maximum burst delay for same bank group timing # this is 8 CK @ 800 MHz - tBURST_MAX = '10ns' + tBURST_MAX = "10ns" # 8 CK @ 800 MHz tCCD_L = "10ns" @@ -1453,17 +1478,18 @@ class LPDDR5_6400_1x16_BG_BL32(LPDDR5_5500_1x16_BG_BL32): # tWCKDQ0/tCK will be 1 CK for most cases # For gem5 RL = WL and BL/n is already accounted for with tBURST # Result is and additional 1 CK is required - tRTW = '1.25ns' + tRTW = "1.25ns" # Default different rank bus delay to 2 CK, @687.5 MHz = 2.5 ns - tCS = '2.5ns' + tCS = "2.5ns" # 2 CK - tPPD = '2.5ns' + tPPD = "2.5ns" # 2 command phases can be sent back-to-back or # with a gap up to tAAD = 8 CK - tAAD = '10ns' + tAAD = "10ns" + # A single LPDDR5 x16 interface (one command/address bus) # for a single x16 channel with default timings based on initial @@ -1478,10 +1504,10 @@ class LPDDR5_6400_1x16_BG_BL16(LPDDR5_6400_1x16_BG_BL32): burst_length = 16 # For Bstof16 with BG arch, 2 CK @ 800 MHz with 4:1 clock ratio - tBURST = '2.5ns' - tBURST_MIN = '2.5ns' + tBURST = "2.5ns" + tBURST_MIN = "2.5ns" # For Bstof16 with BG arch, 4 CK @ 800 MHz with 4:1 clock ratio - tBURST_MAX = '5ns' + tBURST_MAX = "5ns" # 4 CK @ 800 MHz tCCD_L = "5ns" @@ -1495,7 +1521,7 @@ class LPDDR5_6400_1x16_BG_BL16(LPDDR5_6400_1x16_BG_BL32): class LPDDR5_6400_1x16_8B_BL32(LPDDR5_6400_1x16_BG_BL32): # 4KiB page with 8B mode - device_rowbuffer_size = '4KiB' + device_rowbuffer_size = "4KiB" # LPDDR5 supports configurable bank options # 8B : BL32, all frequencies @@ -1506,18 +1532,18 @@ class LPDDR5_6400_1x16_8B_BL32(LPDDR5_6400_1x16_BG_BL32): bank_groups_per_rank = 0 # For Bstof32 with 8B mode, 4 CK @ 800 MHz with 4:1 clock ratio - tBURST = 
'5ns' - tBURST_MIN = '5ns' - tBURST_MAX = '5ns' + tBURST = "5ns" + tBURST_MIN = "5ns" + tBURST_MAX = "5ns" # Greater of 4 CK or 12 ns - tWTR = '12ns' + tWTR = "12ns" # Greater of 2 CK or 10 ns - tRRD = '10ns' + tRRD = "10ns" # With 8B mode tFAW is 40 ns - tXAW = '40ns' + tXAW = "40ns" activation_limit = 4 # Reset BG arch timing for 8B mode diff --git a/src/mem/DRAMSim2.py b/src/mem/DRAMSim2.py index 11f9b4e866..364a0d794b 100644 --- a/src/mem/DRAMSim2.py +++ b/src/mem/DRAMSim2.py @@ -38,18 +38,21 @@ from m5.objects.AbstractMemory import * # A wrapper for DRAMSim2 multi-channel memory controller class DRAMSim2(AbstractMemory): - type = 'DRAMSim2' + type = "DRAMSim2" cxx_header = "mem/dramsim2.hh" - cxx_class = 'gem5::memory::DRAMSim2' + cxx_class = "gem5::memory::DRAMSim2" # A single port for now port = ResponsePort("This port sends responses and receives requests") - deviceConfigFile = Param.String("ini/DDR3_micron_32M_8B_x8_sg15.ini", - "Device configuration file") - systemConfigFile = Param.String("system.ini.example", - "Memory organisation configuration file") - filePath = Param.String("ext/dramsim2/DRAMSim2/", - "Directory to prepend to file names") + deviceConfigFile = Param.String( + "ini/DDR3_micron_32M_8B_x8_sg15.ini", "Device configuration file" + ) + systemConfigFile = Param.String( + "system.ini.example", "Memory organisation configuration file" + ) + filePath = Param.String( + "ext/dramsim2/DRAMSim2/", "Directory to prepend to file names" + ) traceFile = Param.String("", "Output file for trace generation") enableDebug = Param.Bool(False, "Enable DRAMSim2 debug output") diff --git a/src/mem/DRAMsim3.py b/src/mem/DRAMsim3.py index 01a735e960..0da9c1067b 100644 --- a/src/mem/DRAMsim3.py +++ b/src/mem/DRAMsim3.py @@ -38,16 +38,19 @@ from m5.objects.AbstractMemory import * # A wrapper for DRAMSim3 multi-channel memory controller class DRAMsim3(AbstractMemory): - type = 'DRAMsim3' + type = "DRAMsim3" cxx_header = "mem/dramsim3.hh" - cxx_class = 
'gem5::memory::DRAMsim3' + cxx_class = "gem5::memory::DRAMsim3" # A single port for now - port = ResponsePort("port for receiving requests from" - "the CPU or other requestor") + port = ResponsePort( + "port for receiving requests from" "the CPU or other requestor" + ) - configFile = Param.String("ext/dramsim3/DRAMsim3/configs/" - "DDR4_8Gb_x8_2400.ini", - "The configuration file to use with DRAMSim3") - filePath = Param.String("ext/dramsim3/DRAMsim3/", - "Directory to prepend to file names") + configFile = Param.String( + "ext/dramsim3/DRAMsim3/configs/" "DDR4_8Gb_x8_2400.ini", + "The configuration file to use with DRAMSim3", + ) + filePath = Param.String( + "ext/dramsim3/DRAMsim3/", "Directory to prepend to file names" + ) diff --git a/src/mem/ExternalMaster.py b/src/mem/ExternalMaster.py index 0377591c63..65a13bcd3f 100644 --- a/src/mem/ExternalMaster.py +++ b/src/mem/ExternalMaster.py @@ -37,17 +37,24 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class ExternalMaster(SimObject): - type = 'ExternalMaster' + type = "ExternalMaster" cxx_header = "mem/external_master.hh" - cxx_class = 'gem5::ExternalMaster' + cxx_class = "gem5::ExternalMaster" port = RequestPort("Master port") - port_type = Param.String('stub', 'Registered external port handler' - ' to pass this port to in instantiation') - port_data = Param.String('stub', 'A string to pass to the port' - ' handler (in a format specific to the handler) to describe how' - ' the port should be bound/bindable/discoverable') + port_type = Param.String( + "stub", + "Registered external port handler" + " to pass this port to in instantiation", + ) + port_data = Param.String( + "stub", + "A string to pass to the port" + " handler (in a format specific to the handler) to describe how" + " the port should be bound/bindable/discoverable", + ) - system = Param.System(Parent.any, 'System this external port belongs to') + system = Param.System(Parent.any, "System this external port 
belongs to") diff --git a/src/mem/ExternalSlave.py b/src/mem/ExternalSlave.py index fcce505063..ce2d5b8b36 100644 --- a/src/mem/ExternalSlave.py +++ b/src/mem/ExternalSlave.py @@ -36,18 +36,26 @@ from m5.params import * from m5.SimObject import SimObject + class ExternalSlave(SimObject): - type = 'ExternalSlave' + type = "ExternalSlave" cxx_header = "mem/external_slave.hh" - cxx_class = 'gem5::ExternalSlave' + cxx_class = "gem5::ExternalSlave" port = SlavePort("Slave port") - addr_ranges = VectorParam.AddrRange([], 'Addresses served by' - ' this port\'s external agent') + addr_ranges = VectorParam.AddrRange( + [], "Addresses served by" " this port's external agent" + ) - port_type = Param.String('stub', 'Registered external port handler' - ' to pass this port to in instantiation') - port_data = Param.String('stub', 'A string to pass to the port' - ' handler (in a format specific to the handler) to describe how' - ' the port should be bound/bindable/discoverable') + port_type = Param.String( + "stub", + "Registered external port handler" + " to pass this port to in instantiation", + ) + port_data = Param.String( + "stub", + "A string to pass to the port" + " handler (in a format specific to the handler) to describe how" + " the port should be bound/bindable/discoverable", + ) diff --git a/src/mem/HBMCtrl.py b/src/mem/HBMCtrl.py index a7be7c8cf3..0c7c1ea919 100644 --- a/src/mem/HBMCtrl.py +++ b/src/mem/HBMCtrl.py @@ -30,10 +30,11 @@ from m5.objects.MemCtrl import * # HBMCtrl manages two pseudo channels of HBM2 + class HBMCtrl(MemCtrl): - type = 'HBMCtrl' + type = "HBMCtrl" cxx_header = "mem/hbm_ctrl.hh" - cxx_class = 'gem5::memory::HBMCtrl' + cxx_class = "gem5::memory::HBMCtrl" # HBMCtrl uses the SimpleMemCtlr's interface # `dram` as the first pseudo channel, the second @@ -46,4 +47,4 @@ class HBMCtrl(MemCtrl): min_reads_per_switch = 64 min_writes_per_switch = 64 - partitioned_q = Param.Bool(True, "split queues for pseudo channels") + partitioned_q = 
Param.Bool(False, "split queues for pseudo channels") diff --git a/src/mem/HMCController.py b/src/mem/HMCController.py index dee1f575ca..ba5495b71f 100644 --- a/src/mem/HMCController.py +++ b/src/mem/HMCController.py @@ -66,7 +66,8 @@ from m5.objects.XBar import * # over them. Yet in this model, we have not made any such assumptions on the # address space. + class HMCController(NoncoherentXBar): - type = 'HMCController' + type = "HMCController" cxx_header = "mem/hmc_controller.hh" - cxx_class = 'gem5::HMCController' + cxx_class = "gem5::HMCController" diff --git a/src/mem/HeteroMemCtrl.py b/src/mem/HeteroMemCtrl.py index d0ba84de9e..8bddc94086 100644 --- a/src/mem/HeteroMemCtrl.py +++ b/src/mem/HeteroMemCtrl.py @@ -46,9 +46,9 @@ from m5.objects.MemCtrl import * # HeteroMemCtrl controls a dram and an nvm interface # Both memory interfaces share the data and command bus class HeteroMemCtrl(MemCtrl): - type = 'HeteroMemCtrl' + type = "HeteroMemCtrl" cxx_header = "mem/hetero_mem_ctrl.hh" - cxx_class = 'gem5::memory::HeteroMemCtrl' + cxx_class = "gem5::memory::HeteroMemCtrl" # Interface to nvm memory media # The dram interface `dram` used by HeteroMemCtrl is defined in diff --git a/src/mem/MemChecker.py b/src/mem/MemChecker.py index 42e4bce8d6..9fc0d7c59c 100644 --- a/src/mem/MemChecker.py +++ b/src/mem/MemChecker.py @@ -37,25 +37,30 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class MemChecker(SimObject): - type = 'MemChecker' + type = "MemChecker" cxx_header = "mem/mem_checker.hh" - cxx_class = 'gem5::MemChecker' + cxx_class = "gem5::MemChecker" + class MemCheckerMonitor(SimObject): - type = 'MemCheckerMonitor' + type = "MemCheckerMonitor" cxx_header = "mem/mem_checker_monitor.hh" - cxx_class = 'gem5::MemCheckerMonitor' + cxx_class = "gem5::MemCheckerMonitor" # one port in each direction - mem_side_port = RequestPort("This port sends requests and receives " - "responses") - master = DeprecatedParam(mem_side_port,"`master` is 
now called " - "`mem_side_port`") - cpu_side_port = ResponsePort("This port receives requests and sends " - "responses") - slave = DeprecatedParam(cpu_side_port,"`slave` is now called " - "`cpu_side_port`") + mem_side_port = RequestPort( + "This port sends requests and receives " "responses" + ) + master = DeprecatedParam( + mem_side_port, "`master` is now called " "`mem_side_port`" + ) + cpu_side_port = ResponsePort( + "This port receives requests and sends " "responses" + ) + slave = DeprecatedParam( + cpu_side_port, "`slave` is now called " "`cpu_side_port`" + ) warn_only = Param.Bool(False, "Warn about violations only") memchecker = Param.MemChecker("Instance shared with other monitors") - diff --git a/src/mem/MemCtrl.py b/src/mem/MemCtrl.py index ea199ee088..c8acd22ed9 100644 --- a/src/mem/MemCtrl.py +++ b/src/mem/MemCtrl.py @@ -44,24 +44,27 @@ from m5.objects.QoSMemCtrl import * # Enum for memory scheduling algorithms, currently First-Come # First-Served and a First-Row Hit then First-Come First-Served -class MemSched(Enum): vals = ['fcfs', 'frfcfs'] +class MemSched(Enum): + vals = ["fcfs", "frfcfs"] + # MemCtrl is a single-channel single-ported Memory controller model # that aims to model the most important system-level performance # effects of a memory controller, interfacing with media specific # interfaces class MemCtrl(QoSMemCtrl): - type = 'MemCtrl' + type = "MemCtrl" cxx_header = "mem/mem_ctrl.hh" - cxx_class = 'gem5::memory::MemCtrl' + cxx_class = "gem5::memory::MemCtrl" # single-ported on the system interface side, instantiate with a # bus in front of the controller for multiple ports port = ResponsePort("This port responds to memory requests") # Interface to memory media - dram = Param.MemInterface("Memory interface, can be a DRAM" - "or an NVM interface ") + dram = Param.MemInterface( + "Memory interface, can be a DRAM" "or an NVM interface " + ) # read and write buffer depths are set in the interface # the controller will read these values when 
instantiated @@ -75,15 +78,17 @@ class MemCtrl(QoSMemCtrl): write_low_thresh_perc = Param.Percent(50, "Threshold to start writes") # minimum write bursts to schedule before switching back to reads - min_writes_per_switch = Param.Unsigned(16, "Minimum write bursts before " - "switching to reads") + min_writes_per_switch = Param.Unsigned( + 16, "Minimum write bursts before " "switching to reads" + ) # minimum read bursts to schedule before switching back to writes - min_reads_per_switch = Param.Unsigned(16, "Minimum read bursts before " - "switching to writes") + min_reads_per_switch = Param.Unsigned( + 16, "Minimum read bursts before " "switching to writes" + ) # scheduler, address map and page policy - mem_sched_policy = Param.MemSched('frfcfs', "Memory scheduling policy") + mem_sched_policy = Param.MemSched("frfcfs", "Memory scheduling policy") # pipeline latency of the controller and PHY, split into a # frontend part and a backend part, with reads and writes serviced @@ -93,3 +98,4 @@ class MemCtrl(QoSMemCtrl): static_backend_latency = Param.Latency("10ns", "Static backend latency") command_window = Param.Latency("10ns", "Static backend latency") + disable_sanity_check = Param.Bool(False, "Disable port resp Q size check") diff --git a/src/mem/MemDelay.py b/src/mem/MemDelay.py index 9c50ab6b5f..430ffb77f5 100644 --- a/src/mem/MemDelay.py +++ b/src/mem/MemDelay.py @@ -36,25 +36,31 @@ from m5.params import * from m5.objects.ClockedObject import ClockedObject + class MemDelay(ClockedObject): - type = 'MemDelay' - cxx_header = 'mem/mem_delay.hh' - cxx_class = 'gem5::MemDelay' + type = "MemDelay" + cxx_header = "mem/mem_delay.hh" + cxx_class = "gem5::MemDelay" abstract = True - mem_side_port = RequestPort("This port sends requests and " - "receives responses") - master = DeprecatedParam(mem_side_port, - '`master` is now called `mem_side_port`') - cpu_side_port = ResponsePort("This port receives requests and " - "sends responses") - slave = 
DeprecatedParam(cpu_side_port, - '`slave` is now called `cpu_side_port`') + mem_side_port = RequestPort( + "This port sends requests and " "receives responses" + ) + master = DeprecatedParam( + mem_side_port, "`master` is now called `mem_side_port`" + ) + cpu_side_port = ResponsePort( + "This port receives requests and " "sends responses" + ) + slave = DeprecatedParam( + cpu_side_port, "`slave` is now called `cpu_side_port`" + ) + class SimpleMemDelay(MemDelay): - type = 'SimpleMemDelay' - cxx_header = 'mem/mem_delay.hh' - cxx_class = 'gem5::SimpleMemDelay' + type = "SimpleMemDelay" + cxx_header = "mem/mem_delay.hh" + cxx_class = "gem5::SimpleMemDelay" read_req = Param.Latency("0t", "Read request delay") read_resp = Param.Latency("0t", "Read response delay") diff --git a/src/mem/MemInterface.py b/src/mem/MemInterface.py index 10eb4304f4..a32a3b5ec9 100644 --- a/src/mem/MemInterface.py +++ b/src/mem/MemInterface.py @@ -49,13 +49,15 @@ from m5.objects.AbstractMemory import AbstractMemory # suitable for an open-page policy, optimising for sequential accesses # hitting in the open row. For a closed-page policy, RoCoRaBaCh # maximises parallelism. 
-class AddrMap(Enum): vals = ['RoRaBaChCo', 'RoRaBaCoCh', 'RoCoRaBaCh'] +class AddrMap(Enum): + vals = ["RoRaBaChCo", "RoRaBaCoCh", "RoCoRaBaCh"] + class MemInterface(AbstractMemory): - type = 'MemInterface' + type = "MemInterface" abstract = True cxx_header = "mem/mem_interface.hh" - cxx_class = 'gem5::memory::MemInterface' + cxx_class = "gem5::memory::MemInterface" # Allow the interface to set required controller buffer sizes # each entry corresponds to a burst for the specific memory channel @@ -65,16 +67,18 @@ class MemInterface(AbstractMemory): read_buffer_size = Param.Unsigned(32, "Number of read queue entries") # scheduler, address map - addr_mapping = Param.AddrMap('RoRaBaCoCh', "Address mapping policy") + addr_mapping = Param.AddrMap("RoRaBaCoCh", "Address mapping policy") # size of memory device in Bytes device_size = Param.MemorySize("Size of memory device") # the physical organisation of the memory - device_bus_width = Param.Unsigned("data bus width in bits for each "\ - "memory device/chip") + device_bus_width = Param.Unsigned( + "data bus width in bits for each " "memory device/chip" + ) burst_length = Param.Unsigned("Burst lenght (BL) in beats") - device_rowbuffer_size = Param.MemorySize("Page (row buffer) size per "\ - "device/chip") + device_rowbuffer_size = Param.MemorySize( + "Page (row buffer) size per " "device/chip" + ) devices_per_rank = Param.Unsigned("Number of devices/chips per rank") ranks_per_channel = Param.Unsigned("Number of ranks per channel") banks_per_rank = Param.Unsigned("Number of banks per rank") @@ -91,8 +95,9 @@ class MemInterface(AbstractMemory): # This parameter has to account for burst length. 
# Read/Write requests with data size larger than one full burst are broken # down into multiple requests in the controller - tBURST = Param.Latency("Burst duration " - "(typically burst length / 2 cycles)") + tBURST = Param.Latency( + "Burst duration " "(typically burst length / 2 cycles)" + ) # write-to-read, same rank turnaround penalty tWTR = Param.Latency("Write to read, same rank switching time") diff --git a/src/mem/NVMInterface.py b/src/mem/NVMInterface.py index a73e1d84ca..841dc0c047 100644 --- a/src/mem/NVMInterface.py +++ b/src/mem/NVMInterface.py @@ -43,9 +43,9 @@ from m5.objects.DRAMInterface import AddrMap # The most important system-level performance effects of a NVM # are modeled without getting into too much detail of the media itself. class NVMInterface(MemInterface): - type = 'NVMInterface' + type = "NVMInterface" cxx_header = "mem/nvm_interface.hh" - cxx_class = 'gem5::memory::NVMInterface' + cxx_class = "gem5::memory::NVMInterface" # NVM DIMM could have write buffer to offload writes # define buffer depth, which will limit the number of pending writes @@ -63,9 +63,9 @@ class NVMInterface(MemInterface): tWRITE = Param.Latency("200ns", "Average NVM write latency") tSEND = Param.Latency("15ns", "Access latency") - two_cycle_rdwr = Param.Bool(False, - "Two cycles required to send read and write commands") - + two_cycle_rdwr = Param.Bool( + False, "Two cycles required to send read and write commands" + ) def controller(self): """ @@ -76,6 +76,7 @@ class NVMInterface(MemInterface): controller.dram = self return controller + # NVM delays and device architecture defined to mimic PCM like memory. 
# Can be configured with DDR4_2400 sharing the channel class NVM_2400_1x64(NVMInterface): @@ -85,10 +86,10 @@ class NVM_2400_1x64(NVMInterface): max_pending_writes = 128 max_pending_reads = 64 - device_rowbuffer_size = '256B' + device_rowbuffer_size = "256B" # 8X capacity compared to DDR4 x4 DIMM with 8Gb devices - device_size = '512GiB' + device_size = "512GiB" # Mimic 64-bit media agnostic DIMM interface device_bus_width = 64 devices_per_rank = 1 @@ -100,16 +101,15 @@ class NVM_2400_1x64(NVMInterface): two_cycle_rdwr = True # 1200 MHz - tCK = '0.833ns' + tCK = "0.833ns" - tREAD = '150ns' - tWRITE = '500ns'; - tSEND = '14.16ns'; - tBURST = '3.332ns'; + tREAD = "150ns" + tWRITE = "500ns" + tSEND = "14.16ns" + tBURST = "3.332ns" # Default all bus turnaround and rank bus delay to 2 cycles # With DDR data bus, clock = 1200 MHz = 1.666 ns - tWTR = '1.666ns'; - tRTW = '1.666ns'; - tCS = '1.666ns' - + tWTR = "1.666ns" + tRTW = "1.666ns" + tCS = "1.666ns" diff --git a/src/mem/PortTerminator.py b/src/mem/PortTerminator.py index 761f5edda9..05fdd1177d 100644 --- a/src/mem/PortTerminator.py +++ b/src/mem/PortTerminator.py @@ -28,12 +28,15 @@ from m5.params import * from m5.SimObject import SimObject -class PortTerminator(SimObject): - type = 'PortTerminator' - cxx_header = "mem/port_terminator.hh" - cxx_class = 'gem5::PortTerminator' - req_ports = VectorRequestPort("Vector port for connecting terminating " - "response ports.") - resp_ports = VectorResponsePort("Vector port for terminating " - "request ports.") \ No newline at end of file +class PortTerminator(SimObject): + type = "PortTerminator" + cxx_header = "mem/port_terminator.hh" + cxx_class = "gem5::PortTerminator" + + req_ports = VectorRequestPort( + "Vector port for connecting terminating " "response ports." + ) + resp_ports = VectorResponsePort( + "Vector port for terminating " "request ports." 
+ ) diff --git a/src/mem/SConscript b/src/mem/SConscript index 0f2efed4a3..3bcfc0d9c5 100644 --- a/src/mem/SConscript +++ b/src/mem/SConscript @@ -67,6 +67,7 @@ SimObject('HMCController.py', sim_objects=['HMCController']) SimObject('SerialLink.py', sim_objects=['SerialLink']) SimObject('MemDelay.py', sim_objects=['MemDelay', 'SimpleMemDelay']) SimObject('PortTerminator.py', sim_objects=['PortTerminator']) +SimObject('ThreadBridge.py', sim_objects=['ThreadBridge']) Source('abstract_mem.cc') Source('addr_mapper.cc') @@ -93,6 +94,7 @@ Source('simple_mem.cc') Source('snoop_filter.cc') Source('stack_dist_calc.cc') Source('sys_bridge.cc') +Source('thread_bridge.cc') Source('token_port.cc') Source('tport.cc') Source('xbar.cc') @@ -104,10 +106,9 @@ Source('port_terminator.cc') GTest('translation_gen.test', 'translation_gen.test.cc') -if env['CONF']['TARGET_ISA'] != 'null': - Source('translating_port_proxy.cc') - Source('se_translating_port_proxy.cc') - Source('page_table.cc') +Source('translating_port_proxy.cc') +Source('se_translating_port_proxy.cc') +Source('page_table.cc') if env['HAVE_DRAMSIM']: SimObject('DRAMSim2.py', sim_objects=['DRAMSim2']) @@ -145,6 +146,7 @@ DebugFlag('MemCtrl') DebugFlag('MMU') DebugFlag('MemoryAccess') DebugFlag('PacketQueue') +DebugFlag('ResponsePort') DebugFlag('StackDist') DebugFlag("DRAMSim2") DebugFlag("DRAMsim3") diff --git a/src/mem/SerialLink.py b/src/mem/SerialLink.py index cc33daafdb..a40b714258 100644 --- a/src/mem/SerialLink.py +++ b/src/mem/SerialLink.py @@ -43,27 +43,38 @@ from m5.objects.ClockedObject import ClockedObject # SerialLink is a simple variation of the Bridge class, with the ability to # account for the latency of packet serialization. 
-class SerialLink(ClockedObject): - type = 'SerialLink' - cxx_header = "mem/serial_link.hh" - cxx_class = 'gem5::SerialLink' - mem_side_port = RequestPort("This port sends requests and " - "receives responses") - master = DeprecatedParam(mem_side_port, - '`master` is now called `mem_side_port`') - cpu_side_port = ResponsePort("This port receives requests and " - "sends responses") - slave = DeprecatedParam(cpu_side_port, - '`slave` is now called `cpu_side_port`') +class SerialLink(ClockedObject): + type = "SerialLink" + cxx_header = "mem/serial_link.hh" + cxx_class = "gem5::SerialLink" + + mem_side_port = RequestPort( + "This port sends requests and " "receives responses" + ) + master = DeprecatedParam( + mem_side_port, "`master` is now called `mem_side_port`" + ) + cpu_side_port = ResponsePort( + "This port receives requests and " "sends responses" + ) + slave = DeprecatedParam( + cpu_side_port, "`slave` is now called `cpu_side_port`" + ) req_size = Param.Unsigned(16, "The number of requests to buffer") resp_size = Param.Unsigned(16, "The number of responses to buffer") - delay = Param.Latency('0ns', "The latency of this serial_link") - ranges = VectorParam.AddrRange([AllMemory], - "Address ranges to pass through the serial_link") + delay = Param.Latency("0ns", "The latency of this serial_link") + ranges = VectorParam.AddrRange( + [AllMemory], "Address ranges to pass through the serial_link" + ) # Bandwidth of the serial link is determined by the clock domain which the # link belongs to and the number of lanes: - num_lanes = Param.Unsigned(1, "Number of parallel lanes inside the serial" - "link. (aka. lane width)") - link_speed = Param.UInt64(1, "Gb/s Speed of each parallel lane inside the" - "serial link. (aka. lane speed)") + num_lanes = Param.Unsigned( + 1, + "Number of parallel lanes inside the serial" "link. (aka. lane width)", + ) + link_speed = Param.UInt64( + 1, + "Gb/s Speed of each parallel lane inside the" + "serial link. (aka. 
lane speed)", + ) diff --git a/src/mem/SharedMemoryServer.py b/src/mem/SharedMemoryServer.py index 3a63f45a46..97004224de 100644 --- a/src/mem/SharedMemoryServer.py +++ b/src/mem/SharedMemoryServer.py @@ -10,6 +10,8 @@ class SharedMemoryServer(SimObject): system = Param.System( Parent.any, - "The system where the target shared memory is actually stored.") + "The system where the target shared memory is actually stored.", + ) server_path = Param.String( - "The unix socket path where the server should be running upon.") + "The unix socket path where the server should be running upon." + ) diff --git a/src/mem/SimpleMemory.py b/src/mem/SimpleMemory.py index 1d1457e37f..fefda187f2 100644 --- a/src/mem/SimpleMemory.py +++ b/src/mem/SimpleMemory.py @@ -39,18 +39,20 @@ from m5.params import * from m5.objects.AbstractMemory import * + class SimpleMemory(AbstractMemory): - type = 'SimpleMemory' + type = "SimpleMemory" cxx_header = "mem/simple_mem.hh" - cxx_class = 'gem5::memory::SimpleMemory' + cxx_class = "gem5::memory::SimpleMemory" port = ResponsePort("This port sends responses and receives requests") - latency = Param.Latency('30ns', "Request to response latency") - latency_var = Param.Latency('0ns', "Request to response latency variance") + latency = Param.Latency("30ns", "Request to response latency") + latency_var = Param.Latency("0ns", "Request to response latency variance") # The memory bandwidth limit default is set to 12.8GiB/s which is # representative of a x64 DDR3-1600 channel. 
- bandwidth = Param.MemoryBandwidth('12.8GiB/s', - "Combined read and write bandwidth") + bandwidth = Param.MemoryBandwidth( + "12.8GiB/s", "Combined read and write bandwidth" + ) def controller(self): # Simple memory doesn't use a MemCtrl diff --git a/src/mem/SysBridge.py b/src/mem/SysBridge.py index 2c42b75853..54479d8bb3 100644 --- a/src/mem/SysBridge.py +++ b/src/mem/SysBridge.py @@ -26,17 +26,21 @@ from m5.params import * from m5.SimObject import SimObject + class SysBridge(SimObject): - '''Use this bridge to connect the memory systems belonging to two different - Systems SimObjects. See the header file for more information.''' - type = 'SysBridge' + """Use this bridge to connect the memory systems belonging to two different + Systems SimObjects. See the header file for more information.""" + + type = "SysBridge" cxx_header = "mem/sys_bridge.hh" - cxx_class = 'gem5::SysBridge' + cxx_class = "gem5::SysBridge" source = Param.System("Source System") target = Param.System("Target System") target_port = RequestPort( - "A port which sends requests to a target system.") + "A port which sends requests to a target system." 
+ ) source_port = ResponsePort( - "A port which sends responses to a source system") + "A port which sends responses to a source system" + ) diff --git a/src/mem/ThreadBridge.py b/src/mem/ThreadBridge.py new file mode 100644 index 0000000000..f0ee0897ce --- /dev/null +++ b/src/mem/ThreadBridge.py @@ -0,0 +1,60 @@ +# Copyright 2022 Google, LLC +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from m5.SimObject import SimObject +from m5.params import * + + +class ThreadBridge(SimObject): + """Bridge for SimObjects from different threads (EventQueues) + + When two SimObjects running on two separate threads (EventQueues), an + access from one side to the other side could easily cause event scheduled + on the wrong event queue. + + ThreadBridge is used to migrate the EventQueue to the one used by + ThreadBridge itself before sending transation to the other side to avoid + the issue. The receiver side is expected to use the same EventQueue that + the ThreadBridge is using. + + Given that this is only used for simulation speed accelerating, only the + atomic and functional access are supported. + + Example: + + sys.initator = Initiator(eventq_index=0) + sys.target = Target(eventq_index=1) + sys.bridge = ThreadBridge(eventq_index=1) + + sys.initator.out_port = sys.bridge.in_port + sys.bridge.out_port = sys.target.in_port + """ + + type = "ThreadBridge" + cxx_header = "mem/thread_bridge.hh" + cxx_class = "gem5::ThreadBridge" + + in_port = ResponsePort("Incoming port") + out_port = RequestPort("Outgoing port") diff --git a/src/mem/XBar.py b/src/mem/XBar.py index a424c6f01d..dbadccb861 100644 --- a/src/mem/XBar.py +++ b/src/mem/XBar.py @@ -43,20 +43,25 @@ from m5.SimObject import SimObject from m5.objects.ClockedObject import ClockedObject + class BaseXBar(ClockedObject): - type = 'BaseXBar' + type = "BaseXBar" abstract = True cxx_header = "mem/xbar.hh" - cxx_class = 'gem5::BaseXBar' + cxx_class = "gem5::BaseXBar" - cpu_side_ports = VectorResponsePort("Vector port for connecting " - "mem side ports") - slave = DeprecatedParam(cpu_side_ports, - '`slave` is now called `cpu_side_ports`') - mem_side_ports = VectorRequestPort("Vector port for connecting " - "cpu side ports") - master = DeprecatedParam(mem_side_ports, - '`master` is now called `mem_side_ports`') + cpu_side_ports = VectorResponsePort( + "Vector port for connecting " "mem side ports" + ) + slave = 
DeprecatedParam( + cpu_side_ports, "`slave` is now called `cpu_side_ports`" + ) + mem_side_ports = VectorRequestPort( + "Vector port for connecting " "cpu side ports" + ) + master = DeprecatedParam( + mem_side_ports, "`master` is now called `mem_side_ports`" + ) # Latencies governing the time taken for the variuos paths a # packet has through the crossbar. Note that the crossbar itself @@ -92,18 +97,21 @@ class BaseXBar(ClockedObject): # ports. The default range is always checked first, thus creating # a two-level hierarchical lookup. This is useful e.g. for the PCI # xbar configuration. - use_default_range = Param.Bool(False, "Perform address mapping for " \ - "the default port") + use_default_range = Param.Bool( + False, "Perform address mapping for " "the default port" + ) + class NoncoherentXBar(BaseXBar): - type = 'NoncoherentXBar' + type = "NoncoherentXBar" cxx_header = "mem/noncoherent_xbar.hh" - cxx_class = 'gem5::NoncoherentXBar' + cxx_class = "gem5::NoncoherentXBar" + class CoherentXBar(BaseXBar): - type = 'CoherentXBar' + type = "CoherentXBar" cxx_header = "mem/coherent_xbar.hh" - cxx_class = 'gem5::CoherentXBar' + cxx_class = "gem5::CoherentXBar" # The coherent crossbar additionally has snoop responses that are # forwarded after a specific latency. @@ -121,19 +129,22 @@ class CoherentXBar(BaseXBar): # Determine how this crossbar handles packets where caches have # already committed to responding, by establishing if the crossbar # is the point of coherency or not. - point_of_coherency = Param.Bool(False, "Consider this crossbar the " \ - "point of coherency") + point_of_coherency = Param.Bool( + False, "Consider this crossbar the " "point of coherency" + ) # Specify whether this crossbar is the point of unification. 
- point_of_unification = Param.Bool(False, "Consider this crossbar the " \ - "point of unification") + point_of_unification = Param.Bool( + False, "Consider this crossbar the " "point of unification" + ) system = Param.System(Parent.any, "System that the crossbar belongs to.") + class SnoopFilter(SimObject): - type = 'SnoopFilter' + type = "SnoopFilter" cxx_header = "mem/snoop_filter.hh" - cxx_class = 'gem5::SnoopFilter' + cxx_class = "gem5::SnoopFilter" # Lookup latency of the snoop filter, added to requests that pass # through a coherent crossbar. @@ -142,7 +153,8 @@ class SnoopFilter(SimObject): system = Param.System(Parent.any, "System that the crossbar belongs to.") # Sanity check on max capacity to track, adjust if needed. - max_capacity = Param.MemorySize('8MiB', "Maximum capacity of snoop filter") + max_capacity = Param.MemorySize("8MiB", "Maximum capacity of snoop filter") + # We use a coherent crossbar to connect multiple requestors to the L2 # caches. Normally this crossbar would be part of the cache itself. @@ -160,13 +172,14 @@ class L2XBar(CoherentXBar): # Use a snoop-filter by default, and set the latency to zero as # the lookup is assumed to overlap with the frontend latency of # the crossbar - snoop_filter = SnoopFilter(lookup_latency = 0) + snoop_filter = SnoopFilter(lookup_latency=0) # This specialisation of the coherent crossbar is to be considered # the point of unification, it connects the dcache and the icache # to the first level of unified cache. point_of_unification = True + # One of the key coherent crossbar instances is the system # interconnect, tying together the CPU clusters, GPUs, and any I/O # coherent requestors, and DRAM controllers. 
@@ -182,7 +195,7 @@ class SystemXBar(CoherentXBar): snoop_response_latency = 4 # Use a snoop-filter by default - snoop_filter = SnoopFilter(lookup_latency = 1) + snoop_filter = SnoopFilter(lookup_latency=1) # This specialisation of the coherent crossbar is to be considered # the point of coherency, as there are no (coherent) downstream @@ -196,6 +209,7 @@ class SystemXBar(CoherentXBar): # unification. point_of_unification = True + # In addition to the system interconnect, we typically also have one # or more on-chip I/O crossbars. Note that at some point we might want # to also define an off-chip I/O crossbar such as PCIe. diff --git a/src/mem/cache/Cache.py b/src/mem/cache/Cache.py index 72665e11a3..1dfab1957f 100644 --- a/src/mem/cache/Cache.py +++ b/src/mem/cache/Cache.py @@ -48,46 +48,55 @@ from m5.objects.Tags import * # Enum for cache clusivity, currently mostly inclusive or mostly # exclusive. -class Clusivity(Enum): vals = ['mostly_incl', 'mostly_excl'] +class Clusivity(Enum): + vals = ["mostly_incl", "mostly_excl"] + class WriteAllocator(SimObject): - type = 'WriteAllocator' + type = "WriteAllocator" cxx_header = "mem/cache/cache.hh" - cxx_class = 'gem5::WriteAllocator' + cxx_class = "gem5::WriteAllocator" # Control the limits for when the cache introduces extra delays to # allow whole-line write coalescing, and eventually switches to a # write-no-allocate policy. 
- coalesce_limit = Param.Unsigned(2, "Consecutive lines written before " - "delaying for coalescing") - no_allocate_limit = Param.Unsigned(12, "Consecutive lines written before" - " skipping allocation") + coalesce_limit = Param.Unsigned( + 2, "Consecutive lines written before " "delaying for coalescing" + ) + no_allocate_limit = Param.Unsigned( + 12, "Consecutive lines written before" " skipping allocation" + ) - delay_threshold = Param.Unsigned(8, "Number of delay quanta imposed on an " - "MSHR with write requests to allow for " - "write coalescing") + delay_threshold = Param.Unsigned( + 8, + "Number of delay quanta imposed on an " + "MSHR with write requests to allow for " + "write coalescing", + ) block_size = Param.Int(Parent.cache_line_size, "block size in bytes") class BaseCache(ClockedObject): - type = 'BaseCache' + type = "BaseCache" abstract = True cxx_header = "mem/cache/base.hh" - cxx_class = 'gem5::BaseCache' + cxx_class = "gem5::BaseCache" size = Param.MemorySize("Capacity") assoc = Param.Unsigned("Associativity") tag_latency = Param.Cycles("Tag lookup latency") data_latency = Param.Cycles("Data access latency") - response_latency = Param.Cycles("Latency for the return path on a miss"); + response_latency = Param.Cycles("Latency for the return path on a miss") - warmup_percentage = Param.Percent(0, - "Percentage of tags to be touched to warm up the cache") + warmup_percentage = Param.Percent( + 0, "Percentage of tags to be touched to warm up the cache" + ) - max_miss_count = Param.Counter(0, - "Number of misses to handle before calling exit") + max_miss_count = Param.Counter( + 0, "Number of misses to handle before calling exit" + ) mshrs = Param.Unsigned("Number of MSHRs (max outstanding requests)") demand_mshr_reserve = Param.Unsigned(1, "MSHRs reserved for demand access") @@ -96,42 +105,55 @@ class BaseCache(ClockedObject): is_read_only = Param.Bool(False, "Is this cache read only (e.g. 
inst)") - prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache") - prefetch_on_access = Param.Bool(False, - "Notify the hardware prefetcher on every access (not just misses)") - prefetch_on_pf_hit = Param.Bool(False, - "Notify the hardware prefetcher on hit on prefetched lines") + prefetcher = Param.BasePrefetcher(NULL, "Prefetcher attached to cache") + prefetch_on_access = Param.Bool( + False, + "Notify the hardware prefetcher on every access (not just misses)", + ) + prefetch_on_pf_hit = Param.Bool( + False, "Notify the hardware prefetcher on hit on prefetched lines" + ) tags = Param.BaseTags(BaseSetAssoc(), "Tag store") - replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy") + replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy" + ) compressor = Param.BaseCacheCompressor(NULL, "Cache compressor.") - replace_expansions = Param.Bool(True, "Apply replacement policy to " \ - "decide which blocks should be evicted on a data expansion") + replace_expansions = Param.Bool( + True, + "Apply replacement policy to " + "decide which blocks should be evicted on a data expansion", + ) # When a block passes from uncompressed to compressed, it may become # co-allocatable with another existing entry of the same superblock, # so try move the block to co-allocate it - move_contractions = Param.Bool(True, "Try to co-allocate blocks that " - "contract") + move_contractions = Param.Bool( + True, "Try to co-allocate blocks that " "contract" + ) - sequential_access = Param.Bool(False, - "Whether to access tags and data sequentially") + sequential_access = Param.Bool( + False, "Whether to access tags and data sequentially" + ) cpu_side = ResponsePort("Upstream port closer to the CPU and/or device") mem_side = RequestPort("Downstream port closer to memory") - addr_ranges = VectorParam.AddrRange([AllMemory], - "Address range for the CPU-side port (to allow striping)") + addr_ranges = VectorParam.AddrRange( + 
[AllMemory], "Address range for the CPU-side port (to allow striping)" + ) system = Param.System(Parent.any, "System we belong to") # Determine if this cache sends out writebacks for clean lines, or - # simply clean evicts. In cases where a downstream cache is mostly - # exclusive with respect to this cache (acting as a victim cache), - # the clean writebacks are essential for performance. In general - # this should be set to True for anything but the last-level - # cache. + # simply clean evicts. If this cache does not have a downstream cache, + # the cache should not writeback clean lines not to waste memory + # bandwidth. If this cache has a downstream cache whose clusivity is + # mostly exclusive (i.e., victim cache), this shoule be set to True. + # If not, there will never be any spills from read-only caches (e.g., + # L1I cache, MMU cache of ARM) to the downstream cache. + # In case of the downstream cache is mostly inclusive, this should be + # set to False. writeback_clean = Param.Bool(False, "Writeback clean lines") # Control whether this cache should be mostly inclusive or mostly @@ -143,8 +165,7 @@ class BaseCache(ClockedObject): # allocating unless they came directly from a non-caching source, # e.g. a table walker. Additionally, on a hit from an upstream # cache a line is dropped for a mostly exclusive cache. - clusivity = Param.Clusivity('mostly_incl', - "Clusivity with upstream cache") + clusivity = Param.Clusivity("mostly_incl", "Clusivity with upstream cache") # The write allocator enables optimizations for streaming write # accesses by first coalescing writes and then avoiding allocation @@ -152,17 +173,18 @@ class BaseCache(ClockedObject): # data cache. 
write_allocator = Param.WriteAllocator(NULL, "Write allocator") + class Cache(BaseCache): - type = 'Cache' - cxx_header = 'mem/cache/cache.hh' - cxx_class = 'gem5::Cache' + type = "Cache" + cxx_header = "mem/cache/cache.hh" + cxx_class = "gem5::Cache" + class NoncoherentCache(BaseCache): - type = 'NoncoherentCache' - cxx_header = 'mem/cache/noncoherent_cache.hh' - cxx_class = 'gem5::NoncoherentCache' + type = "NoncoherentCache" + cxx_header = "mem/cache/noncoherent_cache.hh" + cxx_class = "gem5::NoncoherentCache" # This is typically a last level cache and any clean # writebacks would be unnecessary traffic to the main memory. writeback_clean = False - diff --git a/src/mem/cache/SConscript b/src/mem/cache/SConscript index f1bd83a82b..dd8f2b145b 100644 --- a/src/mem/cache/SConscript +++ b/src/mem/cache/SConscript @@ -56,4 +56,3 @@ DebugFlag('HWPrefetchQueue') # it explicitly even above and beyond CacheAll. CompoundFlag('CacheAll', ['Cache', 'CacheComp', 'CachePort', 'CacheRepl', 'CacheVerbose', 'HWPrefetch', 'MSHR']) - diff --git a/src/mem/cache/compressors/Compressors.py b/src/mem/cache/compressors/Compressors.py index ec93900ef0..c8f82c55a1 100644 --- a/src/mem/cache/compressors/Compressors.py +++ b/src/mem/cache/compressors/Compressors.py @@ -31,40 +31,55 @@ from m5.SimObject import * from m5.objects.IndexingPolicies import * from m5.objects.ReplacementPolicies import * + class BaseCacheCompressor(SimObject): - type = 'BaseCacheCompressor' + type = "BaseCacheCompressor" abstract = True - cxx_class = 'gem5::compression::Base' + cxx_class = "gem5::compression::Base" cxx_header = "mem/cache/compressors/base.hh" block_size = Param.Int(Parent.cache_line_size, "Block size in bytes") - chunk_size_bits = Param.Unsigned(32, - "Size of a parsing data chunk (in bits)") - size_threshold_percentage = Param.Percent(50, + chunk_size_bits = Param.Unsigned( + 32, "Size of a parsing data chunk (in bits)" + ) + size_threshold_percentage = Param.Percent( + 50, "Minimum percentage of 
the block size, a compressed block must " - "achieve to be stored in compressed format") + "achieve to be stored in compressed format", + ) + + comp_chunks_per_cycle = Param.Unsigned( + 1, "Number of chunks that can be compressed in parallel per cycle." + ) + comp_extra_latency = Param.Cycles( + 1, + "Number of extra cycles required " + "to finish compression (e.g., due to shifting and packaging).", + ) + decomp_chunks_per_cycle = Param.Unsigned( + 1, "Number of chunks that can be decompressed in parallel per cycle." + ) + decomp_extra_latency = Param.Cycles( + 1, + "Number of extra cycles required " + "to finish decompression (e.g., due to shifting and packaging).", + ) - comp_chunks_per_cycle = Param.Unsigned(1, - "Number of chunks that can be compressed in parallel per cycle.") - comp_extra_latency = Param.Cycles(1, "Number of extra cycles required " - "to finish compression (e.g., due to shifting and packaging).") - decomp_chunks_per_cycle = Param.Unsigned(1, - "Number of chunks that can be decompressed in parallel per cycle.") - decomp_extra_latency = Param.Cycles(1, "Number of extra cycles required " - "to finish decompression (e.g., due to shifting and packaging).") class BaseDictionaryCompressor(BaseCacheCompressor): - type = 'BaseDictionaryCompressor' + type = "BaseDictionaryCompressor" abstract = True - cxx_class = 'gem5::compression::BaseDictionaryCompressor' + cxx_class = "gem5::compression::BaseDictionaryCompressor" cxx_header = "mem/cache/compressors/dictionary_compressor.hh" - dictionary_size = Param.Int(Parent.cache_line_size, - "Number of dictionary entries") + dictionary_size = Param.Int( + Parent.cache_line_size, "Number of dictionary entries" + ) + class Base64Delta8(BaseDictionaryCompressor): - type = 'Base64Delta8' - cxx_class = 'gem5::compression::Base64Delta8' + type = "Base64Delta8" + cxx_class = "gem5::compression::Base64Delta8" cxx_header = "mem/cache/compressors/base_delta.hh" chunk_size_bits = 64 @@ -75,9 +90,10 @@ class 
Base64Delta8(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class Base64Delta16(BaseDictionaryCompressor): - type = 'Base64Delta16' - cxx_class = 'gem5::compression::Base64Delta16' + type = "Base64Delta16" + cxx_class = "gem5::compression::Base64Delta16" cxx_header = "mem/cache/compressors/base_delta.hh" chunk_size_bits = 64 @@ -88,9 +104,10 @@ class Base64Delta16(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class Base64Delta32(BaseDictionaryCompressor): - type = 'Base64Delta32' - cxx_class = 'gem5::compression::Base64Delta32' + type = "Base64Delta32" + cxx_class = "gem5::compression::Base64Delta32" cxx_header = "mem/cache/compressors/base_delta.hh" chunk_size_bits = 64 @@ -101,9 +118,10 @@ class Base64Delta32(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class Base32Delta8(BaseDictionaryCompressor): - type = 'Base32Delta8' - cxx_class = 'gem5::compression::Base32Delta8' + type = "Base32Delta8" + cxx_class = "gem5::compression::Base32Delta8" cxx_header = "mem/cache/compressors/base_delta.hh" chunk_size_bits = 32 @@ -114,9 +132,10 @@ class Base32Delta8(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class Base32Delta16(BaseDictionaryCompressor): - type = 'Base32Delta16' - cxx_class = 'gem5::compression::Base32Delta16' + type = "Base32Delta16" + cxx_class = "gem5::compression::Base32Delta16" cxx_header = "mem/cache/compressors/base_delta.hh" chunk_size_bits = 32 @@ -127,9 +146,10 @@ class Base32Delta16(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class Base16Delta8(BaseDictionaryCompressor): - type = 'Base16Delta8' - cxx_class = 'gem5::compression::Base16Delta8' + type = "Base16Delta8" + 
cxx_class = "gem5::compression::Base16Delta8" cxx_header = "mem/cache/compressors/base_delta.hh" chunk_size_bits = 16 @@ -140,9 +160,10 @@ class Base16Delta8(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class CPack(BaseDictionaryCompressor): - type = 'CPack' - cxx_class = 'gem5::compression::CPack' + type = "CPack" + cxx_class = "gem5::compression::CPack" cxx_header = "mem/cache/compressors/cpack.hh" comp_chunks_per_cycle = 2 @@ -151,9 +172,10 @@ class CPack(BaseDictionaryCompressor): decomp_chunks_per_cycle = 2 decomp_extra_latency = 1 + class FPC(BaseDictionaryCompressor): - type = 'FPC' - cxx_class = 'gem5::compression::FPC' + type = "FPC" + cxx_class = "gem5::compression::FPC" cxx_header = "mem/cache/compressors/fpc.hh" comp_chunks_per_cycle = 8 @@ -166,9 +188,10 @@ class FPC(BaseDictionaryCompressor): zero_run_bits = Param.Int(3, "Number of bits of the zero run bit field") + class FPCD(BaseDictionaryCompressor): - type = 'FPCD' - cxx_class = 'gem5::compression::FPCD' + type = "FPCD" + cxx_class = "gem5::compression::FPCD" cxx_header = "mem/cache/compressors/fpcd.hh" # Accounts for checking all patterns, selecting patterns, and shifting @@ -181,50 +204,71 @@ class FPCD(BaseDictionaryCompressor): dictionary_size = 2 + class FrequentValuesCompressor(BaseCacheCompressor): - type = 'FrequentValuesCompressor' - cxx_class = 'gem5::compression::FrequentValues' + type = "FrequentValuesCompressor" + cxx_class = "gem5::compression::FrequentValues" cxx_header = "mem/cache/compressors/frequent_values.hh" chunk_size_bits = 32 - code_generation_ticks = Param.Unsigned(10000, "Number of elapsed " \ - "ticks until the samples are analyzed and their codes are generated.") + code_generation_ticks = Param.Unsigned( + 10000, + "Number of elapsed " + "ticks until the samples are analyzed and their codes are generated.", + ) # @todo The width of a counter width is determined by the maximum # number of times 
a given value appears in the cache - i.e., # log2(cache_size/chunk_size_bits))". counter_bits = Param.Unsigned(18, "Number of bits per frequency counter.") - max_code_length = Param.Unsigned(18, "Maximum number of bits in a " - "codeword. If 0, table indices are not encoded.") - num_samples = Param.Unsigned(100000, "Number of samples that must be " \ - "taken before compression is effectively used.") - check_saturation = Param.Bool(False, "Whether the counters should be " \ - "manipulated in case of saturation.") + max_code_length = Param.Unsigned( + 18, + "Maximum number of bits in a " + "codeword. If 0, table indices are not encoded.", + ) + num_samples = Param.Unsigned( + 100000, + "Number of samples that must be " + "taken before compression is effectively used.", + ) + check_saturation = Param.Bool( + False, + "Whether the counters should be " "manipulated in case of saturation.", + ) vft_assoc = Param.Int(16, "Associativity of the VFT.") vft_entries = Param.MemorySize("1024", "Number of entries of the VFT.") vft_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.vft_assoc, - size = Parent.vft_entries), "Indexing policy of the VFT.") - vft_replacement_policy = Param.BaseReplacementPolicy(LFURP(), - "Replacement policy of the VFT.") + SetAssociative( + entry_size=1, assoc=Parent.vft_assoc, size=Parent.vft_entries + ), + "Indexing policy of the VFT.", + ) + vft_replacement_policy = Param.BaseReplacementPolicy( + LFURP(), "Replacement policy of the VFT." + ) comp_chunks_per_cycle = 1 comp_extra_latency = 1 decomp_chunks_per_cycle = 1 decomp_extra_latency = 0 + class MultiCompressor(BaseCacheCompressor): - type = 'MultiCompressor' - cxx_class = 'gem5::compression::Multi' + type = "MultiCompressor" + cxx_class = "gem5::compression::Multi" cxx_header = "mem/cache/compressors/multi.hh" # Dummy default compressor list. 
This might not be an optimal choice, # since these compressors have many overlapping patterns - compressors = VectorParam.BaseCacheCompressor([CPack(), FPCD()], - "Array of compressors") - encoding_in_tags = Param.Bool(False, "If set the bits to inform which " + compressors = VectorParam.BaseCacheCompressor( + [CPack(), FPCD()], "Array of compressors" + ) + encoding_in_tags = Param.Bool( + False, + "If set the bits to inform which " "sub-compressor compressed some data are added to its corresponding " - "tag entry.") + "tag entry.", + ) # Use the sub-compressors' latencies comp_chunks_per_cycle = 0 @@ -237,9 +281,10 @@ class MultiCompressor(BaseCacheCompressor): # which sub-compressor should be used to decompress the data decomp_extra_latency = 1 + class PerfectCompressor(BaseCacheCompressor): - type = 'PerfectCompressor' - cxx_class = 'gem5::compression::Perfect' + type = "PerfectCompressor" + cxx_class = "gem5::compression::Perfect" cxx_header = "mem/cache/compressors/perfect.hh" chunk_size_bits = 64 @@ -252,9 +297,10 @@ class PerfectCompressor(BaseCacheCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class RepeatedQwordsCompressor(BaseDictionaryCompressor): - type = 'RepeatedQwordsCompressor' - cxx_class = 'gem5::compression::RepeatedQwords' + type = "RepeatedQwordsCompressor" + cxx_class = "gem5::compression::RepeatedQwords" cxx_header = "mem/cache/compressors/repeated_qwords.hh" chunk_size_bits = 64 @@ -265,9 +311,10 @@ class RepeatedQwordsCompressor(BaseDictionaryCompressor): decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class ZeroCompressor(BaseDictionaryCompressor): - type = 'ZeroCompressor' - cxx_class = 'gem5::compression::Zero' + type = "ZeroCompressor" + cxx_class = "gem5::compression::Zero" cxx_header = "mem/cache/compressors/zero.hh" chunk_size_bits = 64 @@ -278,6 +325,7 @@ class ZeroCompressor(BaseDictionaryCompressor): 
decomp_chunks_per_cycle = 8 * Self.block_size / Self.chunk_size_bits decomp_extra_latency = 0 + class BDI(MultiCompressor): compressors = [ ZeroCompressor(size_threshold_percentage=99), @@ -293,4 +341,4 @@ class BDI(MultiCompressor): # By default assume that the encoding is stored in the tags, and is # retrieved and decoded while (and ends before) the data is being read. decomp_extra_latency = 0 - encoding_in_tags=True + encoding_in_tags = True diff --git a/src/mem/cache/prefetch/Prefetcher.py b/src/mem/cache/prefetch/Prefetcher.py index 024004e57b..397711c09a 100644 --- a/src/mem/cache/prefetch/Prefetcher.py +++ b/src/mem/cache/prefetch/Prefetcher.py @@ -44,6 +44,7 @@ from m5.objects.ClockedObject import ClockedObject from m5.objects.IndexingPolicies import * from m5.objects.ReplacementPolicies import * + class HWPProbeEvent(object): def __init__(self, prefetcher, obj, *listOfNames): self.obj = obj @@ -54,17 +55,16 @@ class HWPProbeEvent(object): if self.obj: for name in self.names: self.prefetcher.getCCObject().addEventProbe( - self.obj.getCCObject(), name) + self.obj.getCCObject(), name + ) + class BasePrefetcher(ClockedObject): - type = 'BasePrefetcher' + type = "BasePrefetcher" abstract = True - cxx_class = 'gem5::prefetch::Base' + cxx_class = "gem5::prefetch::Base" cxx_header = "mem/cache/prefetch/base.hh" - cxx_exports = [ - PyBindMethod("addEventProbe"), - PyBindMethod("addTLB"), - ] + cxx_exports = [PyBindMethod("addEventProbe"), PyBindMethod("addTLB")] sys = Param.System(Parent.any, "System this prefetcher belongs to") # Get the block size from the parent (system) @@ -73,16 +73,22 @@ class BasePrefetcher(ClockedObject): on_miss = Param.Bool(False, "Only notify prefetcher on misses") on_read = Param.Bool(True, "Notify prefetcher on reads") on_write = Param.Bool(True, "Notify prefetcher on writes") - on_data = Param.Bool(True, "Notify prefetcher on data accesses") - on_inst = Param.Bool(True, "Notify prefetcher on instruction accesses") - prefetch_on_access 
= Param.Bool(Parent.prefetch_on_access, - "Notify the hardware prefetcher on every access (not just misses)") - prefetch_on_pf_hit = Param.Bool(Parent.prefetch_on_pf_hit, - "Notify the hardware prefetcher on hit on prefetched lines") - use_virtual_addresses = Param.Bool(False, - "Use virtual addresses for prefetching") - page_bytes = Param.MemorySize('4KiB', - "Size of pages for virtual addresses") + on_data = Param.Bool(True, "Notify prefetcher on data accesses") + on_inst = Param.Bool(True, "Notify prefetcher on instruction accesses") + prefetch_on_access = Param.Bool( + Parent.prefetch_on_access, + "Notify the hardware prefetcher on every access (not just misses)", + ) + prefetch_on_pf_hit = Param.Bool( + Parent.prefetch_on_pf_hit, + "Notify the hardware prefetcher on hit on prefetched lines", + ) + use_virtual_addresses = Param.Bool( + False, "Use virtual addresses for prefetching" + ) + page_bytes = Param.MemorySize( + "4KiB", "Size of pages for virtual addresses" + ) def __init__(self, **kwargs): super().__init__(**kwargs) @@ -113,27 +119,35 @@ class BasePrefetcher(ClockedObject): raise TypeError("argument must be a SimObject type") self._tlbs.append(simObj) + class MultiPrefetcher(BasePrefetcher): - type = 'MultiPrefetcher' - cxx_class = 'gem5::prefetch::Multi' - cxx_header = 'mem/cache/prefetch/multi.hh' + type = "MultiPrefetcher" + cxx_class = "gem5::prefetch::Multi" + cxx_header = "mem/cache/prefetch/multi.hh" prefetchers = VectorParam.BasePrefetcher([], "Array of prefetchers") + class QueuedPrefetcher(BasePrefetcher): type = "QueuedPrefetcher" abstract = True - cxx_class = 'gem5::prefetch::Queued' + cxx_class = "gem5::prefetch::Queued" cxx_header = "mem/cache/prefetch/queued.hh" latency = Param.Int(1, "Latency for generated prefetches") queue_size = Param.Int(32, "Maximum number of queued prefetches") - max_prefetch_requests_with_pending_translation = Param.Int(32, - "Maximum number of queued prefetches that have a missing translation") + 
max_prefetch_requests_with_pending_translation = Param.Int( + 32, + "Maximum number of queued prefetches that have a missing translation", + ) queue_squash = Param.Bool(True, "Squash queued prefetch on demand access") queue_filter = Param.Bool(True, "Don't queue redundant prefetches") - cache_snoop = Param.Bool(False, "Snoop cache to eliminate redundant request") + cache_snoop = Param.Bool( + False, "Snoop cache to eliminate redundant request" + ) - tag_prefetch = Param.Bool(True, "Tag prefetch with PC of generating access") + tag_prefetch = Param.Bool( + True, "Tag prefetch with PC of generating access" + ) # The throttle_control_percentage controls how many of the candidate # addresses generated by the prefetcher will be finally turned into @@ -144,28 +158,36 @@ class QueuedPrefetcher(BasePrefetcher): # created for all candidates # - If set to 60, 40% of candidates will generate a request, and the # remaining 60% will be generated depending on the current accuracy - throttle_control_percentage = Param.Percent(0, "Percentage of requests \ - that can be throttled depending on the accuracy of the prefetcher.") + throttle_control_percentage = Param.Percent( + 0, + "Percentage of requests \ + that can be throttled depending on the accuracy of the prefetcher.", + ) + class StridePrefetcherHashedSetAssociative(SetAssociative): - type = 'StridePrefetcherHashedSetAssociative' - cxx_class = 'gem5::prefetch::StridePrefetcherHashedSetAssociative' + type = "StridePrefetcherHashedSetAssociative" + cxx_class = "gem5::prefetch::StridePrefetcherHashedSetAssociative" cxx_header = "mem/cache/prefetch/stride.hh" + class StridePrefetcher(QueuedPrefetcher): - type = 'StridePrefetcher' - cxx_class = 'gem5::prefetch::Stride' + type = "StridePrefetcher" + cxx_class = "gem5::prefetch::Stride" cxx_header = "mem/cache/prefetch/stride.hh" # Do not consult stride prefetcher on instruction accesses on_inst = False - confidence_counter_bits = Param.Unsigned(3, - "Number of bits of the 
confidence counter") - initial_confidence = Param.Unsigned(4, - "Starting confidence of new entries") - confidence_threshold = Param.Percent(50, - "Prefetch generation confidence threshold") + confidence_counter_bits = Param.Unsigned( + 3, "Number of bits of the confidence counter" + ) + initial_confidence = Param.Unsigned( + 4, "Starting confidence of new entries" + ) + confidence_threshold = Param.Percent( + 50, "Prefetch generation confidence threshold" + ) use_requestor_id = Param.Bool(True, "Use requestor id based history") @@ -174,97 +196,139 @@ class StridePrefetcher(QueuedPrefetcher): table_assoc = Param.Int(4, "Associativity of the PC table") table_entries = Param.MemorySize("64", "Number of entries of the PC table") table_indexing_policy = Param.BaseIndexingPolicy( - StridePrefetcherHashedSetAssociative(entry_size = 1, - assoc = Parent.table_assoc, size = Parent.table_entries), - "Indexing policy of the PC table") - table_replacement_policy = Param.BaseReplacementPolicy(RandomRP(), - "Replacement policy of the PC table") + StridePrefetcherHashedSetAssociative( + entry_size=1, assoc=Parent.table_assoc, size=Parent.table_entries + ), + "Indexing policy of the PC table", + ) + table_replacement_policy = Param.BaseReplacementPolicy( + RandomRP(), "Replacement policy of the PC table" + ) + class TaggedPrefetcher(QueuedPrefetcher): - type = 'TaggedPrefetcher' - cxx_class = 'gem5::prefetch::Tagged' + type = "TaggedPrefetcher" + cxx_class = "gem5::prefetch::Tagged" cxx_header = "mem/cache/prefetch/tagged.hh" degree = Param.Int(2, "Number of prefetches to generate") + class IndirectMemoryPrefetcher(QueuedPrefetcher): - type = 'IndirectMemoryPrefetcher' - cxx_class = 'gem5::prefetch::IndirectMemory' + type = "IndirectMemoryPrefetcher" + cxx_class = "gem5::prefetch::IndirectMemory" cxx_header = "mem/cache/prefetch/indirect_memory.hh" - pt_table_entries = Param.MemorySize("16", - "Number of entries of the Prefetch Table") + pt_table_entries = Param.MemorySize( + 
"16", "Number of entries of the Prefetch Table" + ) pt_table_assoc = Param.Unsigned(16, "Associativity of the Prefetch Table") pt_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.pt_table_assoc, - size = Parent.pt_table_entries), - "Indexing policy of the pattern table") - pt_table_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the pattern table") + SetAssociative( + entry_size=1, + assoc=Parent.pt_table_assoc, + size=Parent.pt_table_entries, + ), + "Indexing policy of the pattern table", + ) + pt_table_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the pattern table" + ) max_prefetch_distance = Param.Unsigned(16, "Maximum prefetch distance") - num_indirect_counter_bits = Param.Unsigned(3, - "Number of bits of the indirect counter") - ipd_table_entries = Param.MemorySize("4", - "Number of entries of the Indirect Pattern Detector") - ipd_table_assoc = Param.Unsigned(4, - "Associativity of the Indirect Pattern Detector") + num_indirect_counter_bits = Param.Unsigned( + 3, "Number of bits of the indirect counter" + ) + ipd_table_entries = Param.MemorySize( + "4", "Number of entries of the Indirect Pattern Detector" + ) + ipd_table_assoc = Param.Unsigned( + 4, "Associativity of the Indirect Pattern Detector" + ) ipd_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.ipd_table_assoc, - size = Parent.ipd_table_entries), - "Indexing policy of the Indirect Pattern Detector") - ipd_table_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the Indirect Pattern Detector") + SetAssociative( + entry_size=1, + assoc=Parent.ipd_table_assoc, + size=Parent.ipd_table_entries, + ), + "Indexing policy of the Indirect Pattern Detector", + ) + ipd_table_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the Indirect Pattern Detector" + ) shift_values = 
VectorParam.Int([2, 3, 4, -3], "Shift values to evaluate") addr_array_len = Param.Unsigned(4, "Number of misses tracked") - prefetch_threshold = Param.Unsigned(2, - "Counter threshold to start the indirect prefetching") - stream_counter_threshold = Param.Unsigned(4, - "Counter threshold to enable the stream prefetcher") - streaming_distance = Param.Unsigned(4, - "Number of prefetches to generate when using the stream prefetcher") + prefetch_threshold = Param.Unsigned( + 2, "Counter threshold to start the indirect prefetching" + ) + stream_counter_threshold = Param.Unsigned( + 4, "Counter threshold to enable the stream prefetcher" + ) + streaming_distance = Param.Unsigned( + 4, "Number of prefetches to generate when using the stream prefetcher" + ) + class SignaturePathPrefetcher(QueuedPrefetcher): - type = 'SignaturePathPrefetcher' - cxx_class = 'gem5::prefetch::SignaturePath' + type = "SignaturePathPrefetcher" + cxx_class = "gem5::prefetch::SignaturePath" cxx_header = "mem/cache/prefetch/signature_path.hh" - signature_shift = Param.UInt8(3, - "Number of bits to shift when calculating a new signature"); - signature_bits = Param.UInt16(12, - "Size of the signature, in bits"); - signature_table_entries = Param.MemorySize("1024", - "Number of entries of the signature table") - signature_table_assoc = Param.Unsigned(2, - "Associativity of the signature table") + signature_shift = Param.UInt8( + 3, "Number of bits to shift when calculating a new signature" + ) + signature_bits = Param.UInt16(12, "Size of the signature, in bits") + signature_table_entries = Param.MemorySize( + "1024", "Number of entries of the signature table" + ) + signature_table_assoc = Param.Unsigned( + 2, "Associativity of the signature table" + ) signature_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.signature_table_assoc, - size = Parent.signature_table_entries), - "Indexing policy of the signature table") - signature_table_replacement_policy = 
Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the signature table") + SetAssociative( + entry_size=1, + assoc=Parent.signature_table_assoc, + size=Parent.signature_table_entries, + ), + "Indexing policy of the signature table", + ) + signature_table_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the signature table" + ) - num_counter_bits = Param.UInt8(3, - "Number of bits of the saturating counters") - pattern_table_entries = Param.MemorySize("4096", - "Number of entries of the pattern table") - pattern_table_assoc = Param.Unsigned(1, - "Associativity of the pattern table") - strides_per_pattern_entry = Param.Unsigned(4, - "Number of strides stored in each pattern entry") + num_counter_bits = Param.UInt8( + 3, "Number of bits of the saturating counters" + ) + pattern_table_entries = Param.MemorySize( + "4096", "Number of entries of the pattern table" + ) + pattern_table_assoc = Param.Unsigned( + 1, "Associativity of the pattern table" + ) + strides_per_pattern_entry = Param.Unsigned( + 4, "Number of strides stored in each pattern entry" + ) pattern_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.pattern_table_assoc, - size = Parent.pattern_table_entries), - "Indexing policy of the pattern table") - pattern_table_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the pattern table") + SetAssociative( + entry_size=1, + assoc=Parent.pattern_table_assoc, + size=Parent.pattern_table_entries, + ), + "Indexing policy of the pattern table", + ) + pattern_table_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the pattern table" + ) + + prefetch_confidence_threshold = Param.Float( + 0.5, "Minimum confidence to issue prefetches" + ) + lookahead_confidence_threshold = Param.Float( + 0.75, "Minimum confidence to continue exploring lookahead entries" + ) - prefetch_confidence_threshold = Param.Float(0.5, - 
"Minimum confidence to issue prefetches") - lookahead_confidence_threshold = Param.Float(0.75, - "Minimum confidence to continue exploring lookahead entries") class SignaturePathPrefetcherV2(SignaturePathPrefetcher): - type = 'SignaturePathPrefetcherV2' - cxx_class = 'gem5::prefetch::SignaturePathV2' + type = "SignaturePathPrefetcherV2" + cxx_class = "gem5::prefetch::SignaturePathV2" cxx_header = "mem/cache/prefetch/signature_path_v2.hh" signature_table_entries = "256" @@ -275,259 +339,351 @@ class SignaturePathPrefetcherV2(SignaturePathPrefetcher): prefetch_confidence_threshold = 0.25 lookahead_confidence_threshold = 0.25 - global_history_register_entries = Param.MemorySize("8", - "Number of entries of global history register") + global_history_register_entries = Param.MemorySize( + "8", "Number of entries of global history register" + ) global_history_register_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, - assoc = Parent.global_history_register_entries, - size = Parent.global_history_register_entries), - "Indexing policy of the global history register") + SetAssociative( + entry_size=1, + assoc=Parent.global_history_register_entries, + size=Parent.global_history_register_entries, + ), + "Indexing policy of the global history register", + ) global_history_register_replacement_policy = Param.BaseReplacementPolicy( - LRURP(), "Replacement policy of the global history register") + LRURP(), "Replacement policy of the global history register" + ) + class AccessMapPatternMatching(ClockedObject): - type = 'AccessMapPatternMatching' - cxx_class = 'gem5::prefetch::AccessMapPatternMatching' + type = "AccessMapPatternMatching" + cxx_class = "gem5::prefetch::AccessMapPatternMatching" cxx_header = "mem/cache/prefetch/access_map_pattern_matching.hh" - block_size = Param.Unsigned(Parent.block_size, - "Cacheline size used by the prefetcher using this object") + block_size = Param.Unsigned( + Parent.block_size, + "Cacheline size used by the 
prefetcher using this object", + ) - limit_stride = Param.Unsigned(0, - "Limit the strides checked up to -X/X, if 0, disable the limit") - start_degree = Param.Unsigned(4, - "Initial degree (Maximum number of prefetches generated") + limit_stride = Param.Unsigned( + 0, "Limit the strides checked up to -X/X, if 0, disable the limit" + ) + start_degree = Param.Unsigned( + 4, "Initial degree (Maximum number of prefetches generated" + ) hot_zone_size = Param.MemorySize("2KiB", "Memory covered by a hot zone") - access_map_table_entries = Param.MemorySize("256", - "Number of entries in the access map table") - access_map_table_assoc = Param.Unsigned(8, - "Associativity of the access map table") + access_map_table_entries = Param.MemorySize( + "256", "Number of entries in the access map table" + ) + access_map_table_assoc = Param.Unsigned( + 8, "Associativity of the access map table" + ) access_map_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.access_map_table_assoc, - size = Parent.access_map_table_entries), - "Indexing policy of the access map table") - access_map_table_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the access map table") - high_coverage_threshold = Param.Float(0.25, - "A prefetch coverage factor bigger than this is considered high") - low_coverage_threshold = Param.Float(0.125, - "A prefetch coverage factor smaller than this is considered low") - high_accuracy_threshold = Param.Float(0.5, - "A prefetch accuracy factor bigger than this is considered high") - low_accuracy_threshold = Param.Float(0.25, - "A prefetch accuracy factor smaller than this is considered low") - high_cache_hit_threshold = Param.Float(0.875, - "A cache hit ratio bigger than this is considered high") - low_cache_hit_threshold = Param.Float(0.75, - "A cache hit ratio smaller than this is considered low") + SetAssociative( + entry_size=1, + assoc=Parent.access_map_table_assoc, + 
size=Parent.access_map_table_entries, + ), + "Indexing policy of the access map table", + ) + access_map_table_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the access map table" + ) + high_coverage_threshold = Param.Float( + 0.25, "A prefetch coverage factor bigger than this is considered high" + ) + low_coverage_threshold = Param.Float( + 0.125, "A prefetch coverage factor smaller than this is considered low" + ) + high_accuracy_threshold = Param.Float( + 0.5, "A prefetch accuracy factor bigger than this is considered high" + ) + low_accuracy_threshold = Param.Float( + 0.25, "A prefetch accuracy factor smaller than this is considered low" + ) + high_cache_hit_threshold = Param.Float( + 0.875, "A cache hit ratio bigger than this is considered high" + ) + low_cache_hit_threshold = Param.Float( + 0.75, "A cache hit ratio smaller than this is considered low" + ) epoch_cycles = Param.Cycles(256000, "Cycles in an epoch period") - offchip_memory_latency = Param.Latency("30ns", - "Memory latency used to compute the required memory bandwidth") + offchip_memory_latency = Param.Latency( + "30ns", "Memory latency used to compute the required memory bandwidth" + ) + class AMPMPrefetcher(QueuedPrefetcher): - type = 'AMPMPrefetcher' - cxx_class = 'gem5::prefetch::AMPM' + type = "AMPMPrefetcher" + cxx_class = "gem5::prefetch::AMPM" cxx_header = "mem/cache/prefetch/access_map_pattern_matching.hh" - ampm = Param.AccessMapPatternMatching( AccessMapPatternMatching(), - "Access Map Pattern Matching object") + ampm = Param.AccessMapPatternMatching( + AccessMapPatternMatching(), "Access Map Pattern Matching object" + ) + class DeltaCorrelatingPredictionTables(SimObject): - type = 'DeltaCorrelatingPredictionTables' - cxx_class = 'gem5::prefetch::DeltaCorrelatingPredictionTables' + type = "DeltaCorrelatingPredictionTables" + cxx_class = "gem5::prefetch::DeltaCorrelatingPredictionTables" cxx_header = 
"mem/cache/prefetch/delta_correlating_prediction_tables.hh" - deltas_per_entry = Param.Unsigned(20, - "Number of deltas stored in each table entry") + deltas_per_entry = Param.Unsigned( + 20, "Number of deltas stored in each table entry" + ) delta_bits = Param.Unsigned(12, "Bits per delta") - delta_mask_bits = Param.Unsigned(8, - "Lower bits to mask when comparing deltas") - table_entries = Param.MemorySize("128", - "Number of entries in the table") - table_assoc = Param.Unsigned(128, - "Associativity of the table") + delta_mask_bits = Param.Unsigned( + 8, "Lower bits to mask when comparing deltas" + ) + table_entries = Param.MemorySize("128", "Number of entries in the table") + table_assoc = Param.Unsigned(128, "Associativity of the table") table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.table_assoc, - size = Parent.table_entries), - "Indexing policy of the table") - table_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the table") + SetAssociative( + entry_size=1, assoc=Parent.table_assoc, size=Parent.table_entries + ), + "Indexing policy of the table", + ) + table_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the table" + ) + class DCPTPrefetcher(QueuedPrefetcher): - type = 'DCPTPrefetcher' - cxx_class = 'gem5::prefetch::DCPT' + type = "DCPTPrefetcher" + cxx_class = "gem5::prefetch::DCPT" cxx_header = "mem/cache/prefetch/delta_correlating_prediction_tables.hh" dcpt = Param.DeltaCorrelatingPredictionTables( DeltaCorrelatingPredictionTables(), - "Delta Correlating Prediction Tables object") + "Delta Correlating Prediction Tables object", + ) + class IrregularStreamBufferPrefetcher(QueuedPrefetcher): type = "IrregularStreamBufferPrefetcher" - cxx_class = 'gem5::prefetch::IrregularStreamBuffer' + cxx_class = "gem5::prefetch::IrregularStreamBuffer" cxx_header = "mem/cache/prefetch/irregular_stream_buffer.hh" - num_counter_bits = Param.Unsigned(2, 
- "Number of bits of the confidence counter") - chunk_size = Param.Unsigned(256, - "Maximum number of addresses in a temporal stream") + num_counter_bits = Param.Unsigned( + 2, "Number of bits of the confidence counter" + ) + chunk_size = Param.Unsigned( + 256, "Maximum number of addresses in a temporal stream" + ) degree = Param.Unsigned(4, "Number of prefetches to generate") - training_unit_assoc = Param.Unsigned(128, - "Associativity of the training unit") - training_unit_entries = Param.MemorySize("128", - "Number of entries of the training unit") + training_unit_assoc = Param.Unsigned( + 128, "Associativity of the training unit" + ) + training_unit_entries = Param.MemorySize( + "128", "Number of entries of the training unit" + ) training_unit_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.training_unit_assoc, - size = Parent.training_unit_entries), - "Indexing policy of the training unit") - training_unit_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the training unit") + SetAssociative( + entry_size=1, + assoc=Parent.training_unit_assoc, + size=Parent.training_unit_entries, + ), + "Indexing policy of the training unit", + ) + training_unit_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the training unit" + ) - prefetch_candidates_per_entry = Param.Unsigned(16, - "Number of prefetch candidates stored in a SP-AMC entry") - address_map_cache_assoc = Param.Unsigned(128, - "Associativity of the PS/SP AMCs") - address_map_cache_entries = Param.MemorySize("128", - "Number of entries of the PS/SP AMCs") + prefetch_candidates_per_entry = Param.Unsigned( + 16, "Number of prefetch candidates stored in a SP-AMC entry" + ) + address_map_cache_assoc = Param.Unsigned( + 128, "Associativity of the PS/SP AMCs" + ) + address_map_cache_entries = Param.MemorySize( + "128", "Number of entries of the PS/SP AMCs" + ) ps_address_map_cache_indexing_policy = 
Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, - assoc = Parent.address_map_cache_assoc, - size = Parent.address_map_cache_entries), - "Indexing policy of the Physical-to-Structural Address Map Cache") + SetAssociative( + entry_size=1, + assoc=Parent.address_map_cache_assoc, + size=Parent.address_map_cache_entries, + ), + "Indexing policy of the Physical-to-Structural Address Map Cache", + ) ps_address_map_cache_replacement_policy = Param.BaseReplacementPolicy( LRURP(), - "Replacement policy of the Physical-to-Structural Address Map Cache") + "Replacement policy of the Physical-to-Structural Address Map Cache", + ) sp_address_map_cache_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, - assoc = Parent.address_map_cache_assoc, - size = Parent.address_map_cache_entries), - "Indexing policy of the Structural-to-Physical Address Mao Cache") + SetAssociative( + entry_size=1, + assoc=Parent.address_map_cache_assoc, + size=Parent.address_map_cache_entries, + ), + "Indexing policy of the Structural-to-Physical Address Mao Cache", + ) sp_address_map_cache_replacement_policy = Param.BaseReplacementPolicy( LRURP(), - "Replacement policy of the Structural-to-Physical Address Map Cache") + "Replacement policy of the Structural-to-Physical Address Map Cache", + ) + class SlimAccessMapPatternMatching(AccessMapPatternMatching): start_degree = 2 limit_stride = 4 + class SlimDeltaCorrelatingPredictionTables(DeltaCorrelatingPredictionTables): table_entries = "256" table_assoc = 256 deltas_per_entry = 9 + class SlimAMPMPrefetcher(QueuedPrefetcher): - type = 'SlimAMPMPrefetcher' - cxx_class = 'gem5::prefetch::SlimAMPM' + type = "SlimAMPMPrefetcher" + cxx_class = "gem5::prefetch::SlimAMPM" cxx_header = "mem/cache/prefetch/slim_ampm.hh" - ampm = Param.AccessMapPatternMatching(SlimAccessMapPatternMatching(), - "Access Map Pattern Matching object") + ampm = Param.AccessMapPatternMatching( + SlimAccessMapPatternMatching(), "Access Map Pattern Matching 
object" + ) dcpt = Param.DeltaCorrelatingPredictionTables( SlimDeltaCorrelatingPredictionTables(), - "Delta Correlating Prediction Tables object") + "Delta Correlating Prediction Tables object", + ) + class BOPPrefetcher(QueuedPrefetcher): type = "BOPPrefetcher" - cxx_class = 'gem5::prefetch::BOP' + cxx_class = "gem5::prefetch::BOP" cxx_header = "mem/cache/prefetch/bop.hh" score_max = Param.Unsigned(31, "Max. score to update the best offset") round_max = Param.Unsigned(100, "Max. round to update the best offset") bad_score = Param.Unsigned(10, "Score at which the HWP is disabled") rr_size = Param.Unsigned(64, "Number of entries of each RR bank") tag_bits = Param.Unsigned(12, "Bits used to store the tag") - offset_list_size = Param.Unsigned(46, - "Number of entries in the offsets list") - negative_offsets_enable = Param.Bool(True, - "Initialize the offsets list also with negative values \ + offset_list_size = Param.Unsigned( + 46, "Number of entries in the offsets list" + ) + negative_offsets_enable = Param.Bool( + True, + "Initialize the offsets list also with negative values \ (i.e. 
the table will have half of the entries with positive \ - offsets and the other half with negative ones)") + offsets and the other half with negative ones)", + ) delay_queue_enable = Param.Bool(True, "Enable the delay queue") - delay_queue_size = Param.Unsigned(15, - "Number of entries in the delay queue") - delay_queue_cycles = Param.Cycles(60, - "Cycles to delay a write in the left RR table from the delay \ - queue") + delay_queue_size = Param.Unsigned( + 15, "Number of entries in the delay queue" + ) + delay_queue_cycles = Param.Cycles( + 60, + "Cycles to delay a write in the left RR table from the delay \ + queue", + ) + class SBOOEPrefetcher(QueuedPrefetcher): - type = 'SBOOEPrefetcher' - cxx_class = 'gem5::prefetch::SBOOE' + type = "SBOOEPrefetcher" + cxx_class = "gem5::prefetch::SBOOE" cxx_header = "mem/cache/prefetch/sbooe.hh" latency_buffer_size = Param.Int(32, "Entries in the latency buffer") sequential_prefetchers = Param.Int(9, "Number of sequential prefetchers") sandbox_entries = Param.Int(1024, "Size of the address buffer") - score_threshold_pct = Param.Percent(25, "Min. threshold to issue a \ - prefetch. The value is the percentage of sandbox entries to use") + score_threshold_pct = Param.Percent( + 25, + "Min. threshold to issue a \ + prefetch. 
The value is the percentage of sandbox entries to use", + ) + class STeMSPrefetcher(QueuedPrefetcher): type = "STeMSPrefetcher" - cxx_class = 'gem5::prefetch::STeMS' + cxx_class = "gem5::prefetch::STeMS" cxx_header = "mem/cache/prefetch/spatio_temporal_memory_streaming.hh" - spatial_region_size = Param.MemorySize("2KiB", - "Memory covered by a hot zone") - active_generation_table_entries = Param.MemorySize("64", - "Number of entries in the active generation table") - active_generation_table_assoc = Param.Unsigned(64, - "Associativity of the active generation table") + spatial_region_size = Param.MemorySize( + "2KiB", "Memory covered by a hot zone" + ) + active_generation_table_entries = Param.MemorySize( + "64", "Number of entries in the active generation table" + ) + active_generation_table_assoc = Param.Unsigned( + 64, "Associativity of the active generation table" + ) active_generation_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, - assoc = Parent.active_generation_table_assoc, - size = Parent.active_generation_table_entries), - "Indexing policy of the active generation table") + SetAssociative( + entry_size=1, + assoc=Parent.active_generation_table_assoc, + size=Parent.active_generation_table_entries, + ), + "Indexing policy of the active generation table", + ) active_generation_table_replacement_policy = Param.BaseReplacementPolicy( - LRURP(), "Replacement policy of the active generation table") + LRURP(), "Replacement policy of the active generation table" + ) - pattern_sequence_table_entries = Param.MemorySize("16384", - "Number of entries in the pattern sequence table") - pattern_sequence_table_assoc = Param.Unsigned(16384, - "Associativity of the pattern sequence table") + pattern_sequence_table_entries = Param.MemorySize( + "16384", "Number of entries in the pattern sequence table" + ) + pattern_sequence_table_assoc = Param.Unsigned( + 16384, "Associativity of the pattern sequence table" + ) 
pattern_sequence_table_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, - assoc = Parent.pattern_sequence_table_assoc, - size = Parent.pattern_sequence_table_entries), - "Indexing policy of the pattern sequence table") + SetAssociative( + entry_size=1, + assoc=Parent.pattern_sequence_table_assoc, + size=Parent.pattern_sequence_table_entries, + ), + "Indexing policy of the pattern sequence table", + ) pattern_sequence_table_replacement_policy = Param.BaseReplacementPolicy( - LRURP(), "Replacement policy of the pattern sequence table") + LRURP(), "Replacement policy of the pattern sequence table" + ) + + region_miss_order_buffer_entries = Param.Unsigned( + 131072, "Number of entries of the Region Miss Order Buffer" + ) + add_duplicate_entries_to_rmob = Param.Bool( + True, "Add duplicate entries to RMOB" + ) + reconstruction_entries = Param.Unsigned( + 256, "Number of reconstruction entries" + ) - region_miss_order_buffer_entries = Param.Unsigned(131072, - "Number of entries of the Region Miss Order Buffer") - add_duplicate_entries_to_rmob = Param.Bool(True, - "Add duplicate entries to RMOB") - reconstruction_entries = Param.Unsigned(256, - "Number of reconstruction entries") class HWPProbeEventRetiredInsts(HWPProbeEvent): def register(self): if self.obj: for name in self.names: self.prefetcher.getCCObject().addEventProbeRetiredInsts( - self.obj.getCCObject(), name) + self.obj.getCCObject(), name + ) + class PIFPrefetcher(QueuedPrefetcher): - type = 'PIFPrefetcher' - cxx_class = 'gem5::prefetch::PIF' + type = "PIFPrefetcher" + cxx_class = "gem5::prefetch::PIF" cxx_header = "mem/cache/prefetch/pif.hh" - cxx_exports = [ - PyBindMethod("addEventProbeRetiredInsts"), - ] + cxx_exports = [PyBindMethod("addEventProbeRetiredInsts")] - prec_spatial_region_bits = Param.Unsigned(2, - "Number of preceding addresses in the spatial region") - succ_spatial_region_bits = Param.Unsigned(8, - "Number of subsequent addresses in the spatial region") + 
prec_spatial_region_bits = Param.Unsigned( + 2, "Number of preceding addresses in the spatial region" + ) + succ_spatial_region_bits = Param.Unsigned( + 8, "Number of subsequent addresses in the spatial region" + ) compactor_entries = Param.Unsigned(2, "Entries in the temp. compactor") stream_address_buffer_entries = Param.Unsigned(7, "Entries in the SAB") history_buffer_size = Param.Unsigned(16, "Entries in the history buffer") - index_entries = Param.MemorySize("64", - "Number of entries in the index") - index_assoc = Param.Unsigned(64, - "Associativity of the index") + index_entries = Param.MemorySize("64", "Number of entries in the index") + index_assoc = Param.Unsigned(64, "Associativity of the index") index_indexing_policy = Param.BaseIndexingPolicy( - SetAssociative(entry_size = 1, assoc = Parent.index_assoc, - size = Parent.index_entries), - "Indexing policy of the index") - index_replacement_policy = Param.BaseReplacementPolicy(LRURP(), - "Replacement policy of the index") + SetAssociative( + entry_size=1, assoc=Parent.index_assoc, size=Parent.index_entries + ), + "Indexing policy of the index", + ) + index_replacement_policy = Param.BaseReplacementPolicy( + LRURP(), "Replacement policy of the index" + ) def listenFromProbeRetiredInstructions(self, simObj): if not isinstance(simObj, SimObject): raise TypeError("argument must be of SimObject type") - self.addEvent(HWPProbeEventRetiredInsts(self, simObj,"RetiredInstsPC")) + self.addEvent( + HWPProbeEventRetiredInsts(self, simObj, "RetiredInstsPC") + ) diff --git a/src/mem/cache/replacement_policies/ReplacementPolicies.py b/src/mem/cache/replacement_policies/ReplacementPolicies.py index 3676a39126..47eaabf5f5 100644 --- a/src/mem/cache/replacement_policies/ReplacementPolicies.py +++ b/src/mem/cache/replacement_policies/ReplacementPolicies.py @@ -28,78 +28,97 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class BaseReplacementPolicy(SimObject): - type = 
'BaseReplacementPolicy' + type = "BaseReplacementPolicy" abstract = True - cxx_class = 'gem5::replacement_policy::Base' + cxx_class = "gem5::replacement_policy::Base" cxx_header = "mem/cache/replacement_policies/base.hh" + class DuelingRP(BaseReplacementPolicy): - type = 'DuelingRP' - cxx_class = 'gem5::replacement_policy::Dueling' + type = "DuelingRP" + cxx_class = "gem5::replacement_policy::Dueling" cxx_header = "mem/cache/replacement_policies/dueling_rp.hh" constituency_size = Param.Unsigned( - "The size of a region containing one sample") + "The size of a region containing one sample" + ) team_size = Param.Unsigned( - "Number of entries in a sampling set that belong to a team") + "Number of entries in a sampling set that belong to a team" + ) replacement_policy_a = Param.BaseReplacementPolicy( - "Sub-replacement policy A") + "Sub-replacement policy A" + ) replacement_policy_b = Param.BaseReplacementPolicy( - "Sub-replacement policy B") + "Sub-replacement policy B" + ) + class FIFORP(BaseReplacementPolicy): - type = 'FIFORP' - cxx_class = 'gem5::replacement_policy::FIFO' + type = "FIFORP" + cxx_class = "gem5::replacement_policy::FIFO" cxx_header = "mem/cache/replacement_policies/fifo_rp.hh" + class SecondChanceRP(FIFORP): - type = 'SecondChanceRP' - cxx_class = 'gem5::replacement_policy::SecondChance' + type = "SecondChanceRP" + cxx_class = "gem5::replacement_policy::SecondChance" cxx_header = "mem/cache/replacement_policies/second_chance_rp.hh" + class LFURP(BaseReplacementPolicy): - type = 'LFURP' - cxx_class = 'gem5::replacement_policy::LFU' + type = "LFURP" + cxx_class = "gem5::replacement_policy::LFU" cxx_header = "mem/cache/replacement_policies/lfu_rp.hh" + class LRURP(BaseReplacementPolicy): - type = 'LRURP' - cxx_class = 'gem5::replacement_policy::LRU' + type = "LRURP" + cxx_class = "gem5::replacement_policy::LRU" cxx_header = "mem/cache/replacement_policies/lru_rp.hh" + class BIPRP(LRURP): - type = 'BIPRP' - cxx_class = 'gem5::replacement_policy::BIP' + 
type = "BIPRP" + cxx_class = "gem5::replacement_policy::BIP" cxx_header = "mem/cache/replacement_policies/bip_rp.hh" btp = Param.Percent(3, "Percentage of blocks to be inserted as MRU") + class LIPRP(BIPRP): btp = 0 + class MRURP(BaseReplacementPolicy): - type = 'MRURP' - cxx_class = 'gem5::replacement_policy::MRU' + type = "MRURP" + cxx_class = "gem5::replacement_policy::MRU" cxx_header = "mem/cache/replacement_policies/mru_rp.hh" + class RandomRP(BaseReplacementPolicy): - type = 'RandomRP' - cxx_class = 'gem5::replacement_policy::Random' + type = "RandomRP" + cxx_class = "gem5::replacement_policy::Random" cxx_header = "mem/cache/replacement_policies/random_rp.hh" + class BRRIPRP(BaseReplacementPolicy): - type = 'BRRIPRP' - cxx_class = 'gem5::replacement_policy::BRRIP' + type = "BRRIPRP" + cxx_class = "gem5::replacement_policy::BRRIP" cxx_header = "mem/cache/replacement_policies/brrip_rp.hh" num_bits = Param.Int(2, "Number of bits per RRPV") - hit_priority = Param.Bool(False, - "Prioritize evicting blocks that havent had a hit recently") - btp = Param.Percent(3, - "Percentage of blocks to be inserted with long RRPV") + hit_priority = Param.Bool( + False, "Prioritize evicting blocks that havent had a hit recently" + ) + btp = Param.Percent( + 3, "Percentage of blocks to be inserted with long RRPV" + ) + class RRIPRP(BRRIPRP): btp = 100 + class DRRIPRP(DuelingRP): # The constituency_size and the team_size must be manually provided, where: # constituency_size = num_cache_entries / @@ -110,42 +129,49 @@ class DRRIPRP(DuelingRP): replacement_policy_a = BRRIPRP() replacement_policy_b = RRIPRP() + class NRURP(BRRIPRP): btp = 100 num_bits = 1 + class SHiPRP(BRRIPRP): - type = 'SHiPRP' + type = "SHiPRP" abstract = True - cxx_class = 'gem5::replacement_policy::SHiP' + cxx_class = "gem5::replacement_policy::SHiP" cxx_header = "mem/cache/replacement_policies/ship_rp.hh" shct_size = Param.Unsigned(16384, "Number of SHCT entries") # By default any value greater than 0 is enough 
to change insertion policy - insertion_threshold = Param.Percent(1, - "Percentage at which an entry changes insertion policy") + insertion_threshold = Param.Percent( + 1, "Percentage at which an entry changes insertion policy" + ) # Always make hits mark entries as last to be evicted hit_priority = True # Let the predictor decide when to change insertion policy btp = 0 + class SHiPMemRP(SHiPRP): - type = 'SHiPMemRP' - cxx_class = 'gem5::replacement_policy::SHiPMem' + type = "SHiPMemRP" + cxx_class = "gem5::replacement_policy::SHiPMem" cxx_header = "mem/cache/replacement_policies/ship_rp.hh" + class SHiPPCRP(SHiPRP): - type = 'SHiPPCRP' - cxx_class = 'gem5::replacement_policy::SHiPPC' + type = "SHiPPCRP" + cxx_class = "gem5::replacement_policy::SHiPPC" cxx_header = "mem/cache/replacement_policies/ship_rp.hh" + class TreePLRURP(BaseReplacementPolicy): - type = 'TreePLRURP' - cxx_class = 'gem5::replacement_policy::TreePLRU' + type = "TreePLRURP" + cxx_class = "gem5::replacement_policy::TreePLRU" cxx_header = "mem/cache/replacement_policies/tree_plru_rp.hh" num_leaves = Param.Int(Parent.assoc, "Number of leaves in each tree") + class WeightedLRURP(LRURP): type = "WeightedLRURP" - cxx_class = 'gem5::replacement_policy::WeightedLRU' + cxx_class = "gem5::replacement_policy::WeightedLRU" cxx_header = "mem/cache/replacement_policies/weighted_lru_rp.hh" diff --git a/src/mem/cache/tags/Tags.py b/src/mem/cache/tags/Tags.py index 0bc11bb631..4e7f632bfb 100644 --- a/src/mem/cache/tags/Tags.py +++ b/src/mem/cache/tags/Tags.py @@ -38,11 +38,12 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject from m5.objects.IndexingPolicies import * + class BaseTags(ClockedObject): - type = 'BaseTags' + type = "BaseTags" abstract = True cxx_header = "mem/cache/tags/base.hh" - cxx_class = 'gem5::BaseTags' + cxx_class = "gem5::BaseTags" # Get system to which it belongs system = Param.System(Parent.any, "System we belong to") @@ -54,62 +55,75 @@ class 
BaseTags(ClockedObject): block_size = Param.Int(Parent.cache_line_size, "block size in bytes") # Get the tag lookup latency from the parent (cache) - tag_latency = Param.Cycles(Parent.tag_latency, - "The tag lookup latency for this cache") + tag_latency = Param.Cycles( + Parent.tag_latency, "The tag lookup latency for this cache" + ) # Get the warmup percentage from the parent (cache) - warmup_percentage = Param.Percent(Parent.warmup_percentage, - "Percentage of tags to be touched to warm up the cache") + warmup_percentage = Param.Percent( + Parent.warmup_percentage, + "Percentage of tags to be touched to warm up the cache", + ) - sequential_access = Param.Bool(Parent.sequential_access, - "Whether to access tags and data sequentially") + sequential_access = Param.Bool( + Parent.sequential_access, + "Whether to access tags and data sequentially", + ) # Get indexing policy - indexing_policy = Param.BaseIndexingPolicy(SetAssociative(), - "Indexing policy") + indexing_policy = Param.BaseIndexingPolicy( + SetAssociative(), "Indexing policy" + ) # Set the indexing entry size as the block size - entry_size = Param.Int(Parent.cache_line_size, - "Indexing entry size in bytes") + entry_size = Param.Int( + Parent.cache_line_size, "Indexing entry size in bytes" + ) + class BaseSetAssoc(BaseTags): - type = 'BaseSetAssoc' + type = "BaseSetAssoc" cxx_header = "mem/cache/tags/base_set_assoc.hh" - cxx_class = 'gem5::BaseSetAssoc' + cxx_class = "gem5::BaseSetAssoc" # Get the cache associativity assoc = Param.Int(Parent.assoc, "associativity") # Get replacement policy from the parent (cache) replacement_policy = Param.BaseReplacementPolicy( - Parent.replacement_policy, "Replacement policy") + Parent.replacement_policy, "Replacement policy" + ) + class SectorTags(BaseTags): - type = 'SectorTags' + type = "SectorTags" cxx_header = "mem/cache/tags/sector_tags.hh" - cxx_class = 'gem5::SectorTags' + cxx_class = "gem5::SectorTags" # Get the cache associativity assoc = 
Param.Int(Parent.assoc, "associativity") # Number of sub-sectors (data blocks) per sector - num_blocks_per_sector = Param.Int(1, "Number of sub-sectors per sector"); + num_blocks_per_sector = Param.Int(1, "Number of sub-sectors per sector") # The indexing entry now is a sector block entry_size = Parent.cache_line_size * Self.num_blocks_per_sector # Get replacement policy from the parent (cache) replacement_policy = Param.BaseReplacementPolicy( - Parent.replacement_policy, "Replacement policy") + Parent.replacement_policy, "Replacement policy" + ) + class CompressedTags(SectorTags): - type = 'CompressedTags' + type = "CompressedTags" cxx_header = "mem/cache/tags/compressed_tags.hh" - cxx_class = 'gem5::CompressedTags' + cxx_class = "gem5::CompressedTags" # Maximum number of compressed blocks per tag - max_compression_ratio = Param.Int(2, - "Maximum number of compressed blocks per tag.") + max_compression_ratio = Param.Int( + 2, "Maximum number of compressed blocks per tag." + ) # We simulate superblock as sector blocks num_blocks_per_sector = Self.max_compression_ratio @@ -118,13 +132,15 @@ class CompressedTags(SectorTags): # the cache size by the compression ratio size = Parent.size * Self.max_compression_ratio -class FALRU(BaseTags): - type = 'FALRU' - cxx_header = "mem/cache/tags/fa_lru.hh" - cxx_class = 'gem5::FALRU' - min_tracked_cache_size = Param.MemorySize("128KiB", "Minimum cache size" - " for which we track statistics") +class FALRU(BaseTags): + type = "FALRU" + cxx_header = "mem/cache/tags/fa_lru.hh" + cxx_class = "gem5::FALRU" + + min_tracked_cache_size = Param.MemorySize( + "128KiB", "Minimum cache size" " for which we track statistics" + ) # This tag uses its own embedded indexing indexing_policy = NULL diff --git a/src/mem/cache/tags/indexing_policies/IndexingPolicies.py b/src/mem/cache/tags/indexing_policies/IndexingPolicies.py index c25a7627ea..83bc15cb5c 100644 --- a/src/mem/cache/tags/indexing_policies/IndexingPolicies.py +++ 
b/src/mem/cache/tags/indexing_policies/IndexingPolicies.py @@ -28,10 +28,11 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class BaseIndexingPolicy(SimObject): - type = 'BaseIndexingPolicy' + type = "BaseIndexingPolicy" abstract = True - cxx_class = 'gem5::BaseIndexingPolicy' + cxx_class = "gem5::BaseIndexingPolicy" cxx_header = "mem/cache/tags/indexing_policies/base.hh" # Get the size from the parent (cache) @@ -43,12 +44,14 @@ class BaseIndexingPolicy(SimObject): # Get the associativity assoc = Param.Int(Parent.assoc, "associativity") + class SetAssociative(BaseIndexingPolicy): - type = 'SetAssociative' - cxx_class = 'gem5::SetAssociative' + type = "SetAssociative" + cxx_class = "gem5::SetAssociative" cxx_header = "mem/cache/tags/indexing_policies/set_associative.hh" + class SkewedAssociative(BaseIndexingPolicy): - type = 'SkewedAssociative' - cxx_class = 'gem5::SkewedAssociative' + type = "SkewedAssociative" + cxx_class = "gem5::SkewedAssociative" cxx_header = "mem/cache/tags/indexing_policies/skewed_associative.hh" diff --git a/src/mem/dram_interface.cc b/src/mem/dram_interface.cc index 159e0bf0c9..d745fe5a29 100644 --- a/src/mem/dram_interface.cc +++ b/src/mem/dram_interface.cc @@ -1381,7 +1381,7 @@ DRAMInterface::Rank::processRefreshEvent() // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled // should have outstanding precharge or read response event assert(prechargeEvent.scheduled() || - dram.ctrl->respondEventScheduled()); + dram.ctrl->respondEventScheduled(dram.pseudoChannel)); // will start refresh when pwrState transitions to IDLE } diff --git a/src/mem/hbm_ctrl.hh b/src/mem/hbm_ctrl.hh index 9501ff37a0..c9045f0ae7 100644 --- a/src/mem/hbm_ctrl.hh +++ b/src/mem/hbm_ctrl.hh @@ -204,13 +204,18 @@ class HBMCtrl : public MemCtrl public: /** - * Is there a respondEvent for pseudo channel 1 scheduled? + * Is there a respondEvent scheduled? 
* * @return true if event is scheduled */ - bool respondEventPC1Scheduled() const + bool respondEventScheduled(uint8_t pseudo_channel) const override { - return respondEventPC1.scheduled(); + if (pseudo_channel == 0) { + return MemCtrl::respondEventScheduled(pseudo_channel); + } else { + assert(pseudo_channel == 1); + return respondEventPC1.scheduled(); + } } /** diff --git a/src/mem/mem_ctrl.cc b/src/mem/mem_ctrl.cc index 3baa1b0865..c65d68a5a7 100644 --- a/src/mem/mem_ctrl.cc +++ b/src/mem/mem_ctrl.cc @@ -92,6 +92,9 @@ MemCtrl::MemCtrl(const MemCtrlParams &p) : fatal("Write buffer low threshold %d must be smaller than the " "high threshold %d\n", p.write_low_thresh_perc, p.write_high_thresh_perc); + if (p.disable_sanity_check) { + port.disableSanityCheck(); + } } void @@ -1491,5 +1494,11 @@ MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt) return ctrl.recvTimingReq(pkt); } +void +MemCtrl::MemoryPort::disableSanityCheck() +{ + queue.disableSanityCheck(); +} + } // namespace memory } // namespace gem5 diff --git a/src/mem/mem_ctrl.hh b/src/mem/mem_ctrl.hh index 3b623fb6fc..fe5d478280 100644 --- a/src/mem/mem_ctrl.hh +++ b/src/mem/mem_ctrl.hh @@ -258,6 +258,7 @@ class MemCtrl : public qos::MemCtrl public: MemoryPort(const std::string& name, MemCtrl& _ctrl); + void disableSanityCheck(); protected: @@ -723,7 +724,11 @@ class MemCtrl : public qos::MemCtrl * * @return true if event is scheduled */ - bool respondEventScheduled() const { return respondEvent.scheduled(); } + virtual bool respondEventScheduled(uint8_t pseudo_channel = 0) const + { + assert(pseudo_channel == 0); + return respondEvent.scheduled(); + } /** * Is there a read/write burst Event scheduled? 
diff --git a/src/mem/packet.cc b/src/mem/packet.cc index 3cd1bb9a29..31dc330cab 100644 --- a/src/mem/packet.cc +++ b/src/mem/packet.cc @@ -214,6 +214,10 @@ MemCmd::commandInfo[] = { {IsResponse, IsError}, InvalidCmd, "InvalidDestError" }, /* BadAddressError -- memory address invalid */ { {IsResponse, IsError}, InvalidCmd, "BadAddressError" }, + /* ReadError -- packet dest unable to fulfill read command */ + { {IsRead, IsResponse, IsError}, InvalidCmd, "ReadError" }, + /* WriteError -- packet dest unable to fulfill write command */ + { {IsWrite, IsResponse, IsError}, InvalidCmd, "WriteError" }, /* FunctionalReadError */ { {IsRead, IsResponse, IsError}, InvalidCmd, "FunctionalReadError" }, /* FunctionalWriteError */ diff --git a/src/mem/packet.hh b/src/mem/packet.hh index 7d3263412c..9238dbec00 100644 --- a/src/mem/packet.hh +++ b/src/mem/packet.hh @@ -133,6 +133,8 @@ class MemCmd // compatibility InvalidDestError, // packet dest field invalid BadAddressError, // memory address invalid + ReadError, // packet dest unable to fulfill read command + WriteError, // packet dest unable to fulfill write command FunctionalReadError, // unable to fulfill functional read FunctionalWriteError, // unable to fulfill functional write // Fake simulator-only commands @@ -785,6 +787,19 @@ class Packet : public Printable cmd = MemCmd::BadAddressError; } + // Command error conditions. The request is sent to target but the target + // cannot make it. 
+ void + setBadCommand() + { + assert(isResponse()); + if (isWrite()) { + cmd = MemCmd::WriteError; + } else { + cmd = MemCmd::ReadError; + } + } + void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; } Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; } diff --git a/src/mem/port.cc b/src/mem/port.cc index ac586c2bb4..00f7ce6efa 100644 --- a/src/mem/port.cc +++ b/src/mem/port.cc @@ -45,6 +45,7 @@ #include "mem/port.hh" #include "base/trace.hh" +#include "debug/ResponsePort.hh" #include "sim/sim_object.hh" namespace gem5 @@ -197,7 +198,8 @@ Tick ResponsePort::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) { if (!defaultBackdoorWarned) { - warn("Port %s doesn't support requesting a back door.", name()); + DPRINTF(ResponsePort, + "Port %s doesn't support requesting a back door.", name()); defaultBackdoorWarned = true; } return recvAtomic(pkt); diff --git a/src/mem/port_terminator.cc b/src/mem/port_terminator.cc index 57263b4aa2..725acdb2d8 100644 --- a/src/mem/port_terminator.cc +++ b/src/mem/port_terminator.cc @@ -54,4 +54,4 @@ PortTerminator::getPort(const std::string &if_name, PortID idx) } } -} \ No newline at end of file +} diff --git a/src/mem/probes/BaseMemProbe.py b/src/mem/probes/BaseMemProbe.py index 4ed022b8a3..5faef2c974 100644 --- a/src/mem/probes/BaseMemProbe.py +++ b/src/mem/probes/BaseMemProbe.py @@ -37,12 +37,14 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class BaseMemProbe(SimObject): - type = 'BaseMemProbe' + type = "BaseMemProbe" abstract = True cxx_header = "mem/probes/base.hh" - cxx_class = 'gem5::BaseMemProbe' + cxx_class = "gem5::BaseMemProbe" - manager = VectorParam.SimObject(Parent.any, - "Probe manager(s) to instrument") + manager = VectorParam.SimObject( + Parent.any, "Probe manager(s) to instrument" + ) probe_name = Param.String("PktRequest", "Memory request probe to use") diff --git a/src/mem/probes/MemFootprintProbe.py 
b/src/mem/probes/MemFootprintProbe.py index 551f808601..707a8b688f 100644 --- a/src/mem/probes/MemFootprintProbe.py +++ b/src/mem/probes/MemFootprintProbe.py @@ -39,11 +39,13 @@ from m5.proxy import * from m5.objects.BaseMemProbe import BaseMemProbe + class MemFootprintProbe(BaseMemProbe): type = "MemFootprintProbe" cxx_header = "mem/probes/mem_footprint.hh" - cxx_class = 'gem5::MemFootprintProbe' + cxx_class = "gem5::MemFootprintProbe" - system = Param.System(Parent.any, - "System pointer to get cache line and mem size") + system = Param.System( + Parent.any, "System pointer to get cache line and mem size" + ) page_size = Param.Unsigned(4096, "Page size for page-level footprint") diff --git a/src/mem/probes/MemTraceProbe.py b/src/mem/probes/MemTraceProbe.py index a5254a1188..d848e9ed61 100644 --- a/src/mem/probes/MemTraceProbe.py +++ b/src/mem/probes/MemTraceProbe.py @@ -37,10 +37,11 @@ from m5.params import * from m5.proxy import * from m5.objects.BaseMemProbe import BaseMemProbe + class MemTraceProbe(BaseMemProbe): - type = 'MemTraceProbe' + type = "MemTraceProbe" cxx_header = "mem/probes/mem_trace.hh" - cxx_class = 'gem5::MemTraceProbe' + cxx_class = "gem5::MemTraceProbe" # Boolean to compress the trace or not. 
trace_compress = Param.Bool(True, "Enable trace compression") diff --git a/src/mem/probes/StackDistProbe.py b/src/mem/probes/StackDistProbe.py index 132c9faa07..2a9550bea5 100644 --- a/src/mem/probes/StackDistProbe.py +++ b/src/mem/probes/StackDistProbe.py @@ -37,26 +37,31 @@ from m5.params import * from m5.proxy import * from m5.objects.BaseMemProbe import BaseMemProbe + class StackDistProbe(BaseMemProbe): - type = 'StackDistProbe' + type = "StackDistProbe" cxx_header = "mem/probes/stack_dist.hh" - cxx_class = 'gem5::StackDistProbe' + cxx_class = "gem5::StackDistProbe" - system = Param.System(Parent.any, - "System to use when determining system cache " - "line size") + system = Param.System( + Parent.any, "System to use when determining system cache " "line size" + ) - line_size = Param.Unsigned(Parent.cache_line_size, - "Cache line size in bytes (must be larger or " - "equal to the system's line size)") + line_size = Param.Unsigned( + Parent.cache_line_size, + "Cache line size in bytes (must be larger or " + "equal to the system's line size)", + ) # enable verification stack - verify = Param.Bool(False, "Verify behaviuor with reference implementation") + verify = Param.Bool( + False, "Verify behaviuor with reference implementation" + ) # linear histogram bins and enable/disable - linear_hist_bins = Param.Unsigned('16', "Bins in linear histograms") + linear_hist_bins = Param.Unsigned("16", "Bins in linear histograms") disable_linear_hists = Param.Bool(False, "Disable linear histograms") # logarithmic histogram bins and enable/disable - log_hist_bins = Param.Unsigned('32', "Bins in logarithmic histograms") + log_hist_bins = Param.Unsigned("32", "Bins in logarithmic histograms") disable_log_hists = Param.Bool(False, "Disable logarithmic histograms") diff --git a/src/mem/qos/QoSMemCtrl.py b/src/mem/qos/QoSMemCtrl.py index 842b62b35a..3028b439d5 100644 --- a/src/mem/qos/QoSMemCtrl.py +++ b/src/mem/qos/QoSMemCtrl.py @@ -39,12 +39,14 @@ from m5.objects.ClockedObject 
import ClockedObject from m5.objects.QoSTurnaround import * # QoS Queue Selection policy used to select packets among same-QoS queues -class QoSQPolicy(Enum): vals = ["fifo", "lifo", "lrg"] +class QoSQPolicy(Enum): + vals = ["fifo", "lifo", "lrg"] + class QoSMemCtrl(ClockedObject): - type = 'QoSMemCtrl' + type = "QoSMemCtrl" cxx_header = "mem/qos/mem_ctrl.hh" - cxx_class = 'gem5::memory::qos::MemCtrl' + cxx_class = "gem5::memory::qos::MemCtrl" abstract = True system = Param.System(Parent.any, "System that the controller belongs to.") @@ -55,30 +57,38 @@ class QoSMemCtrl(ClockedObject): qos_priorities = Param.Unsigned(1, "QoS priorities") # QoS scheduler policy: tags request with QoS priority value - qos_policy = Param.QoSPolicy(NULL, - "Memory Controller Requests QoS arbitration policy") + qos_policy = Param.QoSPolicy( + NULL, "Memory Controller Requests QoS arbitration policy" + ) # Select QoS driven turnaround policy # (direction switch triggered by highest priority buffer content) - qos_turnaround_policy = Param.QoSTurnaroundPolicy(NULL, - "Selects QoS driven turnaround policy") + qos_turnaround_policy = Param.QoSTurnaroundPolicy( + NULL, "Selects QoS driven turnaround policy" + ) # QoS Queue Select policy: selects packets among same priority level # (only supported in QoSMemSinkCtrl) - qos_q_policy = Param.QoSQPolicy('fifo', - "Memory Controller Requests same-QoS selection policy") + qos_q_policy = Param.QoSQPolicy( + "fifo", "Memory Controller Requests same-QoS selection policy" + ) # flag to select QoS syncronised scheduling # (calls the scheduler on all requestors at every packet arrival) - qos_syncro_scheduler = Param.Bool(False, - "Enables QoS syncronized scheduling") + qos_syncro_scheduler = Param.Bool( + False, "Enables QoS syncronized scheduling" + ) # flag to enable QoS priority escalation - qos_priority_escalation = Param.Bool(False, - "Enables QoS priority escalation") + qos_priority_escalation = Param.Bool( + False, "Enables QoS priority escalation" 
+ ) # Requestor ID to be mapped to service parameters in QoS schedulers - qos_requestors = VectorParam.String(['']* 16, - "Requestor Names to be mapped to service parameters in QoS scheduler") - qos_masters = DeprecatedParam(qos_requestors, - '`qos_master` is now called `qos_requestors`') + qos_requestors = VectorParam.String( + [""] * 16, + "Requestor Names to be mapped to service parameters in QoS scheduler", + ) + qos_masters = DeprecatedParam( + qos_requestors, "`qos_master` is now called `qos_requestors`" + ) diff --git a/src/mem/qos/QoSMemSinkCtrl.py b/src/mem/qos/QoSMemSinkCtrl.py index 486e74b677..dac0fb5be6 100644 --- a/src/mem/qos/QoSMemSinkCtrl.py +++ b/src/mem/qos/QoSMemSinkCtrl.py @@ -39,15 +39,16 @@ from m5.params import * from m5.objects.QoSMemCtrl import * from m5.objects.QoSMemSinkInterface import * + class QoSMemSinkCtrl(QoSMemCtrl): - type = 'QoSMemSinkCtrl' + type = "QoSMemSinkCtrl" cxx_header = "mem/qos/mem_sink.hh" - cxx_class = 'gem5::memory::qos::MemSinkCtrl' + cxx_class = "gem5::memory::qos::MemSinkCtrl" port = ResponsePort("Response ports") - - interface = Param.QoSMemSinkInterface(QoSMemSinkInterface(), - "Interface to memory") + interface = Param.QoSMemSinkInterface( + QoSMemSinkInterface(), "Interface to memory" + ) # the basic configuration of the controller architecture, note # that each entry corresponds to a burst for the specific DRAM diff --git a/src/mem/qos/QoSMemSinkInterface.py b/src/mem/qos/QoSMemSinkInterface.py index 2544df82c4..34ad5d5d14 100644 --- a/src/mem/qos/QoSMemSinkInterface.py +++ b/src/mem/qos/QoSMemSinkInterface.py @@ -35,10 +35,11 @@ from m5.objects.AbstractMemory import AbstractMemory + class QoSMemSinkInterface(AbstractMemory): - type = 'QoSMemSinkInterface' + type = "QoSMemSinkInterface" cxx_header = "mem/qos/mem_sink.hh" - cxx_class = 'gem5::memory::qos::MemSinkInterface' + cxx_class = "gem5::memory::qos::MemSinkInterface" def controller(self): """ diff --git a/src/mem/qos/QoSPolicy.py 
b/src/mem/qos/QoSPolicy.py index 99a3f2faae..2dfc974f43 100644 --- a/src/mem/qos/QoSPolicy.py +++ b/src/mem/qos/QoSPolicy.py @@ -38,19 +38,20 @@ from m5.params import * # QoS scheduler policy used to serve incoming transaction class QoSPolicy(SimObject): - type = 'QoSPolicy' + type = "QoSPolicy" abstract = True cxx_header = "mem/qos/policy.hh" - cxx_class = 'gem5::memory::qos::Policy' + cxx_class = "gem5::memory::qos::Policy" + class QoSFixedPriorityPolicy(QoSPolicy): - type = 'QoSFixedPriorityPolicy' + type = "QoSFixedPriorityPolicy" cxx_header = "mem/qos/policy_fixed_prio.hh" - cxx_class = 'gem5::memory::qos::FixedPriorityPolicy' + cxx_class = "gem5::memory::qos::FixedPriorityPolicy" cxx_exports = [ - PyBindMethod('initRequestorName'), - PyBindMethod('initRequestorObj'), + PyBindMethod("initRequestorName"), + PyBindMethod("initRequestorObj"), ] _requestor_priorities = None @@ -62,15 +63,19 @@ class QoSFixedPriorityPolicy(QoSPolicy): self._requestor_priorities.append([request_port, priority]) def setMasterPriority(self, request_port, priority): - warn('QosFixedPriority.setMasterPriority is deprecated in favor of ' - 'setRequestorPriority. See src/mem/qos/QoSPolicy.py for more ' - 'information') + warn( + "QosFixedPriority.setMasterPriority is deprecated in favor of " + "setRequestorPriority. 
See src/mem/qos/QoSPolicy.py for more " + "information" + ) self.setRequestorPriority(request_port, priority) def init(self): if not self._requestor_priorities: - print("Error," - "use setRequestorPriority to init requestors/priorities\n"); + print( + "Error," + "use setRequestorPriority to init requestors/priorities\n" + ) exit(1) else: for prio in self._requestor_priorities: @@ -78,23 +83,27 @@ class QoSFixedPriorityPolicy(QoSPolicy): priority = prio[1] if isinstance(request_port, str): self.getCCObject().initRequestorName( - request_port, int(priority)) + request_port, int(priority) + ) else: self.getCCObject().initRequestorObj( - request_port.getCCObject(), priority) + request_port.getCCObject(), priority + ) # default fixed priority value for non-listed Requestors - qos_fixed_prio_default_prio = Param.UInt8(0, - "Default priority for non-listed Requestors") + qos_fixed_prio_default_prio = Param.UInt8( + 0, "Default priority for non-listed Requestors" + ) + class QoSPropFairPolicy(QoSPolicy): - type = 'QoSPropFairPolicy' + type = "QoSPropFairPolicy" cxx_header = "mem/qos/policy_pf.hh" - cxx_class = 'gem5::memory::qos::PropFairPolicy' + cxx_class = "gem5::memory::qos::PropFairPolicy" cxx_exports = [ - PyBindMethod('initRequestorName'), - PyBindMethod('initRequestorObj'), + PyBindMethod("initRequestorName"), + PyBindMethod("initRequestorObj"), ] _requestor_scores = None @@ -107,7 +116,7 @@ class QoSPropFairPolicy(QoSPolicy): def init(self): if not self._requestor_scores: - print("Error, use setInitialScore to init requestors/scores\n"); + print("Error, use setInitialScore to init requestors/scores\n") exit(1) else: for prio in self._requestor_scores: @@ -115,9 +124,11 @@ class QoSPropFairPolicy(QoSPolicy): score = prio[1] if isinstance(request_port, str): self.getCCObject().initRequestorName( - request_port, float(score)) + request_port, float(score) + ) else: self.getCCObject().initRequestorObj( - request_port.getCCObject(), float(score)) + 
request_port.getCCObject(), float(score) + ) weight = Param.Float(0.5, "Pf score weight") diff --git a/src/mem/qos/QoSTurnaround.py b/src/mem/qos/QoSTurnaround.py index 7a8d1e3e6b..f356635a57 100644 --- a/src/mem/qos/QoSTurnaround.py +++ b/src/mem/qos/QoSTurnaround.py @@ -35,14 +35,15 @@ from m5.SimObject import SimObject -#QoS Turnaround policy used to select bus state - READ or WRITE +# QoS Turnaround policy used to select bus state - READ or WRITE class QoSTurnaroundPolicy(SimObject): - type = 'QoSTurnaroundPolicy' + type = "QoSTurnaroundPolicy" cxx_header = "mem/qos/turnaround_policy.hh" - cxx_class = 'gem5::memory::qos::TurnaroundPolicy' + cxx_class = "gem5::memory::qos::TurnaroundPolicy" abstract = True + class QoSTurnaroundPolicyIdeal(QoSTurnaroundPolicy): - type = 'QoSTurnaroundPolicyIdeal' + type = "QoSTurnaroundPolicyIdeal" cxx_header = "mem/qos/turnaround_policy_ideal.hh" - cxx_class = 'gem5::memory::qos::TurnaroundPolicyIdeal' + cxx_class = "gem5::memory::qos::TurnaroundPolicyIdeal" diff --git a/src/mem/ruby/SConscript b/src/mem/ruby/SConscript index 5062efd64d..1e386f922d 100644 --- a/src/mem/ruby/SConscript +++ b/src/mem/ruby/SConscript @@ -68,11 +68,12 @@ DebugFlag('RubyTester') DebugFlag('RubyStats') DebugFlag('RubyResourceStalls') DebugFlag('RubyProtocol') +DebugFlag('RubyHitMiss') CompoundFlag('Ruby', [ 'RubyQueue', 'RubyNetwork', 'RubyTester', 'RubyGenerated', 'RubySlicc', 'RubySystem', 'RubyCache', 'RubyDma', 'RubyPort', 'RubySequencer', 'RubyCacheTrace', - 'RubyPrefetcher', 'RubyProtocol']) + 'RubyPrefetcher', 'RubyProtocol', 'RubyHitMiss']) # # Link includes diff --git a/src/mem/ruby/common/WriteMask.hh b/src/mem/ruby/common/WriteMask.hh index bb8c337bf8..2de21da79b 100644 --- a/src/mem/ruby/common/WriteMask.hh +++ b/src/mem/ruby/common/WriteMask.hh @@ -170,7 +170,7 @@ class WriteMask { assert(mSize == writeMask.mSize); for (int i = 0; i < mSize; i++) { - mMask[i] = (mMask.at(i)) & (writeMask.mMask.at(i)); + mMask[i] = (mMask.at(i)) && 
(writeMask.mMask.at(i)); } if (writeMask.mAtomic) { @@ -184,7 +184,7 @@ class WriteMask { assert(mSize == writeMask.mSize); for (int i = 0; i < mSize; i++) { - mMask[i] = (mMask.at(i)) | (writeMask.mMask.at(i)); + mMask[i] = (mMask.at(i)) || (writeMask.mMask.at(i)); } if (writeMask.mAtomic) { diff --git a/src/mem/ruby/network/BasicLink.py b/src/mem/ruby/network/BasicLink.py index 5c5fcca336..a275d9bd85 100644 --- a/src/mem/ruby/network/BasicLink.py +++ b/src/mem/ruby/network/BasicLink.py @@ -27,10 +27,11 @@ from m5.params import * from m5.SimObject import SimObject + class BasicLink(SimObject): - type = 'BasicLink' + type = "BasicLink" cxx_header = "mem/ruby/network/BasicLink.hh" - cxx_class = 'gem5::ruby::BasicLink' + cxx_class = "gem5::ruby::BasicLink" link_id = Param.Int("ID in relation to other links") latency = Param.Cycles(1, "latency") @@ -45,19 +46,21 @@ class BasicLink(SimObject): weight = Param.Int(1, "used to restrict routing in shortest path analysis") supported_vnets = VectorParam.Int([], "Vnets supported Default:All([])") + class BasicExtLink(BasicLink): - type = 'BasicExtLink' + type = "BasicExtLink" cxx_header = "mem/ruby/network/BasicLink.hh" - cxx_class = 'gem5::ruby::BasicExtLink' + cxx_class = "gem5::ruby::BasicExtLink" ext_node = Param.RubyController("External node") int_node = Param.BasicRouter("ID of internal node") - bandwidth_factor = 16 # only used by simple network + bandwidth_factor = 16 # only used by simple network + class BasicIntLink(BasicLink): - type = 'BasicIntLink' + type = "BasicIntLink" cxx_header = "mem/ruby/network/BasicLink.hh" - cxx_class = 'gem5::ruby::BasicIntLink' + cxx_class = "gem5::ruby::BasicIntLink" src_node = Param.BasicRouter("Router on src end") dst_node = Param.BasicRouter("Router on dst end") diff --git a/src/mem/ruby/network/BasicRouter.py b/src/mem/ruby/network/BasicRouter.py index eb4a137b75..933470daec 100644 --- a/src/mem/ruby/network/BasicRouter.py +++ b/src/mem/ruby/network/BasicRouter.py @@ -28,12 
+28,13 @@ from m5.params import * from m5.objects.ClockedObject import ClockedObject + class BasicRouter(ClockedObject): - type = 'BasicRouter' + type = "BasicRouter" cxx_header = "mem/ruby/network/BasicRouter.hh" - cxx_class = 'gem5::ruby::BasicRouter' + cxx_class = "gem5::ruby::BasicRouter" router_id = Param.Int("ID in relation to other routers") # only used by garnet - latency = Param.Cycles(1, "number of cycles inside router") + latency = Param.Cycles(1, "number of cycles inside router") diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py index b776196f92..bd20239cea 100644 --- a/src/mem/ruby/network/MessageBuffer.py +++ b/src/mem/ruby/network/MessageBuffer.py @@ -45,31 +45,45 @@ from m5.SimObject import SimObject # RubySystem and the param is set to 'ruby_system' (default). 'disabled' # completely prevents randomization. class MessageRandomization(ScopedEnum): - vals = ['disabled', 'enabled', 'ruby_system'] + vals = ["disabled", "enabled", "ruby_system"] + class MessageBuffer(SimObject): - type = 'MessageBuffer' - cxx_class = 'gem5::ruby::MessageBuffer' + type = "MessageBuffer" + cxx_class = "gem5::ruby::MessageBuffer" cxx_header = "mem/ruby/network/MessageBuffer.hh" ordered = Param.Bool(False, "Whether the buffer is ordered") - buffer_size = Param.Unsigned(0, "Maximum number of entries to buffer \ - (0 allows infinite entries)") - randomization = Param.MessageRandomization('ruby_system', - "Randomization parameter") - allow_zero_latency = Param.Bool(False, "Allows messages to be enqueued \ + buffer_size = Param.Unsigned( + 0, + "Maximum number of entries to buffer \ + (0 allows infinite entries)", + ) + randomization = Param.MessageRandomization( + "ruby_system", "Randomization parameter" + ) + allow_zero_latency = Param.Bool( + False, + "Allows messages to be enqueued \ with zero latency. This is useful \ for internall trigger queues and \ should not be used if this msg. 
\ - buffer connects different objects") + buffer connects different objects", + ) out_port = RequestPort("Request port to MessageBuffer receiver") - master = DeprecatedParam(out_port, '`master` is now called `out_port`') + master = DeprecatedParam(out_port, "`master` is now called `out_port`") in_port = ResponsePort("Response port from MessageBuffer sender") - slave = DeprecatedParam(in_port, '`slave` is now called `in_port`') - max_dequeue_rate = Param.Unsigned(0, "Maximum number of messages that can \ + slave = DeprecatedParam(in_port, "`slave` is now called `in_port`") + max_dequeue_rate = Param.Unsigned( + 0, + "Maximum number of messages that can \ be dequeued per cycle \ - (0 allows dequeueing all ready messages)") - routing_priority = Param.Int(0, "Buffer priority when messages are \ + (0 allows dequeueing all ready messages)", + ) + routing_priority = Param.Int( + 0, + "Buffer priority when messages are \ consumed by the network. Smaller value \ - means higher priority") + means higher priority", + ) diff --git a/src/mem/ruby/network/Network.py b/src/mem/ruby/network/Network.py index 22e8a7354c..3e9f549f89 100644 --- a/src/mem/ruby/network/Network.py +++ b/src/mem/ruby/network/Network.py @@ -29,20 +29,24 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject from m5.objects.BasicLink import BasicLink + class RubyNetwork(ClockedObject): - type = 'RubyNetwork' - cxx_class = 'gem5::ruby::Network' + type = "RubyNetwork" + cxx_class = "gem5::ruby::Network" cxx_header = "mem/ruby/network/Network.hh" abstract = True - topology = Param.String("Not Specified", - "the name of the imported topology module") + topology = Param.String( + "Not Specified", "the name of the imported topology module" + ) - number_of_virtual_networks = Param.Unsigned("Number of virtual networks " - "used by the coherence protocol in use. The on-chip network " - "assumes the protocol numbers vnets starting from 0. 
Therefore, " - "the number of virtual networks should be one more than the " - "highest numbered vnet in use.") + number_of_virtual_networks = Param.Unsigned( + "Number of virtual networks " + "used by the coherence protocol in use. The on-chip network " + "assumes the protocol numbers vnets starting from 0. Therefore, " + "the number of virtual networks should be one more than the " + "highest numbered vnet in use." + ) control_msg_size = Param.Int(8, "") ruby_system = Param.RubySystem("") @@ -52,11 +56,12 @@ class RubyNetwork(ClockedObject): int_links = VectorParam.BasicIntLink("Links between internal nodes") in_port = VectorResponsePort("CPU input port") - slave = DeprecatedParam(in_port, '`slave` is now called `in_port`') + slave = DeprecatedParam(in_port, "`slave` is now called `in_port`") out_port = VectorRequestPort("CPU output port") - master = DeprecatedParam(out_port, '`master` is now called `out_port`') - - data_msg_size = Param.Int(Parent.block_size_bytes, - "Size of data messages. Defaults to the parent " - "RubySystem cache line size.") + master = DeprecatedParam(out_port, "`master` is now called `out_port`") + data_msg_size = Param.Int( + Parent.block_size_bytes, + "Size of data messages. 
Defaults to the parent " + "RubySystem cache line size.", + ) diff --git a/src/mem/ruby/network/fault_model/FaultModel.py b/src/mem/ruby/network/fault_model/FaultModel.py index 2bafa7720d..67b34c2c98 100644 --- a/src/mem/ruby/network/fault_model/FaultModel.py +++ b/src/mem/ruby/network/fault_model/FaultModel.py @@ -34,268 +34,2532 @@ from m5.params import * from m5.SimObject import SimObject + class FaultModel(SimObject): - type = 'FaultModel' - cxx_class = 'gem5::ruby::FaultModel' + type = "FaultModel" + cxx_class = "gem5::ruby::FaultModel" cxx_header = "mem/ruby/network/fault_model/FaultModel.hh" - baseline_fault_vector_database = VectorParam.Float([ - 5, 40, 0.080892, 0.109175, 0.018864, 0.130408, 0.059724, 0.077571, 0.034830, 0.083430, 0.067500, 0.121500, - 5, 39, 0.062640, 0.089100, 0.016821, 0.109620, 0.051462, 0.060210, 0.029700, 0.076140, 0.062100, 0.116100, - 5, 38, 0.050490, 0.076950, 0.015782, 0.091530, 0.044550, 0.046170, 0.025920, 0.070200, 0.057294, 0.110700, - 5, 37, 0.042120, 0.067770, 0.014191, 0.082890, 0.040986, 0.037800, 0.023760, 0.065880, 0.053568, 0.104490, - 5, 36, 0.035910, 0.061020, 0.013211, 0.075600, 0.035100, 0.030240, 0.021060, 0.061560, 0.049815, 0.100710, - 5, 35, 0.032130, 0.054810, 0.011964, 0.071550, 0.031860, 0.026730, 0.019710, 0.057510, 0.047169, 0.094230, - 5, 34, 0.028890, 0.051030, 0.011054, 0.067500, 0.030510, 0.023450, 0.018630, 0.054000, 0.045900, 0.088290, - 5, 33, 0.026460, 0.047250, 0.010160, 0.062640, 0.028971, 0.021600, 0.017280, 0.049410, 0.042903, 0.082080, - 5, 32, 0.024300, 0.042930, 0.009312, 0.057780, 0.027000, 0.019710, 0.016470, 0.045360, 0.041310, 0.075600, - 5, 31, 0.022410, 0.037260, 0.008910, 0.054540, 0.024732, 0.018171, 0.015660, 0.043470, 0.039447, 0.070740, - 5, 30, 0.021870, 0.032130, 0.008162, 0.050220, 0.023625, 0.016762, 0.013770, 0.039150, 0.037557, 0.065880, - 5, 29, 0.020790, 0.028080, 0.007657, 0.042660, 0.020061, 0.016043, 0.012690, 0.036720, 0.035451, 0.062370, - 5, 28, 0.019440, 0.025650, 
0.007123, 0.037800, 0.018900, 0.015363, 0.011880, 0.033480, 0.032400, 0.057780, - 5, 27, 0.018473, 0.023760, 0.006737, 0.034830, 0.018036, 0.014153, 0.011232, 0.030240, 0.030645, 0.055890, - 5, 26, 0.017550, 0.021330, 0.006440, 0.032130, 0.016497, 0.013511, 0.010031, 0.027621, 0.028242, 0.051030, - 5, 25, 0.016462, 0.020520, 0.006210, 0.028890, 0.015822, 0.013095, 0.009442, 0.021600, 0.026379, 0.046170, - 5, 24, 0.015930, 0.018360, 0.005940, 0.026730, 0.015047, 0.012377, 0.008918, 0.018360, 0.023193, 0.037800, - 5, 23, 0.015390, 0.017931, 0.005594, 0.025488, 0.013365, 0.012037, 0.008775, 0.015120, 0.018657, 0.031590, - 5, 22, 0.014804, 0.017167, 0.005338, 0.023976, 0.012258, 0.011734, 0.008087, 0.013500, 0.015444, 0.026190, - 5, 21, 0.014180, 0.016548, 0.004995, 0.022194, 0.011807, 0.011073, 0.007236, 0.011070, 0.013500, 0.021870, - 5, 20, 0.013743, 0.016176, 0.004613, 0.020414, 0.011070, 0.010415, 0.006220, 0.010415, 0.010800, 0.019077, - 5, 19, 0.011877, 0.015412, 0.003861, 0.016659, 0.008235, 0.008640, 0.005400, 0.009720, 0.008532, 0.013770, - 5, 18, 0.011097, 0.014310, 0.003483, 0.014526, 0.006912, 0.007560, 0.003780, 0.008640, 0.006885, 0.010260, - 5, 17, 0.010419, 0.011939, 0.002700, 0.011394, 0.005400, 0.006318, 0.003038, 0.008100, 0.005400, 0.009450, - 5, 16, 0.009887, 0.009720, 0.002395, 0.010152, 0.004023, 0.005400, 0.002743, 0.007020, 0.004590, 0.008370, - 5, 15, 0.009617, 0.007825, 0.002079, 0.008289, 0.003780, 0.004806, 0.002236, 0.006480, 0.003996, 0.008127, - 5, 14, 0.008710, 0.006820, 0.001817, 0.007749, 0.003240, 0.004185, 0.001760, 0.005400, 0.002538, 0.006615, - 5, 13, 0.008116, 0.006566, 0.001566, 0.006426, 0.002741, 0.003564, 0.001299, 0.004590, 0.001917, 0.005994, - 5, 12, 0.007908, 0.006151, 0.001350, 0.005400, 0.002471, 0.003132, 0.000794, 0.004050, 0.001323, 0.005940, - 5, 11, 0.007690, 0.005627, 0.001094, 0.005076, 0.002363, 0.002052, 0.000567, 0.003510, 0.001188, 0.004860, - 5, 10, 0.007560, 0.005038, 0.000805, 0.004536, 0.001985, 
0.000540, 0.000000, 0.002430, 0.000999, 0.003240, - 5, 9, 0.007314, 0.004193, 0.000540, 0.003834, 0.001715, 0.000000, 0.000000, 0.002160, 0.000945, 0.002700, - 5, 8, 0.006750, 0.003240, 0.000000, 0.003240, 0.001323, 0.000000, 0.000000, 0.001350, 0.000837, 0.002646, - 5, 7, 0.006461, 0.002700, 0.000000, 0.002700, 0.001215, 0.000000, 0.000000, 0.000000, 0.000810, 0.001809, - 5, 6, 0.006240, 0.001796, 0.000000, 0.002052, 0.001013, 0.000000, 0.000000, 0.000000, 0.000756, 0.001620, - 5, 5, 0.005430, 0.000675, 0.000000, 0.000864, 0.000864, 0.000000, 0.000000, 0.000000, 0.000729, 0.001593, - 5, 4, 0.003780, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.001080, - 5, 3, 0.001350, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000540, - 5, 2, 0.000540, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000270, - 5, 1, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 4, 40, 0.079484, 0.106785, 0.018198, 0.122699, 0.057538, 0.076974, 0.034813, 0.079276, 0.061426, 0.112509, - 4, 39, 0.062146, 0.088671, 0.016205, 0.108082, 0.050454, 0.059905, 0.029600, 0.075465, 0.057362, 0.106596, - 4, 38, 0.050047, 0.076478, 0.014924, 0.090994, 0.043475, 0.045808, 0.025794, 0.069220, 0.054351, 0.101993, - 4, 37, 0.041712, 0.067272, 0.013551, 0.082516, 0.040026, 0.037390, 0.023580, 0.064897, 0.051395, 0.097511, - 4, 36, 0.035384, 0.060487, 0.012434, 0.075287, 0.034229, 0.029784, 0.020859, 0.060167, 0.048222, 0.094055, - 4, 35, 0.031455, 0.054259, 0.011290, 0.071141, 0.031259, 0.026055, 0.019560, 0.055478, 0.045887, 0.088965, - 4, 34, 0.028307, 0.050427, 0.010342, 0.067048, 0.029835, 0.022525, 0.018495, 0.052523, 0.044253, 0.083727, - 4, 33, 0.025847, 0.046687, 0.009384, 0.062196, 0.028000, 0.020646, 0.017102, 0.048735, 0.041837, 0.079286, - 4, 32, 0.023688, 0.042449, 0.008645, 0.057303, 0.026070, 0.018765, 0.016315, 0.045001, 0.040163, 
0.073143, - 4, 31, 0.021905, 0.036781, 0.008235, 0.054011, 0.024200, 0.017496, 0.015528, 0.042795, 0.038273, 0.068675, - 4, 30, 0.021195, 0.031563, 0.007456, 0.049545, 0.022757, 0.016081, 0.013646, 0.038817, 0.036410, 0.063086, - 4, 29, 0.020115, 0.027494, 0.006992, 0.042302, 0.019517, 0.015096, 0.012562, 0.035562, 0.034223, 0.059954, - 4, 28, 0.018889, 0.025040, 0.006472, 0.037295, 0.018383, 0.014540, 0.011760, 0.032949, 0.032022, 0.055782, - 4, 27, 0.017630, 0.023150, 0.006055, 0.034262, 0.017183, 0.013759, 0.010949, 0.029876, 0.030294, 0.053339, - 4, 26, 0.016875, 0.020762, 0.005743, 0.031664, 0.016002, 0.013123, 0.009740, 0.026487, 0.027824, 0.048681, - 4, 25, 0.015930, 0.019660, 0.005516, 0.028215, 0.015147, 0.012420, 0.009311, 0.020643, 0.025988, 0.043443, - 4, 24, 0.015425, 0.017790, 0.005211, 0.026190, 0.014530, 0.011838, 0.008783, 0.017314, 0.022518, 0.035681, - 4, 23, 0.014947, 0.017314, 0.004878, 0.024813, 0.012897, 0.011507, 0.008451, 0.014445, 0.017982, 0.029673, - 4, 22, 0.014430, 0.016278, 0.004610, 0.023077, 0.011945, 0.011148, 0.007918, 0.012825, 0.015107, 0.023814, - 4, 21, 0.013643, 0.015699, 0.004320, 0.021686, 0.011598, 0.010383, 0.007113, 0.010395, 0.013176, 0.019197, - 4, 20, 0.013023, 0.015244, 0.003995, 0.019155, 0.010935, 0.009227, 0.005914, 0.009227, 0.010665, 0.016234, - 4, 19, 0.011185, 0.014467, 0.003186, 0.015718, 0.007822, 0.007965, 0.005273, 0.008374, 0.008262, 0.012623, - 4, 18, 0.010399, 0.013419, 0.002808, 0.013696, 0.006681, 0.006885, 0.003579, 0.007579, 0.006197, 0.009315, - 4, 17, 0.009773, 0.011089, 0.002025, 0.010882, 0.005054, 0.005881, 0.002928, 0.007101, 0.004914, 0.008100, - 4, 16, 0.009054, 0.009054, 0.001743, 0.009477, 0.003799, 0.005081, 0.002365, 0.006345, 0.003942, 0.007061, - 4, 15, 0.008575, 0.006882, 0.001404, 0.007792, 0.003449, 0.004131, 0.001793, 0.005327, 0.002903, 0.006264, - 4, 14, 0.008069, 0.005655, 0.001169, 0.006920, 0.002808, 0.003510, 0.001277, 0.004307, 0.001782, 0.005184, - 4, 13, 0.007668, 
0.005173, 0.000986, 0.005751, 0.002336, 0.002889, 0.000919, 0.003609, 0.001283, 0.004631, - 4, 12, 0.007403, 0.004563, 0.000675, 0.004852, 0.002066, 0.002457, 0.000532, 0.003083, 0.000662, 0.004374, - 4, 11, 0.007152, 0.004127, 0.000547, 0.004401, 0.001937, 0.001377, 0.000284, 0.002473, 0.000594, 0.003456, - 4, 10, 0.006885, 0.003530, 0.000402, 0.003920, 0.001613, 0.000405, 0.000000, 0.001755, 0.000500, 0.002565, - 4, 9, 0.006746, 0.002920, 0.000270, 0.003159, 0.001404, 0.000000, 0.000000, 0.001485, 0.000473, 0.002025, - 4, 8, 0.006257, 0.002290, 0.000000, 0.002565, 0.001107, 0.000000, 0.000000, 0.000675, 0.000419, 0.001971, - 4, 7, 0.005931, 0.001825, 0.000000, 0.002025, 0.000999, 0.000000, 0.000000, 0.000000, 0.000405, 0.001134, - 4, 6, 0.005585, 0.001199, 0.000000, 0.001463, 0.000844, 0.000000, 0.000000, 0.000000, 0.000378, 0.000945, - 4, 5, 0.004967, 0.000545, 0.000000, 0.000637, 0.000695, 0.000000, 0.000000, 0.000000, 0.000405, 0.000864, - 4, 4, 0.003105, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000540, - 4, 3, 0.000888, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000270, - 4, 2, 0.000270, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000135, - 4, 1, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 3, 40, 0.078075, 0.104396, 0.017531, 0.114991, 0.055352, 0.076378, 0.034795, 0.075122, 0.055352, 0.103518, - 3, 39, 0.061652, 0.088241, 0.015590, 0.106545, 0.049445, 0.059600, 0.029500, 0.074790, 0.052623, 0.097092, - 3, 38, 0.049604, 0.076005, 0.014067, 0.090458, 0.042401, 0.045446, 0.025669, 0.068240, 0.051408, 0.093285, - 3, 37, 0.041305, 0.066774, 0.012911, 0.082142, 0.039066, 0.036979, 0.023401, 0.063914, 0.049221, 0.090531, - 3, 36, 0.034857, 0.059954, 0.011656, 0.074974, 0.033359, 0.029327, 0.020658, 0.058774, 0.046629, 0.087399, - 3, 35, 0.030780, 0.053708, 0.010616, 0.070732, 0.030659, 
0.025380, 0.019410, 0.053447, 0.044604, 0.083700, - 3, 34, 0.027724, 0.049823, 0.009631, 0.066596, 0.029160, 0.021600, 0.018360, 0.051046, 0.042606, 0.079164, - 3, 33, 0.025234, 0.046124, 0.008608, 0.061752, 0.027030, 0.019691, 0.016924, 0.048060, 0.040770, 0.076491, - 3, 32, 0.023077, 0.041969, 0.007979, 0.056827, 0.025140, 0.017820, 0.016160, 0.044642, 0.039015, 0.070686, - 3, 31, 0.021400, 0.036302, 0.007560, 0.053482, 0.023668, 0.016821, 0.015395, 0.042120, 0.037098, 0.066609, - 3, 30, 0.020520, 0.030996, 0.006750, 0.048870, 0.021889, 0.015401, 0.013522, 0.038483, 0.035262, 0.060291, - 3, 29, 0.019440, 0.026908, 0.006326, 0.041945, 0.018973, 0.014148, 0.012434, 0.034403, 0.032994, 0.057537, - 3, 28, 0.018338, 0.024430, 0.005821, 0.036790, 0.017866, 0.013716, 0.011640, 0.032419, 0.031644, 0.053784, - 3, 27, 0.016786, 0.022540, 0.005373, 0.033693, 0.016330, 0.013365, 0.010665, 0.029511, 0.029943, 0.050787, - 3, 26, 0.016200, 0.020193, 0.005046, 0.031199, 0.015506, 0.012736, 0.009450, 0.025353, 0.027405, 0.046332, - 3, 25, 0.015398, 0.018800, 0.004822, 0.027540, 0.014472, 0.011745, 0.009180, 0.019686, 0.025596, 0.040716, - 3, 24, 0.014920, 0.017221, 0.004482, 0.025650, 0.014013, 0.011300, 0.008648, 0.016268, 0.021843, 0.033561, - 3, 23, 0.014504, 0.016697, 0.004161, 0.024138, 0.012428, 0.010978, 0.008127, 0.013770, 0.017307, 0.027756, - 3, 22, 0.014056, 0.015390, 0.003883, 0.022178, 0.011632, 0.010562, 0.007749, 0.012150, 0.014769, 0.021438, - 3, 21, 0.013106, 0.014850, 0.003645, 0.021179, 0.011389, 0.009693, 0.006990, 0.009720, 0.012852, 0.016524, - 3, 20, 0.012304, 0.014313, 0.003378, 0.017896, 0.010800, 0.008039, 0.005608, 0.008039, 0.010530, 0.013392, - 3, 19, 0.010492, 0.013522, 0.002511, 0.014777, 0.007409, 0.007290, 0.005146, 0.007028, 0.007992, 0.011475, - 3, 18, 0.009701, 0.012528, 0.002133, 0.012866, 0.006450, 0.006210, 0.003378, 0.006518, 0.005508, 0.008370, - 3, 17, 0.009126, 0.010238, 0.001350, 0.010371, 0.004709, 0.005443, 0.002819, 0.006102, 
0.004428, 0.006750, - 3, 16, 0.008222, 0.008389, 0.001091, 0.008802, 0.003575, 0.004763, 0.001987, 0.005670, 0.003294, 0.005751, - 3, 15, 0.007533, 0.005940, 0.000729, 0.007295, 0.003119, 0.003456, 0.001350, 0.004174, 0.001809, 0.004401, - 3, 14, 0.007428, 0.004490, 0.000521, 0.006091, 0.002376, 0.002835, 0.000794, 0.003213, 0.001026, 0.003753, - 3, 13, 0.007220, 0.003780, 0.000405, 0.005076, 0.001931, 0.002214, 0.000540, 0.002627, 0.000648, 0.003267, - 3, 12, 0.006899, 0.002975, 0.000000, 0.004304, 0.001661, 0.001782, 0.000270, 0.002117, 0.000000, 0.002808, - 3, 11, 0.006615, 0.002627, 0.000000, 0.003726, 0.001512, 0.000702, 0.000000, 0.001436, 0.000000, 0.002052, - 3, 10, 0.006210, 0.002022, 0.000000, 0.003305, 0.001242, 0.000270, 0.000000, 0.001080, 0.000000, 0.001890, - 3, 9, 0.006178, 0.001647, 0.000000, 0.002484, 0.001094, 0.000000, 0.000000, 0.000810, 0.000000, 0.001350, - 3, 8, 0.005765, 0.001339, 0.000000, 0.001890, 0.000891, 0.000000, 0.000000, 0.000000, 0.000000, 0.001296, - 3, 7, 0.005400, 0.000950, 0.000000, 0.001350, 0.000783, 0.000000, 0.000000, 0.000000, 0.000000, 0.000459, - 3, 6, 0.004930, 0.000602, 0.000000, 0.000875, 0.000675, 0.000000, 0.000000, 0.000000, 0.000000, 0.000270, - 3, 5, 0.004504, 0.000416, 0.000000, 0.000410, 0.000527, 0.000000, 0.000000, 0.000000, 0.000081, 0.000135, - 3, 4, 0.002430, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 3, 3, 0.000427, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 3, 2, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 3, 1, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 2, 40, 0.077418, 0.103451, 0.016828, 0.109064, 0.054341, 0.075962, 0.034795, 0.075122, 0.051976, 0.081459, - 2, 39, 0.060720, 0.087215, 0.014756, 0.103532, 0.048041, 0.059297, 0.029376, 0.074115, 0.050193, 0.077382, - 2, 38, 0.048859, 
0.074947, 0.013362, 0.089180, 0.041598, 0.045021, 0.025537, 0.066803, 0.048722, 0.074655, - 2, 37, 0.040158, 0.065781, 0.012126, 0.080282, 0.038175, 0.036686, 0.023178, 0.062884, 0.047088, 0.072630, - 2, 36, 0.033881, 0.058774, 0.011001, 0.072895, 0.032542, 0.028434, 0.020461, 0.057424, 0.045225, 0.069728, - 2, 35, 0.030294, 0.052831, 0.009800, 0.069422, 0.029587, 0.024311, 0.019232, 0.052294, 0.043254, 0.067311, - 2, 34, 0.026957, 0.048824, 0.008847, 0.065224, 0.028054, 0.020655, 0.018095, 0.049876, 0.041553, 0.064112, - 2, 33, 0.024349, 0.045159, 0.007976, 0.060371, 0.026226, 0.018652, 0.016776, 0.047385, 0.039704, 0.061695, - 2, 32, 0.022078, 0.040951, 0.007202, 0.055330, 0.024365, 0.017145, 0.016065, 0.043944, 0.037719, 0.057699, - 2, 31, 0.020733, 0.035223, 0.006791, 0.052327, 0.022850, 0.015790, 0.015233, 0.041124, 0.035964, 0.054810, - 2, 30, 0.019626, 0.029984, 0.006021, 0.046551, 0.021086, 0.014673, 0.013133, 0.037757, 0.034007, 0.049923, - 2, 29, 0.018765, 0.025923, 0.005677, 0.039731, 0.018291, 0.013707, 0.011973, 0.033543, 0.031847, 0.047048, - 2, 28, 0.017539, 0.023491, 0.005192, 0.035180, 0.017033, 0.013349, 0.011092, 0.031296, 0.029970, 0.044240, - 2, 27, 0.016270, 0.021488, 0.004753, 0.032258, 0.015628, 0.012909, 0.010419, 0.028179, 0.028202, 0.041756, - 2, 26, 0.015525, 0.019557, 0.004390, 0.029942, 0.014652, 0.012207, 0.009339, 0.024975, 0.025745, 0.037679, - 2, 25, 0.014638, 0.018036, 0.004146, 0.026314, 0.013797, 0.011314, 0.009099, 0.018864, 0.023517, 0.032832, - 2, 24, 0.014075, 0.016575, 0.003788, 0.024444, 0.013045, 0.010780, 0.008541, 0.015587, 0.019710, 0.028013, - 2, 23, 0.013597, 0.015638, 0.003430, 0.022970, 0.011632, 0.010471, 0.008031, 0.012675, 0.015296, 0.023004, - 2, 22, 0.012968, 0.014715, 0.003089, 0.021096, 0.010990, 0.009929, 0.007642, 0.010846, 0.012825, 0.017253, - 2, 21, 0.012088, 0.014175, 0.002884, 0.020046, 0.010148, 0.009032, 0.006813, 0.008261, 0.010449, 0.013500, - 2, 20, 0.010976, 0.013381, 0.002693, 0.016732, 
0.009381, 0.007742, 0.005400, 0.006437, 0.008100, 0.010841, - 2, 19, 0.009566, 0.012631, 0.001836, 0.013686, 0.007125, 0.006782, 0.003923, 0.005431, 0.005589, 0.008613, - 2, 18, 0.008982, 0.011349, 0.001458, 0.011744, 0.005708, 0.005742, 0.002724, 0.004884, 0.003618, 0.006764, - 2, 17, 0.008273, 0.009439, 0.000845, 0.009291, 0.004250, 0.004857, 0.002327, 0.004469, 0.002673, 0.004847, - 2, 16, 0.007679, 0.007704, 0.000545, 0.007737, 0.003394, 0.003988, 0.001534, 0.004045, 0.001742, 0.003834, - 2, 15, 0.007236, 0.005265, 0.000365, 0.006442, 0.002565, 0.003089, 0.000675, 0.003023, 0.000959, 0.002808, - 2, 14, 0.007090, 0.003787, 0.000261, 0.004990, 0.001802, 0.002400, 0.000397, 0.002249, 0.000608, 0.002457, - 2, 13, 0.006877, 0.003029, 0.000203, 0.004076, 0.001404, 0.001835, 0.000270, 0.001814, 0.000324, 0.002012, - 2, 12, 0.006575, 0.002311, 0.000000, 0.003502, 0.001249, 0.001458, 0.000135, 0.001436, 0.000000, 0.001850, - 2, 11, 0.006314, 0.001836, 0.000000, 0.003051, 0.001114, 0.000597, 0.000000, 0.000841, 0.000000, 0.001350, - 2, 10, 0.005971, 0.001434, 0.000000, 0.002570, 0.000945, 0.000230, 0.000000, 0.000540, 0.000000, 0.001215, - 2, 9, 0.005627, 0.001172, 0.000000, 0.001809, 0.000783, 0.000019, 0.000000, 0.000405, 0.000000, 0.000675, - 2, 8, 0.005144, 0.000940, 0.000000, 0.001276, 0.000668, 0.000038, 0.000000, 0.000000, 0.000000, 0.000648, - 2, 7, 0.004686, 0.000622, 0.000000, 0.000890, 0.000581, 0.000009, 0.000000, 0.000000, 0.000000, 0.000230, - 2, 6, 0.004247, 0.000428, 0.000000, 0.000541, 0.000473, 0.000019, 0.000000, 0.000000, 0.000000, 0.000135, - 2, 5, 0.003857, 0.000269, 0.000000, 0.000320, 0.000419, 0.000000, 0.000000, 0.000000, 0.000041, 0.000068, - 2, 4, 0.001459, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 2, 3, 0.000213, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 2, 2, 0.000011, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000, - 2, 1, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 40, 0.076761, 0.102506, 0.016124, 0.103138, 0.053330, 0.075546, 0.034795, 0.075122, 0.048600, 0.059400, - 1, 39, 0.059789, 0.086189, 0.013921, 0.100518, 0.046637, 0.058995, 0.029252, 0.073440, 0.047763, 0.057672, - 1, 38, 0.048114, 0.073888, 0.012658, 0.087901, 0.040794, 0.044596, 0.025404, 0.065367, 0.046035, 0.056025, - 1, 37, 0.039012, 0.064789, 0.011340, 0.078422, 0.037284, 0.036393, 0.022955, 0.061854, 0.044955, 0.054729, - 1, 36, 0.032905, 0.057594, 0.010346, 0.070816, 0.031725, 0.027540, 0.020264, 0.056074, 0.043821, 0.052056, - 1, 35, 0.029808, 0.051953, 0.008983, 0.068113, 0.028515, 0.023242, 0.019054, 0.051141, 0.041904, 0.050922, - 1, 34, 0.026190, 0.047825, 0.008062, 0.063852, 0.026949, 0.019710, 0.017831, 0.048705, 0.040500, 0.049059, - 1, 33, 0.023463, 0.044194, 0.007344, 0.058990, 0.025423, 0.017612, 0.016629, 0.046710, 0.038637, 0.046899, - 1, 32, 0.021079, 0.039933, 0.006426, 0.053833, 0.023590, 0.016470, 0.015971, 0.043246, 0.036423, 0.044712, - 1, 31, 0.020066, 0.034144, 0.006021, 0.051173, 0.022032, 0.014758, 0.015071, 0.040127, 0.034830, 0.043011, - 1, 30, 0.018733, 0.028971, 0.005292, 0.044231, 0.020282, 0.013946, 0.012744, 0.037031, 0.032751, 0.039555, - 1, 29, 0.018090, 0.024937, 0.005027, 0.037517, 0.017609, 0.013265, 0.011513, 0.032684, 0.030699, 0.036558, - 1, 28, 0.016740, 0.022553, 0.004563, 0.033569, 0.016200, 0.012982, 0.010544, 0.030173, 0.028296, 0.034695, - 1, 27, 0.015755, 0.020436, 0.004134, 0.030823, 0.014926, 0.012452, 0.010174, 0.026846, 0.026460, 0.032724, - 1, 26, 0.014850, 0.018922, 0.003734, 0.028685, 0.013797, 0.011678, 0.009229, 0.024597, 0.024084, 0.029025, - 1, 25, 0.013878, 0.017272, 0.003470, 0.025088, 0.013122, 0.010884, 0.009018, 0.018041, 0.021438, 0.024948, - 1, 24, 0.013230, 0.015930, 0.003094, 0.023239, 0.012077, 0.010260, 0.008435, 0.014907, 0.017577, 0.022464, - 1, 23, 0.012690, 
0.014580, 0.002700, 0.021803, 0.010835, 0.009963, 0.007935, 0.011580, 0.013284, 0.018252, - 1, 22, 0.011880, 0.014040, 0.002295, 0.020015, 0.010349, 0.009296, 0.007536, 0.009542, 0.010881, 0.013068, - 1, 21, 0.011070, 0.013500, 0.002122, 0.018914, 0.008907, 0.008370, 0.006637, 0.006801, 0.008046, 0.010476, - 1, 20, 0.009647, 0.012450, 0.002009, 0.015568, 0.007962, 0.007444, 0.005192, 0.004836, 0.005670, 0.008289, - 1, 19, 0.008640, 0.011740, 0.001161, 0.012596, 0.006842, 0.006275, 0.002700, 0.003834, 0.003186, 0.005751, - 1, 18, 0.008262, 0.010171, 0.000783, 0.010622, 0.004965, 0.005273, 0.002071, 0.003251, 0.001728, 0.005157, - 1, 17, 0.007420, 0.008640, 0.000340, 0.008211, 0.003791, 0.004271, 0.001836, 0.002835, 0.000918, 0.002943, - 1, 16, 0.007136, 0.007020, 0.000000, 0.006672, 0.003213, 0.003213, 0.001080, 0.002419, 0.000189, 0.001917, - 1, 15, 0.006939, 0.004590, 0.000000, 0.005589, 0.002012, 0.002722, 0.000000, 0.001871, 0.000108, 0.001215, - 1, 14, 0.006753, 0.003083, 0.000000, 0.003888, 0.001229, 0.001966, 0.000000, 0.001285, 0.000189, 0.001161, - 1, 13, 0.006534, 0.002279, 0.000000, 0.003075, 0.000878, 0.001455, 0.000000, 0.001002, 0.000000, 0.000756, - 1, 12, 0.006251, 0.001647, 0.000000, 0.002700, 0.000837, 0.001134, 0.000000, 0.000756, 0.000000, 0.000891, - 1, 11, 0.006013, 0.001045, 0.000000, 0.002376, 0.000716, 0.000491, 0.000000, 0.000246, 0.000000, 0.000648, - 1, 10, 0.005732, 0.000845, 0.000000, 0.001836, 0.000648, 0.000189, 0.000000, 0.000000, 0.000000, 0.000540, - 1, 9, 0.005076, 0.000697, 0.000000, 0.001134, 0.000473, 0.000038, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 8, 0.004523, 0.000540, 0.000000, 0.000662, 0.000446, 0.000076, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 7, 0.003972, 0.000294, 0.000000, 0.000429, 0.000378, 0.000019, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 6, 0.003564, 0.000254, 0.000000, 0.000208, 0.000270, 0.000038, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 5, 0.003210, 0.000122, 0.000000, 0.000230, 0.000311, 
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 4, 0.000489, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 3, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 2, 0.000022, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - 1, 1, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, - -1], ""); + baseline_fault_vector_database = VectorParam.Float( + [ + 5, + 40, + 0.080892, + 0.109175, + 0.018864, + 0.130408, + 0.059724, + 0.077571, + 0.034830, + 0.083430, + 0.067500, + 0.121500, + 5, + 39, + 0.062640, + 0.089100, + 0.016821, + 0.109620, + 0.051462, + 0.060210, + 0.029700, + 0.076140, + 0.062100, + 0.116100, + 5, + 38, + 0.050490, + 0.076950, + 0.015782, + 0.091530, + 0.044550, + 0.046170, + 0.025920, + 0.070200, + 0.057294, + 0.110700, + 5, + 37, + 0.042120, + 0.067770, + 0.014191, + 0.082890, + 0.040986, + 0.037800, + 0.023760, + 0.065880, + 0.053568, + 0.104490, + 5, + 36, + 0.035910, + 0.061020, + 0.013211, + 0.075600, + 0.035100, + 0.030240, + 0.021060, + 0.061560, + 0.049815, + 0.100710, + 5, + 35, + 0.032130, + 0.054810, + 0.011964, + 0.071550, + 0.031860, + 0.026730, + 0.019710, + 0.057510, + 0.047169, + 0.094230, + 5, + 34, + 0.028890, + 0.051030, + 0.011054, + 0.067500, + 0.030510, + 0.023450, + 0.018630, + 0.054000, + 0.045900, + 0.088290, + 5, + 33, + 0.026460, + 0.047250, + 0.010160, + 0.062640, + 0.028971, + 0.021600, + 0.017280, + 0.049410, + 0.042903, + 0.082080, + 5, + 32, + 0.024300, + 0.042930, + 0.009312, + 0.057780, + 0.027000, + 0.019710, + 0.016470, + 0.045360, + 0.041310, + 0.075600, + 5, + 31, + 0.022410, + 0.037260, + 0.008910, + 0.054540, + 0.024732, + 0.018171, + 0.015660, + 0.043470, + 0.039447, + 0.070740, + 5, + 30, + 0.021870, + 0.032130, + 0.008162, + 0.050220, + 0.023625, + 0.016762, + 0.013770, + 0.039150, + 0.037557, + 0.065880, + 5, + 
29, + 0.020790, + 0.028080, + 0.007657, + 0.042660, + 0.020061, + 0.016043, + 0.012690, + 0.036720, + 0.035451, + 0.062370, + 5, + 28, + 0.019440, + 0.025650, + 0.007123, + 0.037800, + 0.018900, + 0.015363, + 0.011880, + 0.033480, + 0.032400, + 0.057780, + 5, + 27, + 0.018473, + 0.023760, + 0.006737, + 0.034830, + 0.018036, + 0.014153, + 0.011232, + 0.030240, + 0.030645, + 0.055890, + 5, + 26, + 0.017550, + 0.021330, + 0.006440, + 0.032130, + 0.016497, + 0.013511, + 0.010031, + 0.027621, + 0.028242, + 0.051030, + 5, + 25, + 0.016462, + 0.020520, + 0.006210, + 0.028890, + 0.015822, + 0.013095, + 0.009442, + 0.021600, + 0.026379, + 0.046170, + 5, + 24, + 0.015930, + 0.018360, + 0.005940, + 0.026730, + 0.015047, + 0.012377, + 0.008918, + 0.018360, + 0.023193, + 0.037800, + 5, + 23, + 0.015390, + 0.017931, + 0.005594, + 0.025488, + 0.013365, + 0.012037, + 0.008775, + 0.015120, + 0.018657, + 0.031590, + 5, + 22, + 0.014804, + 0.017167, + 0.005338, + 0.023976, + 0.012258, + 0.011734, + 0.008087, + 0.013500, + 0.015444, + 0.026190, + 5, + 21, + 0.014180, + 0.016548, + 0.004995, + 0.022194, + 0.011807, + 0.011073, + 0.007236, + 0.011070, + 0.013500, + 0.021870, + 5, + 20, + 0.013743, + 0.016176, + 0.004613, + 0.020414, + 0.011070, + 0.010415, + 0.006220, + 0.010415, + 0.010800, + 0.019077, + 5, + 19, + 0.011877, + 0.015412, + 0.003861, + 0.016659, + 0.008235, + 0.008640, + 0.005400, + 0.009720, + 0.008532, + 0.013770, + 5, + 18, + 0.011097, + 0.014310, + 0.003483, + 0.014526, + 0.006912, + 0.007560, + 0.003780, + 0.008640, + 0.006885, + 0.010260, + 5, + 17, + 0.010419, + 0.011939, + 0.002700, + 0.011394, + 0.005400, + 0.006318, + 0.003038, + 0.008100, + 0.005400, + 0.009450, + 5, + 16, + 0.009887, + 0.009720, + 0.002395, + 0.010152, + 0.004023, + 0.005400, + 0.002743, + 0.007020, + 0.004590, + 0.008370, + 5, + 15, + 0.009617, + 0.007825, + 0.002079, + 0.008289, + 0.003780, + 0.004806, + 0.002236, + 0.006480, + 0.003996, + 0.008127, + 5, + 14, + 0.008710, + 0.006820, + 
0.001817, + 0.007749, + 0.003240, + 0.004185, + 0.001760, + 0.005400, + 0.002538, + 0.006615, + 5, + 13, + 0.008116, + 0.006566, + 0.001566, + 0.006426, + 0.002741, + 0.003564, + 0.001299, + 0.004590, + 0.001917, + 0.005994, + 5, + 12, + 0.007908, + 0.006151, + 0.001350, + 0.005400, + 0.002471, + 0.003132, + 0.000794, + 0.004050, + 0.001323, + 0.005940, + 5, + 11, + 0.007690, + 0.005627, + 0.001094, + 0.005076, + 0.002363, + 0.002052, + 0.000567, + 0.003510, + 0.001188, + 0.004860, + 5, + 10, + 0.007560, + 0.005038, + 0.000805, + 0.004536, + 0.001985, + 0.000540, + 0.000000, + 0.002430, + 0.000999, + 0.003240, + 5, + 9, + 0.007314, + 0.004193, + 0.000540, + 0.003834, + 0.001715, + 0.000000, + 0.000000, + 0.002160, + 0.000945, + 0.002700, + 5, + 8, + 0.006750, + 0.003240, + 0.000000, + 0.003240, + 0.001323, + 0.000000, + 0.000000, + 0.001350, + 0.000837, + 0.002646, + 5, + 7, + 0.006461, + 0.002700, + 0.000000, + 0.002700, + 0.001215, + 0.000000, + 0.000000, + 0.000000, + 0.000810, + 0.001809, + 5, + 6, + 0.006240, + 0.001796, + 0.000000, + 0.002052, + 0.001013, + 0.000000, + 0.000000, + 0.000000, + 0.000756, + 0.001620, + 5, + 5, + 0.005430, + 0.000675, + 0.000000, + 0.000864, + 0.000864, + 0.000000, + 0.000000, + 0.000000, + 0.000729, + 0.001593, + 5, + 4, + 0.003780, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.001080, + 5, + 3, + 0.001350, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000540, + 5, + 2, + 0.000540, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000270, + 5, + 1, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 4, + 40, + 0.079484, + 0.106785, + 0.018198, + 0.122699, + 0.057538, + 0.076974, + 0.034813, + 0.079276, + 0.061426, + 0.112509, + 4, + 39, + 0.062146, + 0.088671, + 0.016205, + 0.108082, + 0.050454, + 
0.059905, + 0.029600, + 0.075465, + 0.057362, + 0.106596, + 4, + 38, + 0.050047, + 0.076478, + 0.014924, + 0.090994, + 0.043475, + 0.045808, + 0.025794, + 0.069220, + 0.054351, + 0.101993, + 4, + 37, + 0.041712, + 0.067272, + 0.013551, + 0.082516, + 0.040026, + 0.037390, + 0.023580, + 0.064897, + 0.051395, + 0.097511, + 4, + 36, + 0.035384, + 0.060487, + 0.012434, + 0.075287, + 0.034229, + 0.029784, + 0.020859, + 0.060167, + 0.048222, + 0.094055, + 4, + 35, + 0.031455, + 0.054259, + 0.011290, + 0.071141, + 0.031259, + 0.026055, + 0.019560, + 0.055478, + 0.045887, + 0.088965, + 4, + 34, + 0.028307, + 0.050427, + 0.010342, + 0.067048, + 0.029835, + 0.022525, + 0.018495, + 0.052523, + 0.044253, + 0.083727, + 4, + 33, + 0.025847, + 0.046687, + 0.009384, + 0.062196, + 0.028000, + 0.020646, + 0.017102, + 0.048735, + 0.041837, + 0.079286, + 4, + 32, + 0.023688, + 0.042449, + 0.008645, + 0.057303, + 0.026070, + 0.018765, + 0.016315, + 0.045001, + 0.040163, + 0.073143, + 4, + 31, + 0.021905, + 0.036781, + 0.008235, + 0.054011, + 0.024200, + 0.017496, + 0.015528, + 0.042795, + 0.038273, + 0.068675, + 4, + 30, + 0.021195, + 0.031563, + 0.007456, + 0.049545, + 0.022757, + 0.016081, + 0.013646, + 0.038817, + 0.036410, + 0.063086, + 4, + 29, + 0.020115, + 0.027494, + 0.006992, + 0.042302, + 0.019517, + 0.015096, + 0.012562, + 0.035562, + 0.034223, + 0.059954, + 4, + 28, + 0.018889, + 0.025040, + 0.006472, + 0.037295, + 0.018383, + 0.014540, + 0.011760, + 0.032949, + 0.032022, + 0.055782, + 4, + 27, + 0.017630, + 0.023150, + 0.006055, + 0.034262, + 0.017183, + 0.013759, + 0.010949, + 0.029876, + 0.030294, + 0.053339, + 4, + 26, + 0.016875, + 0.020762, + 0.005743, + 0.031664, + 0.016002, + 0.013123, + 0.009740, + 0.026487, + 0.027824, + 0.048681, + 4, + 25, + 0.015930, + 0.019660, + 0.005516, + 0.028215, + 0.015147, + 0.012420, + 0.009311, + 0.020643, + 0.025988, + 0.043443, + 4, + 24, + 0.015425, + 0.017790, + 0.005211, + 0.026190, + 0.014530, + 0.011838, + 0.008783, + 0.017314, 
+ 0.022518, + 0.035681, + 4, + 23, + 0.014947, + 0.017314, + 0.004878, + 0.024813, + 0.012897, + 0.011507, + 0.008451, + 0.014445, + 0.017982, + 0.029673, + 4, + 22, + 0.014430, + 0.016278, + 0.004610, + 0.023077, + 0.011945, + 0.011148, + 0.007918, + 0.012825, + 0.015107, + 0.023814, + 4, + 21, + 0.013643, + 0.015699, + 0.004320, + 0.021686, + 0.011598, + 0.010383, + 0.007113, + 0.010395, + 0.013176, + 0.019197, + 4, + 20, + 0.013023, + 0.015244, + 0.003995, + 0.019155, + 0.010935, + 0.009227, + 0.005914, + 0.009227, + 0.010665, + 0.016234, + 4, + 19, + 0.011185, + 0.014467, + 0.003186, + 0.015718, + 0.007822, + 0.007965, + 0.005273, + 0.008374, + 0.008262, + 0.012623, + 4, + 18, + 0.010399, + 0.013419, + 0.002808, + 0.013696, + 0.006681, + 0.006885, + 0.003579, + 0.007579, + 0.006197, + 0.009315, + 4, + 17, + 0.009773, + 0.011089, + 0.002025, + 0.010882, + 0.005054, + 0.005881, + 0.002928, + 0.007101, + 0.004914, + 0.008100, + 4, + 16, + 0.009054, + 0.009054, + 0.001743, + 0.009477, + 0.003799, + 0.005081, + 0.002365, + 0.006345, + 0.003942, + 0.007061, + 4, + 15, + 0.008575, + 0.006882, + 0.001404, + 0.007792, + 0.003449, + 0.004131, + 0.001793, + 0.005327, + 0.002903, + 0.006264, + 4, + 14, + 0.008069, + 0.005655, + 0.001169, + 0.006920, + 0.002808, + 0.003510, + 0.001277, + 0.004307, + 0.001782, + 0.005184, + 4, + 13, + 0.007668, + 0.005173, + 0.000986, + 0.005751, + 0.002336, + 0.002889, + 0.000919, + 0.003609, + 0.001283, + 0.004631, + 4, + 12, + 0.007403, + 0.004563, + 0.000675, + 0.004852, + 0.002066, + 0.002457, + 0.000532, + 0.003083, + 0.000662, + 0.004374, + 4, + 11, + 0.007152, + 0.004127, + 0.000547, + 0.004401, + 0.001937, + 0.001377, + 0.000284, + 0.002473, + 0.000594, + 0.003456, + 4, + 10, + 0.006885, + 0.003530, + 0.000402, + 0.003920, + 0.001613, + 0.000405, + 0.000000, + 0.001755, + 0.000500, + 0.002565, + 4, + 9, + 0.006746, + 0.002920, + 0.000270, + 0.003159, + 0.001404, + 0.000000, + 0.000000, + 0.001485, + 0.000473, + 0.002025, + 4, + 8, + 
0.006257, + 0.002290, + 0.000000, + 0.002565, + 0.001107, + 0.000000, + 0.000000, + 0.000675, + 0.000419, + 0.001971, + 4, + 7, + 0.005931, + 0.001825, + 0.000000, + 0.002025, + 0.000999, + 0.000000, + 0.000000, + 0.000000, + 0.000405, + 0.001134, + 4, + 6, + 0.005585, + 0.001199, + 0.000000, + 0.001463, + 0.000844, + 0.000000, + 0.000000, + 0.000000, + 0.000378, + 0.000945, + 4, + 5, + 0.004967, + 0.000545, + 0.000000, + 0.000637, + 0.000695, + 0.000000, + 0.000000, + 0.000000, + 0.000405, + 0.000864, + 4, + 4, + 0.003105, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000540, + 4, + 3, + 0.000888, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000270, + 4, + 2, + 0.000270, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000135, + 4, + 1, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 3, + 40, + 0.078075, + 0.104396, + 0.017531, + 0.114991, + 0.055352, + 0.076378, + 0.034795, + 0.075122, + 0.055352, + 0.103518, + 3, + 39, + 0.061652, + 0.088241, + 0.015590, + 0.106545, + 0.049445, + 0.059600, + 0.029500, + 0.074790, + 0.052623, + 0.097092, + 3, + 38, + 0.049604, + 0.076005, + 0.014067, + 0.090458, + 0.042401, + 0.045446, + 0.025669, + 0.068240, + 0.051408, + 0.093285, + 3, + 37, + 0.041305, + 0.066774, + 0.012911, + 0.082142, + 0.039066, + 0.036979, + 0.023401, + 0.063914, + 0.049221, + 0.090531, + 3, + 36, + 0.034857, + 0.059954, + 0.011656, + 0.074974, + 0.033359, + 0.029327, + 0.020658, + 0.058774, + 0.046629, + 0.087399, + 3, + 35, + 0.030780, + 0.053708, + 0.010616, + 0.070732, + 0.030659, + 0.025380, + 0.019410, + 0.053447, + 0.044604, + 0.083700, + 3, + 34, + 0.027724, + 0.049823, + 0.009631, + 0.066596, + 0.029160, + 0.021600, + 0.018360, + 0.051046, + 0.042606, + 0.079164, + 3, + 33, + 0.025234, + 0.046124, + 0.008608, + 
0.061752, + 0.027030, + 0.019691, + 0.016924, + 0.048060, + 0.040770, + 0.076491, + 3, + 32, + 0.023077, + 0.041969, + 0.007979, + 0.056827, + 0.025140, + 0.017820, + 0.016160, + 0.044642, + 0.039015, + 0.070686, + 3, + 31, + 0.021400, + 0.036302, + 0.007560, + 0.053482, + 0.023668, + 0.016821, + 0.015395, + 0.042120, + 0.037098, + 0.066609, + 3, + 30, + 0.020520, + 0.030996, + 0.006750, + 0.048870, + 0.021889, + 0.015401, + 0.013522, + 0.038483, + 0.035262, + 0.060291, + 3, + 29, + 0.019440, + 0.026908, + 0.006326, + 0.041945, + 0.018973, + 0.014148, + 0.012434, + 0.034403, + 0.032994, + 0.057537, + 3, + 28, + 0.018338, + 0.024430, + 0.005821, + 0.036790, + 0.017866, + 0.013716, + 0.011640, + 0.032419, + 0.031644, + 0.053784, + 3, + 27, + 0.016786, + 0.022540, + 0.005373, + 0.033693, + 0.016330, + 0.013365, + 0.010665, + 0.029511, + 0.029943, + 0.050787, + 3, + 26, + 0.016200, + 0.020193, + 0.005046, + 0.031199, + 0.015506, + 0.012736, + 0.009450, + 0.025353, + 0.027405, + 0.046332, + 3, + 25, + 0.015398, + 0.018800, + 0.004822, + 0.027540, + 0.014472, + 0.011745, + 0.009180, + 0.019686, + 0.025596, + 0.040716, + 3, + 24, + 0.014920, + 0.017221, + 0.004482, + 0.025650, + 0.014013, + 0.011300, + 0.008648, + 0.016268, + 0.021843, + 0.033561, + 3, + 23, + 0.014504, + 0.016697, + 0.004161, + 0.024138, + 0.012428, + 0.010978, + 0.008127, + 0.013770, + 0.017307, + 0.027756, + 3, + 22, + 0.014056, + 0.015390, + 0.003883, + 0.022178, + 0.011632, + 0.010562, + 0.007749, + 0.012150, + 0.014769, + 0.021438, + 3, + 21, + 0.013106, + 0.014850, + 0.003645, + 0.021179, + 0.011389, + 0.009693, + 0.006990, + 0.009720, + 0.012852, + 0.016524, + 3, + 20, + 0.012304, + 0.014313, + 0.003378, + 0.017896, + 0.010800, + 0.008039, + 0.005608, + 0.008039, + 0.010530, + 0.013392, + 3, + 19, + 0.010492, + 0.013522, + 0.002511, + 0.014777, + 0.007409, + 0.007290, + 0.005146, + 0.007028, + 0.007992, + 0.011475, + 3, + 18, + 0.009701, + 0.012528, + 0.002133, + 0.012866, + 0.006450, + 0.006210, 
+ 0.003378, + 0.006518, + 0.005508, + 0.008370, + 3, + 17, + 0.009126, + 0.010238, + 0.001350, + 0.010371, + 0.004709, + 0.005443, + 0.002819, + 0.006102, + 0.004428, + 0.006750, + 3, + 16, + 0.008222, + 0.008389, + 0.001091, + 0.008802, + 0.003575, + 0.004763, + 0.001987, + 0.005670, + 0.003294, + 0.005751, + 3, + 15, + 0.007533, + 0.005940, + 0.000729, + 0.007295, + 0.003119, + 0.003456, + 0.001350, + 0.004174, + 0.001809, + 0.004401, + 3, + 14, + 0.007428, + 0.004490, + 0.000521, + 0.006091, + 0.002376, + 0.002835, + 0.000794, + 0.003213, + 0.001026, + 0.003753, + 3, + 13, + 0.007220, + 0.003780, + 0.000405, + 0.005076, + 0.001931, + 0.002214, + 0.000540, + 0.002627, + 0.000648, + 0.003267, + 3, + 12, + 0.006899, + 0.002975, + 0.000000, + 0.004304, + 0.001661, + 0.001782, + 0.000270, + 0.002117, + 0.000000, + 0.002808, + 3, + 11, + 0.006615, + 0.002627, + 0.000000, + 0.003726, + 0.001512, + 0.000702, + 0.000000, + 0.001436, + 0.000000, + 0.002052, + 3, + 10, + 0.006210, + 0.002022, + 0.000000, + 0.003305, + 0.001242, + 0.000270, + 0.000000, + 0.001080, + 0.000000, + 0.001890, + 3, + 9, + 0.006178, + 0.001647, + 0.000000, + 0.002484, + 0.001094, + 0.000000, + 0.000000, + 0.000810, + 0.000000, + 0.001350, + 3, + 8, + 0.005765, + 0.001339, + 0.000000, + 0.001890, + 0.000891, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.001296, + 3, + 7, + 0.005400, + 0.000950, + 0.000000, + 0.001350, + 0.000783, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000459, + 3, + 6, + 0.004930, + 0.000602, + 0.000000, + 0.000875, + 0.000675, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000270, + 3, + 5, + 0.004504, + 0.000416, + 0.000000, + 0.000410, + 0.000527, + 0.000000, + 0.000000, + 0.000000, + 0.000081, + 0.000135, + 3, + 4, + 0.002430, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 3, + 3, + 0.000427, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 
0.000000, + 3, + 2, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 3, + 1, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 2, + 40, + 0.077418, + 0.103451, + 0.016828, + 0.109064, + 0.054341, + 0.075962, + 0.034795, + 0.075122, + 0.051976, + 0.081459, + 2, + 39, + 0.060720, + 0.087215, + 0.014756, + 0.103532, + 0.048041, + 0.059297, + 0.029376, + 0.074115, + 0.050193, + 0.077382, + 2, + 38, + 0.048859, + 0.074947, + 0.013362, + 0.089180, + 0.041598, + 0.045021, + 0.025537, + 0.066803, + 0.048722, + 0.074655, + 2, + 37, + 0.040158, + 0.065781, + 0.012126, + 0.080282, + 0.038175, + 0.036686, + 0.023178, + 0.062884, + 0.047088, + 0.072630, + 2, + 36, + 0.033881, + 0.058774, + 0.011001, + 0.072895, + 0.032542, + 0.028434, + 0.020461, + 0.057424, + 0.045225, + 0.069728, + 2, + 35, + 0.030294, + 0.052831, + 0.009800, + 0.069422, + 0.029587, + 0.024311, + 0.019232, + 0.052294, + 0.043254, + 0.067311, + 2, + 34, + 0.026957, + 0.048824, + 0.008847, + 0.065224, + 0.028054, + 0.020655, + 0.018095, + 0.049876, + 0.041553, + 0.064112, + 2, + 33, + 0.024349, + 0.045159, + 0.007976, + 0.060371, + 0.026226, + 0.018652, + 0.016776, + 0.047385, + 0.039704, + 0.061695, + 2, + 32, + 0.022078, + 0.040951, + 0.007202, + 0.055330, + 0.024365, + 0.017145, + 0.016065, + 0.043944, + 0.037719, + 0.057699, + 2, + 31, + 0.020733, + 0.035223, + 0.006791, + 0.052327, + 0.022850, + 0.015790, + 0.015233, + 0.041124, + 0.035964, + 0.054810, + 2, + 30, + 0.019626, + 0.029984, + 0.006021, + 0.046551, + 0.021086, + 0.014673, + 0.013133, + 0.037757, + 0.034007, + 0.049923, + 2, + 29, + 0.018765, + 0.025923, + 0.005677, + 0.039731, + 0.018291, + 0.013707, + 0.011973, + 0.033543, + 0.031847, + 0.047048, + 2, + 28, + 0.017539, + 0.023491, + 0.005192, + 0.035180, + 0.017033, + 0.013349, + 0.011092, + 0.031296, + 0.029970, + 0.044240, + 2, + 27, + 0.016270, + 
0.021488, + 0.004753, + 0.032258, + 0.015628, + 0.012909, + 0.010419, + 0.028179, + 0.028202, + 0.041756, + 2, + 26, + 0.015525, + 0.019557, + 0.004390, + 0.029942, + 0.014652, + 0.012207, + 0.009339, + 0.024975, + 0.025745, + 0.037679, + 2, + 25, + 0.014638, + 0.018036, + 0.004146, + 0.026314, + 0.013797, + 0.011314, + 0.009099, + 0.018864, + 0.023517, + 0.032832, + 2, + 24, + 0.014075, + 0.016575, + 0.003788, + 0.024444, + 0.013045, + 0.010780, + 0.008541, + 0.015587, + 0.019710, + 0.028013, + 2, + 23, + 0.013597, + 0.015638, + 0.003430, + 0.022970, + 0.011632, + 0.010471, + 0.008031, + 0.012675, + 0.015296, + 0.023004, + 2, + 22, + 0.012968, + 0.014715, + 0.003089, + 0.021096, + 0.010990, + 0.009929, + 0.007642, + 0.010846, + 0.012825, + 0.017253, + 2, + 21, + 0.012088, + 0.014175, + 0.002884, + 0.020046, + 0.010148, + 0.009032, + 0.006813, + 0.008261, + 0.010449, + 0.013500, + 2, + 20, + 0.010976, + 0.013381, + 0.002693, + 0.016732, + 0.009381, + 0.007742, + 0.005400, + 0.006437, + 0.008100, + 0.010841, + 2, + 19, + 0.009566, + 0.012631, + 0.001836, + 0.013686, + 0.007125, + 0.006782, + 0.003923, + 0.005431, + 0.005589, + 0.008613, + 2, + 18, + 0.008982, + 0.011349, + 0.001458, + 0.011744, + 0.005708, + 0.005742, + 0.002724, + 0.004884, + 0.003618, + 0.006764, + 2, + 17, + 0.008273, + 0.009439, + 0.000845, + 0.009291, + 0.004250, + 0.004857, + 0.002327, + 0.004469, + 0.002673, + 0.004847, + 2, + 16, + 0.007679, + 0.007704, + 0.000545, + 0.007737, + 0.003394, + 0.003988, + 0.001534, + 0.004045, + 0.001742, + 0.003834, + 2, + 15, + 0.007236, + 0.005265, + 0.000365, + 0.006442, + 0.002565, + 0.003089, + 0.000675, + 0.003023, + 0.000959, + 0.002808, + 2, + 14, + 0.007090, + 0.003787, + 0.000261, + 0.004990, + 0.001802, + 0.002400, + 0.000397, + 0.002249, + 0.000608, + 0.002457, + 2, + 13, + 0.006877, + 0.003029, + 0.000203, + 0.004076, + 0.001404, + 0.001835, + 0.000270, + 0.001814, + 0.000324, + 0.002012, + 2, + 12, + 0.006575, + 0.002311, + 0.000000, + 0.003502, 
+ 0.001249, + 0.001458, + 0.000135, + 0.001436, + 0.000000, + 0.001850, + 2, + 11, + 0.006314, + 0.001836, + 0.000000, + 0.003051, + 0.001114, + 0.000597, + 0.000000, + 0.000841, + 0.000000, + 0.001350, + 2, + 10, + 0.005971, + 0.001434, + 0.000000, + 0.002570, + 0.000945, + 0.000230, + 0.000000, + 0.000540, + 0.000000, + 0.001215, + 2, + 9, + 0.005627, + 0.001172, + 0.000000, + 0.001809, + 0.000783, + 0.000019, + 0.000000, + 0.000405, + 0.000000, + 0.000675, + 2, + 8, + 0.005144, + 0.000940, + 0.000000, + 0.001276, + 0.000668, + 0.000038, + 0.000000, + 0.000000, + 0.000000, + 0.000648, + 2, + 7, + 0.004686, + 0.000622, + 0.000000, + 0.000890, + 0.000581, + 0.000009, + 0.000000, + 0.000000, + 0.000000, + 0.000230, + 2, + 6, + 0.004247, + 0.000428, + 0.000000, + 0.000541, + 0.000473, + 0.000019, + 0.000000, + 0.000000, + 0.000000, + 0.000135, + 2, + 5, + 0.003857, + 0.000269, + 0.000000, + 0.000320, + 0.000419, + 0.000000, + 0.000000, + 0.000000, + 0.000041, + 0.000068, + 2, + 4, + 0.001459, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 2, + 3, + 0.000213, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 2, + 2, + 0.000011, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 2, + 1, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 40, + 0.076761, + 0.102506, + 0.016124, + 0.103138, + 0.053330, + 0.075546, + 0.034795, + 0.075122, + 0.048600, + 0.059400, + 1, + 39, + 0.059789, + 0.086189, + 0.013921, + 0.100518, + 0.046637, + 0.058995, + 0.029252, + 0.073440, + 0.047763, + 0.057672, + 1, + 38, + 0.048114, + 0.073888, + 0.012658, + 0.087901, + 0.040794, + 0.044596, + 0.025404, + 0.065367, + 0.046035, + 0.056025, + 1, + 37, + 0.039012, + 0.064789, + 0.011340, + 0.078422, + 0.037284, + 0.036393, + 0.022955, + 
0.061854, + 0.044955, + 0.054729, + 1, + 36, + 0.032905, + 0.057594, + 0.010346, + 0.070816, + 0.031725, + 0.027540, + 0.020264, + 0.056074, + 0.043821, + 0.052056, + 1, + 35, + 0.029808, + 0.051953, + 0.008983, + 0.068113, + 0.028515, + 0.023242, + 0.019054, + 0.051141, + 0.041904, + 0.050922, + 1, + 34, + 0.026190, + 0.047825, + 0.008062, + 0.063852, + 0.026949, + 0.019710, + 0.017831, + 0.048705, + 0.040500, + 0.049059, + 1, + 33, + 0.023463, + 0.044194, + 0.007344, + 0.058990, + 0.025423, + 0.017612, + 0.016629, + 0.046710, + 0.038637, + 0.046899, + 1, + 32, + 0.021079, + 0.039933, + 0.006426, + 0.053833, + 0.023590, + 0.016470, + 0.015971, + 0.043246, + 0.036423, + 0.044712, + 1, + 31, + 0.020066, + 0.034144, + 0.006021, + 0.051173, + 0.022032, + 0.014758, + 0.015071, + 0.040127, + 0.034830, + 0.043011, + 1, + 30, + 0.018733, + 0.028971, + 0.005292, + 0.044231, + 0.020282, + 0.013946, + 0.012744, + 0.037031, + 0.032751, + 0.039555, + 1, + 29, + 0.018090, + 0.024937, + 0.005027, + 0.037517, + 0.017609, + 0.013265, + 0.011513, + 0.032684, + 0.030699, + 0.036558, + 1, + 28, + 0.016740, + 0.022553, + 0.004563, + 0.033569, + 0.016200, + 0.012982, + 0.010544, + 0.030173, + 0.028296, + 0.034695, + 1, + 27, + 0.015755, + 0.020436, + 0.004134, + 0.030823, + 0.014926, + 0.012452, + 0.010174, + 0.026846, + 0.026460, + 0.032724, + 1, + 26, + 0.014850, + 0.018922, + 0.003734, + 0.028685, + 0.013797, + 0.011678, + 0.009229, + 0.024597, + 0.024084, + 0.029025, + 1, + 25, + 0.013878, + 0.017272, + 0.003470, + 0.025088, + 0.013122, + 0.010884, + 0.009018, + 0.018041, + 0.021438, + 0.024948, + 1, + 24, + 0.013230, + 0.015930, + 0.003094, + 0.023239, + 0.012077, + 0.010260, + 0.008435, + 0.014907, + 0.017577, + 0.022464, + 1, + 23, + 0.012690, + 0.014580, + 0.002700, + 0.021803, + 0.010835, + 0.009963, + 0.007935, + 0.011580, + 0.013284, + 0.018252, + 1, + 22, + 0.011880, + 0.014040, + 0.002295, + 0.020015, + 0.010349, + 0.009296, + 0.007536, + 0.009542, + 0.010881, + 0.013068, 
+ 1, + 21, + 0.011070, + 0.013500, + 0.002122, + 0.018914, + 0.008907, + 0.008370, + 0.006637, + 0.006801, + 0.008046, + 0.010476, + 1, + 20, + 0.009647, + 0.012450, + 0.002009, + 0.015568, + 0.007962, + 0.007444, + 0.005192, + 0.004836, + 0.005670, + 0.008289, + 1, + 19, + 0.008640, + 0.011740, + 0.001161, + 0.012596, + 0.006842, + 0.006275, + 0.002700, + 0.003834, + 0.003186, + 0.005751, + 1, + 18, + 0.008262, + 0.010171, + 0.000783, + 0.010622, + 0.004965, + 0.005273, + 0.002071, + 0.003251, + 0.001728, + 0.005157, + 1, + 17, + 0.007420, + 0.008640, + 0.000340, + 0.008211, + 0.003791, + 0.004271, + 0.001836, + 0.002835, + 0.000918, + 0.002943, + 1, + 16, + 0.007136, + 0.007020, + 0.000000, + 0.006672, + 0.003213, + 0.003213, + 0.001080, + 0.002419, + 0.000189, + 0.001917, + 1, + 15, + 0.006939, + 0.004590, + 0.000000, + 0.005589, + 0.002012, + 0.002722, + 0.000000, + 0.001871, + 0.000108, + 0.001215, + 1, + 14, + 0.006753, + 0.003083, + 0.000000, + 0.003888, + 0.001229, + 0.001966, + 0.000000, + 0.001285, + 0.000189, + 0.001161, + 1, + 13, + 0.006534, + 0.002279, + 0.000000, + 0.003075, + 0.000878, + 0.001455, + 0.000000, + 0.001002, + 0.000000, + 0.000756, + 1, + 12, + 0.006251, + 0.001647, + 0.000000, + 0.002700, + 0.000837, + 0.001134, + 0.000000, + 0.000756, + 0.000000, + 0.000891, + 1, + 11, + 0.006013, + 0.001045, + 0.000000, + 0.002376, + 0.000716, + 0.000491, + 0.000000, + 0.000246, + 0.000000, + 0.000648, + 1, + 10, + 0.005732, + 0.000845, + 0.000000, + 0.001836, + 0.000648, + 0.000189, + 0.000000, + 0.000000, + 0.000000, + 0.000540, + 1, + 9, + 0.005076, + 0.000697, + 0.000000, + 0.001134, + 0.000473, + 0.000038, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 8, + 0.004523, + 0.000540, + 0.000000, + 0.000662, + 0.000446, + 0.000076, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 7, + 0.003972, + 0.000294, + 0.000000, + 0.000429, + 0.000378, + 0.000019, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 6, + 0.003564, + 0.000254, + 
0.000000, + 0.000208, + 0.000270, + 0.000038, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 5, + 0.003210, + 0.000122, + 0.000000, + 0.000230, + 0.000311, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 4, + 0.000489, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 3, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 2, + 0.000022, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 1, + 1, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + 0.000000, + -1, + ], + "", + ) - temperature_weights_database = VectorParam.Int([ - 71, 1, - 72, 2, - 73, 3, - 74, 4, - 75, 5, - 76, 5, - 77, 6, - 78, 7, - 79, 8, - 80, 10, - 81, 11, - 82, 12, - 83, 12, - 84, 13, - 85, 14, - 86, 16, - 87, 17, - 88, 18, - 89, 19, - 90, 20, - 91, 22, - 92, 24, - 93, 26, - 94, 27, - 95, 29, - 96, 30, - 97, 32, - 98, 35, - 99, 37, - 100, 39, - 101, 42, - 102, 45, - 103, 47, - 104, 50, - 105, 53, - 106, 56, - 107, 61, - 108, 65, - 109, 70, - 110, 74, - 111, 78, - 112, 82, - 113, 89, - 114, 95, - 115, 100, - 116, 106, - 117, 115, - 118, 122, - 119, 130, - 120, 139, - 121, 147, - 122, 156, - 123, 169, - 124, 178, - 125, 190, - -1], ""); + temperature_weights_database = VectorParam.Int( + [ + 71, + 1, + 72, + 2, + 73, + 3, + 74, + 4, + 75, + 5, + 76, + 5, + 77, + 6, + 78, + 7, + 79, + 8, + 80, + 10, + 81, + 11, + 82, + 12, + 83, + 12, + 84, + 13, + 85, + 14, + 86, + 16, + 87, + 17, + 88, + 18, + 89, + 19, + 90, + 20, + 91, + 22, + 92, + 24, + 93, + 26, + 94, + 27, + 95, + 29, + 96, + 30, + 97, + 32, + 98, + 35, + 99, + 37, + 100, + 39, + 101, + 42, + 102, + 45, + 103, + 47, + 104, + 50, + 105, + 53, + 106, + 56, + 107, + 61, + 108, + 65, + 109, + 70, + 110, + 74, + 111, + 78, + 112, + 82, + 113, + 89, + 114, + 95, + 
115, + 100, + 116, + 106, + 117, + 115, + 118, + 122, + 119, + 130, + 120, + 139, + 121, + 147, + 122, + 156, + 123, + 169, + 124, + 178, + 125, + 190, + -1, + ], + "", + ) diff --git a/src/mem/ruby/network/fault_model/SConscript b/src/mem/ruby/network/fault_model/SConscript index 701b59a19f..bd55947795 100644 --- a/src/mem/ruby/network/fault_model/SConscript +++ b/src/mem/ruby/network/fault_model/SConscript @@ -38,4 +38,3 @@ if env['CONF']['PROTOCOL'] == 'None': SimObject('FaultModel.py', sim_objects=['FaultModel']) Source('FaultModel.cc') - diff --git a/src/mem/ruby/network/garnet/CrossbarSwitch.cc b/src/mem/ruby/network/garnet/CrossbarSwitch.cc index cae7113dca..b4d66962ff 100644 --- a/src/mem/ruby/network/garnet/CrossbarSwitch.cc +++ b/src/mem/ruby/network/garnet/CrossbarSwitch.cc @@ -91,6 +91,17 @@ CrossbarSwitch::wakeup() } } +bool +CrossbarSwitch::functionalRead(Packet *pkt, WriteMask &mask) +{ + bool read = false; + for (auto& switch_buffer : switchBuffers) { + if (switch_buffer.functionalRead(pkt, mask)) + read = true; + } + return read; +} + uint32_t CrossbarSwitch::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/CrossbarSwitch.hh b/src/mem/ruby/network/garnet/CrossbarSwitch.hh index 8ab632e715..b4970786f1 100644 --- a/src/mem/ruby/network/garnet/CrossbarSwitch.hh +++ b/src/mem/ruby/network/garnet/CrossbarSwitch.hh @@ -67,6 +67,7 @@ class CrossbarSwitch : public Consumer inline double get_crossbar_activity() { return m_crossbar_activity; } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *pkt); void resetStats(); diff --git a/src/mem/ruby/network/garnet/GarnetLink.py b/src/mem/ruby/network/garnet/GarnetLink.py index f9baa89100..3cc44e4614 100644 --- a/src/mem/ruby/network/garnet/GarnetLink.py +++ b/src/mem/ruby/network/garnet/GarnetLink.py @@ -30,52 +30,59 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject from m5.objects.BasicLink import BasicIntLink, BasicExtLink 
-class CDCType(Enum): vals = [ - 'LINK_OBJECT', - 'OBJECT_LINK', - ] + +class CDCType(Enum): + vals = ["LINK_OBJECT", "OBJECT_LINK"] + class NetworkLink(ClockedObject): - type = 'NetworkLink' + type = "NetworkLink" cxx_header = "mem/ruby/network/garnet/NetworkLink.hh" - cxx_class = 'gem5::ruby::garnet::NetworkLink' + cxx_class = "gem5::ruby::garnet::NetworkLink" link_id = Param.Int(Parent.link_id, "link id") link_latency = Param.Cycles(Parent.latency, "link latency") - vcs_per_vnet = Param.Int(Parent.vcs_per_vnet, - "virtual channels per virtual network") - virt_nets = Param.Int(Parent.number_of_virtual_networks, - "number of virtual networks") - supported_vnets = VectorParam.Int(Parent.supported_vnets, - "Vnets supported") + vcs_per_vnet = Param.Int( + Parent.vcs_per_vnet, "virtual channels per virtual network" + ) + virt_nets = Param.Int( + Parent.number_of_virtual_networks, "number of virtual networks" + ) + supported_vnets = VectorParam.Int( + Parent.supported_vnets, "Vnets supported" + ) width = Param.UInt32(Parent.width, "bit-width of the link") + class CreditLink(NetworkLink): - type = 'CreditLink' + type = "CreditLink" cxx_header = "mem/ruby/network/garnet/CreditLink.hh" - cxx_class = 'gem5::ruby::garnet::CreditLink' + cxx_class = "gem5::ruby::garnet::CreditLink" + class NetworkBridge(CreditLink): - type = 'NetworkBridge' + type = "NetworkBridge" cxx_header = "mem/ruby/network/garnet/NetworkBridge.hh" - cxx_class = 'gem5::ruby::garnet::NetworkBridge' + cxx_class = "gem5::ruby::garnet::NetworkBridge" link = Param.NetworkLink("Associated Network Link") - vtype = Param.CDCType('LINK_OBJECT', - "Direction of CDC LINK->OBJECT or OBJECT->LINK") + vtype = Param.CDCType( + "LINK_OBJECT", "Direction of CDC LINK->OBJECT or OBJECT->LINK" + ) serdes_latency = Param.Cycles(1, "Latency of SerDes Unit") cdc_latency = Param.Cycles(1, "Latency of CDC Unit") + # Interior fixed pipeline links between routers class GarnetIntLink(BasicIntLink): - type = 'GarnetIntLink' + type = 
"GarnetIntLink" cxx_header = "mem/ruby/network/garnet/GarnetLink.hh" - cxx_class = 'gem5::ruby::garnet::GarnetIntLink' + cxx_class = "gem5::ruby::garnet::GarnetIntLink" # The internal link includes one forward link (for flit) # and one backward flow-control link (for credit) network_link = Param.NetworkLink(NetworkLink(), "forward link") - credit_link = Param.CreditLink(CreditLink(), "backward flow-control link") + credit_link = Param.CreditLink(CreditLink(), "backward flow-control link") # The src_cdc and dst_cdc flags are used to enable the # clock domain crossing(CDC) at the source and destination @@ -102,14 +109,16 @@ class GarnetIntLink(BasicIntLink): src_cred_bridge = Param.NetworkBridge(NULL, "Credit Bridge at source") dst_cred_bridge = Param.NetworkBridge(NULL, "Credit Bridge at dest") - width = Param.UInt32(Parent.ni_flit_size, - "bit width supported by the router") + width = Param.UInt32( + Parent.ni_flit_size, "bit width supported by the router" + ) + # Exterior fixed pipeline links between a router and a controller class GarnetExtLink(BasicExtLink): - type = 'GarnetExtLink' + type = "GarnetExtLink" cxx_header = "mem/ruby/network/garnet/GarnetLink.hh" - cxx_class = 'gem5::ruby::garnet::GarnetExtLink' + cxx_class = "gem5::ruby::garnet::GarnetExtLink" # The external link is bi-directional. 
# It includes two forward links (for flits) @@ -117,16 +126,16 @@ class GarnetExtLink(BasicExtLink): # one per direction _nls = [] # In uni-directional link - _nls.append(NetworkLink()); + _nls.append(NetworkLink()) # Out uni-directional link - _nls.append(NetworkLink()); + _nls.append(NetworkLink()) network_links = VectorParam.NetworkLink(_nls, "forward links") _cls = [] # In uni-directional link - _cls.append(CreditLink()); + _cls.append(CreditLink()) # Out uni-directional link - _cls.append(CreditLink()); + _cls.append(CreditLink()) credit_links = VectorParam.CreditLink(_cls, "backward flow-control links") # The ext_cdc and intt_cdc flags are used to enable the @@ -149,15 +158,19 @@ class GarnetExtLink(BasicExtLink): # The network bridge encapsulates both the CDC and Ser-Des # units in HeteroGarnet. This is automatically enabled when # either CDC or Ser-Des is enabled. - ext_net_bridge = VectorParam.NetworkBridge([], - "Network Bridge at external end") - ext_cred_bridge = VectorParam.NetworkBridge([], - "Credit Bridge at external end") - int_net_bridge = VectorParam.NetworkBridge([], - "Network Bridge at internal end") - int_cred_bridge = VectorParam.NetworkBridge([], - "Credit Bridge at internal end") + ext_net_bridge = VectorParam.NetworkBridge( + [], "Network Bridge at external end" + ) + ext_cred_bridge = VectorParam.NetworkBridge( + [], "Credit Bridge at external end" + ) + int_net_bridge = VectorParam.NetworkBridge( + [], "Network Bridge at internal end" + ) + int_cred_bridge = VectorParam.NetworkBridge( + [], "Credit Bridge at internal end" + ) - - width = Param.UInt32(Parent.ni_flit_size, - "bit width supported by the router") + width = Param.UInt32( + Parent.ni_flit_size, "bit width supported by the router" + ) diff --git a/src/mem/ruby/network/garnet/GarnetNetwork.cc b/src/mem/ruby/network/garnet/GarnetNetwork.cc index 01b2473f37..57922698e0 100644 --- a/src/mem/ruby/network/garnet/GarnetNetwork.cc +++ b/src/mem/ruby/network/garnet/GarnetNetwork.cc @@ 
-197,10 +197,12 @@ GarnetNetwork::makeExtInLink(NodeID global_src, SwitchID dest, BasicLink* link, if (garnet_link->extBridgeEn) { DPRINTF(RubyNetwork, "Enable external bridge for %s\n", garnet_link->name()); + NetworkBridge *n_bridge = garnet_link->extNetBridge[LinkDirection_In]; m_nis[local_src]-> - addOutPort(garnet_link->extNetBridge[LinkDirection_In], + addOutPort(n_bridge, garnet_link->extCredBridge[LinkDirection_In], dest, m_routers[dest]->get_vc_per_vnet()); + m_networkbridges.push_back(n_bridge); } else { m_nis[local_src]->addOutPort(net_link, credit_link, dest, m_routers[dest]->get_vc_per_vnet()); @@ -209,10 +211,12 @@ GarnetNetwork::makeExtInLink(NodeID global_src, SwitchID dest, BasicLink* link, if (garnet_link->intBridgeEn) { DPRINTF(RubyNetwork, "Enable internal bridge for %s\n", garnet_link->name()); + NetworkBridge *n_bridge = garnet_link->intNetBridge[LinkDirection_In]; m_routers[dest]-> addInPort(dst_inport_dirn, - garnet_link->intNetBridge[LinkDirection_In], + n_bridge, garnet_link->intCredBridge[LinkDirection_In]); + m_networkbridges.push_back(n_bridge); } else { m_routers[dest]->addInPort(dst_inport_dirn, net_link, credit_link); } @@ -266,9 +270,10 @@ GarnetNetwork::makeExtOutLink(SwitchID src, NodeID global_dest, if (garnet_link->extBridgeEn) { DPRINTF(RubyNetwork, "Enable external bridge for %s\n", garnet_link->name()); + NetworkBridge *n_bridge = garnet_link->extNetBridge[LinkDirection_Out]; m_nis[local_dest]-> - addInPort(garnet_link->extNetBridge[LinkDirection_Out], - garnet_link->extCredBridge[LinkDirection_Out]); + addInPort(n_bridge, garnet_link->extCredBridge[LinkDirection_Out]); + m_networkbridges.push_back(n_bridge); } else { m_nis[local_dest]->addInPort(net_link, credit_link); } @@ -276,12 +281,14 @@ GarnetNetwork::makeExtOutLink(SwitchID src, NodeID global_dest, if (garnet_link->intBridgeEn) { DPRINTF(RubyNetwork, "Enable internal bridge for %s\n", garnet_link->name()); + NetworkBridge *n_bridge = 
garnet_link->intNetBridge[LinkDirection_Out]; m_routers[src]-> addOutPort(src_outport_dirn, - garnet_link->intNetBridge[LinkDirection_Out], + n_bridge, routing_table_entry, link->m_weight, garnet_link->intCredBridge[LinkDirection_Out], m_routers[src]->get_vc_per_vnet()); + m_networkbridges.push_back(n_bridge); } else { m_routers[src]-> addOutPort(src_outport_dirn, net_link, @@ -332,8 +339,10 @@ GarnetNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link, if (garnet_link->dstBridgeEn) { DPRINTF(RubyNetwork, "Enable destination bridge for %s\n", garnet_link->name()); - m_routers[dest]->addInPort(dst_inport_dirn, - garnet_link->dstNetBridge, garnet_link->dstCredBridge); + NetworkBridge *n_bridge = garnet_link->dstNetBridge; + m_routers[dest]->addInPort(dst_inport_dirn, n_bridge, + garnet_link->dstCredBridge); + m_networkbridges.push_back(n_bridge); } else { m_routers[dest]->addInPort(dst_inport_dirn, net_link, credit_link); } @@ -341,11 +350,13 @@ GarnetNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link, if (garnet_link->srcBridgeEn) { DPRINTF(RubyNetwork, "Enable source bridge for %s\n", garnet_link->name()); + NetworkBridge *n_bridge = garnet_link->srcNetBridge; m_routers[src]-> - addOutPort(src_outport_dirn, garnet_link->srcNetBridge, + addOutPort(src_outport_dirn, n_bridge, routing_table_entry, link->m_weight, garnet_link->srcCredBridge, m_routers[dest]->get_vc_per_vnet()); + m_networkbridges.push_back(n_bridge); } else { m_routers[src]->addOutPort(src_outport_dirn, net_link, routing_table_entry, @@ -604,6 +615,33 @@ GarnetNetwork::update_traffic_distribution(RouteInfo route) (*m_ctrl_traffic_distribution[src_node][dest_node])++; } +bool +GarnetNetwork::functionalRead(Packet *pkt, WriteMask &mask) +{ + bool read = false; + for (unsigned int i = 0; i < m_routers.size(); i++) { + if (m_routers[i]->functionalRead(pkt, mask)) + read = true; + } + + for (unsigned int i = 0; i < m_nis.size(); ++i) { + if 
(m_nis[i]->functionalRead(pkt, mask)) + read = true; + } + + for (unsigned int i = 0; i < m_networklinks.size(); ++i) { + if (m_networklinks[i]->functionalRead(pkt, mask)) + read = true; + } + + for (unsigned int i = 0; i < m_networkbridges.size(); ++i) { + if (m_networkbridges[i]->functionalRead(pkt, mask)) + read = true; + } + + return read; +} + uint32_t GarnetNetwork::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/GarnetNetwork.hh b/src/mem/ruby/network/garnet/GarnetNetwork.hh index d18caaee5c..db37628bea 100644 --- a/src/mem/ruby/network/garnet/GarnetNetwork.hh +++ b/src/mem/ruby/network/garnet/GarnetNetwork.hh @@ -55,6 +55,7 @@ namespace garnet class NetworkInterface; class Router; class NetworkLink; +class NetworkBridge; class CreditLink; class GarnetNetwork : public Network @@ -105,6 +106,7 @@ class GarnetNetwork : public Network PortDirection src_outport_dirn, PortDirection dest_inport_dirn); + bool functionalRead(Packet *pkt, WriteMask &mask); //! Function for performing a functional write. The return value //! indicates the number of messages that were written. 
uint32_t functionalWrite(Packet *pkt); @@ -208,6 +210,7 @@ class GarnetNetwork : public Network std::vector m_vnet_type; std::vector m_routers; // All Routers in Network std::vector m_networklinks; // All flit links in the network + std::vector m_networkbridges; // All network bridges std::vector m_creditlinks; // All credit links in the network std::vector m_nis; // All NI's in Network int m_next_packet_id; // static vairable for packet id allocation diff --git a/src/mem/ruby/network/garnet/GarnetNetwork.py b/src/mem/ruby/network/garnet/GarnetNetwork.py index 184e64c07e..128118309c 100644 --- a/src/mem/ruby/network/garnet/GarnetNetwork.py +++ b/src/mem/ruby/network/garnet/GarnetNetwork.py @@ -34,43 +34,52 @@ from m5.objects.Network import RubyNetwork from m5.objects.BasicRouter import BasicRouter from m5.objects.ClockedObject import ClockedObject -class GarnetNetwork(RubyNetwork): - type = 'GarnetNetwork' - cxx_header = "mem/ruby/network/garnet/GarnetNetwork.hh" - cxx_class = 'gem5::ruby::garnet::GarnetNetwork' - num_rows = Param.Int(0, "number of rows if 2D (mesh/torus/..) topology"); +class GarnetNetwork(RubyNetwork): + type = "GarnetNetwork" + cxx_header = "mem/ruby/network/garnet/GarnetNetwork.hh" + cxx_class = "gem5::ruby::garnet::GarnetNetwork" + + num_rows = Param.Int(0, "number of rows if 2D (mesh/torus/..) 
topology") ni_flit_size = Param.UInt32(16, "network interface flit size in bytes") - vcs_per_vnet = Param.UInt32(4, "virtual channels per virtual network"); - buffers_per_data_vc = Param.UInt32(4, "buffers per data virtual channel"); - buffers_per_ctrl_vc = Param.UInt32(1, "buffers per ctrl virtual channel"); - routing_algorithm = Param.Int(0, - "0: Weight-based Table, 1: XY, 2: Custom"); - enable_fault_model = Param.Bool(False, "enable network fault model"); - fault_model = Param.FaultModel(NULL, "network fault model"); - garnet_deadlock_threshold = Param.UInt32(50000, - "network-level deadlock threshold") + vcs_per_vnet = Param.UInt32(4, "virtual channels per virtual network") + buffers_per_data_vc = Param.UInt32(4, "buffers per data virtual channel") + buffers_per_ctrl_vc = Param.UInt32(1, "buffers per ctrl virtual channel") + routing_algorithm = Param.Int(0, "0: Weight-based Table, 1: XY, 2: Custom") + enable_fault_model = Param.Bool(False, "enable network fault model") + fault_model = Param.FaultModel(NULL, "network fault model") + garnet_deadlock_threshold = Param.UInt32( + 50000, "network-level deadlock threshold" + ) + class GarnetNetworkInterface(ClockedObject): - type = 'GarnetNetworkInterface' - cxx_class = 'gem5::ruby::garnet::NetworkInterface' + type = "GarnetNetworkInterface" + cxx_class = "gem5::ruby::garnet::NetworkInterface" cxx_header = "mem/ruby/network/garnet/NetworkInterface.hh" id = Param.UInt32("ID in relation to other network interfaces") - vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet, - "virtual channels per virtual network") - virt_nets = Param.UInt32(Parent.number_of_virtual_networks, - "number of virtual networks") - garnet_deadlock_threshold = Param.UInt32(Parent.garnet_deadlock_threshold, - "network-level deadlock threshold") + vcs_per_vnet = Param.UInt32( + Parent.vcs_per_vnet, "virtual channels per virtual network" + ) + virt_nets = Param.UInt32( + Parent.number_of_virtual_networks, "number of virtual networks" + ) + 
garnet_deadlock_threshold = Param.UInt32( + Parent.garnet_deadlock_threshold, "network-level deadlock threshold" + ) + class GarnetRouter(BasicRouter): - type = 'GarnetRouter' - cxx_class = 'gem5::ruby::garnet::Router' + type = "GarnetRouter" + cxx_class = "gem5::ruby::garnet::Router" cxx_header = "mem/ruby/network/garnet/Router.hh" - vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet, - "virtual channels per virtual network") - virt_nets = Param.UInt32(Parent.number_of_virtual_networks, - "number of virtual networks") - width = Param.UInt32(Parent.ni_flit_size, - "bit width supported by the router") + vcs_per_vnet = Param.UInt32( + Parent.vcs_per_vnet, "virtual channels per virtual network" + ) + virt_nets = Param.UInt32( + Parent.number_of_virtual_networks, "number of virtual networks" + ) + width = Param.UInt32( + Parent.ni_flit_size, "bit width supported by the router" + ) diff --git a/src/mem/ruby/network/garnet/InputUnit.cc b/src/mem/ruby/network/garnet/InputUnit.cc index e8515a6db0..179bb642b7 100644 --- a/src/mem/ruby/network/garnet/InputUnit.cc +++ b/src/mem/ruby/network/garnet/InputUnit.cc @@ -151,6 +151,17 @@ InputUnit::increment_credit(int in_vc, bool free_signal, Tick curTime) m_credit_link->scheduleEventAbsolute(m_router->clockEdge(Cycles(1))); } +bool +InputUnit::functionalRead(Packet *pkt, WriteMask &mask) +{ + bool read = false; + for (auto& virtual_channel : virtualChannels) { + if (virtual_channel.functionalRead(pkt, mask)) + read = true; + } + + return read; +} uint32_t InputUnit::functionalWrite(Packet *pkt) diff --git a/src/mem/ruby/network/garnet/InputUnit.hh b/src/mem/ruby/network/garnet/InputUnit.hh index cc9bb1ac0d..4c4baeb03e 100644 --- a/src/mem/ruby/network/garnet/InputUnit.hh +++ b/src/mem/ruby/network/garnet/InputUnit.hh @@ -152,7 +152,9 @@ class InputUnit : public Consumer double get_buf_write_activity(unsigned int vnet) const { return m_num_buffer_writes[vnet]; } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t 
functionalWrite(Packet *pkt); + void resetStats(); private: diff --git a/src/mem/ruby/network/garnet/NetworkInterface.cc b/src/mem/ruby/network/garnet/NetworkInterface.cc index 1154718082..31d625c4d5 100644 --- a/src/mem/ruby/network/garnet/NetworkInterface.cc +++ b/src/mem/ruby/network/garnet/NetworkInterface.cc @@ -668,6 +668,23 @@ NetworkInterface::print(std::ostream& out) const out << "[Network Interface]"; } +bool +NetworkInterface::functionalRead(Packet *pkt, WriteMask &mask) +{ + bool read = false; + for (auto& ni_out_vc : niOutVcs) { + if (ni_out_vc.functionalRead(pkt, mask)) + read = true; + } + + for (auto &oPort: outPorts) { + if (oPort->outFlitQueue()->functionalRead(pkt, mask)) + read = true; + } + + return read; +} + uint32_t NetworkInterface::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/NetworkInterface.hh b/src/mem/ruby/network/garnet/NetworkInterface.hh index b5affa01e0..d42db5ee2a 100644 --- a/src/mem/ruby/network/garnet/NetworkInterface.hh +++ b/src/mem/ruby/network/garnet/NetworkInterface.hh @@ -79,6 +79,7 @@ class NetworkInterface : public ClockedObject, public Consumer int get_vnet(int vc); void init_net_ptr(GarnetNetwork *net_ptr) { m_net_ptr = net_ptr; } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *); void scheduleFlit(flit *t_flit); diff --git a/src/mem/ruby/network/garnet/NetworkLink.cc b/src/mem/ruby/network/garnet/NetworkLink.cc index 38a8eac468..43bb6c3ca7 100644 --- a/src/mem/ruby/network/garnet/NetworkLink.cc +++ b/src/mem/ruby/network/garnet/NetworkLink.cc @@ -119,6 +119,12 @@ NetworkLink::resetStats() m_link_utilized = 0; } +bool +NetworkLink::functionalRead(Packet *pkt, WriteMask &mask) +{ + return linkBuffer.functionalRead(pkt, mask); +} + uint32_t NetworkLink::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/NetworkLink.hh b/src/mem/ruby/network/garnet/NetworkLink.hh index 449d2bc029..b60a513540 100644 --- 
a/src/mem/ruby/network/garnet/NetworkLink.hh +++ b/src/mem/ruby/network/garnet/NetworkLink.hh @@ -81,6 +81,7 @@ class NetworkLink : public ClockedObject, public Consumer inline flit* peekLink() { return linkBuffer.peekTopFlit(); } inline flit* consumeLink() { return linkBuffer.getTopFlit(); } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *); void resetStats(); diff --git a/src/mem/ruby/network/garnet/OutputUnit.cc b/src/mem/ruby/network/garnet/OutputUnit.cc index e669c3646a..d5ad75327c 100644 --- a/src/mem/ruby/network/garnet/OutputUnit.cc +++ b/src/mem/ruby/network/garnet/OutputUnit.cc @@ -172,6 +172,12 @@ OutputUnit::insert_flit(flit *t_flit) m_out_link->scheduleEventAbsolute(m_router->clockEdge(Cycles(1))); } +bool +OutputUnit::functionalRead(Packet *pkt, WriteMask &mask) +{ + return outBuffer.functionalRead(pkt, mask); +} + uint32_t OutputUnit::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/OutputUnit.hh b/src/mem/ruby/network/garnet/OutputUnit.hh index 703c625701..b07035c43a 100644 --- a/src/mem/ruby/network/garnet/OutputUnit.hh +++ b/src/mem/ruby/network/garnet/OutputUnit.hh @@ -104,6 +104,7 @@ class OutputUnit : public Consumer return m_vc_per_vnet; } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *pkt); private: diff --git a/src/mem/ruby/network/garnet/README.txt b/src/mem/ruby/network/garnet/README.txt index 573ab60727..6d383cb628 100644 --- a/src/mem/ruby/network/garnet/README.txt +++ b/src/mem/ruby/network/garnet/README.txt @@ -76,4 +76,3 @@ instantiated, then the Network Brisge takes over the flit in HeteroGarnet. serializing or deserializing the flits * Check if CDC is enabled and schedule all the flits according to the consumers clock domain. 
- diff --git a/src/mem/ruby/network/garnet/Router.cc b/src/mem/ruby/network/garnet/Router.cc index 2e5b0fe50b..5232b91897 100644 --- a/src/mem/ruby/network/garnet/Router.cc +++ b/src/mem/ruby/network/garnet/Router.cc @@ -274,6 +274,26 @@ Router::printAggregateFaultProbability(std::ostream& out) out << aggregate_fault_prob << std::endl; } +bool +Router::functionalRead(Packet *pkt, WriteMask &mask) +{ + bool read = false; + if (crossbarSwitch.functionalRead(pkt, mask)) + read = true; + + for (uint32_t i = 0; i < m_input_unit.size(); i++) { + if (m_input_unit[i]->functionalRead(pkt, mask)) + read = true; + } + + for (uint32_t i = 0; i < m_output_unit.size(); i++) { + if (m_output_unit[i]->functionalRead(pkt, mask)) + read = true; + } + + return read; +} + uint32_t Router::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/Router.hh b/src/mem/ruby/network/garnet/Router.hh index 874f6f7ca4..dbcdda9dbf 100644 --- a/src/mem/ruby/network/garnet/Router.hh +++ b/src/mem/ruby/network/garnet/Router.hh @@ -139,6 +139,7 @@ class Router : public BasicRouter, public Consumer aggregate_fault_prob); } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *); private: diff --git a/src/mem/ruby/network/garnet/VirtualChannel.cc b/src/mem/ruby/network/garnet/VirtualChannel.cc index a473bca804..18e89a09b9 100644 --- a/src/mem/ruby/network/garnet/VirtualChannel.cc +++ b/src/mem/ruby/network/garnet/VirtualChannel.cc @@ -75,6 +75,12 @@ VirtualChannel::need_stage(flit_stage stage, Tick time) return false; } +bool +VirtualChannel::functionalRead(Packet *pkt, WriteMask &mask) +{ + return inputBuffer.functionalRead(pkt, mask); +} + uint32_t VirtualChannel::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/VirtualChannel.hh b/src/mem/ruby/network/garnet/VirtualChannel.hh index 29f3698b7b..04b046b697 100644 --- a/src/mem/ruby/network/garnet/VirtualChannel.hh +++ b/src/mem/ruby/network/garnet/VirtualChannel.hh @@ -95,6 +95,7 
@@ class VirtualChannel return inputBuffer.getTopFlit(); } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *pkt); private: diff --git a/src/mem/ruby/network/garnet/flit.cc b/src/mem/ruby/network/garnet/flit.cc index b65297c605..d31d826c93 100644 --- a/src/mem/ruby/network/garnet/flit.cc +++ b/src/mem/ruby/network/garnet/flit.cc @@ -125,6 +125,13 @@ flit::print(std::ostream& out) const out << "]"; } +bool +flit::functionalRead(Packet *pkt, WriteMask &mask) +{ + Message *msg = m_msg_ptr.get(); + return msg->functionalRead(pkt, mask); +} + bool flit::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/flit.hh b/src/mem/ruby/network/garnet/flit.hh index a84dc57aa3..a52d7416ae 100644 --- a/src/mem/ruby/network/garnet/flit.hh +++ b/src/mem/ruby/network/garnet/flit.hh @@ -107,6 +107,7 @@ class flit } } + bool functionalRead(Packet *pkt, WriteMask &mask); bool functionalWrite(Packet *pkt); virtual flit* serialize(int ser_id, int parts, uint32_t bWidth); diff --git a/src/mem/ruby/network/garnet/flitBuffer.cc b/src/mem/ruby/network/garnet/flitBuffer.cc index b6a2e0a6c7..6b3b56cfe7 100644 --- a/src/mem/ruby/network/garnet/flitBuffer.cc +++ b/src/mem/ruby/network/garnet/flitBuffer.cc @@ -85,6 +85,19 @@ flitBuffer::setMaxSize(int maximum) max_size = maximum; } +bool +flitBuffer::functionalRead(Packet *pkt, WriteMask &mask) +{ + bool read = false; + for (unsigned int i = 0; i < m_buffer.size(); ++i) { + if (m_buffer[i]->functionalRead(pkt, mask)) { + read = true; + } + } + + return read; +} + uint32_t flitBuffer::functionalWrite(Packet *pkt) { diff --git a/src/mem/ruby/network/garnet/flitBuffer.hh b/src/mem/ruby/network/garnet/flitBuffer.hh index d37f9a5141..089c931951 100644 --- a/src/mem/ruby/network/garnet/flitBuffer.hh +++ b/src/mem/ruby/network/garnet/flitBuffer.hh @@ -80,6 +80,7 @@ class flitBuffer m_buffer.push_back(flt); } + bool functionalRead(Packet *pkt, WriteMask &mask); uint32_t functionalWrite(Packet *pkt); 
private: diff --git a/src/mem/ruby/network/simple/SimpleLink.py b/src/mem/ruby/network/simple/SimpleLink.py index 04975945d0..0d3f20698d 100644 --- a/src/mem/ruby/network/simple/SimpleLink.py +++ b/src/mem/ruby/network/simple/SimpleLink.py @@ -43,15 +43,17 @@ from m5.SimObject import SimObject from m5.objects.BasicLink import BasicIntLink, BasicExtLink from m5.objects.MessageBuffer import MessageBuffer + class SimpleExtLink(BasicExtLink): - type = 'SimpleExtLink' + type = "SimpleExtLink" cxx_header = "mem/ruby/network/simple/SimpleLink.hh" - cxx_class = 'gem5::ruby::SimpleExtLink' + cxx_class = "gem5::ruby::SimpleExtLink" + class SimpleIntLink(BasicIntLink): - type = 'SimpleIntLink' + type = "SimpleIntLink" cxx_header = "mem/ruby/network/simple/SimpleLink.hh" - cxx_class = 'gem5::ruby::SimpleIntLink' + cxx_class = "gem5::ruby::SimpleIntLink" # Buffers for this internal link. # One buffer is allocated per vnet when setup_buffers is called. @@ -61,14 +63,16 @@ class SimpleIntLink(BasicIntLink): def setup_buffers(self, network): if len(self.buffers) > 0: - fatal("User should not manually set links' \ - in_buffers or out_buffers") + fatal( + "User should not manually set links' \ + in_buffers or out_buffers" + ) # The network needs number_of_virtual_networks buffers per # in and out port buffers = [] for i in range(int(network.number_of_virtual_networks)): - buffers.append(MessageBuffer(ordered = True)) + buffers.append(MessageBuffer(ordered=True)) # If physical_vnets_channels is set we adjust the buffer sizes and # the max_dequeue_rate in order to achieve the expected thoughput @@ -80,12 +84,15 @@ class SimpleIntLink(BasicIntLink): # for a 1 cy enqueue latency, 2 entries are needed. For any latency, # the size should be at least latency+1. 
if len(network.physical_vnets_channels) != 0: - assert(len(network.physical_vnets_channels) == \ - int(network.number_of_virtual_networks)) + assert len(network.physical_vnets_channels) == int( + network.number_of_virtual_networks + ) for i in range(int(network.number_of_virtual_networks)): - buffers[i].buffer_size = \ - network.physical_vnets_channels[i] * (self.latency + 1) - buffers[i].max_dequeue_rate = \ - network.physical_vnets_channels[i] + buffers[i].buffer_size = network.physical_vnets_channels[i] * ( + self.latency + 1 + ) + buffers[i].max_dequeue_rate = network.physical_vnets_channels[ + i + ] self.buffers = buffers diff --git a/src/mem/ruby/network/simple/SimpleNetwork.py b/src/mem/ruby/network/simple/SimpleNetwork.py index 42484a144a..aa553deee3 100644 --- a/src/mem/ruby/network/simple/SimpleNetwork.py +++ b/src/mem/ruby/network/simple/SimpleNetwork.py @@ -45,23 +45,31 @@ from m5.objects.Network import RubyNetwork from m5.objects.BasicRouter import BasicRouter from m5.objects.MessageBuffer import MessageBuffer -class SimpleNetwork(RubyNetwork): - type = 'SimpleNetwork' - cxx_header = "mem/ruby/network/simple/SimpleNetwork.hh" - cxx_class = 'gem5::ruby::SimpleNetwork' - buffer_size = Param.Int(0, "default internal buffer size for links and\ - routers; 0 indicates infinite buffering") +class SimpleNetwork(RubyNetwork): + type = "SimpleNetwork" + cxx_header = "mem/ruby/network/simple/SimpleNetwork.hh" + cxx_class = "gem5::ruby::SimpleNetwork" + + buffer_size = Param.Int( + 0, + "default internal buffer size for links and\ + routers; 0 indicates infinite buffering", + ) endpoint_bandwidth = Param.Int(1000, "bandwidth adjustment factor") - physical_vnets_channels = VectorParam.Int([], + physical_vnets_channels = VectorParam.Int( + [], "Set to emulate multiple channels for each vnet." 
- "If not set, all vnets share the same physical channel.") + "If not set, all vnets share the same physical channel.", + ) - physical_vnets_bandwidth = VectorParam.Int([], + physical_vnets_bandwidth = VectorParam.Int( + [], "Assign a different link bandwidth factor for each vnet channels." "Only valid when physical_vnets_channels is set. This overrides the" - "bandwidth_factor parameter set for the individual links.") + "bandwidth_factor parameter set for the individual links.", + ) def setup_buffers(self): # Setup internal buffers for links and routers @@ -72,37 +80,42 @@ class SimpleNetwork(RubyNetwork): class BaseRoutingUnit(SimObject): - type = 'BaseRoutingUnit' + type = "BaseRoutingUnit" abstract = True - cxx_header = 'mem/ruby/network/simple/routing/BaseRoutingUnit.hh' - cxx_class = 'gem5::ruby::BaseRoutingUnit' + cxx_header = "mem/ruby/network/simple/routing/BaseRoutingUnit.hh" + cxx_class = "gem5::ruby::BaseRoutingUnit" class WeightBased(BaseRoutingUnit): - type = 'WeightBased' - cxx_header = 'mem/ruby/network/simple/routing/WeightBased.hh' - cxx_class = 'gem5::ruby::WeightBased' + type = "WeightBased" + cxx_header = "mem/ruby/network/simple/routing/WeightBased.hh" + cxx_class = "gem5::ruby::WeightBased" adaptive_routing = Param.Bool(False, "enable adaptive routing") class SwitchPortBuffer(MessageBuffer): """MessageBuffer type used internally by the Switch port buffers""" + ordered = True allow_zero_latency = True + class Switch(BasicRouter): - type = 'Switch' - cxx_header = 'mem/ruby/network/simple/Switch.hh' - cxx_class = 'gem5::ruby::Switch' + type = "Switch" + cxx_header = "mem/ruby/network/simple/Switch.hh" + cxx_class = "gem5::ruby::Switch" - virt_nets = Param.Int(Parent.number_of_virtual_networks, - "number of virtual networks") + virt_nets = Param.Int( + Parent.number_of_virtual_networks, "number of virtual networks" + ) - int_routing_latency = Param.Cycles(BasicRouter.latency, - "Routing latency to internal links") - ext_routing_latency = 
Param.Cycles(BasicRouter.latency, - "Routing latency to external links") + int_routing_latency = Param.Cycles( + BasicRouter.latency, "Routing latency to internal links" + ) + ext_routing_latency = Param.Cycles( + BasicRouter.latency, "Routing latency to external links" + ) # Internal port buffers used between the PerfectSwitch and # Throttle objects. There is one buffer per virtual network @@ -112,8 +125,8 @@ class Switch(BasicRouter): port_buffers = VectorParam.MessageBuffer([], "Port buffers") routing_unit = Param.BaseRoutingUnit( - WeightBased(adaptive_routing = False), - "Routing strategy to be used") + WeightBased(adaptive_routing=False), "Routing strategy to be used" + ) def setup_buffers(self, network): def vnet_buffer_size(vnet): @@ -126,8 +139,9 @@ class Switch(BasicRouter): if len(network.physical_vnets_channels) == 0: return network.buffer_size else: - return network.buffer_size * \ - network.physical_vnets_channels[vnet] + return ( + network.buffer_size * network.physical_vnets_channels[vnet] + ) if len(self.port_buffers) > 0: fatal("User should not manually set routers' port_buffers") @@ -138,15 +152,17 @@ class Switch(BasicRouter): for link in network.int_links: if link.dst_node == self: for i in range(int(network.number_of_virtual_networks)): - router_buffers.append(SwitchPortBuffer( - buffer_size = vnet_buffer_size(i))) + router_buffers.append( + SwitchPortBuffer(buffer_size=vnet_buffer_size(i)) + ) # Add message buffers to routers for each external link connection for link in network.ext_links: # Routers can only be int_nodes on ext_links if link.int_node == self: for i in range(int(network.number_of_virtual_networks)): - router_buffers.append(SwitchPortBuffer( - buffer_size = vnet_buffer_size(i))) + router_buffers.append( + SwitchPortBuffer(buffer_size=vnet_buffer_size(i)) + ) self.port_buffers = router_buffers diff --git a/src/mem/ruby/protocol/GPU_VIPER-SQC.sm b/src/mem/ruby/protocol/GPU_VIPER-SQC.sm index dea347ab9b..28bddf5ba4 100644 --- 
a/src/mem/ruby/protocol/GPU_VIPER-SQC.sm +++ b/src/mem/ruby/protocol/GPU_VIPER-SQC.sm @@ -277,7 +277,13 @@ machine(MachineType:SQC, "GPU SQC (L1 I Cache)") responseToSQC_in.dequeue(clockEdge()); } - action(l_loadDone, "l", desc="local load done") { + action(l_loadDoneHit, "ldh", desc="local load done (hits in SQC)") { + assert(is_valid(cache_entry)); + sequencer.readCallback(address, cache_entry.DataBlk, true, MachineType:L1Cache); + APPEND_TRANSITION_COMMENT(cache_entry.DataBlk); + } + + action(l_loadDoneMiss, "ldm", desc="local load done (misses in SQC)") { assert(is_valid(cache_entry)); sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache); APPEND_TRANSITION_COMMENT(cache_entry.DataBlk); @@ -291,28 +297,42 @@ machine(MachineType:SQC, "GPU SQC (L1 I Cache)") } } + // added for profiling + action(uu_profileDataMiss, "\udm", desc="Profile SQC demand miss"){ + L1cache.profileDemandMiss(); + } + + action(uu_profileDataHit, "\udh", desc="Profile SQC demand hit"){ + L1cache.profileDemandHit(); + } + // Transitions // transitions from base transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} { - ic_invCache + // since we're evicting something, don't bother classifying as hit/miss + ic_invCache; } transition(I, Data, V) {TagArrayRead, TagArrayWrite, DataArrayRead} { a_allocate; - w_writeCache - l_loadDone; + // don't profile this as a hit/miss since it's a reponse from L2, + // so we already counted it + w_writeCache; + l_loadDoneMiss; pr_popResponseQueue; } transition(I, Fetch) {TagArrayRead, TagArrayWrite} { nS_issueRdBlkS; + uu_profileDataMiss; // since line wasn't in SQC, we missed p_popMandatoryQueue; } // simple hit transitions transition(V, Fetch) {TagArrayRead, DataArrayRead} { - l_loadDone; + l_loadDoneHit; + uu_profileDataHit; // line was in SQC, so we hit p_popMandatoryQueue; } } diff --git a/src/mem/ruby/protocol/GPU_VIPER-TCP.sm b/src/mem/ruby/protocol/GPU_VIPER-TCP.sm index 2e8378beae..775a62b174 100644 --- 
a/src/mem/ruby/protocol/GPU_VIPER-TCP.sm +++ b/src/mem/ruby/protocol/GPU_VIPER-TCP.sm @@ -453,7 +453,16 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") responseToTCP_in.dequeue(clockEdge()); } - action(l_loadDone, "l", desc="local load done") { + action(l_loadDoneHit, "ldh", desc="local load done (hits in TCP)") { + assert(is_valid(cache_entry)); + if (use_seq_not_coal) { + sequencer.readCallback(address, cache_entry.DataBlk, true, MachineType:L1Cache); + } else { + coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk); + } + } + + action(l_loadDoneMiss, "ldm", desc="local load done (misses in TCP)") { assert(is_valid(cache_entry)); if (use_seq_not_coal) { sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache); @@ -467,7 +476,18 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") coalescer.atomicCallback(address, MachineType:L1Cache, cache_entry.DataBlk); } - action(s_storeDone, "s", desc="local store done") { + action(s_storeDoneHit, "sdh", desc="local store done (hits in TCP)") { + assert(is_valid(cache_entry)); + + if (use_seq_not_coal) { + sequencer.writeCallback(address, cache_entry.DataBlk, true, MachineType:L1Cache); + } else { + coalescer.writeCallback(address, MachineType:L1Cache, cache_entry.DataBlk); + } + cache_entry.Dirty := true; + } + + action(s_storeDoneMiss, "sdm", desc="local store done (misses in TCP)") { assert(is_valid(cache_entry)); if (use_seq_not_coal) { @@ -557,7 +577,7 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") } transition(V, Load) {TagArrayRead, DataArrayRead} { - l_loadDone; + l_loadDoneHit; mru_updateMRU; uu_profileDataHit; p_popMandatoryQueue; @@ -573,7 +593,7 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") transition(I, StoreThrough) {TagArrayRead, TagArrayWrite, DataArrayWrite} { a_allocate; dw_dirtyWrite; - s_storeDone; + s_storeDoneMiss; uu_profileDataMiss; wt_writeThrough; ic_invCache; @@ -582,7 +602,7 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") 
transition(V, StoreThrough, I) {TagArrayRead, TagArrayWrite, DataArrayWrite} { dw_dirtyWrite; - s_storeDone; + s_storeDoneHit; uu_profileDataHit; wt_writeThrough; ic_invCache; @@ -592,7 +612,7 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") transition(I, TCC_Ack, V) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} { a_allocate; w_writeCache; - l_loadDone; + l_loadDoneMiss; pr_popResponseQueue; } @@ -618,7 +638,7 @@ machine(MachineType:TCP, "GPU TCP (L1 Data Cache)") transition(V, TCC_Ack, V) {TagArrayRead, DataArrayRead, DataArrayWrite} { w_writeCache; - l_loadDone; + l_loadDoneHit; pr_popResponseQueue; } diff --git a/src/mem/ruby/protocol/MESI_Two_Level-L1cache.sm b/src/mem/ruby/protocol/MESI_Two_Level-L1cache.sm index 1a5d0e5b12..2b5935dee5 100644 --- a/src/mem/ruby/protocol/MESI_Two_Level-L1cache.sm +++ b/src/mem/ruby/protocol/MESI_Two_Level-L1cache.sm @@ -849,8 +849,8 @@ machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP") { assert(is_valid(cache_entry)); DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk); - L1Icache.setMRU(address); - L1Dcache.setMRU(address); + // Request misses and new replacement policy info is set for new entry. + // No need to setMRU on external hit. sequencer.readCallback(address, cache_entry.DataBlk, true); } @@ -867,8 +867,8 @@ machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP") { assert(is_valid(cache_entry)); DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk); - L1Icache.setMRU(address); - L1Dcache.setMRU(address); + // Request misses and new replacement policy info is set for new entry. + // No need to setMRU on external hit. 
sequencer.writeCallback(address, cache_entry.DataBlk, true); cache_entry.Dirty := true; } diff --git a/src/mem/ruby/protocol/MI_example-cache.sm b/src/mem/ruby/protocol/MI_example-cache.sm index e662a76a87..a4611abf68 100644 --- a/src/mem/ruby/protocol/MI_example-cache.sm +++ b/src/mem/ruby/protocol/MI_example-cache.sm @@ -368,7 +368,6 @@ machine(MachineType:L1Cache, "MI Example L1 Cache") peek(responseNetwork_in, ResponseMsg) { assert(is_valid(cache_entry)); DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk); - cacheMemory.setMRU(cache_entry); sequencer.readCallback(address, cache_entry.DataBlk, true, machineIDToMachineType(in_msg.Sender)); } @@ -385,7 +384,6 @@ machine(MachineType:L1Cache, "MI Example L1 Cache") peek(responseNetwork_in, ResponseMsg) { assert(is_valid(cache_entry)); DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk); - cacheMemory.setMRU(cache_entry); sequencer.writeCallback(address, cache_entry.DataBlk, true, machineIDToMachineType(in_msg.Sender)); } diff --git a/src/mem/ruby/protocol/MI_example-dma.sm b/src/mem/ruby/protocol/MI_example-dma.sm index 85d0b7f7df..01c95122f6 100644 --- a/src/mem/ruby/protocol/MI_example-dma.sm +++ b/src/mem/ruby/protocol/MI_example-dma.sm @@ -27,7 +27,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -machine(MachineType:DMA, "DMA Controller") +machine(MachineType:DMA, "DMA Controller") : DMASequencer * dma_sequencer; Cycles request_latency := 6; @@ -134,7 +134,7 @@ machine(MachineType:DMA, "DMA Controller") peek(dmaRequestQueue_in, SequencerMsg) { enqueue(requestToDir_out, DMARequestMsg, request_latency) { out_msg.PhysicalAddress := in_msg.PhysicalAddress; - out_msg.LineAddress := in_msg.LineAddress; + out_msg.LineAddress := in_msg.LineAddress; out_msg.Type := DMARequestType:READ; out_msg.Requestor := machineID; out_msg.DataBlk := in_msg.DataBlk; @@ -149,7 +149,7 @@ machine(MachineType:DMA, "DMA Controller") peek(dmaRequestQueue_in, SequencerMsg) { enqueue(requestToDir_out, DMARequestMsg, request_latency) { out_msg.PhysicalAddress := in_msg.PhysicalAddress; - out_msg.LineAddress := in_msg.LineAddress; + out_msg.LineAddress := in_msg.LineAddress; out_msg.Type := DMARequestType:WRITE; out_msg.Requestor := machineID; out_msg.DataBlk := in_msg.DataBlk; diff --git a/src/mem/ruby/protocol/MOESI_AMD_Base-CorePair.sm b/src/mem/ruby/protocol/MOESI_AMD_Base-CorePair.sm index 12270cde2f..7d1bde04dd 100644 --- a/src/mem/ruby/protocol/MOESI_AMD_Base-CorePair.sm +++ b/src/mem/ruby/protocol/MOESI_AMD_Base-CorePair.sm @@ -2978,5 +2978,3 @@ machine(MachineType:CorePair, "CP-like Core Coherence") // END TRANSITIONS } - - diff --git a/src/mem/ruby/protocol/MOESI_AMD_Base-Region-CorePair.sm b/src/mem/ruby/protocol/MOESI_AMD_Base-Region-CorePair.sm index a5e75e94e8..ae44f09510 100644 --- a/src/mem/ruby/protocol/MOESI_AMD_Base-Region-CorePair.sm +++ b/src/mem/ruby/protocol/MOESI_AMD_Base-Region-CorePair.sm @@ -3008,5 +3008,3 @@ machine(MachineType:CorePair, "CP-like Core Coherence") // END TRANSITIONS } - - diff --git a/src/mem/ruby/protocol/MOESI_AMD_Base-RegionBuffer.sm b/src/mem/ruby/protocol/MOESI_AMD_Base-RegionBuffer.sm index 3eeece229d..5987d7cf76 100644 --- a/src/mem/ruby/protocol/MOESI_AMD_Base-RegionBuffer.sm +++ b/src/mem/ruby/protocol/MOESI_AMD_Base-RegionBuffer.sm 
@@ -1371,4 +1371,3 @@ machine(MachineType:RegionBuffer, "Region Buffer for AMD_Base-like protocol") } } - diff --git a/src/mem/ruby/protocol/MOESI_AMD_Base-RegionDir.sm b/src/mem/ruby/protocol/MOESI_AMD_Base-RegionDir.sm index 1b03286411..2464e038ff 100644 --- a/src/mem/ruby/protocol/MOESI_AMD_Base-RegionDir.sm +++ b/src/mem/ruby/protocol/MOESI_AMD_Base-RegionDir.sm @@ -1180,5 +1180,3 @@ machine(MachineType:RegionDir, "Region Directory for AMD_Base-like protocol") } } - - diff --git a/src/mem/ruby/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/ruby/protocol/MOESI_CMP_token-L1cache.sm index c9fe135ae7..865fce4e3c 100644 --- a/src/mem/ruby/protocol/MOESI_CMP_token-L1cache.sm +++ b/src/mem/ruby/protocol/MOESI_CMP_token-L1cache.sm @@ -51,7 +51,7 @@ machine(MachineType:L1Cache, "Token protocol") // Message Queues // From this node's L1 cache TO the network - + // a local L1 -> this L2 bank MessageBuffer * responseFromL1Cache, network="To", virtual_network="4", vnet_type="response"; @@ -60,7 +60,7 @@ machine(MachineType:L1Cache, "Token protocol") // a local L1 -> this L2 bank, currently ordered with directory forwarded requests MessageBuffer * requestFromL1Cache, network="To", virtual_network="1", vnet_type="request"; - + // To this node's L1 cache FROM the network // a L2 bank -> this L1 diff --git a/src/mem/ruby/protocol/MOESI_hammer-dir.sm b/src/mem/ruby/protocol/MOESI_hammer-dir.sm index 8c4c63556f..8fd447fdf4 100644 --- a/src/mem/ruby/protocol/MOESI_hammer-dir.sm +++ b/src/mem/ruby/protocol/MOESI_hammer-dir.sm @@ -26,11 +26,11 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * - * AMD's contributions to the MOESI hammer protocol do not constitute an + * AMD's contributions to the MOESI hammer protocol do not constitute an * endorsement of its similarity to any AMD products. 
*/ -machine(MachineType:Directory, "AMD Hammer-like protocol") +machine(MachineType:Directory, "AMD Hammer-like protocol") : DirectoryMemory * directory; CacheMemory * probeFilter; Cycles from_memory_controller_latency := 2; @@ -44,7 +44,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") MessageBuffer * responseFromDir, network="To", virtual_network="4", vnet_type="response"; - // For a finite buffered network, note that the DMA response network only + // For a finite buffered network, note that the DMA response network only // works at this relatively lower numbered (lower priority) virtual network // because the trigger queue decouples cache responses from DMA responses. MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1", @@ -107,7 +107,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") enumeration(Event, desc="Directory events") { GETX, desc="A GETX arrives"; GETS, desc="A GETS arrives"; - PUT, desc="A PUT arrives"; + PUT, desc="A PUT arrives"; Unblock, desc="An unblock message arrives"; UnblockS, desc="An unblock message arrives"; UnblockM, desc="An unblock message arrives"; @@ -244,7 +244,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") assert(is_valid(pf_entry) == false); } } - if (state == State:E || state == State:NX || state == State:NO || state == State:S || + if (state == State:E || state == State:NX || state == State:NO || state == State:S || state == State:O) { assert(is_valid(tbe) == false); } @@ -456,17 +456,17 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") } // Actions - + action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) { if (probe_filter_enabled || full_bit_dir_enabled) { - assert(is_valid(cache_entry)); + assert(is_valid(cache_entry)); probeFilter.setMRU(address); } } action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") { if (probe_filter_enabled || full_bit_dir_enabled) { - assert(is_valid(cache_entry)); + 
assert(is_valid(cache_entry)); peek(unblockNetwork_in, ResponseMsg) { assert(cache_entry.Owner != in_msg.Sender); if (full_bit_dir_enabled) { @@ -478,7 +478,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") action(uo_updateOwnerIfPf, "uo", desc="update owner") { if (probe_filter_enabled || full_bit_dir_enabled) { - assert(is_valid(cache_entry)); + assert(is_valid(cache_entry)); peek(unblockNetwork_in, ResponseMsg) { cache_entry.Owner := in_msg.Sender; if (full_bit_dir_enabled) { @@ -637,7 +637,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") tbe.Acks := 1; } } - } + } action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") { assert(is_valid(tbe)); @@ -647,7 +647,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") } else { tbe.Acks := 1; } - } + } action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") { peek(responseToDir_in, ResponseMsg) { @@ -655,8 +655,8 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") assert(in_msg.Acks > 0); DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs); // - // Note that cache data responses will have an ack count of 2. However, - // directory DMA requests must wait for acks from all LLC caches, so + // Note that cache data responses will have an ack count of 2. However, + // directory DMA requests must wait for acks from all LLC caches, so // only decrement by 1. 
// if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) || @@ -763,7 +763,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") out_msg.LineAddress := address; out_msg.Type := DMAResponseType:DATA; // - // we send the entire data block and rely on the dma controller to + // we send the entire data block and rely on the dma controller to // split it up if need be // out_msg.DataBlk := in_msg.DataBlk; @@ -781,7 +781,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") out_msg.LineAddress := address; out_msg.Type := DMAResponseType:DATA; // - // we send the entire data block and rely on the dma controller to + // we send the entire data block and rely on the dma controller to // split it up if need be // out_msg.DataBlk := tbe.DataBlk; @@ -797,7 +797,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") out_msg.PhysicalAddress := address; out_msg.LineAddress := address; out_msg.Type := DMAResponseType:ACK; - out_msg.Destination.add(tbe.DmaRequestor); + out_msg.Destination.add(tbe.DmaRequestor); out_msg.MessageSize := MessageSizeType:Writeback_Control; } } @@ -935,7 +935,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") action(io_invalidateOwnerRequest, "io", desc="invalidate all copies") { if (machineCount(MachineType:L1Cache) > 1) { enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) { - assert(is_valid(cache_entry)); + assert(is_valid(cache_entry)); out_msg.addr := address; out_msg.Type := CoherenceRequestType:INV; out_msg.Requestor := machineID; @@ -1015,7 +1015,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") out_msg.MessageSize := MessageSizeType:Request_Control; out_msg.InitialRequestTime := zero_time(); out_msg.ForwardRequestTime := curCycle(); - } + } } } @@ -1024,7 +1024,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") if (probe_filter_enabled || full_bit_dir_enabled) { peek(requestQueue_in, RequestMsg) { enqueue(forwardNetwork_out, RequestMsg, 
from_memory_controller_latency) { - assert(is_valid(cache_entry)); + assert(is_valid(cache_entry)); out_msg.addr := address; out_msg.Type := in_msg.Type; out_msg.Requestor := in_msg.Requestor; @@ -1034,7 +1034,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := curCycle(); } - } + } } else { peek(requestQueue_in, RequestMsg) { enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) { @@ -1099,7 +1099,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") // itself // out_msg.Requestor := machineID; - out_msg.Destination.broadcast(MachineType:L1Cache); + out_msg.Destination.broadcast(MachineType:L1Cache); out_msg.MessageSize := MessageSizeType:Broadcast_Control; } } @@ -1118,7 +1118,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") // itself // out_msg.Requestor := machineID; - out_msg.Destination.broadcast(MachineType:L1Cache); + out_msg.Destination.broadcast(MachineType:L1Cache); out_msg.MessageSize := MessageSizeType:Broadcast_Control; } } @@ -1132,7 +1132,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") { peek(unblockNetwork_in, ResponseMsg) { APPEND_TRANSITION_COMMENT(in_msg.Sender); - } + } unblockNetwork_in.dequeue(clockEdge()); } @@ -1155,7 +1155,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") { peek(dmaRequestQueue_in, DMARequestMsg) { APPEND_TRANSITION_COMMENT(in_msg.Requestor); - } + } stall_and_wait(dmaRequestQueue_in, address); } @@ -1184,7 +1184,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") { if (probe_filter_enabled || full_bit_dir_enabled) { peek(requestQueue_in, RequestMsg) { - assert(is_valid(cache_entry)); + 
assert(is_valid(cache_entry)); assert(cache_entry.Owner != in_msg.Requestor); } } @@ -1263,7 +1263,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") { peek(requestQueue_in, RequestMsg) { APPEND_TRANSITION_COMMENT(in_msg.Requestor); - } + } stall_and_wait(requestQueue_in, address); } @@ -1467,7 +1467,7 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") // // note that the PUT requestor may not be the current owner if an invalidate // raced with PUT - // + // a_sendWriteBackAck; i_popIncomingRequestQueue; } @@ -1485,9 +1485,9 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") } // Blocked transient states - transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D, + transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D, NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W, - NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W}, + NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W}, {GETS, GETX, GETF, PUT, Pf_Replacement}) { z_stallAndWaitRequest; } @@ -1508,9 +1508,9 @@ machine(MachineType:Directory, "AMD Hammer-like protocol") z_stallAndWaitRequest; } - transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D, + transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D, NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W, - NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W}, + NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W}, {DMA_READ, DMA_WRITE}) { zd_stallAndWaitDMARequest; } diff --git a/src/mem/ruby/protocol/MOESI_hammer-dma.sm b/src/mem/ruby/protocol/MOESI_hammer-dma.sm index 6a4c5ace42..ed45cb4e01 100644 --- a/src/mem/ruby/protocol/MOESI_hammer-dma.sm +++ b/src/mem/ruby/protocol/MOESI_hammer-dma.sm @@ -27,7 +27,7 @@ */ -machine(MachineType:DMA, "DMA Controller") +machine(MachineType:DMA, "DMA Controller") : DMASequencer * dma_sequencer; Cycles request_latency := 6; @@ -134,7 +134,7 
@@ machine(MachineType:DMA, "DMA Controller") peek(dmaRequestQueue_in, SequencerMsg) { enqueue(requestToDir_out, DMARequestMsg, request_latency) { out_msg.PhysicalAddress := in_msg.PhysicalAddress; - out_msg.LineAddress := in_msg.LineAddress; + out_msg.LineAddress := in_msg.LineAddress; out_msg.Type := DMARequestType:READ; out_msg.Requestor := machineID; out_msg.DataBlk := in_msg.DataBlk; @@ -149,7 +149,7 @@ machine(MachineType:DMA, "DMA Controller") peek(dmaRequestQueue_in, SequencerMsg) { enqueue(requestToDir_out, DMARequestMsg, request_latency) { out_msg.PhysicalAddress := in_msg.PhysicalAddress; - out_msg.LineAddress := in_msg.LineAddress; + out_msg.LineAddress := in_msg.LineAddress; out_msg.Type := DMARequestType:WRITE; out_msg.Requestor := machineID; out_msg.DataBlk := in_msg.DataBlk; diff --git a/src/mem/ruby/protocol/MOESI_hammer-msg.sm b/src/mem/ruby/protocol/MOESI_hammer-msg.sm index 3262903861..2beaf50d56 100644 --- a/src/mem/ruby/protocol/MOESI_hammer-msg.sm +++ b/src/mem/ruby/protocol/MOESI_hammer-msg.sm @@ -25,7 +25,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * - * AMD's contributions to the MOESI hammer protocol do not constitute an + * AMD's contributions to the MOESI hammer protocol do not constitute an * endorsement of its similarity to any AMD products. */ diff --git a/src/mem/ruby/protocol/SConscript b/src/mem/ruby/protocol/SConscript index 238ce2fe12..07545c3ae0 100644 --- a/src/mem/ruby/protocol/SConscript +++ b/src/mem/ruby/protocol/SConscript @@ -118,4 +118,3 @@ for f in nodes: # for it to contain a single SimObject with the same name. 
assert(filename.endswith('_Controller.py')) SimObject(f, sim_objects=[os.path.splitext(filename)[0]]) - diff --git a/src/mem/ruby/protocol/chi/CHI-cache-actions.sm b/src/mem/ruby/protocol/chi/CHI-cache-actions.sm index 2c47ac9cbb..d18c600516 100644 --- a/src/mem/ruby/protocol/chi/CHI-cache-actions.sm +++ b/src/mem/ruby/protocol/chi/CHI-cache-actions.sm @@ -891,7 +891,11 @@ action(Initiate_CopyBack_Stale, desc="") { tbe.actions.pushNB(Event:SendCompDBIDRespStale); tbe.actions.pushNB(Event:WriteFEPipe); - tbe.actions.push(Event:FinishCopyBack_Stale); + + // eviction condition should be examined if it is the last sharer + if (tbe.dir_sharers.count() == 1) { + tbe.actions.push(Event:FinishCopyBack_Stale); + } assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != tbe.requestor)); } @@ -900,7 +904,7 @@ action(Finish_CopyBack_Stale, desc="") { // if it was the last known sharer and we don't have the data do the same // the Initiate_Evict if ((is_HN == false) && (tbe.dir_sharers.count() == 0) && - tbe.dir_sharers.isElement(tbe.requestor) && (tbe.dataValid == false)) { + (tbe.dataValid == false)) { tbe.actions.push(Event:SendEvict); } } diff --git a/src/mem/ruby/protocol/chi/CHI-cache.sm b/src/mem/ruby/protocol/chi/CHI-cache.sm index 3770382bf3..3bd8d3f3c3 100644 --- a/src/mem/ruby/protocol/chi/CHI-cache.sm +++ b/src/mem/ruby/protocol/chi/CHI-cache.sm @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 ARM Limited + * Copyright (c) 2021-2022 ARM Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -344,10 +344,10 @@ machine(MachineType:Cache, "Cache coherency protocol") : PCrdGrant_PoC_Hazard, desc=""; // Events triggered by incoming DVM messages - SnpDvmOpSync_P1; - SnpDvmOpSync_P2; - SnpDvmOpNonSync_P1; - SnpDvmOpNonSync_P2; + SnpDvmOpSync_P1, desc=""; + SnpDvmOpSync_P2, desc=""; + SnpDvmOpNonSync_P1, desc=""; + SnpDvmOpNonSync_P2, desc=""; // Events triggered by incoming data response messages // See CHIDataType in 
CHi-msg.sm for descriptions diff --git a/src/mem/ruby/protocol/chi/CHI-dvm-misc-node-transitions.sm b/src/mem/ruby/protocol/chi/CHI-dvm-misc-node-transitions.sm index 24d524b70d..8a00d78d8c 100644 --- a/src/mem/ruby/protocol/chi/CHI-dvm-misc-node-transitions.sm +++ b/src/mem/ruby/protocol/chi/CHI-dvm-misc-node-transitions.sm @@ -181,4 +181,4 @@ transition(DvmOp_Complete, Final, Unallocated) { Pop_TriggerQueue; // "Final" event is applied from the trigger queue Finalize_DeallocateRequest; // Deallocate the DVM TBE -} \ No newline at end of file +} diff --git a/src/mem/ruby/protocol/chi/CHI-dvm-misc-node.sm b/src/mem/ruby/protocol/chi/CHI-dvm-misc-node.sm index ba38c65856..92a04ed3d2 100644 --- a/src/mem/ruby/protocol/chi/CHI-dvm-misc-node.sm +++ b/src/mem/ruby/protocol/chi/CHI-dvm-misc-node.sm @@ -162,8 +162,8 @@ machine(MachineType:MiscNode, "CHI Misc Node for handling and distrbuting DVM op AllocRequest, desc="Allocates a TBE for a request. Triggers a retry if table is full"; AllocRequestWithCredit, desc="Allocates a TBE for a request. Always succeeds. Used when a client is retrying after being denied."; - SnpResp_I; - NCBWrData; + SnpResp_I, desc=""; + NCBWrData, desc=""; // Retry handling SendRetryAck, desc="Send RetryAck"; @@ -182,9 +182,9 @@ machine(MachineType:MiscNode, "CHI Misc Node for handling and distrbuting DVM op // any queued action and is not expecting responses/data. 
The transaction // is finalized and the next stable state is stored in the cache/directory // See the processNextState and makeFinalState functions - Final; + Final, desc=""; - null; + null, desc=""; } //////////////////////////////////////////////////////////////////////////// diff --git a/src/mem/ruby/protocol/chi/CHI-mem.sm b/src/mem/ruby/protocol/chi/CHI-mem.sm index d7a50fd50b..820f2dfcf4 100644 --- a/src/mem/ruby/protocol/chi/CHI-mem.sm +++ b/src/mem/ruby/protocol/chi/CHI-mem.sm @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 ARM Limited + * Copyright (c) 2021,2022 ARM Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -119,6 +119,7 @@ machine(MachineType:Memory, "Memory controller interface") : Trigger_ReceiveDone, desc=""; Trigger_SendRetry, desc=""; Trigger_SendPCrdGrant, desc=""; + Trigger_SendMemoryRead, desc=""; } @@ -551,6 +552,14 @@ machine(MachineType:Memory, "Memory controller interface") : } } + action(triggerSendMemoryRead, "tsmr", desc="Trigger sendMemoryRead") { + assert(is_valid(tbe)); + enqueue(triggerOutPort, TriggerMsg, 0) { + out_msg.addr := address; + out_msg.event := Event:Trigger_SendMemoryRead; + } + } + action(sendMemoryRead, "smr", desc="Send request to memory") { assert(is_valid(tbe)); enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) { @@ -698,18 +707,23 @@ machine(MachineType:Memory, "Memory controller interface") : transition(READY, ReadNoSnp, READING_MEM) { allocateTBE; initializeFromReqTBE; - sendMemoryRead; + triggerSendMemoryRead; popReqInQueue; } transition(READY, ReadNoSnpSep, READING_MEM) { allocateTBE; initializeFromReqTBE; - sendMemoryRead; + triggerSendMemoryRead; sendReadReceipt; popReqInQueue; } + transition(READING_MEM, Trigger_SendMemoryRead) { + sendMemoryRead; + popTriggerQueue; + } + transition(READING_MEM, MemoryData, SENDING_NET_DATA) { prepareSend; sendDataAndCheck; @@ -763,7 +777,7 @@ machine(MachineType:Memory, "Memory controller interface") : // 
Notice we only use this here and call wakeUp when leaving this state transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA}, - {ReadNoSnp, ReadNoSnpSep, WriteNoSnpPtl}) { + {ReadNoSnp, ReadNoSnpSep, WriteNoSnpPtl, WriteNoSnp}) { stallRequestQueue; } diff --git a/src/mem/ruby/protocol/chi/CHI-msg.sm b/src/mem/ruby/protocol/chi/CHI-msg.sm index 0437982684..63648a5920 100644 --- a/src/mem/ruby/protocol/chi/CHI-msg.sm +++ b/src/mem/ruby/protocol/chi/CHI-msg.sm @@ -247,5 +247,3 @@ structure(CHIDataMsg, desc="", interface="Message") { return testAndWrite(addr, dataBlk, pkt); } } - - diff --git a/src/mem/ruby/protocol/chi/CHI.slicc b/src/mem/ruby/protocol/chi/CHI.slicc index 49c92882d8..cdb49ed4ac 100644 --- a/src/mem/ruby/protocol/chi/CHI.slicc +++ b/src/mem/ruby/protocol/chi/CHI.slicc @@ -4,4 +4,4 @@ include "RubySlicc_interfaces.slicc"; include "CHI-msg.sm"; include "CHI-cache.sm"; include "CHI-mem.sm"; -include "CHI-dvm-misc-node.sm"; \ No newline at end of file +include "CHI-dvm-misc-node.sm"; diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc index e11d780f66..2d13a5a9b6 100644 --- a/src/mem/ruby/slicc_interface/AbstractController.cc +++ b/src/mem/ruby/slicc_interface/AbstractController.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017,2019-2021 ARM Limited + * Copyright (c) 2017,2019-2022 ARM Limited * All rights reserved. 
* * The license below extends only to copyright in the software and shall @@ -96,15 +96,13 @@ AbstractController::init() MachineID mid = abs_cntrl->getMachineID(); const AddrRangeList &ranges = abs_cntrl->getAddrRanges(); for (const auto &addr_range : ranges) { - auto i = downstreamAddrMap.intersects(addr_range); - if (i == downstreamAddrMap.end()) { - i = downstreamAddrMap.insert(addr_range, AddrMapEntry()); + auto i = downstreamAddrMap.find(mid.getType()); + if ((i != downstreamAddrMap.end()) && + (i->second.intersects(addr_range) != i->second.end())) { + fatal("%s: %s mapped to multiple machines of the same type\n", + name(), addr_range.to_string()); } - AddrMapEntry &entry = i->second; - fatal_if(entry.count(mid.getType()) > 0, - "%s: %s mapped to multiple machines of the same type\n", - name(), addr_range.to_string()); - entry[mid.getType()] = mid; + downstreamAddrMap[mid.getType()].insert(addr_range, mid); } downstreamDestinations.add(mid); } @@ -419,23 +417,24 @@ MachineID AbstractController::mapAddressToDownstreamMachine(Addr addr, MachineType mtype) const { - const auto i = downstreamAddrMap.contains(addr); - fatal_if(i == downstreamAddrMap.end(), - "%s: couldn't find mapping for address %x\n", name(), addr); - - const AddrMapEntry &entry = i->second; - assert(!entry.empty()); - if (mtype == MachineType_NUM) { - fatal_if(entry.size() > 1, - "%s: address %x mapped to multiple machine types.\n", name(), addr); - return entry.begin()->second; - } else { - auto j = entry.find(mtype); - fatal_if(j == entry.end(), - "%s: couldn't find mapping for address %x\n", name(), addr); - return j->second; + // map to the first match + for (const auto &i : downstreamAddrMap) { + const auto mapping = i.second.contains(addr); + if (mapping != i.second.end()) + return mapping->second; + } } + else { + const auto i = downstreamAddrMap.find(mtype); + if (i != downstreamAddrMap.end()) { + const auto mapping = i->second.contains(addr); + if (mapping != i->second.end()) + return 
mapping->second; + } + } + fatal("%s: couldn't find mapping for address %x mtype=%s\n", + name(), addr, mtype); } diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh index 19cfe510e2..a5ab5c2c44 100644 --- a/src/mem/ruby/slicc_interface/AbstractController.hh +++ b/src/mem/ruby/slicc_interface/AbstractController.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017,2019-2021 ARM Limited + * Copyright (c) 2017,2019-2022 ARM Limited * All rights reserved. * * The license below extends only to copyright in the software and shall @@ -405,9 +405,8 @@ class AbstractController : public ClockedObject, public Consumer /** The address range to which the controller responds on the CPU side. */ const AddrRangeList addrRanges; - typedef std::unordered_map AddrMapEntry; - - AddrRangeMap downstreamAddrMap; + std::unordered_map> + downstreamAddrMap; NetDest downstreamDestinations; NetDest upstreamDestinations; diff --git a/src/mem/ruby/slicc_interface/Controller.py b/src/mem/ruby/slicc_interface/Controller.py index c73836d56e..185812a044 100644 --- a/src/mem/ruby/slicc_interface/Controller.py +++ b/src/mem/ruby/slicc_interface/Controller.py @@ -40,19 +40,22 @@ from m5.params import * from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class RubyController(ClockedObject): - type = 'RubyController' - cxx_class = 'gem5::ruby::AbstractController' + type = "RubyController" + cxx_class = "gem5::ruby::AbstractController" cxx_header = "mem/ruby/slicc_interface/AbstractController.hh" abstract = True version = Param.Int("") - addr_ranges = VectorParam.AddrRange([AllMemory], "Address range this " - "controller responds to") + addr_ranges = VectorParam.AddrRange( + [AllMemory], "Address range this " "controller responds to" + ) cluster_id = Param.UInt32(0, "Id of this controller's cluster") - transitions_per_cycle = \ - Param.Int(32, "no. 
of SLICC state machine transitions per cycle") + transitions_per_cycle = Param.Int( + 32, "no. of SLICC state machine transitions per cycle" + ) buffer_size = Param.UInt32(0, "max buffer size 0 means infinite") recycle_latency = Param.Cycles(10, "") @@ -63,19 +66,26 @@ class RubyController(ClockedObject): # If the latency depends on the request type or protocol-specific states, # the protocol may ignore this parameter by overriding the # mandatoryQueueLatency function - mandatory_queue_latency = \ - Param.Cycles(1, "Default latency for requests added to the " \ - "mandatory queue on top-level controllers") + mandatory_queue_latency = Param.Cycles( + 1, + "Default latency for requests added to the " + "mandatory queue on top-level controllers", + ) memory_out_port = RequestPort("Port for attaching a memory controller") - memory = DeprecatedParam(memory_out_port, "The request port for Ruby " - "memory output to the main memory is now called `memory_out_port`") + memory = DeprecatedParam( + memory_out_port, + "The request port for Ruby " + "memory output to the main memory is now called `memory_out_port`", + ) system = Param.System(Parent.any, "system object parameter") # These can be used by a protocol to enable reuse of the same machine # types to model different levels of the cache hierarchy - upstream_destinations = VectorParam.RubyController([], - "Possible destinations for requests sent towards the CPU") - downstream_destinations = VectorParam.RubyController([], - "Possible destinations for requests sent towards memory") + upstream_destinations = VectorParam.RubyController( + [], "Possible destinations for requests sent towards the CPU" + ) + downstream_destinations = VectorParam.RubyController( + [], "Possible destinations for requests sent towards memory" + ) diff --git a/src/mem/ruby/slicc_interface/RubySlicc_includes.hh b/src/mem/ruby/slicc_interface/RubySlicc_includes.hh index 3a8df919b0..f68c6c19a9 100644 --- 
a/src/mem/ruby/slicc_interface/RubySlicc_includes.hh +++ b/src/mem/ruby/slicc_interface/RubySlicc_includes.hh @@ -33,4 +33,3 @@ #include "mem/ruby/slicc_interface/RubySlicc_Util.hh" #endif // __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_INCLUDES_HH__ - diff --git a/src/mem/ruby/structures/DirectoryMemory.py b/src/mem/ruby/structures/DirectoryMemory.py index 8da381a9d7..85f05367cf 100644 --- a/src/mem/ruby/structures/DirectoryMemory.py +++ b/src/mem/ruby/structures/DirectoryMemory.py @@ -40,10 +40,12 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class RubyDirectoryMemory(SimObject): - type = 'RubyDirectoryMemory' - cxx_class = 'gem5::ruby::DirectoryMemory' + type = "RubyDirectoryMemory" + cxx_class = "gem5::ruby::DirectoryMemory" cxx_header = "mem/ruby/structures/DirectoryMemory.hh" addr_ranges = VectorParam.AddrRange( - Parent.addr_ranges, "Address range this directory responds to") + Parent.addr_ranges, "Address range this directory responds to" + ) diff --git a/src/mem/ruby/structures/MN_TBEStorage.hh b/src/mem/ruby/structures/MN_TBEStorage.hh index 1adbccae98..d8991d0321 100644 --- a/src/mem/ruby/structures/MN_TBEStorage.hh +++ b/src/mem/ruby/structures/MN_TBEStorage.hh @@ -64,7 +64,7 @@ template class MN_TBEStorage { public: - MN_TBEStorage(Stats::Group *parent, + MN_TBEStorage(statistics::Group *parent, std::initializer_list _partitions) : m_stats(parent), partitions(_partitions) diff --git a/src/mem/ruby/structures/RubyCache.py b/src/mem/ruby/structures/RubyCache.py index 859a64b273..f2c1b7230c 100644 --- a/src/mem/ruby/structures/RubyCache.py +++ b/src/mem/ruby/structures/RubyCache.py @@ -29,17 +29,20 @@ from m5.proxy import * from m5.objects.ReplacementPolicies import * from m5.SimObject import SimObject + class RubyCache(SimObject): - type = 'RubyCache' - cxx_class = 'gem5::ruby::CacheMemory' + type = "RubyCache" + cxx_class = "gem5::ruby::CacheMemory" cxx_header = "mem/ruby/structures/CacheMemory.hh" - size = 
Param.MemorySize("capacity in bytes"); - assoc = Param.Int(""); + size = Param.MemorySize("capacity in bytes") + assoc = Param.Int("") replacement_policy = Param.BaseReplacementPolicy(TreePLRURP(), "") - start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line"); - is_icache = Param.Bool(False, "is instruction only cache"); - block_size = Param.MemorySize("0B", "block size in bytes. 0 means default RubyBlockSize") + start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line") + is_icache = Param.Bool(False, "is instruction only cache") + block_size = Param.MemorySize( + "0B", "block size in bytes. 0 means default RubyBlockSize" + ) dataArrayBanks = Param.Int(1, "Number of banks for the data array") tagArrayBanks = Param.Int(1, "Number of banks for the tag array") diff --git a/src/mem/ruby/structures/RubyPrefetcher.py b/src/mem/ruby/structures/RubyPrefetcher.py index ade55af44f..cea6ec9604 100644 --- a/src/mem/ruby/structures/RubyPrefetcher.py +++ b/src/mem/ruby/structures/RubyPrefetcher.py @@ -42,24 +42,30 @@ from m5.proxy import * from m5.objects.System import System + class RubyPrefetcher(SimObject): - type = 'RubyPrefetcher' - cxx_class = 'gem5::ruby::RubyPrefetcher' + type = "RubyPrefetcher" + cxx_class = "gem5::ruby::RubyPrefetcher" cxx_header = "mem/ruby/structures/RubyPrefetcher.hh" - num_streams = Param.UInt32(4, - "Number of prefetch streams to be allocated") - unit_filter = Param.UInt32(8, - "Number of entries in the unit filter array") - nonunit_filter = Param.UInt32(8, - "Number of entries in the non-unit filter array") + num_streams = Param.UInt32(4, "Number of prefetch streams to be allocated") + unit_filter = Param.UInt32(8, "Number of entries in the unit filter array") + nonunit_filter = Param.UInt32( + 8, "Number of entries in the non-unit filter array" + ) train_misses = Param.UInt32(4, "") num_startup_pfs = Param.UInt32(1, "") - cross_page = Param.Bool(False, """True if prefetched address can be on a - page different 
from the observed address""") - page_shift = Param.UInt32(12, - "Number of bits to mask to get a page number") + cross_page = Param.Bool( + False, + """True if prefetched address can be on a + page different from the observed address""", + ) + page_shift = Param.UInt32( + 12, "Number of bits to mask to get a page number" + ) + class Prefetcher(RubyPrefetcher): """DEPRECATED""" + pass diff --git a/src/mem/ruby/structures/WireBuffer.py b/src/mem/ruby/structures/WireBuffer.py index bb0ffb5042..ca67e7cb31 100644 --- a/src/mem/ruby/structures/WireBuffer.py +++ b/src/mem/ruby/structures/WireBuffer.py @@ -30,9 +30,10 @@ from m5.params import * from m5.proxy import * from m5.SimObject import SimObject + class RubyWireBuffer(SimObject): - type = 'RubyWireBuffer' - cxx_class = 'gem5::ruby::WireBuffer' + type = "RubyWireBuffer" + cxx_class = "gem5::ruby::WireBuffer" cxx_header = "mem/ruby/structures/WireBuffer.hh" ruby_system = Param.RubySystem(Parent.any, "") diff --git a/src/mem/ruby/system/GPUCoalescer.py b/src/mem/ruby/system/GPUCoalescer.py index c3c44ef5d6..da459de133 100644 --- a/src/mem/ruby/system/GPUCoalescer.py +++ b/src/mem/ruby/system/GPUCoalescer.py @@ -32,23 +32,28 @@ from m5.proxy import * from m5.objects.Sequencer import * + class RubyGPUCoalescer(RubyPort): - type = 'RubyGPUCoalescer' - abstract = True - cxx_class = 'gem5::ruby::GPUCoalescer' - cxx_header = "mem/ruby/system/GPUCoalescer.hh" + type = "RubyGPUCoalescer" + abstract = True + cxx_class = "gem5::ruby::GPUCoalescer" + cxx_header = "mem/ruby/system/GPUCoalescer.hh" - # max_outstanding_requests = (wave front slots) x (wave front size) - max_outstanding_requests = Param.Int(40*64, - "max requests (incl. prefetches) outstanding") - max_coalesces_per_cycle = Param.Int(1, "max instructions that can be " \ - "coalesced in a single cycle") + # max_outstanding_requests = (wave front slots) x (wave front size) + max_outstanding_requests = Param.Int( + 40 * 64, "max requests (incl. 
prefetches) outstanding" + ) + max_coalesces_per_cycle = Param.Int( + 1, "max instructions that can be " "coalesced in a single cycle" + ) - icache = Param.RubyCache("") - dcache = Param.RubyCache("") - deadlock_threshold = Param.Cycles(500000, - "max outstanding cycles for a request before " \ - "deadlock/livelock declared") - garnet_standalone = Param.Bool(False, "") + icache = Param.RubyCache("") + dcache = Param.RubyCache("") + deadlock_threshold = Param.Cycles( + 500000, + "max outstanding cycles for a request before " + "deadlock/livelock declared", + ) + garnet_standalone = Param.Bool(False, "") - gmTokenPort = ResponsePort("Port to the CU for sharing tokens") + gmTokenPort = ResponsePort("Port to the CU for sharing tokens") diff --git a/src/mem/ruby/system/RubySystem.py b/src/mem/ruby/system/RubySystem.py index b3d369b0da..64e39bda4c 100644 --- a/src/mem/ruby/system/RubySystem.py +++ b/src/mem/ruby/system/RubySystem.py @@ -29,25 +29,33 @@ from m5.proxy import * from m5.objects.ClockedObject import ClockedObject from m5.objects.SimpleMemory import * -class RubySystem(ClockedObject): - type = 'RubySystem' - cxx_header = "mem/ruby/system/RubySystem.hh" - cxx_class = 'gem5::ruby::RubySystem' - randomization = Param.Bool(False, +class RubySystem(ClockedObject): + type = "RubySystem" + cxx_header = "mem/ruby/system/RubySystem.hh" + cxx_class = "gem5::ruby::RubySystem" + + randomization = Param.Bool( + False, "insert random delays on message enqueue times (if True, all message \ buffers are enforced to have randomization; otherwise, a message \ - buffer set its own flag to enable/disable randomization)"); - block_size_bytes = Param.UInt32(64, - "default cache block size; must be a power of two"); - memory_size_bits = Param.UInt32(64, - "number of bits that a memory address requires"); + buffer set its own flag to enable/disable randomization)", + ) + block_size_bytes = Param.UInt32( + 64, "default cache block size; must be a power of two" + ) + memory_size_bits = 
Param.UInt32( + 64, "number of bits that a memory address requires" + ) phys_mem = Param.SimpleMemory(NULL, "") system = Param.System(Parent.any, "system object") - access_backing_store = Param.Bool(False, "Use phys_mem as the functional \ - store and only use ruby for timing.") + access_backing_store = Param.Bool( + False, + "Use phys_mem as the functional \ + store and only use ruby for timing.", + ) # Profiler related configuration variables hot_lines = Param.Bool(False, "") diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc index 601e23aed8..24439d6487 100644 --- a/src/mem/ruby/system/Sequencer.cc +++ b/src/mem/ruby/system/Sequencer.cc @@ -42,12 +42,14 @@ #include "mem/ruby/system/Sequencer.hh" #include "arch/x86/ldstflags.hh" +#include "base/compiler.hh" #include "base/logging.hh" #include "base/str.hh" #include "cpu/testers/rubytest/RubyTester.hh" #include "debug/LLSC.hh" #include "debug/MemoryAccess.hh" #include "debug/ProtocolTrace.hh" +#include "debug/RubyHitMiss.hh" #include "debug/RubySequencer.hh" #include "debug/RubyStats.hh" #include "mem/packet.hh" @@ -228,7 +230,7 @@ Sequencer::wakeup() Cycles current_time = curCycle(); // Check across all outstanding requests - int total_outstanding = 0; + GEM5_VAR_USED int total_outstanding = 0; for (const auto &table_entry : m_RequestTable) { for (const auto &seq_req : table_entry.second) { @@ -630,6 +632,10 @@ Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data, llscLoadLinked(line_addr); } + DPRINTF(RubyHitMiss, "Cache %s at %#x\n", + externalHit ? 
"miss" : "hit", + printAddress(request_address)); + // update the data unless it is a non-data-carrying flush if (RubySystem::getWarmupEnabled()) { data.setData(pkt); diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py index 6a475ebb74..eb1003692f 100644 --- a/src/mem/ruby/system/Sequencer.py +++ b/src/mem/ruby/system/Sequencer.py @@ -41,36 +41,45 @@ from m5.params import * from m5.proxy import * from m5.objects.ClockedObject import ClockedObject + class RubyPort(ClockedObject): - type = 'RubyPort' + type = "RubyPort" abstract = True cxx_header = "mem/ruby/system/RubyPort.hh" - cxx_class = 'gem5::ruby::RubyPort' + cxx_class = "gem5::ruby::RubyPort" version = Param.Int(0, "") - in_ports = VectorResponsePort("CPU side of this RubyPort/Sequencer. " - "The CPU request ports should be connected to this. If a CPU " - "has multiple ports (e.g., I/D ports) all of the ports for a " - "single CPU can connect to one RubyPort.") - slave = DeprecatedParam(in_ports, '`slave` is now called `in_ports`') + in_ports = VectorResponsePort( + "CPU side of this RubyPort/Sequencer. " + "The CPU request ports should be connected to this. If a CPU " + "has multiple ports (e.g., I/D ports) all of the ports for a " + "single CPU can connect to one RubyPort." + ) + slave = DeprecatedParam(in_ports, "`slave` is now called `in_ports`") - interrupt_out_port = VectorRequestPort("Port to connect to x86 interrupt " - "controller to send the CPU requests from outside.") - master = DeprecatedParam(interrupt_out_port, - '`master` is now called `interrupt_out_port`') + interrupt_out_port = VectorRequestPort( + "Port to connect to x86 interrupt " + "controller to send the CPU requests from outside." 
+ ) + master = DeprecatedParam( + interrupt_out_port, "`master` is now called `interrupt_out_port`" + ) pio_request_port = RequestPort("Ruby pio request port") - pio_master_port = DeprecatedParam(pio_request_port, - '`pio_master_port` is now called `pio_request_port`') + pio_master_port = DeprecatedParam( + pio_request_port, "`pio_master_port` is now called `pio_request_port`" + ) mem_request_port = RequestPort("Ruby mem request port") - mem_master_port = DeprecatedParam(mem_request_port, - '`mem_master_port` is now called `mem_request_port`') + mem_master_port = DeprecatedParam( + mem_request_port, "`mem_master_port` is now called `mem_request_port`" + ) pio_response_port = ResponsePort("Ruby pio response port") - pio_slave_port = DeprecatedParam(pio_response_port, - '`pio_slave_port` is now called `pio_response_port`') + pio_slave_port = DeprecatedParam( + pio_response_port, "`pio_slave_port` is now called `pio_response_port`" + ) using_ruby_tester = Param.Bool(False, "") no_retry_on_stall = Param.Bool(False, "") @@ -80,23 +89,28 @@ class RubyPort(ClockedObject): support_inst_reqs = Param.Bool(True, "inst cache requests supported") is_cpu_sequencer = Param.Bool(True, "connected to a cpu") + class RubyPortProxy(RubyPort): - type = 'RubyPortProxy' + type = "RubyPortProxy" cxx_header = "mem/ruby/system/RubyPortProxy.hh" - cxx_class = 'gem5::ruby::RubyPortProxy' + cxx_class = "gem5::ruby::RubyPortProxy" + class RubySequencer(RubyPort): - type = 'RubySequencer' - cxx_class = 'gem5::ruby::Sequencer' + type = "RubySequencer" + cxx_class = "gem5::ruby::Sequencer" cxx_header = "mem/ruby/system/Sequencer.hh" dcache = Param.RubyCache("") - max_outstanding_requests = Param.Int(16, - "max requests (incl. prefetches) outstanding") - deadlock_threshold = Param.Cycles(500000, + max_outstanding_requests = Param.Int( + 16, "max requests (incl. 
prefetches) outstanding" + ) + deadlock_threshold = Param.Cycles( + 500000, "max outstanding cycles for a request " - "before deadlock/livelock declared") + "before deadlock/livelock declared", + ) garnet_standalone = Param.Bool(False, "") # id used by protocols that support multiple sequencers per controller # 99 is the dummy default value @@ -110,10 +124,12 @@ class RubySequencer(RubyPort): objects should use connectInstPort and connectDataPort. """ import m5.objects - assert(isinstance(cpu, m5.objects.BaseCPU)) + + assert isinstance(cpu, m5.objects.BaseCPU) # this connects all cpu mem-side ports to self.in_ports cpu.connectAllPorts( - self.in_ports, self.in_ports, self.interrupt_out_port) + self.in_ports, self.in_ports, self.interrupt_out_port + ) def connectIOPorts(self, piobus): """ @@ -127,14 +143,16 @@ class RubySequencer(RubyPort): self.mem_request_port = piobus.cpu_side_ports self.pio_response_port = piobus.mem_side_ports + class RubyHTMSequencer(RubySequencer): - type = 'RubyHTMSequencer' - cxx_class = 'gem5::ruby::HTMSequencer' + type = "RubyHTMSequencer" + cxx_class = "gem5::ruby::HTMSequencer" cxx_header = "mem/ruby/system/HTMSequencer.hh" + class DMASequencer(RubyPort): - type = 'DMASequencer' + type = "DMASequencer" cxx_header = "mem/ruby/system/DMASequencer.hh" - cxx_class = 'gem5::ruby::DMASequencer' + cxx_class = "gem5::ruby::DMASequencer" max_outstanding_requests = Param.Int(64, "max outstanding requests") diff --git a/src/mem/ruby/system/VIPERCoalescer.py b/src/mem/ruby/system/VIPERCoalescer.py index 311ce94a2c..af43cddb2c 100644 --- a/src/mem/ruby/system/VIPERCoalescer.py +++ b/src/mem/ruby/system/VIPERCoalescer.py @@ -31,9 +31,10 @@ from m5.params import * from m5.proxy import * from m5.objects.GPUCoalescer import * + class VIPERCoalescer(RubyGPUCoalescer): - type = 'VIPERCoalescer' - cxx_class = 'gem5::ruby::VIPERCoalescer' + type = "VIPERCoalescer" + cxx_class = "gem5::ruby::VIPERCoalescer" cxx_header = "mem/ruby/system/VIPERCoalescer.hh" 
max_inv_per_cycle = Param.Int(32, "max invalidations per cycle") diff --git a/src/mem/shared_memory_server.cc b/src/mem/shared_memory_server.cc index 24dd9f6c88..bee663bd37 100644 --- a/src/mem/shared_memory_server.cc +++ b/src/mem/shared_memory_server.cc @@ -56,47 +56,50 @@ SharedMemoryServer::SharedMemoryServer(const SharedMemoryServerParams& params) system(params.system), serverFd(-1) { fatal_if(system == nullptr, "Requires a system to share memory from!"); - // Ensure the unix socket path to use is not occupied. Also, if there's - // actually anything to be removed, warn the user something might be off. - if (unlink(unixSocketPath.c_str()) == 0) { - warn( - "The server path %s was occupied and will be replaced. Please " - "make sure there is no other server using the same path.", - unixSocketPath.c_str()); - } // Create a new unix socket. serverFd = ListenSocket::socketCloexec(AF_UNIX, SOCK_STREAM, 0); - panic_if(serverFd < 0, "%s: cannot create unix socket: %s", name().c_str(), + panic_if(serverFd < 0, "%s: cannot create unix socket: %s", name(), strerror(errno)); // Bind to the specified path. sockaddr_un serv_addr = {}; serv_addr.sun_family = AF_UNIX; strncpy(serv_addr.sun_path, unixSocketPath.c_str(), sizeof(serv_addr.sun_path) - 1); - warn_if(strlen(serv_addr.sun_path) != unixSocketPath.size(), - "%s: unix socket path truncated, expect '%s' but get '%s'", - name().c_str(), unixSocketPath.c_str(), serv_addr.sun_path); + // If the target path is truncated, warn the user that the actual path is + // different and update the target path. + if (strlen(serv_addr.sun_path) != unixSocketPath.size()) { + warn("%s: unix socket path truncated, expect '%s' but get '%s'", + name(), unixSocketPath, serv_addr.sun_path); + unixSocketPath = serv_addr.sun_path; + } + // Ensure the unix socket path to use is not occupied. Also, if there's + // actually anything to be removed, warn the user something might be off. 
+ bool old_sock_removed = unlink(unixSocketPath.c_str()) == 0; + warn_if(old_sock_removed, + "%s: the server path %s was occupied and will be replaced. Please " + "make sure there is no other server using the same path.", + name(), unixSocketPath); int bind_retv = bind(serverFd, reinterpret_cast(&serv_addr), sizeof(serv_addr)); - fatal_if(bind_retv != 0, "%s: cannot bind unix socket: %s", name().c_str(), + fatal_if(bind_retv != 0, "%s: cannot bind unix socket: %s", name(), strerror(errno)); // Start listening. int listen_retv = listen(serverFd, 1); - fatal_if(listen_retv != 0, "%s: listen failed: %s", name().c_str(), + fatal_if(listen_retv != 0, "%s: listen failed: %s", name(), strerror(errno)); listenSocketEvent.reset(new ListenSocketEvent(serverFd, this)); pollQueue.schedule(listenSocketEvent.get()); - inform("%s: listening at %s", name().c_str(), unixSocketPath.c_str()); + inform("%s: listening at %s", name(), unixSocketPath); } SharedMemoryServer::~SharedMemoryServer() { int unlink_retv = unlink(unixSocketPath.c_str()); - warn_if(unlink_retv != 0, "%s: cannot unlink unix socket: %s", - name().c_str(), strerror(errno)); + warn_if(unlink_retv != 0, "%s: cannot unlink unix socket: %s", name(), + strerror(errno)); int close_retv = close(serverFd); - warn_if(close_retv != 0, "%s: cannot close unix socket: %s", - name().c_str(), strerror(errno)); + warn_if(close_retv != 0, "%s: cannot close unix socket: %s", name(), + strerror(errno)); } SharedMemoryServer::BaseShmPollEvent::BaseShmPollEvent( @@ -121,7 +124,7 @@ SharedMemoryServer::BaseShmPollEvent::tryReadAll(void* buffer, size_t size) if (retv >= 0) { offset += retv; } else if (errno != EINTR) { - warn("%s: recv failed: %s", name().c_str(), strerror(errno)); + warn("%s: recv failed: %s", name(), strerror(errno)); return false; } } @@ -132,16 +135,13 @@ void SharedMemoryServer::ListenSocketEvent::process(int revents) { panic_if(revents & (POLLERR | POLLNVAL), "%s: listen socket is broken", - name().c_str()); + 
name()); int cli_fd = ListenSocket::acceptCloexec(pfd.fd, nullptr, nullptr); - panic_if(cli_fd < 0, "%s: accept failed: %s", name().c_str(), - strerror(errno)); - panic_if(shmServer->clientSocketEvent.get(), - "%s: cannot serve two clients at once", name().c_str()); - inform("%s: accept new connection %d", name().c_str(), cli_fd); - shmServer->clientSocketEvent.reset( + panic_if(cli_fd < 0, "%s: accept failed: %s", name(), strerror(errno)); + inform("%s: accept new connection %d", name(), cli_fd); + shmServer->clientSocketEvents[cli_fd].reset( new ClientSocketEvent(cli_fd, shmServer)); - pollQueue.schedule(shmServer->clientSocketEvent.get()); + pollQueue.schedule(shmServer->clientSocketEvents[cli_fd].get()); } void @@ -165,7 +165,7 @@ SharedMemoryServer::ClientSocketEvent::process(int revents) break; } if (req_type != RequestType::kGetPhysRange) { - warn("%s: receive unknown request: %d", name().c_str(), + warn("%s: receive unknown request: %d", name(), static_cast(req_type)); break; } @@ -173,8 +173,7 @@ SharedMemoryServer::ClientSocketEvent::process(int revents) break; } AddrRange range(request.start, request.end); - inform("%s: receive request: %s", name().c_str(), - range.to_string().c_str()); + inform("%s: receive request: %s", name(), range.to_string()); // Identify the backing store. 
const auto& stores = shmServer->system->getPhysMem().getBackingStore(); @@ -183,13 +182,12 @@ SharedMemoryServer::ClientSocketEvent::process(int revents) return entry.shmFd >= 0 && range.isSubset(entry.range); }); if (it == stores.end()) { - warn("%s: cannot find backing store for %s", name().c_str(), - range.to_string().c_str()); + warn("%s: cannot find backing store for %s", name(), + range.to_string()); break; } inform("%s: find shared backing store for %s at %s, shm=%d:%lld", - name().c_str(), range.to_string().c_str(), - it->range.to_string().c_str(), it->shmFd, + name(), range.to_string(), it->range.to_string(), it->shmFd, (unsigned long long)it->shmOffset); // Populate response message. @@ -224,24 +222,24 @@ SharedMemoryServer::ClientSocketEvent::process(int revents) // Send the response. int retv = sendmsg(pfd.fd, &msg, 0); if (retv < 0) { - warn("%s: sendmsg failed: %s", name().c_str(), strerror(errno)); + warn("%s: sendmsg failed: %s", name(), strerror(errno)); break; } if (retv != sizeof(response)) { - warn("%s: failed to send all response at once", name().c_str()); + warn("%s: failed to send all response at once", name()); break; } // Request done. - inform("%s: request done", name().c_str()); + inform("%s: request done", name()); return; } while (false); // If we ever reach here, our client either close the connection or is // somehow broken. We'll just close the connection and move on. 
- inform("%s: closing connection", name().c_str()); + inform("%s: closing connection", name()); close(pfd.fd); - shmServer->clientSocketEvent.reset(); + shmServer->clientSocketEvents.erase(pfd.fd); } } // namespace memory diff --git a/src/mem/shared_memory_server.hh b/src/mem/shared_memory_server.hh index 9102d74571..8f573fef3b 100644 --- a/src/mem/shared_memory_server.hh +++ b/src/mem/shared_memory_server.hh @@ -30,6 +30,7 @@ #include #include +#include #include "base/pollevent.hh" #include "params/SharedMemoryServer.hh" @@ -86,7 +87,8 @@ class SharedMemoryServer : public SimObject int serverFd; std::unique_ptr listenSocketEvent; - std::unique_ptr clientSocketEvent; + std::unordered_map> + clientSocketEvents; }; } // namespace memory diff --git a/src/mem/slicc/ast/AST.py b/src/mem/slicc/ast/AST.py index 0db4a52c3b..ff8e3326ad 100644 --- a/src/mem/slicc/ast/AST.py +++ b/src/mem/slicc/ast/AST.py @@ -27,6 +27,7 @@ from slicc.util import PairContainer, Location + class AST(PairContainer): def __init__(self, slicc, pairs=None): self.slicc = slicc @@ -53,7 +54,9 @@ class AST(PairContainer): if args: message = message % args code = self.slicc.codeFormatter() - code(''' + code( + """ panic("Runtime Error at ${{self.location}}: %s.\\n", $message); -''') +""" + ) return code diff --git a/src/mem/slicc/ast/ActionDeclAST.py b/src/mem/slicc/ast/ActionDeclAST.py index 22a782dd3e..21b6e3a2f7 100644 --- a/src/mem/slicc/ast/ActionDeclAST.py +++ b/src/mem/slicc/ast/ActionDeclAST.py @@ -28,6 +28,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import Action, Type, Var + class ActionDeclAST(DeclAST): def __init__(self, slicc, ident, pairs, statement_list): super().__init__(slicc, pairs) @@ -53,18 +54,36 @@ class ActionDeclAST(DeclAST): if addr_type is None: self.error("Type 'Addr' not declared.") - var = Var(self.symtab, "address", self.location, addr_type, - "addr", self.pairs) + var = Var( + self.symtab, + "address", + self.location, + addr_type, + "addr", + 
self.pairs, + ) self.symtab.newSymbol(var) if machine.TBEType != None: - var = Var(self.symtab, "tbe", self.location, machine.TBEType, - "m_tbe_ptr", self.pairs) + var = Var( + self.symtab, + "tbe", + self.location, + machine.TBEType, + "m_tbe_ptr", + self.pairs, + ) self.symtab.newSymbol(var) if machine.EntryType != None: - var = Var(self.symtab, "cache_entry", self.location, - machine.EntryType, "m_cache_entry_ptr", self.pairs) + var = Var( + self.symtab, + "cache_entry", + self.location, + machine.EntryType, + "m_cache_entry_ptr", + self.pairs, + ) self.symtab.newSymbol(var) # Do not allows returns in actions @@ -76,6 +95,7 @@ class ActionDeclAST(DeclAST): self.symtab.popFrame() - action = Action(self.symtab, self.ident, resources, self.location, - self.pairs) + action = Action( + self.symtab, self.ident, resources, self.location, self.pairs + ) machine.addAction(action) diff --git a/src/mem/slicc/ast/AssignStatementAST.py b/src/mem/slicc/ast/AssignStatementAST.py index 89bafbd701..d1f5f5105a 100644 --- a/src/mem/slicc/ast/AssignStatementAST.py +++ b/src/mem/slicc/ast/AssignStatementAST.py @@ -27,6 +27,7 @@ from slicc.ast.StatementAST import StatementAST + class AssignStatementAST(StatementAST): def __init__(self, slicc, lvalue, rvalue): super().__init__(slicc) @@ -45,12 +46,16 @@ class AssignStatementAST(StatementAST): code("$lcode = $rcode;") - if not (ltype == rtype or (ltype.isInterface and ltype['interface'] == rtype.ident)): + if not ( + ltype == rtype + or (ltype.isInterface and ltype["interface"] == rtype.ident) + ): # FIXME - beckmann # the following if statement is a hack to allow NetDest objects to # be assigned to Sets this allows for the previous Message # Destination 'Set class' to migrate to the new Message Destination # 'NetDest class' if str(ltype) != "NetDest" and str(rtype) != "Set": - self.error("Assignment type mismatch '%s' and '%s'", - ltype, rtype) + self.error( + "Assignment type mismatch '%s' and '%s'", ltype, rtype + ) diff --git 
a/src/mem/slicc/ast/CheckAllocateStatementAST.py b/src/mem/slicc/ast/CheckAllocateStatementAST.py index 8bbe906fea..83325df7f0 100644 --- a/src/mem/slicc/ast/CheckAllocateStatementAST.py +++ b/src/mem/slicc/ast/CheckAllocateStatementAST.py @@ -27,6 +27,7 @@ from slicc.ast.StatementAST import StatementAST + class CheckAllocateStatementAST(StatementAST): def __init__(self, slicc, variable): super().__init__(slicc) diff --git a/src/mem/slicc/ast/CheckNextCycleAST.py b/src/mem/slicc/ast/CheckNextCycleAST.py index a4db8c4a26..642cc14eaf 100644 --- a/src/mem/slicc/ast/CheckNextCycleAST.py +++ b/src/mem/slicc/ast/CheckNextCycleAST.py @@ -28,6 +28,7 @@ from slicc.ast.StatementAST import StatementAST + class CheckNextCycleAST(StatementAST): def __init__(self, slicc): super().__init__(slicc) diff --git a/src/mem/slicc/ast/CheckProbeStatementAST.py b/src/mem/slicc/ast/CheckProbeStatementAST.py index 995c803762..4e798ed12f 100644 --- a/src/mem/slicc/ast/CheckProbeStatementAST.py +++ b/src/mem/slicc/ast/CheckProbeStatementAST.py @@ -28,6 +28,7 @@ from slicc.ast.StatementAST import StatementAST + class CheckProbeStatementAST(StatementAST): def __init__(self, slicc, in_port, address): super().__init__(slicc) @@ -43,11 +44,13 @@ class CheckProbeStatementAST(StatementAST): in_port_code = self.in_port.var.code address_code = self.address.var.code - code(''' + code( + """ if (m_is_blocking && (m_block_map.count($address_code) == 1) && (m_block_map[$address_code] == &$in_port_code)) { $in_port_code.delayHead(clockEdge(), cyclesToTicks(Cycles(1))); continue; } - ''') + """ + ) diff --git a/src/mem/slicc/ast/DeclAST.py b/src/mem/slicc/ast/DeclAST.py index ce3f252316..6e4ef96924 100644 --- a/src/mem/slicc/ast/DeclAST.py +++ b/src/mem/slicc/ast/DeclAST.py @@ -27,8 +27,9 @@ from slicc.ast.AST import AST + class DeclAST(AST): - def __init__(self, slicc, pairs = None): + def __init__(self, slicc, pairs=None): super().__init__(slicc, pairs) def files(self, parent=None): diff --git 
a/src/mem/slicc/ast/DeclListAST.py b/src/mem/slicc/ast/DeclListAST.py index 4e1538c224..a835a04a61 100644 --- a/src/mem/slicc/ast/DeclListAST.py +++ b/src/mem/slicc/ast/DeclListAST.py @@ -27,16 +27,17 @@ from slicc.ast.AST import AST + class DeclListAST(AST): def __init__(self, slicc, decls): super().__init__(slicc) if not isinstance(decls, (list, tuple)): - decls = [ decls ] + decls = [decls] self.decls = decls def __repr__(self): - return "[DeclListAST: %s]" % (', '.join(repr(d) for d in self.decls)) + return "[DeclListAST: %s]" % (", ".join(repr(d) for d in self.decls)) def files(self, parent=None): s = set() diff --git a/src/mem/slicc/ast/DeferEnqueueingStatementAST.py b/src/mem/slicc/ast/DeferEnqueueingStatementAST.py index b71310541c..0c34113902 100644 --- a/src/mem/slicc/ast/DeferEnqueueingStatementAST.py +++ b/src/mem/slicc/ast/DeferEnqueueingStatementAST.py @@ -31,6 +31,7 @@ from slicc.ast.StatementAST import StatementAST from slicc.symbols import Var + class DeferEnqueueingStatementAST(StatementAST): def __init__(self, slicc, queue_name, type_ast, statements): super().__init__(slicc) @@ -40,8 +41,11 @@ class DeferEnqueueingStatementAST(StatementAST): self.statements = statements def __repr__(self): - return "[DeferEnqueueingStatementAst: %s %s %s]" % \ - (self.queue_name, self.type_ast.ident, self.statements) + return "[DeferEnqueueingStatementAst: %s %s %s]" % ( + self.queue_name, + self.type_ast.ident, + self.statements, + ) def generate(self, code, return_type, **kwargs): code("{") @@ -51,20 +55,30 @@ class DeferEnqueueingStatementAST(StatementAST): msg_type = self.type_ast.type # Add new local var to symbol table - v = Var(self.symtab, "out_msg", self.location, msg_type, "*out_msg", - self.pairs) + v = Var( + self.symtab, + "out_msg", + self.location, + msg_type, + "*out_msg", + self.pairs, + ) self.symtab.newSymbol(v) # Declare message - code("std::shared_ptr<${{msg_type.c_ident}}> out_msg = "\ - 
"std::make_shared<${{msg_type.c_ident}}>(clockEdge());") + code( + "std::shared_ptr<${{msg_type.c_ident}}> out_msg = " + "std::make_shared<${{msg_type.c_ident}}>(clockEdge());" + ) # The other statements t = self.statements.generate(code, None) self.queue_name.assertType("OutPort") - code("(${{self.queue_name.var.code}}).deferEnqueueingMessage(addr, "\ - "out_msg);") + code( + "(${{self.queue_name.var.code}}).deferEnqueueingMessage(addr, " + "out_msg);" + ) # End scope self.symtab.popFrame() diff --git a/src/mem/slicc/ast/EnqueueStatementAST.py b/src/mem/slicc/ast/EnqueueStatementAST.py index ea2a45f65a..148cc3a223 100644 --- a/src/mem/slicc/ast/EnqueueStatementAST.py +++ b/src/mem/slicc/ast/EnqueueStatementAST.py @@ -29,6 +29,7 @@ from slicc.ast.StatementAST import StatementAST from slicc.symbols import Var + class EnqueueStatementAST(StatementAST): def __init__(self, slicc, queue_name, type_ast, lexpr, statements): super().__init__(slicc) @@ -39,8 +40,11 @@ class EnqueueStatementAST(StatementAST): self.statements = statements def __repr__(self): - return "[EnqueueStatementAst: %s %s %s]" % \ - (self.queue_name, self.type_ast.ident, self.statements) + return "[EnqueueStatementAst: %s %s %s]" % ( + self.queue_name, + self.type_ast.ident, + self.statements, + ) def generate(self, code, return_type, **kwargs): code("{") @@ -50,13 +54,21 @@ class EnqueueStatementAST(StatementAST): msg_type = self.type_ast.type # Add new local var to symbol table - v = Var(self.symtab, "out_msg", self.location, msg_type, "*out_msg", - self.pairs) + v = Var( + self.symtab, + "out_msg", + self.location, + msg_type, + "*out_msg", + self.pairs, + ) self.symtab.newSymbol(v) # Declare message - code("std::shared_ptr<${{msg_type.c_ident}}> out_msg = "\ - "std::make_shared<${{msg_type.c_ident}}>(clockEdge());") + code( + "std::shared_ptr<${{msg_type.c_ident}}> out_msg = " + "std::make_shared<${{msg_type.c_ident}}>(clockEdge());" + ) # The other statements t = self.statements.generate(code, 
None) @@ -64,11 +76,15 @@ class EnqueueStatementAST(StatementAST): if self.latexpr != None: ret_type, rcode = self.latexpr.inline(True) - code("(${{self.queue_name.var.code}}).enqueue(" \ - "out_msg, clockEdge(), cyclesToTicks(Cycles($rcode)));") + code( + "(${{self.queue_name.var.code}}).enqueue(" + "out_msg, clockEdge(), cyclesToTicks(Cycles($rcode)));" + ) else: - code("(${{self.queue_name.var.code}}).enqueue(out_msg, "\ - "clockEdge(), cyclesToTicks(Cycles(1)));") + code( + "(${{self.queue_name.var.code}}).enqueue(out_msg, " + "clockEdge(), cyclesToTicks(Cycles(1)));" + ) # End scope self.symtab.popFrame() diff --git a/src/mem/slicc/ast/EnumDeclAST.py b/src/mem/slicc/ast/EnumDeclAST.py index 9694dd965c..5ffc8bb720 100644 --- a/src/mem/slicc/ast/EnumDeclAST.py +++ b/src/mem/slicc/ast/EnumDeclAST.py @@ -28,6 +28,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import Func, Type + class EnumDeclAST(DeclAST): def __init__(self, slicc, type_ast, pairs, fields): super().__init__(slicc, pairs) @@ -53,8 +54,9 @@ class EnumDeclAST(DeclAST): ident = str(self.type_ast) # Make the new type - t = Type(self.symtab, ident, self.location, self.pairs, - self.state_machine) + t = Type( + self.symtab, ident, self.location, self.pairs, self.state_machine + ) self.symtab.newSymbol(t) # Add all of the fields of the type to it @@ -64,9 +66,16 @@ class EnumDeclAST(DeclAST): # Add the implicit State_to_string method - FIXME, this is a bit dirty func_id = "%s_to_string" % t.c_ident - pairs = { "external" : "yes" } - func = Func(self.symtab, func_id + "_" + t.c_ident, - func_id, self.location, - self.symtab.find("std::string", Type), [ t ], [], "", - pairs) + pairs = {"external": "yes"} + func = Func( + self.symtab, + func_id + "_" + t.c_ident, + func_id, + self.location, + self.symtab.find("std::string", Type), + [t], + [], + "", + pairs, + ) self.symtab.newSymbol(func) diff --git a/src/mem/slicc/ast/EnumExprAST.py b/src/mem/slicc/ast/EnumExprAST.py index 
52e430539b..9f3aae33a3 100644 --- a/src/mem/slicc/ast/EnumExprAST.py +++ b/src/mem/slicc/ast/EnumExprAST.py @@ -27,6 +27,7 @@ from slicc.ast.ExprAST import ExprAST + class EnumExprAST(ExprAST): def __init__(self, slicc, type_ast, value): super().__init__(slicc) @@ -42,12 +43,15 @@ class EnumExprAST(ExprAST): def generate(self, code, **kwargs): fix = code.nofix() - code('${{self.type_ast.type.c_ident}}_${{self.value}}') + code("${{self.type_ast.type.c_ident}}_${{self.value}}") code.fix(fix) # Make sure the enumeration value exists if self.value not in self.type_ast.type.enums: - self.error("Type '%s' does not have enumeration '%s'", - self.type_ast, self.value) + self.error( + "Type '%s' does not have enumeration '%s'", + self.type_ast, + self.value, + ) return self.type_ast.type diff --git a/src/mem/slicc/ast/ExprAST.py b/src/mem/slicc/ast/ExprAST.py index 0b0a0ecca3..597b143e75 100644 --- a/src/mem/slicc/ast/ExprAST.py +++ b/src/mem/slicc/ast/ExprAST.py @@ -26,6 +26,7 @@ from slicc.ast.AST import AST + class ExprAST(AST): def __init__(self, slicc): super().__init__(slicc) diff --git a/src/mem/slicc/ast/ExprStatementAST.py b/src/mem/slicc/ast/ExprStatementAST.py index 037163eaf5..d26920c11c 100644 --- a/src/mem/slicc/ast/ExprStatementAST.py +++ b/src/mem/slicc/ast/ExprStatementAST.py @@ -30,6 +30,7 @@ from slicc.ast.StatementAST import StatementAST from slicc.ast.LocalVariableAST import LocalVariableAST from slicc.symbols import Type + class ExprStatementAST(StatementAST): def __init__(self, slicc, expr): super().__init__(slicc) @@ -39,15 +40,17 @@ class ExprStatementAST(StatementAST): return "[ExprStatementAST: %s]" % (self.expr) def generate(self, code, return_type, **kwargs): - actual_type,rcode = self.expr.inline(True, **kwargs) + actual_type, rcode = self.expr.inline(True, **kwargs) code("$rcode;") # The return type must be void, except for local var decls - if (not isinstance(self.expr, LocalVariableAST) and - actual_type != self.symtab.find("void", Type)): - 
self.expr.warning("Non-void return ignored, " + \ - "return type is '%s'", actual_type.ident) + if not isinstance( + self.expr, LocalVariableAST + ) and actual_type != self.symtab.find("void", Type): + self.expr.warning( + "Non-void return ignored, " + "return type is '%s'", + actual_type.ident, + ) def findResources(self, resources): self.expr.findResources(resources) - diff --git a/src/mem/slicc/ast/FormalParamAST.py b/src/mem/slicc/ast/FormalParamAST.py index 9f17101c5c..cd6cdc182c 100644 --- a/src/mem/slicc/ast/FormalParamAST.py +++ b/src/mem/slicc/ast/FormalParamAST.py @@ -40,8 +40,9 @@ from slicc.ast.AST import AST from slicc.symbols import Var + class FormalParamAST(AST): - def __init__(self, slicc, type_ast, ident, default = None, qualifier=""): + def __init__(self, slicc, type_ast, ident, default=None, qualifier=""): super().__init__(slicc) self.type_ast = type_ast self.ident = ident @@ -60,19 +61,22 @@ class FormalParamAST(AST): param = "param_%s" % self.ident # Add to symbol table - v = Var(self.symtab, self.ident, self.location, type, param, - self.pairs) + v = Var( + self.symtab, self.ident, self.location, type, param, self.pairs + ) self.symtab.newSymbol(v) # Qualifier is always a pointer for TBE table and Cache entries. # It's expected to be left unspecified or specified as ptr. qualifier = self.qualifier if str(type) == "TBE" or ( - "interface" in type and ( - type["interface"] == "AbstractCacheEntry")): - if qualifier not in ["", "PTR"] : - self.warning("Parameter \'%s\' is always pointer. " - "%s qualifier ignored" % (self.ident, qualifier)) + "interface" in type and (type["interface"] == "AbstractCacheEntry") + ): + if qualifier not in ["", "PTR"]: + self.warning( + "Parameter '%s' is always pointer. 
" + "%s qualifier ignored" % (self.ident, qualifier) + ) qualifier = "PTR" # default @@ -86,4 +90,4 @@ class FormalParamAST(AST): elif qualifier == "CONST_REF": return type, "const %s& %s" % (type.c_ident, param) else: - self.error("Invalid qualifier for param \'%s\'" % self.ident) + self.error("Invalid qualifier for param '%s'" % self.ident) diff --git a/src/mem/slicc/ast/FuncCallExprAST.py b/src/mem/slicc/ast/FuncCallExprAST.py index e7046edab2..940e78acff 100644 --- a/src/mem/slicc/ast/FuncCallExprAST.py +++ b/src/mem/slicc/ast/FuncCallExprAST.py @@ -41,6 +41,7 @@ from slicc.ast.ExprAST import ExprAST from slicc.symbols import Func, Type + class FuncCallExprAST(ExprAST): def __init__(self, slicc, proc_name, exprs): super().__init__(slicc) @@ -78,13 +79,20 @@ class FuncCallExprAST(ExprAST): str_list.append("%s" % self.exprs[i].inline()) if len(str_list) == 0: - code('DPRINTF($0, "$1: $2")', - dflag, self.exprs[0].location, format[2:format_length-2]) + code( + 'DPRINTF($0, "$1: $2")', + dflag, + self.exprs[0].location, + format[2 : format_length - 2], + ) else: - code('DPRINTF($0, "$1: $2", $3)', - dflag, - self.exprs[0].location, format[2:format_length-2], - ', '.join(str_list)) + code( + 'DPRINTF($0, "$1: $2", $3)', + dflag, + self.exprs[0].location, + format[2 : format_length - 2], + ", ".join(str_list), + ) return self.symtab.find("void", Type) @@ -97,12 +105,18 @@ class FuncCallExprAST(ExprAST): str_list.append("%s" % self.exprs[i].inline()) if len(str_list) == 0: - code('DPRINTFN("$0: $1")', - self.exprs[0].location, format[2:format_length-2]) + code( + 'DPRINTFN("$0: $1")', + self.exprs[0].location, + format[2 : format_length - 2], + ) else: - code('DPRINTFN("$0: $1", $2)', - self.exprs[0].location, format[2:format_length-2], - ', '.join(str_list)) + code( + 'DPRINTFN("$0: $1", $2)', + self.exprs[0].location, + format[2 : format_length - 2], + ", ".join(str_list), + ) return self.symtab.find("void", Type) @@ -115,7 +129,7 @@ class FuncCallExprAST(ExprAST): 
func_name_args = self.proc_name for expr in self.exprs: - actual_type,param_code = expr.inline(True) + actual_type, param_code = expr.inline(True) func_name_args += "_" + str(actual_type.ident) # Look up the function in the symbol table @@ -142,38 +156,51 @@ class FuncCallExprAST(ExprAST): # port. So as most of current protocols. if self.proc_name == "trigger": - code(''' + code( + """ { -''') +""" + ) if machine.TBEType != None and machine.EntryType != None: - code(''' + code( + """ TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[3]}}, ${{cvec[1]}}); -''') +""" + ) elif machine.TBEType != None: - code(''' + code( + """ TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[1]}}); -''') +""" + ) elif machine.EntryType != None: - code(''' + code( + """ TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[1]}}); -''') +""" + ) else: - code(''' + code( + """ TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[1]}}); -''') +""" + ) - assert('in_port' in kwargs) - in_port = kwargs['in_port'] + assert "in_port" in kwargs + in_port = kwargs["in_port"] - code(''' + code( + """ if (result == TransitionResult_Valid) { counter++; continue; // Check the first port again } else if (result == TransitionResult_ResourceStall) { -''') - if 'rsc_stall_handler' in in_port.pairs: - stall_func_name = in_port.pairs['rsc_stall_handler'] - code(''' +""" + ) + if "rsc_stall_handler" in in_port.pairs: + stall_func_name = in_port.pairs["rsc_stall_handler"] + code( + """ if (${{stall_func_name}}()) { counter++; continue; // Check the first port again @@ -181,18 +208,24 @@ class FuncCallExprAST(ExprAST): scheduleEvent(Cycles(1)); // Cannot do anything with this transition, go check next doable transition (mostly likely of next port) } -''') +""" + ) else: - code(''' + code( + """ scheduleEvent(Cycles(1)); // Cannot do anything with this transition, go check next doable transition (mostly likely of next port) -''') - 
code(''' +""" + ) + code( + """ } else if (result == TransitionResult_ProtocolStall) { -''') - if 'prot_stall_handler' in in_port.pairs: - stall_func_name = in_port.pairs['prot_stall_handler'] - code(''' +""" + ) + if "prot_stall_handler" in in_port.pairs: + stall_func_name = in_port.pairs["prot_stall_handler"] + code( + """ if (${{stall_func_name}}()) { counter++; continue; // Check the first port again @@ -200,37 +233,44 @@ class FuncCallExprAST(ExprAST): scheduleEvent(Cycles(1)); // Cannot do anything with this transition, go check next doable transition (mostly likely of next port) } -''') +""" + ) else: - code(''' + code( + """ scheduleEvent(Cycles(1)); // Cannot do anything with this transition, go check next doable transition (mostly likely of next port) -''') - code(''' +""" + ) + code( + """ } } -''') +""" + ) elif self.proc_name == "error": code("$0", self.exprs[0].embedError(cvec[0])) elif self.proc_name == "assert": error = self.exprs[0].embedError('"assert failure"') - code(''' + code( + """ #ifndef NDEBUG if (!(${{cvec[0]}})) { $error } #endif -''') +""" + ) elif self.proc_name == "set_cache_entry": - code("set_cache_entry(m_cache_entry_ptr, %s);" %(cvec[0])); + code("set_cache_entry(m_cache_entry_ptr, %s);" % (cvec[0])) elif self.proc_name == "unset_cache_entry": - code("unset_cache_entry(m_cache_entry_ptr);"); + code("unset_cache_entry(m_cache_entry_ptr);") elif self.proc_name == "set_tbe": - code("set_tbe(m_tbe_ptr, %s);" %(cvec[0])); + code("set_tbe(m_tbe_ptr, %s);" % (cvec[0])) elif self.proc_name == "unset_tbe": - code("unset_tbe(m_tbe_ptr);"); + code("unset_tbe(m_tbe_ptr);") elif self.proc_name == "stallPort": code("scheduleEvent(Cycles(1));") @@ -245,13 +285,13 @@ if (!(${{cvec[0]}})) { for (param_code, type) in zip(cvec, type_vec): if first_param: params = str(param_code) - first_param = False + first_param = False else: - params += ', ' - params += str(param_code); + params += ", " + params += str(param_code) fix = code.nofix() - 
code('(${{func.c_name}}($params))') + code("(${{func.c_name}}($params))") code.fix(fix) return func.return_type diff --git a/src/mem/slicc/ast/FuncDeclAST.py b/src/mem/slicc/ast/FuncDeclAST.py index ab2a1c6b8b..ece27e708f 100644 --- a/src/mem/slicc/ast/FuncDeclAST.py +++ b/src/mem/slicc/ast/FuncDeclAST.py @@ -28,6 +28,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import Func, Type + class FuncDeclAST(DeclAST): def __init__(self, slicc, return_type, ident, formals, pairs, statements): super().__init__(slicc, pairs) @@ -43,7 +44,7 @@ class FuncDeclAST(DeclAST): def files(self, parent=None): return set() - def generate(self, parent = None, **kwargs): + def generate(self, parent=None, **kwargs): types = [] params = [] void_type = self.symtab.find("void", Type) @@ -79,6 +80,7 @@ class FuncDeclAST(DeclAST): if parent is None: for arg in self.formals: from slicc.ast import FormalParamAST + if isinstance(arg, FormalParamAST): arg_name = arg.type_ast.ident else: @@ -86,8 +88,17 @@ class FuncDeclAST(DeclAST): func_name_args += "_" + str(arg_name) machine = self.state_machine - func = Func(self.symtab, func_name_args, self.ident, self.location, - return_type, types, params, str(body), self.pairs) + func = Func( + self.symtab, + func_name_args, + self.ident, + self.location, + return_type, + types, + params, + str(body), + self.pairs, + ) if parent is not None: if not parent.addFunc(func): diff --git a/src/mem/slicc/ast/IfStatementAST.py b/src/mem/slicc/ast/IfStatementAST.py index cbacfd3634..aba19d62ce 100644 --- a/src/mem/slicc/ast/IfStatementAST.py +++ b/src/mem/slicc/ast/IfStatementAST.py @@ -28,6 +28,7 @@ from slicc.ast.StatementAST import StatementAST from slicc.symbols import Type + class IfStatementAST(StatementAST): def __init__(self, slicc, cond, then, else_): super().__init__(slicc) @@ -47,12 +48,13 @@ class IfStatementAST(StatementAST): cond_type = self.cond.generate(cond_code) if cond_type != self.symtab.find("bool", Type): - 
self.cond.error("Condition of if stmt must be bool, type was '%s'", - cond_type) + self.cond.error( + "Condition of if stmt must be bool, type was '%s'", cond_type + ) # Conditional code.indent() - code('if ($cond_code) {') + code("if ($cond_code) {") # Then part code.indent() self.symtab.pushFrame() @@ -61,13 +63,13 @@ class IfStatementAST(StatementAST): code.dedent() # Else part if self.else_: - code('} else {') + code("} else {") code.indent() self.symtab.pushFrame() self.else_.generate(code, return_type, **kwargs) self.symtab.popFrame() code.dedent() - code('}') # End scope + code("}") # End scope def findResources(self, resources): # Take a worse case look at both paths diff --git a/src/mem/slicc/ast/InPortDeclAST.py b/src/mem/slicc/ast/InPortDeclAST.py index 362a75dec3..c8b99a4710 100644 --- a/src/mem/slicc/ast/InPortDeclAST.py +++ b/src/mem/slicc/ast/InPortDeclAST.py @@ -41,6 +41,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.ast.TypeAST import TypeAST from slicc.symbols import Func, Type, Var + class InPortDeclAST(DeclAST): def __init__(self, slicc, ident, msg_type, var_expr, pairs, statements): super().__init__(slicc, pairs) @@ -65,14 +66,23 @@ class InPortDeclAST(DeclAST): code = self.slicc.codeFormatter() queue_type = self.var_expr.generate(code) if not queue_type.isInPort: - self.error("The inport queue's type must have the 'inport' " + \ - "attribute. Type '%s' does not have this attribute.", - queue_type) + self.error( + "The inport queue's type must have the 'inport' " + + "attribute. 
Type '%s' does not have this attribute.", + queue_type, + ) type = self.queue_type.type self.pairs["buffer_expr"] = self.var_expr - in_port = Var(self.symtab, self.ident, self.location, type, str(code), - self.pairs, machine) + in_port = Var( + self.symtab, + self.ident, + self.location, + type, + str(code), + self.pairs, + machine, + ) symtab.newSymbol(in_port) symtab.pushFrame() @@ -97,18 +107,36 @@ class InPortDeclAST(DeclAST): param_types.append(machine.TBEType) # Add the trigger method - FIXME, this is a bit dirty - pairs = { "external" : "yes" } + pairs = {"external": "yes"} trigger_func_name = "trigger" for param in param_types: trigger_func_name += "_" + param.ident - func = Func(self.symtab, trigger_func_name, "trigger", self.location, - void_type, param_types, [], "", pairs) + func = Func( + self.symtab, + trigger_func_name, + "trigger", + self.location, + void_type, + param_types, + [], + "", + pairs, + ) symtab.newSymbol(func) # Add the stallPort method - this hacks reschedules the controller # for stalled messages that don't trigger events - func = Func(self.symtab, "stallPort", "stallPort", self.location, - void_type, [], [], "", pairs) + func = Func( + self.symtab, + "stallPort", + "stallPort", + self.location, + void_type, + [], + [], + "", + pairs, + ) symtab.newSymbol(func) param_types = [] diff --git a/src/mem/slicc/ast/IsValidPtrExprAST.py b/src/mem/slicc/ast/IsValidPtrExprAST.py index d5c4c38882..ec285dcaa6 100644 --- a/src/mem/slicc/ast/IsValidPtrExprAST.py +++ b/src/mem/slicc/ast/IsValidPtrExprAST.py @@ -29,6 +29,7 @@ from slicc.ast.ExprAST import ExprAST from slicc.symbols import Type + class IsValidPtrExprAST(ExprAST): def __init__(self, slicc, variable, flag): super().__init__(slicc) @@ -42,7 +43,7 @@ class IsValidPtrExprAST(ExprAST): # Make sure the variable is valid fix = code.nofix() code("(") - var_type, var_code = self.variable.inline(True); + var_type, var_code = self.variable.inline(True) if self.flag: code("${var_code} != NULL)") 
else: diff --git a/src/mem/slicc/ast/LiteralExprAST.py b/src/mem/slicc/ast/LiteralExprAST.py index a5ffa20591..973ac6a1c0 100644 --- a/src/mem/slicc/ast/LiteralExprAST.py +++ b/src/mem/slicc/ast/LiteralExprAST.py @@ -28,6 +28,7 @@ from slicc.ast.ExprAST import ExprAST from slicc.symbols import Type + class LiteralExprAST(ExprAST): def __init__(self, slicc, literal, type): super().__init__(slicc) @@ -42,9 +43,9 @@ class LiteralExprAST(ExprAST): if self.type == "std::string": code('("${{self.literal}}")') elif self.type == "bool": - code('(${{str(self.literal).lower()}})') + code("(${{str(self.literal).lower()}})") else: - code('(${{self.literal}})') + code("(${{self.literal}})") code.fix(fix) type = self.symtab.find(self.type, Type) diff --git a/src/mem/slicc/ast/LocalVariableAST.py b/src/mem/slicc/ast/LocalVariableAST.py index 481c9a1b6a..e08e5770a4 100644 --- a/src/mem/slicc/ast/LocalVariableAST.py +++ b/src/mem/slicc/ast/LocalVariableAST.py @@ -30,11 +30,12 @@ from slicc.ast.StatementAST import StatementAST from slicc.symbols import Var + class LocalVariableAST(StatementAST): - def __init__(self, slicc, type_ast, ident, pointer = False): + def __init__(self, slicc, type_ast, ident, pointer=False): super().__init__(slicc) self.type_ast = type_ast - self.ident = ident + self.ident = ident self.pointer = pointer def __repr__(self): @@ -53,18 +54,24 @@ class LocalVariableAST(StatementAST): return code def generate(self, code, **kwargs): - type = self.type_ast.type; - ident = "%s" % self.ident; + type = self.type_ast.type + ident = "%s" % self.ident # Add to symbol table - v = Var(self.symtab, self.ident, self.location, type, ident, - self.pairs) + v = Var( + self.symtab, self.ident, self.location, type, ident, self.pairs + ) self.symtab.newSymbol(v) - if self.pointer or str(type) == "TBE" or ( - # Check whether type is Entry by checking interface since - # entries in protocol files use AbstractCacheEntry as interfaces. 
- "interface" in type and ( - type["interface"] == "AbstractCacheEntry")): + if ( + self.pointer + or str(type) == "TBE" + or ( + # Check whether type is Entry by checking interface since + # entries in protocol files use AbstractCacheEntry as interfaces. + "interface" in type + and (type["interface"] == "AbstractCacheEntry") + ) + ): code += "%s* %s" % (type.c_ident, ident) else: code += "%s %s" % (type.c_ident, ident) diff --git a/src/mem/slicc/ast/MachineAST.py b/src/mem/slicc/ast/MachineAST.py index 7434d6bbcf..57526daa3c 100644 --- a/src/mem/slicc/ast/MachineAST.py +++ b/src/mem/slicc/ast/MachineAST.py @@ -28,6 +28,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import StateMachine, Type + class MachineAST(DeclAST): def __init__(self, slicc, mtype, pairs_ast, config_parameters, decls): super().__init__(slicc, pairs_ast) @@ -41,11 +42,15 @@ class MachineAST(DeclAST): return "[Machine: %r]" % self.ident def files(self, parent=None): - s = set(('%s_Controller.cc' % self.ident, - '%s_Controller.hh' % self.ident, - '%s_Controller.py' % self.ident, - '%s_Transitions.cc' % self.ident, - '%s_Wakeup.cc' % self.ident)) + s = set( + ( + "%s_Controller.cc" % self.ident, + "%s_Controller.hh" % self.ident, + "%s_Controller.py" % self.ident, + "%s_Transitions.cc" % self.ident, + "%s_Wakeup.cc" % self.ident, + ) + ) s |= self.decls.files(self.ident) return s @@ -55,8 +60,13 @@ class MachineAST(DeclAST): self.symtab.pushFrame() # Create a new machine - machine = StateMachine(self.symtab, self.ident, self.location, - self.pairs, self.config_parameters) + machine = StateMachine( + self.symtab, + self.ident, + self.location, + self.pairs, + self.config_parameters, + ) self.symtab.newCurrentMachine(machine) diff --git a/src/mem/slicc/ast/MemberExprAST.py b/src/mem/slicc/ast/MemberExprAST.py index a6e359175a..292c1b7899 100644 --- a/src/mem/slicc/ast/MemberExprAST.py +++ b/src/mem/slicc/ast/MemberExprAST.py @@ -27,6 +27,7 @@ from slicc.ast.ExprAST import ExprAST + 
class MemberExprAST(ExprAST): def __init__(self, slicc, expr_ast, field): super().__init__(slicc) @@ -41,13 +42,13 @@ class MemberExprAST(ExprAST): return_type, gcode = self.expr_ast.inline(True) fix = code.nofix() - # Check whether return_type is Entry by checking # interfaces since entries in protocol files use # AbstractCacheEntry as interfaces. - if str(return_type) == "TBE" \ - or ("interface" in return_type and - (return_type["interface"] == "AbstractCacheEntry")): + if str(return_type) == "TBE" or ( + "interface" in return_type + and (return_type["interface"] == "AbstractCacheEntry") + ): code("(*$gcode).m_${{self.field}}") else: code("($gcode).m_${{self.field}}") @@ -60,11 +61,13 @@ class MemberExprAST(ExprAST): return return_type.data_members[self.field].type else: if "interface" in return_type: - interface_type = self.symtab.find(return_type["interface"]); - if interface_type != None: - if self.field in interface_type.data_members: - # Return the type of the field - return interface_type.data_members[self.field].type - self.error("Invalid object field: " + - "Type '%s' does not have data member %s" % \ - (return_type, self.field)) + interface_type = self.symtab.find(return_type["interface"]) + if interface_type != None: + if self.field in interface_type.data_members: + # Return the type of the field + return interface_type.data_members[self.field].type + self.error( + "Invalid object field: " + + "Type '%s' does not have data member %s" + % (return_type, self.field) + ) diff --git a/src/mem/slicc/ast/MethodCallExprAST.py b/src/mem/slicc/ast/MethodCallExprAST.py index e08cc053d1..a4ebc67ecc 100644 --- a/src/mem/slicc/ast/MethodCallExprAST.py +++ b/src/mem/slicc/ast/MethodCallExprAST.py @@ -27,6 +27,7 @@ from slicc.ast.ExprAST import ExprAST + class MethodCallExprAST(ExprAST): def __init__(self, slicc, proc_name, expr_ast_vec): super().__init__(slicc) @@ -45,7 +46,7 @@ class MethodCallExprAST(ExprAST): # generate code params = [] for expr_ast in 
self.expr_ast_vec: - return_type,tcode = expr_ast.inline(True) + return_type, tcode = expr_ast.inline(True) params.append(str(tcode)) fix = code.nofix() code("$prefix${{self.proc_name}}(${{', '.join(params)}}))") @@ -53,8 +54,11 @@ class MethodCallExprAST(ExprAST): # Verify that this is a method of the object if methodId not in obj_type.methods: - self.error("Invalid method call: Type '%s' does not have a method '%s'", - obj_type, methodId) + self.error( + "Invalid method call: Type '%s' does not have a method '%s'", + obj_type, + methodId, + ) func = obj_type.methods[methodId] func.checkArguments(self.expr_ast_vec) @@ -65,6 +69,7 @@ class MethodCallExprAST(ExprAST): def findResources(self, resources): pass + class MemberMethodCallExprAST(MethodCallExprAST): def __init__(self, slicc, obj_expr_ast, func_call): s = super() @@ -72,9 +77,12 @@ class MemberMethodCallExprAST(MethodCallExprAST): self.obj_expr_ast = obj_expr_ast def __repr__(self): - return "[MethodCallExpr: %r%r %r]" % (self.proc_name, - self.obj_expr_ast, - self.expr_ast_vec) + return "[MethodCallExpr: %r%r %r]" % ( + self.proc_name, + self.obj_expr_ast, + self.expr_ast_vec, + ) + def generate_prefix(self, paramTypes): code = self.slicc.codeFormatter() @@ -92,69 +100,84 @@ class MemberMethodCallExprAST(MethodCallExprAST): # # Check whether the method is implemented by the super class if "interface" in obj_type: - interface_type = self.symtab.find(obj_type["interface"]); + interface_type = self.symtab.find(obj_type["interface"]) if methodId in interface_type.methods: return_type = interface_type.methods[methodId].return_type obj_type = interface_type else: - self.error("Invalid method call: " \ - "Type '%s' does not have a method %s, '%s'", - obj_type, self.proc_name, methodId) + self.error( + "Invalid method call: " + "Type '%s' does not have a method %s, '%s'", + obj_type, + self.proc_name, + methodId, + ) else: - # - # The initial method check has failed, but before generating an - # error we must 
check whether any of the paramTypes implement - # an interface. If so, we must check if the method ids using - # the inherited types exist. - # - # This code is a temporary fix and only checks for the methodId - # where all paramTypes are converted to their inherited type. The - # right way to do this is to replace slicc's simple string - # comparison for determining the correct overloaded method, with a - # more robust param by param check. - # - implemented_paramTypes = [] - for paramType in paramTypes: - implemented_paramType = paramType - if paramType.isInterface: - implements_interface = True - implemented_paramType.abstract_ident = paramType["interface"] - else: - implemented_paramType.abstract_ident = paramType.c_ident + # + # The initial method check has failed, but before generating an + # error we must check whether any of the paramTypes implement + # an interface. If so, we must check if the method ids using + # the inherited types exist. + # + # This code is a temporary fix and only checks for the methodId + # where all paramTypes are converted to their inherited type. The + # right way to do this is to replace slicc's simple string + # comparison for determining the correct overloaded method, with a + # more robust param by param check. 
+ # + implemented_paramTypes = [] + for paramType in paramTypes: + implemented_paramType = paramType + if paramType.isInterface: + implements_interface = True + implemented_paramType.abstract_ident = paramType[ + "interface" + ] + else: + implemented_paramType.abstract_ident = ( + paramType.c_ident + ) - implemented_paramTypes.append(implemented_paramType) + implemented_paramTypes.append(implemented_paramType) - implementedMethodId = "" - if implements_interface: - implementedMethodId = obj_type.methodIdAbstract( - self.proc_name, implemented_paramTypes) + implementedMethodId = "" + if implements_interface: + implementedMethodId = obj_type.methodIdAbstract( + self.proc_name, implemented_paramTypes + ) - if implementedMethodId not in obj_type.methods: - self.error("Invalid method call: Type '%s' " \ - "does not have a method %s, '%s' nor '%s'", - obj_type, self.proc_name, methodId, - implementedMethodId) + if implementedMethodId not in obj_type.methods: + self.error( + "Invalid method call: Type '%s' " + "does not have a method %s, '%s' nor '%s'", + obj_type, + self.proc_name, + methodId, + implementedMethodId, + ) - # Replace the methodId with the implementedMethodId - # found in the method list. - methodId = implementedMethodId - return_type = obj_type.methods[methodId].return_type + # Replace the methodId with the implementedMethodId + # found in the method list. + methodId = implementedMethodId + return_type = obj_type.methods[methodId].return_type # Check object type or interface of entries by checking # AbstractCacheEntry since AbstractCacheEntry is used in # protocol files. - if str(obj_type) == "AbstractCacheEntry" or \ - ("interface" in obj_type and ( - obj_type["interface"] == "AbstractCacheEntry")): + if str(obj_type) == "AbstractCacheEntry" or ( + "interface" in obj_type + and (obj_type["interface"] == "AbstractCacheEntry") + ): prefix = "%s((*(%s))." % (prefix, code) else: prefix = "%s((%s)." 
% (prefix, code) return obj_type, methodId, prefix + class ClassMethodCallExprAST(MethodCallExprAST): def __init__(self, slicc, type_ast, proc_name, expr_ast_vec): s = super() @@ -174,4 +197,5 @@ class ClassMethodCallExprAST(MethodCallExprAST): return obj_type, methodId, prefix -__all__ = [ "MemberMethodCallExprAST", "ClassMethodCallExprAST" ] + +__all__ = ["MemberMethodCallExprAST", "ClassMethodCallExprAST"] diff --git a/src/mem/slicc/ast/NewExprAST.py b/src/mem/slicc/ast/NewExprAST.py index b0967147f5..3488070783 100644 --- a/src/mem/slicc/ast/NewExprAST.py +++ b/src/mem/slicc/ast/NewExprAST.py @@ -27,6 +27,7 @@ from slicc.ast.ExprAST import ExprAST + class NewExprAST(ExprAST): def __init__(self, slicc, type_ast): super().__init__(slicc) diff --git a/src/mem/slicc/ast/ObjDeclAST.py b/src/mem/slicc/ast/ObjDeclAST.py index 2ab956298e..0aec0c367e 100644 --- a/src/mem/slicc/ast/ObjDeclAST.py +++ b/src/mem/slicc/ast/ObjDeclAST.py @@ -28,6 +28,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import Var + class ObjDeclAST(DeclAST): def __init__(self, slicc, type_ast, ident, pairs, rvalue, pointer): super().__init__(slicc, pairs) @@ -40,9 +41,10 @@ class ObjDeclAST(DeclAST): def __repr__(self): return "[ObjDecl: %r]" % self.ident - def generate(self, parent = None, **kwargs): - if "network" in self and not ("virtual_network" in self or - "physical_network" in self) : + def generate(self, parent=None, **kwargs): + if "network" in self and not ( + "virtual_network" in self or "physical_network" in self + ): self.error("Network queues require a 'virtual_network' attribute") type = self.type_ast.type @@ -63,20 +65,33 @@ class ObjDeclAST(DeclAST): # check type if this is a initialization init_code = "" if self.rvalue: - rvalue_type,init_code = self.rvalue.inline(True) + rvalue_type, init_code = self.rvalue.inline(True) if type != rvalue_type: - self.error("Initialization type mismatch '%s' and '%s'" % \ - (type, rvalue_type)) + self.error( + "Initialization type 
mismatch '%s' and '%s'" + % (type, rvalue_type) + ) machine = self.symtab.state_machine - v = Var(self.symtab, self.ident, self.location, type, c_code, - self.pairs, machine) + v = Var( + self.symtab, + self.ident, + self.location, + type, + c_code, + self.pairs, + machine, + ) # Add data member to the parent type if parent: - if not parent.addDataMember(self.ident, type, self.pairs, init_code): - self.error("Duplicate data member: %s:%s" % (parent, self.ident)) + if not parent.addDataMember( + self.ident, type, self.pairs, init_code + ): + self.error( + "Duplicate data member: %s:%s" % (parent, self.ident) + ) elif machine: machine.addObject(v) diff --git a/src/mem/slicc/ast/OodAST.py b/src/mem/slicc/ast/OodAST.py index 1de0c14265..78dc78efc4 100644 --- a/src/mem/slicc/ast/OodAST.py +++ b/src/mem/slicc/ast/OodAST.py @@ -28,6 +28,7 @@ from slicc.ast.ExprAST import ExprAST + class OodAST(ExprAST): def __init__(self, slicc): super().__init__(slicc) diff --git a/src/mem/slicc/ast/OperatorExprAST.py b/src/mem/slicc/ast/OperatorExprAST.py index a75568e6cc..ebebfdf23d 100644 --- a/src/mem/slicc/ast/OperatorExprAST.py +++ b/src/mem/slicc/ast/OperatorExprAST.py @@ -28,6 +28,7 @@ from slicc.ast.ExprAST import ExprAST from slicc.symbols import Type + class InfixOperatorExprAST(ExprAST): def __init__(self, slicc, left, op, right): super().__init__(slicc) @@ -49,11 +50,15 @@ class InfixOperatorExprAST(ExprAST): # Figure out what the input and output types should be if self.op in ("==", "!=", ">=", "<=", ">", "<"): output = "bool" - if (ltype != rtype): - self.error("Type mismatch: left and right operands of " + - "operator '%s' must be the same type. " + - "left: '%s', right: '%s'", - self.op, ltype, rtype) + if ltype != rtype: + self.error( + "Type mismatch: left and right operands of " + + "operator '%s' must be the same type. 
" + + "left: '%s', right: '%s'", + self.op, + ltype, + rtype, + ) else: expected_types = [] output = None @@ -62,19 +67,23 @@ class InfixOperatorExprAST(ExprAST): # boolean inputs and output expected_types = [("bool", "bool", "bool")] elif self.op in ("<<", ">>"): - expected_types = [("int", "int", "int"), - ("Cycles", "int", "Cycles")] + expected_types = [ + ("int", "int", "int"), + ("Cycles", "int", "Cycles"), + ] elif self.op in ("+", "-", "*", "/", "%"): - expected_types = [("int", "int", "int"), - ("Cycles", "Cycles", "Cycles"), - ("Tick", "Tick", "Tick"), - ("Cycles", "int", "Cycles"), - ("Scalar", "int", "Scalar"), - ("int", "bool", "int"), - ("bool", "int", "int"), - ("int", "Cycles", "Cycles")] + expected_types = [ + ("int", "int", "int"), + ("Cycles", "Cycles", "Cycles"), + ("Tick", "Tick", "Tick"), + ("Cycles", "int", "Cycles"), + ("Scalar", "int", "Scalar"), + ("int", "bool", "int"), + ("bool", "int", "int"), + ("int", "Cycles", "Cycles"), + ] else: - self.error("No operator matched with {0}!" .format(self.op)) + self.error("No operator matched with {0}!".format(self.op)) for expected_type in expected_types: left_input_type = self.symtab.find(expected_type[0], Type) @@ -84,9 +93,12 @@ class InfixOperatorExprAST(ExprAST): output = expected_type[2] if output == None: - self.error("Type mismatch: operands ({0}, {1}) for operator " \ - "'{2}' failed to match with the expected types" . 
- format(ltype, rtype, self.op)) + self.error( + "Type mismatch: operands ({0}, {1}) for operator " + "'{2}' failed to match with the expected types".format( + ltype, rtype, self.op + ) + ) # All is well fix = code.nofix() @@ -94,6 +106,7 @@ class InfixOperatorExprAST(ExprAST): code.fix(fix) return self.symtab.find(output, Type) + class PrefixOperatorExprAST(ExprAST): def __init__(self, slicc, op, operand): super().__init__(slicc) @@ -113,13 +126,15 @@ class PrefixOperatorExprAST(ExprAST): if self.op in opmap: output = opmap[self.op] type_in_symtab = self.symtab.find(opmap[self.op], Type) - if (optype != type_in_symtab): - self.error("Type mismatch: right operand of " + - "unary operator '%s' must be of type '%s'. ", - self.op, type_in_symtab) + if optype != type_in_symtab: + self.error( + "Type mismatch: right operand of " + + "unary operator '%s' must be of type '%s'. ", + self.op, + type_in_symtab, + ) else: - self.error("Invalid prefix operator '%s'", - self.op) + self.error("Invalid prefix operator '%s'", self.op) # All is well fix = code.nofix() diff --git a/src/mem/slicc/ast/OutPortDeclAST.py b/src/mem/slicc/ast/OutPortDeclAST.py index d84f9e8604..887597b797 100644 --- a/src/mem/slicc/ast/OutPortDeclAST.py +++ b/src/mem/slicc/ast/OutPortDeclAST.py @@ -30,6 +30,7 @@ from slicc.ast.TypeAST import TypeAST from slicc.symbols import Var from slicc.symbols import Type + class OutPortDeclAST(DeclAST): def __init__(self, slicc, ident, msg_type, var_expr, pairs): super().__init__(slicc, pairs) @@ -47,14 +48,23 @@ class OutPortDeclAST(DeclAST): queue_type = self.var_expr.generate(code) if not queue_type.isOutPort: - self.error("The outport queue's type must have the 'outport' " + - "attribute. Type '%s' does not have this attribute.", - (queue_type)) + self.error( + "The outport queue's type must have the 'outport' " + + "attribute. 
Type '%s' does not have this attribute.", + (queue_type), + ) if not self.symtab.find(self.msg_type.ident, Type): - self.error("The message type '%s' does not exist.", - self.msg_type.ident) + self.error( + "The message type '%s' does not exist.", self.msg_type.ident + ) - var = Var(self.symtab, self.ident, self.location, self.queue_type.type, - str(code), self.pairs) + var = Var( + self.symtab, + self.ident, + self.location, + self.queue_type.type, + str(code), + self.pairs, + ) self.symtab.newSymbol(var) diff --git a/src/mem/slicc/ast/PairAST.py b/src/mem/slicc/ast/PairAST.py index fa56a686c3..eae776f136 100644 --- a/src/mem/slicc/ast/PairAST.py +++ b/src/mem/slicc/ast/PairAST.py @@ -26,6 +26,7 @@ from slicc.ast.AST import AST + class PairAST(AST): def __init__(self, slicc, key, value): super().__init__(slicc) @@ -33,4 +34,4 @@ class PairAST(AST): self.value = value def __repr__(self): - return '[%s=%s]' % (self.key, self.value) + return "[%s=%s]" % (self.key, self.value) diff --git a/src/mem/slicc/ast/PairListAST.py b/src/mem/slicc/ast/PairListAST.py index d195ea2088..a0cf26d07f 100644 --- a/src/mem/slicc/ast/PairListAST.py +++ b/src/mem/slicc/ast/PairListAST.py @@ -26,6 +26,7 @@ from slicc.ast.AST import AST + class PairListAST(AST): def __init__(self, slicc): super().__init__(slicc) diff --git a/src/mem/slicc/ast/PeekStatementAST.py b/src/mem/slicc/ast/PeekStatementAST.py index d0fa306e61..fd07d4e13a 100644 --- a/src/mem/slicc/ast/PeekStatementAST.py +++ b/src/mem/slicc/ast/PeekStatementAST.py @@ -29,6 +29,7 @@ from slicc.ast.StatementAST import StatementAST from slicc.symbols import Var + class PeekStatementAST(StatementAST): def __init__(self, slicc, queue_name, type_ast, pairs, statements, method): super().__init__(slicc, pairs) @@ -39,8 +40,12 @@ class PeekStatementAST(StatementAST): self.method = method def __repr__(self): - return "[PeekStatementAST: %r queue_name: %r type: %r %r]" % \ - (self.method, self.queue_name, self.type_ast, self.statements) + 
return "[PeekStatementAST: %r queue_name: %r type: %r %r]" % ( + self.method, + self.queue_name, + self.type_ast, + self.statements, + ) def generate(self, code, return_type, **kwargs): self.symtab.pushFrame() @@ -48,8 +53,14 @@ class PeekStatementAST(StatementAST): msg_type = self.type_ast.type # Add new local var to symbol table - var = Var(self.symtab, "in_msg", self.location, msg_type, "(*in_msg_ptr)", - self.pairs) + var = Var( + self.symtab, + "in_msg", + self.location, + msg_type, + "(*in_msg_ptr)", + self.pairs, + ) self.symtab.newSymbol(var) # Check the queue type @@ -58,7 +69,8 @@ class PeekStatementAST(StatementAST): # Declare the new "in_msg_ptr" variable mtid = msg_type.c_ident qcode = self.queue_name.var.code - code(''' + code( + """ { // Declare message [[maybe_unused]] const $mtid* in_msg_ptr; @@ -69,26 +81,31 @@ class PeekStatementAST(StatementAST): // different inport or punt. throw RejectException(); } -''') +""" + ) if "block_on" in self.pairs: - address_field = self.pairs['block_on'] - code(''' + address_field = self.pairs["block_on"] + code( + """ if (m_is_blocking && (m_block_map.count(in_msg_ptr->m_$address_field) == 1) && (m_block_map[in_msg_ptr->m_$address_field] != &$qcode)) { $qcode.delayHead(clockEdge(), cyclesToTicks(Cycles(1))); continue; } - ''') + """ + ) if "wake_up" in self.pairs: - address_field = self.pairs['wake_up'] - code(''' + address_field = self.pairs["wake_up"] + code( + """ if (m_waiting_buffers.count(in_msg_ptr->m_$address_field) > 0) { wakeUpBuffers(in_msg_ptr->m_$address_field); } - ''') + """ + ) # The other statements self.statements.generate(code, return_type, **kwargs) diff --git a/src/mem/slicc/ast/ReturnStatementAST.py b/src/mem/slicc/ast/ReturnStatementAST.py index 320a53a2a7..ca4e33dbb9 100644 --- a/src/mem/slicc/ast/ReturnStatementAST.py +++ b/src/mem/slicc/ast/ReturnStatementAST.py @@ -27,6 +27,7 @@ from slicc.ast.StatementAST import StatementAST + class ReturnStatementAST(StatementAST): def __init__(self, 
slicc, expr_ast): super().__init__(slicc) @@ -38,7 +39,7 @@ class ReturnStatementAST(StatementAST): def generate(self, code, return_type, **kwargs): actual_type, ecode = self.expr_ast.inline(True) - code('return $ecode;') + code("return $ecode;") # Is return valid here? if return_type is None: @@ -46,9 +47,12 @@ class ReturnStatementAST(StatementAST): # The return type must match if actual_type != "OOD" and return_type != actual_type: - self.expr_ast.error("Return type miss-match, expected return " + - "type is '%s', actual is '%s'", - return_type, actual_type) + self.expr_ast.error( + "Return type miss-match, expected return " + + "type is '%s', actual is '%s'", + return_type, + actual_type, + ) def findResources(self, resources): self.expr_ast.findResources(resources) diff --git a/src/mem/slicc/ast/StallAndWaitStatementAST.py b/src/mem/slicc/ast/StallAndWaitStatementAST.py index eb0d3e38d5..37e567289e 100644 --- a/src/mem/slicc/ast/StallAndWaitStatementAST.py +++ b/src/mem/slicc/ast/StallAndWaitStatementAST.py @@ -28,6 +28,7 @@ from slicc.ast.StatementAST import StatementAST + class StallAndWaitStatementAST(StatementAST): def __init__(self, slicc, in_port, address): super().__init__(slicc) @@ -43,7 +44,9 @@ class StallAndWaitStatementAST(StatementAST): in_port_code = self.in_port.var.code address_code = self.address.var.code - code(''' + code( + """ stallBuffer(&($in_port_code), $address_code); $in_port_code.stallMessage($address_code, clockEdge()); - ''') + """ + ) diff --git a/src/mem/slicc/ast/StateDeclAST.py b/src/mem/slicc/ast/StateDeclAST.py index 300080eb61..f6e5d6e39b 100644 --- a/src/mem/slicc/ast/StateDeclAST.py +++ b/src/mem/slicc/ast/StateDeclAST.py @@ -27,6 +27,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import Func, Type + class StateDeclAST(DeclAST): def __init__(self, slicc, type_ast, pairs, states): super().__init__(slicc, pairs) @@ -52,8 +53,9 @@ class StateDeclAST(DeclAST): ident = str(self.type_ast) # Make the new type - t = 
Type(self.symtab, ident, self.location, self.pairs, - self.state_machine) + t = Type( + self.symtab, ident, self.location, self.pairs, self.state_machine + ) self.symtab.newSymbol(t) # Add all of the states of the type to it @@ -63,19 +65,33 @@ class StateDeclAST(DeclAST): # Add the implicit State_to_string method - FIXME, this is a bit dirty func_id = "%s_to_string" % t.c_ident - pairs = { "external" : "yes" } - func = Func(self.symtab, func_id + "_" + - t.ident, func_id, self.location, - self.symtab.find("std::string", Type), [ t ], [], "", - pairs) + pairs = {"external": "yes"} + func = Func( + self.symtab, + func_id + "_" + t.ident, + func_id, + self.location, + self.symtab.find("std::string", Type), + [t], + [], + "", + pairs, + ) self.symtab.newSymbol(func) # Add the State_to_permission method func_id = "%s_to_permission" % t.c_ident - pairs = { "external" : "yes" } - func = Func(self.symtab, func_id + "_" + - t.ident, func_id, self.location, - self.symtab.find("AccessPermission", Type), [ t ], [], "", - pairs) + pairs = {"external": "yes"} + func = Func( + self.symtab, + func_id + "_" + t.ident, + func_id, + self.location, + self.symtab.find("AccessPermission", Type), + [t], + [], + "", + pairs, + ) self.symtab.newSymbol(func) diff --git a/src/mem/slicc/ast/StatementAST.py b/src/mem/slicc/ast/StatementAST.py index 8999204d58..8eaee84f0c 100644 --- a/src/mem/slicc/ast/StatementAST.py +++ b/src/mem/slicc/ast/StatementAST.py @@ -26,6 +26,7 @@ from slicc.ast.AST import AST + class StatementAST(AST): def __init__(self, slicc, pairs=None): super().__init__(slicc, pairs) diff --git a/src/mem/slicc/ast/StatementListAST.py b/src/mem/slicc/ast/StatementListAST.py index 5f7bc549c9..82b5d20d1e 100644 --- a/src/mem/slicc/ast/StatementListAST.py +++ b/src/mem/slicc/ast/StatementListAST.py @@ -27,11 +27,12 @@ from slicc.ast.AST import AST + class StatementListAST(AST): def __init__(self, slicc, statements): super().__init__(slicc) if not isinstance(statements, (list, 
tuple)): - statements = [ statements ] + statements = [statements] self.statements = statements def __repr__(self): diff --git a/src/mem/slicc/ast/StaticCastAST.py b/src/mem/slicc/ast/StaticCastAST.py index 4e2d580a71..16f6b151bc 100644 --- a/src/mem/slicc/ast/StaticCastAST.py +++ b/src/mem/slicc/ast/StaticCastAST.py @@ -26,6 +26,7 @@ from slicc.ast.ExprAST import ExprAST + class StaticCastAST(ExprAST): def __init__(self, slicc, type_ast, type_modifier, expr_ast): super().__init__(slicc) @@ -40,19 +41,23 @@ class StaticCastAST(ExprAST): def generate(self, code, **kwargs): actual_type, ecode = self.expr_ast.inline(True) if self.type_modifier == "pointer": - code('static_cast<${{self.type_ast.type.c_ident}} *>($ecode)') + code("static_cast<${{self.type_ast.type.c_ident}} *>($ecode)") else: - code('static_cast<${{self.type_ast.type.c_ident}} &>($ecode)') + code("static_cast<${{self.type_ast.type.c_ident}} &>($ecode)") if not "interface" in self.type_ast.type: - self.expr_ast.error("static cast only premitted for those types " \ - "that implement inherit an interface") + self.expr_ast.error( + "static cast only premitted for those types " + "that implement inherit an interface" + ) # The interface type should match if str(actual_type) != str(self.type_ast.type["interface"]): - self.expr_ast.error("static cast miss-match, type is '%s'," \ - "but inherited type is '%s'", - actual_type, self.type_ast.type["interface"]) + self.expr_ast.error( + "static cast miss-match, type is '%s'," + "but inherited type is '%s'", + actual_type, + self.type_ast.type["interface"], + ) return self.type_ast.type - diff --git a/src/mem/slicc/ast/TransitionDeclAST.py b/src/mem/slicc/ast/TransitionDeclAST.py index 18c1b80b73..089bb45e4e 100644 --- a/src/mem/slicc/ast/TransitionDeclAST.py +++ b/src/mem/slicc/ast/TransitionDeclAST.py @@ -28,9 +28,11 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols import Transition + class TransitionDeclAST(DeclAST): - def __init__(self, slicc, 
request_types, states, events, next_state, - actions): + def __init__( + self, slicc, request_types, states, events, next_state, actions + ): super().__init__(slicc) self.request_types = request_types @@ -50,24 +52,39 @@ class TransitionDeclAST(DeclAST): for action in self.actions: if action not in machine.actions: - self.error("Invalid action: %s is not part of machine: %s" % \ - (action, machine)) + self.error( + "Invalid action: %s is not part of machine: %s" + % (action, machine) + ) for request_type in self.request_types: if request_type not in machine.request_types: - self.error("Invalid protocol access type: " \ - "%s is not part of machine: %s" % \ - (request_type, machine)) + self.error( + "Invalid protocol access type: " + "%s is not part of machine: %s" % (request_type, machine) + ) for state in self.states: if state not in machine.states: - self.error("Invalid state: %s is not part of machine: %s" % \ - (state, machine)) + self.error( + "Invalid state: %s is not part of machine: %s" + % (state, machine) + ) next_state = self.next_state or state for event in self.events: if event not in machine.events: - self.error("Invalid event: %s is not part of machine: %s" % \ - (event, machine)) - t = Transition(self.symtab, machine, state, event, next_state, - self.actions, self.request_types, self.location) + self.error( + "Invalid event: %s is not part of machine: %s" + % (event, machine) + ) + t = Transition( + self.symtab, + machine, + state, + event, + next_state, + self.actions, + self.request_types, + self.location, + ) machine.addTransition(t) diff --git a/src/mem/slicc/ast/TypeAST.py b/src/mem/slicc/ast/TypeAST.py index fccff9c20b..92c3190fe2 100644 --- a/src/mem/slicc/ast/TypeAST.py +++ b/src/mem/slicc/ast/TypeAST.py @@ -29,6 +29,7 @@ from slicc.ast.AST import AST from slicc.symbols import Type + class TypeAST(AST): def __init__(self, slicc, ident): super().__init__(slicc) diff --git a/src/mem/slicc/ast/TypeDeclAST.py b/src/mem/slicc/ast/TypeDeclAST.py 
index f4c4c8e1c7..e64b3d5010 100644 --- a/src/mem/slicc/ast/TypeDeclAST.py +++ b/src/mem/slicc/ast/TypeDeclAST.py @@ -28,6 +28,7 @@ from slicc.ast.DeclAST import DeclAST from slicc.symbols.Type import Type + class TypeDeclAST(DeclAST): def __init__(self, slicc, type_ast, pairs, field_asts): super().__init__(slicc, pairs) @@ -53,8 +54,9 @@ class TypeDeclAST(DeclAST): machine = self.symtab.state_machine # Make the new type - new_type = Type(self.symtab, ident, self.location, self.pairs, - self.state_machine) + new_type = Type( + self.symtab, ident, self.location, self.pairs, self.state_machine + ) if machine: machine.addType(new_type) diff --git a/src/mem/slicc/ast/TypeFieldAST.py b/src/mem/slicc/ast/TypeFieldAST.py index 18d1513bc4..ed22c1a3f5 100644 --- a/src/mem/slicc/ast/TypeFieldAST.py +++ b/src/mem/slicc/ast/TypeFieldAST.py @@ -27,6 +27,7 @@ from slicc.ast.AST import AST + class TypeFieldAST(AST): def __init__(self, slicc, pairs): super().__init__(slicc, pairs) diff --git a/src/mem/slicc/ast/TypeFieldEnumAST.py b/src/mem/slicc/ast/TypeFieldEnumAST.py index 293bca8962..ea35e081eb 100644 --- a/src/mem/slicc/ast/TypeFieldEnumAST.py +++ b/src/mem/slicc/ast/TypeFieldEnumAST.py @@ -28,6 +28,7 @@ from slicc.ast.TypeFieldAST import TypeFieldAST from slicc.symbols import Event, State, RequestType + class TypeFieldEnumAST(TypeFieldAST): def __init__(self, slicc, field_id, pairs_ast): super().__init__(slicc, pairs_ast) @@ -40,7 +41,9 @@ class TypeFieldEnumAST(TypeFieldAST): def generate(self, type, **kwargs): if str(type) == "State": - self.error("States must in a State Declaration, not a normal enum.") + self.error( + "States must in a State Declaration, not a normal enum." 
+ ) # Add enumeration if not type.addEnum(self.field_id, self.pairs_ast.pairs): @@ -58,6 +61,7 @@ class TypeFieldEnumAST(TypeFieldAST): if str(type) == "RequestType": if not machine: self.error("RequestType declaration not part of a machine.") - s = RequestType(self.symtab, self.field_id, self.location, - self.pairs) + s = RequestType( + self.symtab, self.field_id, self.location, self.pairs + ) machine.addRequestType(s) diff --git a/src/mem/slicc/ast/TypeFieldStateAST.py b/src/mem/slicc/ast/TypeFieldStateAST.py index 61e0b28aa0..e71b9383c3 100644 --- a/src/mem/slicc/ast/TypeFieldStateAST.py +++ b/src/mem/slicc/ast/TypeFieldStateAST.py @@ -27,6 +27,7 @@ from slicc.ast.TypeFieldAST import TypeFieldAST from slicc.symbols import Event, State + class TypeFieldStateAST(TypeFieldAST): def __init__(self, slicc, field_id, perm_ast, pairs_ast): super().__init__(slicc, pairs_ast) @@ -57,5 +58,3 @@ class TypeFieldStateAST(TypeFieldAST): machine.addState(s) type.statePermPairAdd(s, self.perm_ast.value) - - diff --git a/src/mem/slicc/ast/VarExprAST.py b/src/mem/slicc/ast/VarExprAST.py index f9f3eb1fc8..a653504f6d 100644 --- a/src/mem/slicc/ast/VarExprAST.py +++ b/src/mem/slicc/ast/VarExprAST.py @@ -29,6 +29,7 @@ from slicc.ast.ExprAST import ExprAST from slicc.symbols import Type, Var + class VarExprAST(ExprAST): def __init__(self, slicc, var): super().__init__(slicc) @@ -52,13 +53,18 @@ class VarExprAST(ExprAST): expected_type = self.symtab.find(type_ident, Type) if not expected_type: - self.error("There must be a type '%s' declared in this scope", - type_ident) + self.error( + "There must be a type '%s' declared in this scope", type_ident + ) if self.var.type != expected_type: - self.error("Incorrect type: " + \ - "'%s' is expected to be type '%s' not '%s'", - self.var.ident, expected_type, self.var.type) + self.error( + "Incorrect type: " + + "'%s' is expected to be type '%s' not '%s'", + self.var.ident, + expected_type, + self.var.type, + ) def generate(self, code, 
**kwargs): fix = code.nofix() diff --git a/src/mem/slicc/ast/WakeupPortStatementAST.py b/src/mem/slicc/ast/WakeupPortStatementAST.py index 293d5a3a09..62e3549ceb 100644 --- a/src/mem/slicc/ast/WakeupPortStatementAST.py +++ b/src/mem/slicc/ast/WakeupPortStatementAST.py @@ -35,6 +35,7 @@ from slicc.ast.StatementAST import StatementAST + class WakeupPortStatementAST(StatementAST): def __init__(self, slicc, in_port, address): super().__init__(slicc) @@ -50,6 +51,8 @@ class WakeupPortStatementAST(StatementAST): in_port_code = self.in_port.var.code address_code = self.address.var.code - code(''' + code( + """ wakeUpBuffer(&($in_port_code), $address_code); - ''') + """ + ) diff --git a/src/mem/slicc/generate/dot.py b/src/mem/slicc/generate/dot.py index 294075caad..bfbbb6f463 100644 --- a/src/mem/slicc/generate/dot.py +++ b/src/mem/slicc/generate/dot.py @@ -25,16 +25,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ def printDotty(sm, code): - code('digraph ${{sm.getIdent()}} {') + code("digraph ${{sm.getIdent()}} {") code.indent() for t in sm.transitions: # Don't print ignored transitions if t.getActionShorthands() in ("--", "z"): continue - code('${{t.getStateShorthand()}} -> ${{t.getNextStateShorthand()}') - code(' [label="${{t.getEventShorthand()}}/${{t.getActionShorthands()}}"') + code("${{t.getStateShorthand()}} -> ${{t.getNextStateShorthand()}") + code( + ' [label="${{t.getEventShorthand()}}/${{t.getActionShorthands()}}"' + ) code.dedent() - code('}') - + code("}") diff --git a/src/mem/slicc/generate/html.py b/src/mem/slicc/generate/html.py index b0bc3e9a95..4b86549b86 100644 --- a/src/mem/slicc/generate/html.py +++ b/src/mem/slicc/generate/html.py @@ -27,28 +27,32 @@ from code_formatter import code_formatter + def createSymbol(symbol, title): code = code_formatter() - code(''' + code( + """ $title: ${{formatShorthand(symbol.short)}} - ${{symbol.desc}} -''') +""" + ) return code + def formatShorthand(short): munged_shorthand = "" mode_is_normal = True # -- Walk over the string, processing superscript directives gen = enumerate(short) - for i,c in gen: - if c == '!': + for i, c in gen: + if c == "!": # -- Reached logical end of shorthand name break - elif c == '_': + elif c == "_": munged_shorthand += " " - elif c == '^': + elif c == "^": # -- Process super/subscript formatting mode_is_normal = not mode_is_normal if mode_is_normal: @@ -57,12 +61,12 @@ def formatShorthand(short): else: # -- Going to superscript mode munged_shorthand += "" - elif c == '\\': + elif c == "\\": # -- Process Symbol character set if i + 1 < len(short): # -- Proceed to next char. Yes I know that changing # the loop var is ugly! 
- i,c = next(gen) + i, c = next(gen) munged_shorthand += "" munged_shorthand += c munged_shorthand += "" @@ -79,4 +83,3 @@ def formatShorthand(short): munged_shorthand += "" return munged_shorthand - diff --git a/src/mem/slicc/generate/tex.py b/src/mem/slicc/generate/tex.py index 176227fa1a..00d8fd6f3e 100644 --- a/src/mem/slicc/generate/tex.py +++ b/src/mem/slicc/generate/tex.py @@ -27,28 +27,32 @@ from code_formatter import code_formatter + class tex_formatter(code_formatter): braced = "<>" double_braced = "<<>>" + def printTexTable(sm, code): tex = tex_formatter() - tex(r''' + tex( + r""" %& latex \documentclass[12pt]{article} \usepackage{graphics} \begin{document} \begin{tabular}{|l||$<<"l" * len(sm.events)>>|} \hline -''') +""" + ) for event in sm.events: code(r" & \rotatebox{90}{$<>}") - tex(r'\\ \hline \hline') + tex(r"\\ \hline \hline") for state in sm.states: state_str = state.short for event in sm.events: - state_str += ' & ' + state_str += " & " trans = sm.get_transition(state, event) if trans: actions = trans.getActionShorthands() @@ -59,13 +63,15 @@ def printTexTable(sm, code): nextState = "" state_str += actions if nextState and actions: - state_str += '/' + state_str += "/" state_str += nextState - tex(r'$0 \\', state_str) - tex(r''' + tex(r"$0 \\", state_str) + tex( + r""" \hline \end{tabular} \end{document} -''') +""" + ) code.append(tex) diff --git a/src/mem/slicc/main.py b/src/mem/slicc/main.py index c3afd2e143..bb0f9cbf16 100644 --- a/src/mem/slicc/main.py +++ b/src/mem/slicc/main.py @@ -30,55 +30,76 @@ import sys from slicc.parser import SLICC -usage="%prog [options] ... " -version="%prog v0.4" -brief_copyright=''' +usage = "%prog [options] ... " +version = "%prog v0.4" +brief_copyright = """ Copyright (c) 1999-2008 Mark D. Hill and David A. Wood Copyright (c) 2009 The Hewlett-Packard Development Company All Rights Reserved. 
-''' -help_details = '''This is intended to be used to process slicc files as a +""" +help_details = """This is intended to be used to process slicc files as a standalone script. This script assumes that it is running in a directory under gem5/ (e.g., gem5/temp). It takes a single argument: The path to a *.slicc file. By default it generates the C++ code in the directory generated/. This script can also generate the html SLICC output. See src/mem/slicc/main.py for -more details.''' +more details.""" + def nprint(format, *args): pass + def eprint(format, *args): if args: format = format % args print(format, file=sys.stderr) + def main(args=None): import optparse - parser = optparse.OptionParser(usage=usage, version=version, - epilog=help_details, - description=brief_copyright) - parser.add_option("-d", "--debug", default=False, action="store_true", - help="Turn on PLY debugging") - parser.add_option("-C", "--code-path", default="generated", - help="Path where C++ code output code goes") - parser.add_option("-H", "--html-path", - help="Path where html output goes") - parser.add_option("-F", "--print-files", action='store_true', - help="Print files that SLICC will generate") - parser.add_option("--tb", "--traceback", action='store_true', - help="print traceback on error") - parser.add_option("-q", "--quiet", - help="don't print messages") - opts,files = parser.parse_args(args=args) + parser = optparse.OptionParser( + usage=usage, + version=version, + epilog=help_details, + description=brief_copyright, + ) + parser.add_option( + "-d", + "--debug", + default=False, + action="store_true", + help="Turn on PLY debugging", + ) + parser.add_option( + "-C", + "--code-path", + default="generated", + help="Path where C++ code output code goes", + ) + parser.add_option("-H", "--html-path", help="Path where html output goes") + parser.add_option( + "-F", + "--print-files", + action="store_true", + help="Print files that SLICC will generate", + ) + parser.add_option( + "--tb", + 
"--traceback", + action="store_true", + help="print traceback on error", + ) + parser.add_option("-q", "--quiet", help="don't print messages") + opts, files = parser.parse_args(args=args) if len(files) != 1: parser.print_help() sys.exit(2) slicc_file = files[0] - if not slicc_file.endswith('.slicc'): + if not slicc_file.endswith(".slicc"): print("Must specify a .slicc file with a list of state machine files") parser.print_help() sys.exit(2) @@ -88,15 +109,20 @@ def main(args=None): output("SLICC v0.4") output("Parsing...") - protocol_base = os.path.join(os.path.dirname(__file__), - '..', 'ruby', 'protocol') - slicc = SLICC(slicc_file, protocol_base, verbose=True, debug=opts.debug, - traceback=opts.tb) - + protocol_base = os.path.join( + os.path.dirname(__file__), "..", "ruby", "protocol" + ) + slicc = SLICC( + slicc_file, + protocol_base, + verbose=True, + debug=opts.debug, + traceback=opts.tb, + ) if opts.print_files: for i in sorted(slicc.files()): - print(' %s' % i) + print(" %s" % i) else: output("Processing AST...") slicc.process() @@ -108,8 +134,8 @@ def main(args=None): output("Writing C++ files...") slicc.writeCodeFiles(opts.code_path, []) - output("SLICC is Done.") + if __name__ == "__main__": main() diff --git a/src/mem/slicc/parser.py b/src/mem/slicc/parser.py index 36df4b616c..2d33cd30b5 100644 --- a/src/mem/slicc/parser.py +++ b/src/mem/slicc/parser.py @@ -48,8 +48,11 @@ import slicc.ast as ast import slicc.util as util from slicc.symbols import SymbolTable + class SLICC(Grammar): - def __init__(self, filename, base_dir, verbose=False, traceback=False, **kwargs): + def __init__( + self, filename, base_dir, verbose=False, traceback=False, **kwargs + ): self.protocol = None self.traceback = traceback self.verbose = verbose @@ -64,12 +67,13 @@ class SLICC(Grammar): raise def currentLocation(self): - return util.Location(self.current_source, self.current_line, - no_warning=not self.verbose) + return util.Location( + self.current_source, self.current_line, 
no_warning=not self.verbose + ) def codeFormatter(self, *args, **kwargs): code = code_formatter(*args, **kwargs) - code['protocol'] = self.protocol + code["protocol"] = self.protocol return code def process(self): @@ -82,128 +86,150 @@ class SLICC(Grammar): self.symtab.writeHTMLFiles(html_path) def files(self): - f = set(['Types.hh']) + f = set(["Types.hh"]) f |= self.decl_list.files() return f - t_ignore = '\t ' + t_ignore = "\t " # C or C++ comment (ignore) def t_c_comment(self, t): - r'/\*(.|\n)*?\*/' - t.lexer.lineno += t.value.count('\n') + r"/\*(.|\n)*?\*/" + t.lexer.lineno += t.value.count("\n") def t_cpp_comment(self, t): - r'//.*' + r"//.*" # Define a rule so we can track line numbers def t_newline(self, t): - r'\n+' + r"\n+" t.lexer.lineno += len(t.value) reserved = { - 'protocol' : 'PROTOCOL', - 'include' : 'INCLUDE', - 'global' : 'GLOBAL', - 'machine' : 'MACHINE', - 'in_port' : 'IN_PORT', - 'out_port' : 'OUT_PORT', - 'action' : 'ACTION', - 'transition' : 'TRANS', - 'structure' : 'STRUCT', - 'external_type' : 'EXTERN_TYPE', - 'enumeration' : 'ENUM', - 'state_declaration' : 'STATE_DECL', - 'peek' : 'PEEK', - 'stall_and_wait' : 'STALL_AND_WAIT', - 'wakeup_port' : 'WAKEUP_PORT', - 'enqueue' : 'ENQUEUE', - 'check_allocate' : 'CHECK_ALLOCATE', - 'check_next_cycle' : 'CHECK_NEXT_CYCLE', - 'check_stop_slots' : 'CHECK_STOP_SLOTS', - 'check_on_cache_probe' : 'CHECK_PROBE', - 'static_cast' : 'STATIC_CAST', - 'if' : 'IF', - 'is_valid' : 'IS_VALID', - 'is_invalid' : 'IS_INVALID', - 'else' : 'ELSE', - 'return' : 'RETURN', - 'void' : 'VOID', - 'new' : 'NEW', - 'OOD' : 'OOD', - 'defer_enqueueing' : 'DEFER_ENQUEUEING', + "protocol": "PROTOCOL", + "include": "INCLUDE", + "global": "GLOBAL", + "machine": "MACHINE", + "in_port": "IN_PORT", + "out_port": "OUT_PORT", + "action": "ACTION", + "transition": "TRANS", + "structure": "STRUCT", + "external_type": "EXTERN_TYPE", + "enumeration": "ENUM", + "state_declaration": "STATE_DECL", + "peek": "PEEK", + "stall_and_wait": 
"STALL_AND_WAIT", + "wakeup_port": "WAKEUP_PORT", + "enqueue": "ENQUEUE", + "check_allocate": "CHECK_ALLOCATE", + "check_next_cycle": "CHECK_NEXT_CYCLE", + "check_stop_slots": "CHECK_STOP_SLOTS", + "check_on_cache_probe": "CHECK_PROBE", + "static_cast": "STATIC_CAST", + "if": "IF", + "is_valid": "IS_VALID", + "is_invalid": "IS_INVALID", + "else": "ELSE", + "return": "RETURN", + "void": "VOID", + "new": "NEW", + "OOD": "OOD", + "defer_enqueueing": "DEFER_ENQUEUEING", } - literals = ':[]{}(),=' + literals = ":[]{}(),=" - tokens = [ 'EQ', 'NE', 'LT', 'GT', 'LE', 'GE', - 'LEFTSHIFT', 'RIGHTSHIFT', - 'NOT', 'AND', 'OR', - 'PLUS', 'DASH', 'STAR', 'SLASH', 'MOD', - 'INCR', 'DECR', - 'DOUBLE_COLON', 'SEMI', - 'ASSIGN', 'DOT', - 'IDENT', 'LIT_BOOL', 'FLOATNUMBER', 'NUMBER', 'STRING', - 'AMP', 'CONST' ] + tokens = [ + "EQ", + "NE", + "LT", + "GT", + "LE", + "GE", + "LEFTSHIFT", + "RIGHTSHIFT", + "NOT", + "AND", + "OR", + "PLUS", + "DASH", + "STAR", + "SLASH", + "MOD", + "INCR", + "DECR", + "DOUBLE_COLON", + "SEMI", + "ASSIGN", + "DOT", + "IDENT", + "LIT_BOOL", + "FLOATNUMBER", + "NUMBER", + "STRING", + "AMP", + "CONST", + ] tokens += reserved.values() - t_EQ = r'==' - t_NE = r'!=' - t_LT = r'<' - t_GT = r'>' - t_LE = r'<=' - t_GE = r'>=' - t_LEFTSHIFT = r'<<' - t_RIGHTSHIFT = r'>>' - t_NOT = r'!' - t_AND = r'&&' - t_OR = r'\|\|' - t_PLUS = r'\+' - t_DASH = r'-' - t_STAR = r'\*' - t_AMP = r'&' - t_CONST = r'const' - t_SLASH = r'/' - t_MOD = r'%' - t_DOUBLE_COLON = r'::' - t_SEMI = r';' - t_ASSIGN = r':=' - t_DOT = r'\.' - t_INCR = r'\+\+' - t_DECR = r'--' + t_EQ = r"==" + t_NE = r"!=" + t_LT = r"<" + t_GT = r">" + t_LE = r"<=" + t_GE = r">=" + t_LEFTSHIFT = r"<<" + t_RIGHTSHIFT = r">>" + t_NOT = r"!" + t_AND = r"&&" + t_OR = r"\|\|" + t_PLUS = r"\+" + t_DASH = r"-" + t_STAR = r"\*" + t_AMP = r"&" + t_CONST = r"const" + t_SLASH = r"/" + t_MOD = r"%" + t_DOUBLE_COLON = r"::" + t_SEMI = r";" + t_ASSIGN = r":=" + t_DOT = r"\." 
+ t_INCR = r"\+\+" + t_DECR = r"--" precedence = ( - ('left', 'INCR', 'DECR'), - ('left', 'OR'), - ('left', 'AND'), - ('left', 'EQ', 'NE'), - ('left', 'LT', 'GT', 'LE', 'GE'), - ('left', 'RIGHTSHIFT', 'LEFTSHIFT'), - ('left', 'PLUS', 'DASH'), - ('left', 'STAR', 'SLASH', 'MOD'), - ('right', 'NOT', 'UMINUS'), + ("left", "INCR", "DECR"), + ("left", "OR"), + ("left", "AND"), + ("left", "EQ", "NE"), + ("left", "LT", "GT", "LE", "GE"), + ("left", "RIGHTSHIFT", "LEFTSHIFT"), + ("left", "PLUS", "DASH"), + ("left", "STAR", "SLASH", "MOD"), + ("right", "NOT", "UMINUS"), ) def t_IDENT(self, t): - r'[a-zA-Z_][a-zA-Z_0-9]*' - if t.value == 'true': - t.type = 'LIT_BOOL' + r"[a-zA-Z_][a-zA-Z_0-9]*" + if t.value == "true": + t.type = "LIT_BOOL" t.value = True return t - if t.value == 'false': - t.type = 'LIT_BOOL' + if t.value == "false": + t.type = "LIT_BOOL" t.value = False return t # Check for reserved words - t.type = self.reserved.get(t.value, 'IDENT') + t.type = self.reserved.get(t.value, "IDENT") return t def t_FLOATNUMBER(self, t): - '[0-9]+[.][0-9]+' + "[0-9]+[.][0-9]+" try: t.value = float(t.value) except ValueError: @@ -211,7 +237,7 @@ class SLICC(Grammar): return t def t_NUMBER(self, t): - r'[0-9]+' + r"[0-9]+" try: t.value = int(t.value) except ValueError: @@ -220,13 +246,13 @@ class SLICC(Grammar): def t_STRING1(self, t): r'\"[^"\n]*\"' - t.type = 'STRING' + t.type = "STRING" t.value = t.value[1:-1] return t def t_STRING2(self, t): r"\'[^'\n]*\'" - t.type = 'STRING' + t.type = "STRING" t.value = t.value[1:-1] return t @@ -248,7 +274,7 @@ class SLICC(Grammar): elif p[1] is None: decls = [] else: - decls = [ p[1] ] + decls = [p[1]] p[0] = decls + p[2] def p_declsx__none(self, p): @@ -258,8 +284,10 @@ class SLICC(Grammar): def p_decl__protocol(self, p): "decl : PROTOCOL STRING SEMI" if self.protocol: - msg = "Protocol can only be set once! Error at %s:%s\n" % \ - (self.current_source, self.current_line) + msg = "Protocol can only be set once! 
Error at %s:%s\n" % ( + self.current_source, + self.current_line, + ) raise ParseError(msg) self.protocol = p[2] p[0] = None @@ -337,7 +365,7 @@ class SLICC(Grammar): # Type fields def p_obj_decls__list(self, p): "obj_decls : obj_decl obj_decls" - p[0] = [ p[1] ] + p[2] + p[0] = [p[1]] + p[2] def p_obj_decls__empty(self, p): "obj_decls : empty" @@ -345,7 +373,7 @@ class SLICC(Grammar): def p_type_members__list(self, p): "type_members : type_member type_members" - p[0] = [ p[1] ] + p[2] + p[0] = [p[1]] + p[2] def p_type_members__empty(self, p): "type_members : empty" @@ -353,8 +381,8 @@ class SLICC(Grammar): def p_type_member__0(self, p): """type_member : obj_decl - | func_decl - | func_def""" + | func_decl + | func_def""" p[0] = p[1] # Member / Variable declarations @@ -372,13 +400,15 @@ class SLICC(Grammar): def p_obj_decl__2(self, p): "obj_decl : type ident ASSIGN expr SEMI" - p[0] = ast.ObjDeclAST(self, p[1], p[2], ast.PairListAST(self), p[4], - False) + p[0] = ast.ObjDeclAST( + self, p[1], p[2], ast.PairListAST(self), p[4], False + ) def p_obj_decl__3(self, p): "obj_decl : type STAR ident ASSIGN expr SEMI" - p[0] = ast.ObjDeclAST(self, p[1], p[3], ast.PairListAST(self), p[5], - True) + p[0] = ast.ObjDeclAST( + self, p[1], p[3], ast.PairListAST(self), p[5], True + ) # Function definition and declaration def p_decl__func_decl(self, p): @@ -387,12 +417,12 @@ class SLICC(Grammar): def p_func_decl__0(self, p): """func_decl : void ident '(' params ')' pairs SEMI - | type ident '(' params ')' pairs SEMI""" + | type ident '(' params ')' pairs SEMI""" p[0] = ast.FuncDeclAST(self, p[1], p[2], p[4], p[6], None) def p_func_decl__1(self, p): """func_decl : void ident '(' types ')' pairs SEMI - | type ident '(' types ')' pairs SEMI""" + | type ident '(' types ')' pairs SEMI""" p[0] = ast.FuncDeclAST(self, p[1], p[2], p[4], p[6], None) def p_decl__func_def(self, p): @@ -401,13 +431,13 @@ class SLICC(Grammar): def p_func_def__0(self, p): """func_def : void ident '(' params ')' 
pairs statements - | type ident '(' params ')' pairs statements""" + | type ident '(' params ')' pairs statements""" p[0] = ast.FuncDeclAST(self, p[1], p[2], p[4], p[6], p[7]) # Enum fields def p_type_enums__list(self, p): "type_enums : type_enum type_enums" - p[0] = [ p[1] ] + p[2] + p[0] = [p[1]] + p[2] def p_type_enums__empty(self, p): "type_enums : empty" @@ -420,7 +450,7 @@ class SLICC(Grammar): # States def p_type_states__list(self, p): "type_states : type_state type_states" - p[0] = [ p[1] ] + p[2] + p[0] = [p[1]] + p[2] def p_type_states__empty(self, p): "type_states : empty" @@ -433,11 +463,11 @@ class SLICC(Grammar): # Formal Param def p_params__many(self, p): "params : param ',' params" - p[0] = [ p[1] ] + p[3] + p[0] = [p[1]] + p[3] def p_params__one(self, p): "params : param" - p[0] = [ p[1] ] + p[0] = [p[1]] def p_params__none(self, p): "params : empty" @@ -478,11 +508,11 @@ class SLICC(Grammar): # Type def p_types__multiple(self, p): "types : type ',' types" - p[0] = [ p[1] ] + p[3] + p[0] = [p[1]] + p[3] def p_types__one(self, p): "types : type" - p[0] = [ p[1] ] + p[0] = [p[1]] def p_types__empty(self, p): "types : empty" @@ -490,7 +520,7 @@ class SLICC(Grammar): def p_typestr__multi(self, p): "typestr : typestr DOUBLE_COLON ident" - p[0] = '%s::%s' % (p[1], p[3]) + p[0] = "%s::%s" % (p[1], p[3]) def p_typestr__single(self, p): "typestr : ident" @@ -511,20 +541,20 @@ class SLICC(Grammar): def p_idents__bare(self, p): "idents : ident" - p[0] = [ p[1] ] + p[0] = [p[1]] def p_identx__multiple_1(self, p): """identx : ident SEMI identx - | ident ',' identx""" - p[0] = [ p[1] ] + p[3] + | ident ',' identx""" + p[0] = [p[1]] + p[3] def p_identx__multiple_2(self, p): "identx : ident identx" - p[0] = [ p[1] ] + p[2] + p[0] = [p[1]] + p[2] def p_identx__single(self, p): "identx : empty" - p[0] = [ ] + p[0] = [] def p_ident(self, p): "ident : IDENT" @@ -532,7 +562,7 @@ class SLICC(Grammar): def p_ident_or_star(self, p): """ident_or_star : ident - | STAR""" + 
| STAR""" p[0] = p[1] # Pair and pair lists @@ -556,8 +586,8 @@ class SLICC(Grammar): def p_pair__assign(self, p): """pair : ident '=' STRING - | ident '=' ident - | ident '=' NUMBER""" + | ident '=' ident + | ident '=' NUMBER""" p[0] = ast.PairAST(self, p[1], p[3]) def p_pair__literal(self, p): @@ -575,22 +605,22 @@ class SLICC(Grammar): def p_statements_inner__many(self, p): "statements_inner : statement statements_inner" - p[0] = [ p[1] ] + p[2] + p[0] = [p[1]] + p[2] def p_statements_inner__one(self, p): "statements_inner : statement" - p[0] = [ p[1] ] + p[0] = [p[1]] def p_exprs__multiple(self, p): "exprs : expr ',' exprs" - p[0] = [ p[1] ] + p[3] + p[0] = [p[1]] + p[3] def p_exprs__one(self, p): "exprs : expr" - p[0] = [ p[1] ] + p[0] = [p[1]] def p_exprs__empty(self, p): - "exprs : empty""" + "exprs : empty" "" p[0] = [] def p_statement__expression(self, p): @@ -659,8 +689,9 @@ class SLICC(Grammar): def p_statement__if_else_if(self, p): "if_statement : IF '(' expr ')' statements ELSE if_statement" - p[0] = ast.IfStatementAST(self, p[3], p[5], - ast.StatementListAST(self, p[7])) + p[0] = ast.IfStatementAST( + self, p[3], p[5], ast.StatementListAST(self, p[7]) + ) def p_expr__static_cast(self, p): "aexpr : STATIC_CAST '(' type ',' expr ')'" @@ -704,18 +735,21 @@ class SLICC(Grammar): def p_expr__member_method_call(self, p): "aexpr : aexpr DOT ident '(' exprs ')'" - p[0] = ast.MemberMethodCallExprAST(self, p[1], - ast.FuncCallExprAST(self, p[3], p[5])) + p[0] = ast.MemberMethodCallExprAST( + self, p[1], ast.FuncCallExprAST(self, p[3], p[5]) + ) def p_expr__member_method_call_lookup(self, p): "aexpr : aexpr '[' exprs ']'" - p[0] = ast.MemberMethodCallExprAST(self, p[1], - ast.FuncCallExprAST(self, "lookup", p[3])) + p[0] = ast.MemberMethodCallExprAST( + self, p[1], ast.FuncCallExprAST(self, "lookup", p[3]) + ) def p_expr__class_method_call(self, p): "aexpr : type DOUBLE_COLON ident '(' exprs ')'" - p[0] = ast.ClassMethodCallExprAST(self, p[1], - 
ast.FuncCallExprAST(self, p[3], p[5])) + p[0] = ast.ClassMethodCallExprAST( + self, p[1], ast.FuncCallExprAST(self, p[3], p[5]) + ) def p_expr__aexpr(self, p): "expr : aexpr" @@ -723,28 +757,28 @@ class SLICC(Grammar): def p_expr__binary_op(self, p): """expr : expr STAR expr - | expr SLASH expr - | expr MOD expr - | expr PLUS expr - | expr DASH expr - | expr LT expr - | expr GT expr - | expr LE expr - | expr GE expr - | expr EQ expr - | expr NE expr - | expr AND expr - | expr OR expr - | expr RIGHTSHIFT expr - | expr LEFTSHIFT expr""" + | expr SLASH expr + | expr MOD expr + | expr PLUS expr + | expr DASH expr + | expr LT expr + | expr GT expr + | expr LE expr + | expr GE expr + | expr EQ expr + | expr NE expr + | expr AND expr + | expr OR expr + | expr RIGHTSHIFT expr + | expr LEFTSHIFT expr""" p[0] = ast.InfixOperatorExprAST(self, p[1], p[2], p[3]) # FIXME - unary not def p_expr__unary_op(self, p): """expr : NOT expr - | INCR expr - | DECR expr - | DASH expr %prec UMINUS""" + | INCR expr + | DECR expr + | DASH expr %prec UMINUS""" p[0] = ast.PrefixOperatorExprAST(self, p[1], p[2]) def p_expr__parens(self, p): diff --git a/src/mem/slicc/symbols/Action.py b/src/mem/slicc/symbols/Action.py index 3c7f70c0e2..c00482f276 100644 --- a/src/mem/slicc/symbols/Action.py +++ b/src/mem/slicc/symbols/Action.py @@ -27,12 +27,14 @@ from slicc.symbols.Symbol import Symbol + class Action(Symbol): - def __init__(self, table, ident, resources, location, pairs): - super().__init__(table, ident, location, pairs) - self.resources = resources + def __init__(self, table, ident, resources, location, pairs): + super().__init__(table, ident, location, pairs) + self.resources = resources - def __repr__(self): - return "[Action: %s]" % self.ident + def __repr__(self): + return "[Action: %s]" % self.ident -__all__ = [ "Action" ] + +__all__ = ["Action"] diff --git a/src/mem/slicc/symbols/Event.py b/src/mem/slicc/symbols/Event.py index 9ff4d8ba75..57ab7a76e3 100644 --- 
a/src/mem/slicc/symbols/Event.py +++ b/src/mem/slicc/symbols/Event.py @@ -27,8 +27,10 @@ from slicc.symbols.Symbol import Symbol + class Event(Symbol): def __repr__(self): return "[Event: %s]" % self.ident -__all__ = [ "Event" ] + +__all__ = ["Event"] diff --git a/src/mem/slicc/symbols/Func.py b/src/mem/slicc/symbols/Func.py index 8a73a5c800..4d254138e1 100644 --- a/src/mem/slicc/symbols/Func.py +++ b/src/mem/slicc/symbols/Func.py @@ -28,9 +28,20 @@ from slicc.symbols.Symbol import Symbol from slicc.symbols.Type import Type + class Func(Symbol): - def __init__(self, table, ident, name, location, return_type, param_types, - param_strings, body, pairs): + def __init__( + self, + table, + ident, + name, + location, + return_type, + param_types, + param_strings, + body, + pairs, + ): super().__init__(table, ident, location, pairs) self.return_type = return_type self.param_types = param_types @@ -56,35 +67,46 @@ class Func(Symbol): elif "return_by_pointer" in self and self.return_type != void_type: return_type += "*" - return "%s %s(%s);" % (return_type, self.c_name, - ", ".join(self.param_strings)) + return "%s %s(%s);" % ( + return_type, + self.c_name, + ", ".join(self.param_strings), + ) def writeCodeFiles(self, path, includes): return def checkArguments(self, args): if len(args) != len(self.param_types): - self.error("Wrong number of arguments passed to function : '%s'" +\ - " Expected %d, got %d", self.c_ident, - len(self.param_types), len(args)) + self.error( + "Wrong number of arguments passed to function : '%s'" + + " Expected %d, got %d", + self.c_ident, + len(self.param_types), + len(args), + ) cvec = [] type_vec = [] - for expr,expected_type in zip(args, self.param_types): + for expr, expected_type in zip(args, self.param_types): # Check the types of the parameter - actual_type,param_code = expr.inline(True) - if str(actual_type) != 'OOD' and \ - str(actual_type) != str(expected_type) and \ - str(actual_type["interface"]) != str(expected_type): - 
expr.error("Type mismatch: expected: %s actual: %s" % \ - (expected_type, actual_type)) + actual_type, param_code = expr.inline(True) + if ( + str(actual_type) != "OOD" + and str(actual_type) != str(expected_type) + and str(actual_type["interface"]) != str(expected_type) + ): + expr.error( + "Type mismatch: expected: %s actual: %s" + % (expected_type, actual_type) + ) cvec.append(param_code) type_vec.append(expected_type) return cvec, type_vec def generateCode(self): - '''This write a function of object Chip''' + """This write a function of object Chip""" if "external" in self: return "" @@ -98,15 +120,18 @@ class Func(Symbol): if "return_by_pointer" in self and self.return_type != void_type: return_type += "*" - params = ', '.join(self.param_strings) + params = ", ".join(self.param_strings) - code(''' + code( + """ $return_type ${{self.class_name}}::${{self.c_name}}($params) { ${{self.body}} } -''') +""" + ) return str(code) -__all__ = [ "Func" ] + +__all__ = ["Func"] diff --git a/src/mem/slicc/symbols/RequestType.py b/src/mem/slicc/symbols/RequestType.py index dd2f4aa882..db822a0b17 100644 --- a/src/mem/slicc/symbols/RequestType.py +++ b/src/mem/slicc/symbols/RequestType.py @@ -26,8 +26,10 @@ from slicc.symbols.Symbol import Symbol + class RequestType(Symbol): def __repr__(self): return "[RequestType: %s]" % self.ident -__all__ = [ "RequestType" ] + +__all__ = ["RequestType"] diff --git a/src/mem/slicc/symbols/State.py b/src/mem/slicc/symbols/State.py index 164c585f6c..59c7c7d701 100644 --- a/src/mem/slicc/symbols/State.py +++ b/src/mem/slicc/symbols/State.py @@ -27,16 +27,21 @@ from slicc.symbols.Symbol import Symbol + class State(Symbol): def __repr__(self): return "[State: %s]" % self.ident + def isWildcard(self): return False + class WildcardState(State): def __repr__(self): return "[State: *]" + def isWildcard(self): return True -__all__ = [ "State" ] + +__all__ = ["State"] diff --git a/src/mem/slicc/symbols/StateMachine.py 
b/src/mem/slicc/symbols/StateMachine.py index a9f7373238..55ee527c41 100644 --- a/src/mem/slicc/symbols/StateMachine.py +++ b/src/mem/slicc/symbols/StateMachine.py @@ -46,25 +46,26 @@ import slicc.generate.html as html import re python_class_map = { - "int": "Int", - "NodeID": "Int", - "uint32_t" : "UInt32", - "std::string": "String", - "bool": "Bool", - "CacheMemory": "RubyCache", - "WireBuffer": "RubyWireBuffer", - "Sequencer": "RubySequencer", - "HTMSequencer": "RubyHTMSequencer", - "GPUCoalescer" : "RubyGPUCoalescer", - "VIPERCoalescer" : "VIPERCoalescer", - "DirectoryMemory": "RubyDirectoryMemory", - "PerfectCacheMemory": "RubyPerfectCacheMemory", - "MemoryControl": "MemoryControl", - "MessageBuffer": "MessageBuffer", - "DMASequencer": "DMASequencer", - "RubyPrefetcher":"RubyPrefetcher", - "Cycles":"Cycles", - } + "int": "Int", + "NodeID": "Int", + "uint32_t": "UInt32", + "std::string": "String", + "bool": "Bool", + "CacheMemory": "RubyCache", + "WireBuffer": "RubyWireBuffer", + "Sequencer": "RubySequencer", + "HTMSequencer": "RubyHTMSequencer", + "GPUCoalescer": "RubyGPUCoalescer", + "VIPERCoalescer": "VIPERCoalescer", + "DirectoryMemory": "RubyDirectoryMemory", + "PerfectCacheMemory": "RubyPerfectCacheMemory", + "MemoryControl": "MemoryControl", + "MessageBuffer": "MessageBuffer", + "DMASequencer": "DMASequencer", + "RubyPrefetcher": "RubyPrefetcher", + "Cycles": "Cycles", +} + class StateMachine(Symbol): def __init__(self, symtab, ident, location, pairs, config_parameters): @@ -80,11 +81,25 @@ class StateMachine(Symbol): for param in config_parameters: if param.pointer: - var = Var(symtab, param.ident, location, param.type_ast.type, - "(*m_%s_ptr)" % param.ident, {}, self) + var = Var( + symtab, + param.ident, + location, + param.type_ast.type, + "(*m_%s_ptr)" % param.ident, + {}, + self, + ) else: - var = Var(symtab, param.ident, location, param.type_ast.type, - "m_%s" % param.ident, {}, self) + var = Var( + symtab, + param.ident, + location, + 
param.type_ast.type, + "m_%s" % param.ident, + {}, + self, + ) self.symtab.registerSym(param.ident, var) @@ -103,13 +118,13 @@ class StateMachine(Symbol): # the {} machine. Note that these along with the config params # form the entire set of data members of the machine. self.objects = [] - self.TBEType = None + self.TBEType = None self.EntryType = None # Python's sets are not sorted so we have to be careful when using # this to generate deterministic output. self.debug_flags = set() - self.debug_flags.add('RubyGenerated') - self.debug_flags.add('RubySlicc') + self.debug_flags.add("RubyGenerated") + self.debug_flags.add("RubySlicc") def __repr__(self): return "[StateMachine: %s]" % self.ident @@ -128,7 +143,9 @@ class StateMachine(Symbol): # Check for duplicate action for other in self.actions.values(): if action.ident == other.ident: - action.warning("Duplicate action definition: %s" % action.ident) + action.warning( + "Duplicate action definition: %s" % action.ident + ) action.error("Duplicate action definition: %s" % action.ident) if action.short == other.short: other.warning("Duplicate action shorthand: %s" % other.ident) @@ -162,21 +179,24 @@ class StateMachine(Symbol): self.objects.append(obj) def addType(self, type): - type_ident = '%s' % type.c_ident + type_ident = "%s" % type.c_ident - if type_ident == "%s_TBE" %self.ident: + if type_ident == "%s_TBE" % self.ident: if self.TBEType != None: - self.error("Multiple Transaction Buffer types in a " \ - "single machine."); + self.error( + "Multiple Transaction Buffer types in a " "single machine." + ) self.TBEType = type elif "interface" in type and "AbstractCacheEntry" == type["interface"]: if "main" in type and "false" == type["main"].lower(): - pass # this isn't the EntryType + pass # this isn't the EntryType else: if self.EntryType != None: - self.error("Multiple AbstractCacheEntry types in a " \ - "single machine."); + self.error( + "Multiple AbstractCacheEntry types in a " + "single machine." 
+ ) self.EntryType = type # Needs to be called before accessing the table @@ -205,7 +225,7 @@ class StateMachine(Symbol): if not action.used: error_msg = "Unused action: %s" % action.ident if "desc" in action: - error_msg += ", " + action.desc + error_msg += ", " + action.desc action.warning(error_msg) self.table = table @@ -238,7 +258,8 @@ class StateMachine(Symbol): py_ident = "%s_Controller" % ident c_ident = "%s_Controller" % self.ident - code(''' + code( + """ from m5.params import * from m5.SimObject import SimObject from m5.objects.Controller import RubyController @@ -247,34 +268,40 @@ class $py_ident(RubyController): type = '$py_ident' cxx_header = 'mem/ruby/protocol/${c_ident}.hh' cxx_class = 'gem5::ruby::$py_ident' -''') +""" + ) code.indent() for param in self.config_parameters: - dflt_str = '' + dflt_str = "" if param.rvalue is not None: - dflt_str = str(param.rvalue.inline()) + ', ' + dflt_str = str(param.rvalue.inline()) + ", " if param.type_ast.type.c_ident in python_class_map: python_type = python_class_map[param.type_ast.type.c_ident] - code('${{param.ident}} = Param.${{python_type}}(${dflt_str}"")') + code( + '${{param.ident}} = Param.${{python_type}}(${dflt_str}"")' + ) else: - self.error("Unknown c++ to python class conversion for c++ " \ - "type: '%s'. Please update the python_class_map " \ - "in StateMachine.py", param.type_ast.type.c_ident) + self.error( + "Unknown c++ to python class conversion for c++ " + "type: '%s'. 
Please update the python_class_map " + "in StateMachine.py", + param.type_ast.type.c_ident, + ) code.dedent() - code.write(path, '%s.py' % py_ident) - + code.write(path, "%s.py" % py_ident) def printControllerHH(self, path): - '''Output the method declarations for the class declaration''' + """Output the method declarations for the class declaration""" code = self.symtab.codeFormatter() ident = self.ident c_ident = "%s_Controller" % self.ident - code(''' + code( + """ // Created by slicc definition of Module "${{self.short}}" #ifndef __${ident}_CONTROLLER_HH__ @@ -290,7 +317,8 @@ class $py_ident(RubyController): #include "mem/ruby/slicc_interface/AbstractController.hh" #include "params/$c_ident.hh" -''') +""" + ) seen_types = set() for var in self.objects: @@ -299,7 +327,8 @@ class $py_ident(RubyController): seen_types.add(var.type.ident) # for adding information to the protocol debug trace - code(''' + code( + """ namespace gem5 { @@ -343,47 +372,61 @@ class $c_ident : public AbstractController uint64_t getTransitionCount(${ident}_State state, ${ident}_Event event); private: -''') +""" + ) code.indent() # added by SS for param in self.config_parameters: if param.pointer: - code('${{param.type_ast.type}}* m_${{param.ident}}_ptr;') + code("${{param.type_ast.type}}* m_${{param.ident}}_ptr;") else: - code('${{param.type_ast.type}} m_${{param.ident}};') + code("${{param.type_ast.type}} m_${{param.ident}};") - code(''' + code( + """ TransitionResult doTransition(${ident}_Event event, -''') +""" + ) if self.EntryType != None: - code(''' + code( + """ ${{self.EntryType.c_ident}}* m_cache_entry_ptr, -''') +""" + ) if self.TBEType != None: - code(''' + code( + """ ${{self.TBEType.c_ident}}* m_tbe_ptr, -''') +""" + ) - code(''' + code( + """ Addr addr); TransitionResult doTransitionWorker(${ident}_Event event, ${ident}_State state, ${ident}_State& next_state, -''') +""" + ) if self.TBEType != None: - code(''' + code( + """ ${{self.TBEType.c_ident}}*& m_tbe_ptr, -''') +""" + 
) if self.EntryType != None: - code(''' + code( + """ ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, -''') +""" + ) - code(''' + code( + """ Addr addr); ${ident}_Event m_curTransitionEvent; @@ -401,78 +444,95 @@ static std::vector > transVec; static int m_num_controllers; // Internal functions -''') +""" + ) for func in self.functions: proto = func.prototype if proto: - code('$proto') + code("$proto") if self.EntryType != None: - code(''' + code( + """ // Set and Reset for cache_entry variable void set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry); void unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr); -''') +""" + ) if self.TBEType != None: - code(''' + code( + """ // Set and Reset for tbe variable void set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${ident}_TBE* m_new_tbe); void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr); -''') +""" + ) # Prototype the actions that the controller can take - code(''' + code( + """ // Actions -''') +""" + ) if self.TBEType != None and self.EntryType != None: for action in self.actions.values(): - code('/** \\brief ${{action.desc}} */') - code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& ' - 'm_tbe_ptr, ${{self.EntryType.c_ident}}*& ' - 'm_cache_entry_ptr, Addr addr);') + code("/** \\brief ${{action.desc}} */") + code( + "void ${{action.ident}}(${{self.TBEType.c_ident}}*& " + "m_tbe_ptr, ${{self.EntryType.c_ident}}*& " + "m_cache_entry_ptr, Addr addr);" + ) elif self.TBEType != None: for action in self.actions.values(): - code('/** \\brief ${{action.desc}} */') - code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& ' - 'm_tbe_ptr, Addr addr);') + code("/** \\brief ${{action.desc}} */") + code( + "void ${{action.ident}}(${{self.TBEType.c_ident}}*& " + "m_tbe_ptr, Addr addr);" + ) elif self.EntryType != None: for action in self.actions.values(): - code('/** \\brief ${{action.desc}} */') - code('void 
${{action.ident}}(${{self.EntryType.c_ident}}*& ' - 'm_cache_entry_ptr, Addr addr);') + code("/** \\brief ${{action.desc}} */") + code( + "void ${{action.ident}}(${{self.EntryType.c_ident}}*& " + "m_cache_entry_ptr, Addr addr);" + ) else: for action in self.actions.values(): - code('/** \\brief ${{action.desc}} */') - code('void ${{action.ident}}(Addr addr);') + code("/** \\brief ${{action.desc}} */") + code("void ${{action.ident}}(Addr addr);") # the controller internal variables - code(''' + code( + """ // Objects -''') +""" + ) for var in self.objects: th = var.get("template", "") - code('${{var.type.c_ident}}$th* m_${{var.ident}}_ptr;') + code("${{var.type.c_ident}}$th* m_${{var.ident}}_ptr;") code.dedent() - code(''' + code( + """ }; } // namespace ruby } // namespace gem5 #endif // __${ident}_CONTROLLER_H__ -''') +""" + ) - code.write(path, '%s.hh' % c_ident) + code.write(path, "%s.hh" % c_ident) def printControllerCC(self, path, includes): - '''Output the actions for performing the actions''' + """Output the actions for performing the actions""" code = self.symtab.codeFormatter() ident = self.ident @@ -492,18 +552,19 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr); # is included above "base/cprintf.hh" in this file, despite it being # necessary in this case. This is therefore a bit of a hack to keep # both clang and our style-checker happy. - base_include = ''' + base_include = """ #include "base/compiler.hh" #include "base/cprintf.hh" -''' +""" - boolvec_include = ''' + boolvec_include = """ #include "mem/ruby/common/BoolVec.hh" -''' +""" - code(''' + code( + """ // Created by slicc definition of Module "${{self.short}}" #include @@ -514,7 +575,8 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr); #include #include -''') +""" + ) code(boolvec_include) code(base_include) @@ -522,7 +584,8 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr); # output and avoid unnecessary rebuilds of the generated files. 
for f in sorted(self.debug_flags): code('#include "debug/${{f}}.hh"') - code(''' + code( + """ #include "mem/ruby/network/Network.hh" #include "mem/ruby/protocol/${ident}_Controller.hh" #include "mem/ruby/protocol/${ident}_Event.hh" @@ -530,7 +593,8 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr); #include "mem/ruby/protocol/Types.hh" #include "mem/ruby/system/RubySystem.hh" -''') +""" + ) for include_path in includes: code('#include "${{include_path}}"') @@ -543,7 +607,8 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr); num_in_ports = len(self.in_ports) - code(''' + code( + """ namespace gem5 { @@ -573,7 +638,8 @@ $c_ident::$c_ident(const Params &p) p.ruby_system->registerAbstractController(this); m_in_ports = $num_in_ports; -''') +""" + ) code.indent() # @@ -583,20 +649,25 @@ $c_ident::$c_ident(const Params &p) # for param in self.config_parameters: if param.pointer: - code('m_${{param.ident}}_ptr = p.${{param.ident}};') + code("m_${{param.ident}}_ptr = p.${{param.ident}};") else: - code('m_${{param.ident}} = p.${{param.ident}};') + code("m_${{param.ident}} = p.${{param.ident}};") - if re.compile("sequencer").search(param.ident) or \ - param.type_ast.type.c_ident == "GPUCoalescer" or \ - param.type_ast.type.c_ident == "VIPERCoalescer": - code(''' + if ( + re.compile("sequencer").search(param.ident) + or param.type_ast.type.c_ident == "GPUCoalescer" + or param.type_ast.type.c_ident == "VIPERCoalescer" + ): + code( + """ if (m_${{param.ident}}_ptr != NULL) { m_${{param.ident}}_ptr->setController(this); } -''') +""" + ) - code(''' + code( + """ for (int state = 0; state < ${ident}_State_NUM; state++) { for (int event = 0; event < ${ident}_Event_NUM; event++) { @@ -607,9 +678,11 @@ for (int state = 0; state < ${ident}_State_NUM; state++) { for (int event = 0; event < ${ident}_Event_NUM; event++) { m_event_counters[event] = 0; } -''') +""" + ) code.dedent() - code(''' + code( + """ } void @@ -618,7 +691,8 @@ $c_ident::initNetQueues() MachineType 
machine_type = string_to_MachineType("${{self.ident}}"); [[maybe_unused]] int base = MachineType_base_number(machine_type); -''') +""" + ) code.indent() # set for maintaining the vnet, direction pairs already seen for this @@ -630,7 +704,7 @@ $c_ident::initNetQueues() vid = "m_%s_ptr" % var.ident if "network" in var: vtype = var.type_ast.type - code('assert($vid != NULL);') + code("assert($vid != NULL);") # Network port object network = var["network"] @@ -640,25 +714,29 @@ $c_ident::initNetQueues() vnet_type = var["vnet_type"] assert (vnet, network) not in vnet_dir_set - vnet_dir_set.add((vnet,network)) + vnet_dir_set.add((vnet, network)) - code(''' + code( + """ m_net_ptr->set${network}NetQueue(m_version + base, $vid->getOrdered(), $vnet, "$vnet_type", $vid); -''') +""" + ) # Set Priority if "rank" in var: code('$vid->setPriority(${{var["rank"]}})') code.dedent() - code(''' + code( + """ } void $c_ident::init() { // initialize objects -''') +""" + ) code.indent() @@ -668,7 +746,7 @@ $c_ident::init() if "network" not in var: # Not a network port object if "primitive" in vtype: - code('$vid = new ${{vtype.c_ident}};') + code("$vid = new ${{vtype.c_ident}};") if "default" in var: code('(*$vid) = ${{var["default"]}};') else: @@ -679,8 +757,8 @@ $c_ident::init() if "non_obj" not in vtype and not vtype.isEnumeration: args = var.get("constructor", "") - code('$expr($args);') - code('assert($vid != NULL);') + code("$expr($args);") + code("assert($vid != NULL);") if "default" in var: code('*$vid = ${{var["default"]}}; // Object default') @@ -691,12 +769,12 @@ $c_ident::init() # Set the prefetchers code() for prefetcher in self.prefetchers: - code('${{prefetcher.code}}.setController(this);') + code("${{prefetcher.code}}.setController(this);") code() for port in self.in_ports: # Set the queue consumers - code('${{port.code}}.setConsumer(this);') + code("${{port.code}}.setConsumer(this);") # Initialize the transition profiling code() @@ -711,14 +789,16 @@ $c_ident::init() if 
not stall: state = "%s_State_%s" % (self.ident, trans.state.ident) event = "%s_Event_%s" % (self.ident, trans.event.ident) - code('possibleTransition($state, $event);') + code("possibleTransition($state, $event);") code.dedent() - code(''' + code( + """ AbstractController::init(); resetStats(); } -''') +""" + ) mq_ident = "NULL" for port in self.in_ports: @@ -738,23 +818,24 @@ $c_ident::init() seq_ident = "NULL" for param in self.config_parameters: if param.ident == "sequencer": - assert(param.pointer) + assert param.pointer seq_ident = "m_%s_ptr" % param.ident dma_seq_ident = "NULL" for param in self.config_parameters: if param.ident == "dma_sequencer": - assert(param.pointer) + assert param.pointer dma_seq_ident = "m_%s_ptr" % param.ident coal_ident = "NULL" for param in self.config_parameters: if param.ident == "coalescer": - assert(param.pointer) + assert param.pointer coal_ident = "m_%s_ptr" % param.ident if seq_ident != "NULL": - code(''' + code( + """ Sequencer* $c_ident::getCPUSequencer() const { @@ -764,19 +845,23 @@ $c_ident::getCPUSequencer() const return NULL; } } -''') +""" + ) else: - code(''' + code( + """ Sequencer* $c_ident::getCPUSequencer() const { return NULL; } -''') +""" + ) if dma_seq_ident != "NULL": - code(''' + code( + """ DMASequencer* $c_ident::getDMASequencer() const { @@ -786,19 +871,23 @@ $c_ident::getDMASequencer() const return NULL; } } -''') +""" + ) else: - code(''' + code( + """ DMASequencer* $c_ident::getDMASequencer() const { return NULL; } -''') +""" + ) if coal_ident != "NULL": - code(''' + code( + """ GPUCoalescer* $c_ident::getGPUCoalescer() const { @@ -808,18 +897,22 @@ $c_ident::getGPUCoalescer() const return NULL; } } -''') +""" + ) else: - code(''' + code( + """ GPUCoalescer* $c_ident::getGPUCoalescer() const { return NULL; } -''') +""" + ) - code(''' + code( + """ void $c_ident::regStats() @@ -827,7 +920,7 @@ $c_ident::regStats() AbstractController::regStats(); // For each type of controllers, one controller of that 
type is picked - // to aggregate stats of all controllers of that type. + // to aggregate stats of all controllers of that type. if (m_version == 0) { Profiler *profiler = params().ruby_system->getProfiler(); @@ -1028,10 +1121,12 @@ void $c_ident::resetStats() AbstractController::resetStats(); } -''') +""" + ) if self.EntryType != None: - code(''' + code( + """ // Set and Reset for cache_entry variable void @@ -1045,10 +1140,12 @@ $c_ident::unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr) { m_cache_entry_ptr = 0; } -''') +""" + ) if self.TBEType != None: - code(''' + code( + """ // Set and Reset for tbe variable void @@ -1062,35 +1159,41 @@ $c_ident::unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr) { m_tbe_ptr = NULL; } -''') +""" + ) - code(''' + code( + """ void $c_ident::recordCacheTrace(int cntrl, CacheRecorder* tr) { -''') +""" + ) # # Record cache contents for all associated caches. # code.indent() for param in self.config_parameters: if param.type_ast.type.ident == "CacheMemory": - assert(param.pointer) - code('m_${{param.ident}}_ptr->recordCacheContents(cntrl, tr);') + assert param.pointer + code("m_${{param.ident}}_ptr->recordCacheContents(cntrl, tr);") code.dedent() - code(''' + code( + """ } // Actions -''') +""" + ) if self.TBEType != None and self.EntryType != None: for action in self.actions.values(): if "c_code" not in action: - continue + continue - code(''' + code( + """ /** \\brief ${{action.desc}} */ void $c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, Addr addr) @@ -1105,13 +1208,15 @@ $c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.Entry } } -''') +""" + ) elif self.TBEType != None: for action in self.actions.values(): if "c_code" not in action: - continue + continue - code(''' + code( + """ /** \\brief ${{action.desc}} */ void $c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, Addr addr) @@ -1120,13 +1225,15 @@ 
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, Addr addr) ${{action["c_code"]}} } -''') +""" + ) elif self.EntryType != None: for action in self.actions.values(): if "c_code" not in action: - continue + continue - code(''' + code( + """ /** \\brief ${{action.desc}} */ void $c_ident::${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, Addr addr) @@ -1135,13 +1242,15 @@ $c_ident::${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, Add ${{action["c_code"]}} } -''') +""" + ) else: for action in self.actions.values(): if "c_code" not in action: - continue + continue - code(''' + code( + """ /** \\brief ${{action.desc}} */ void $c_ident::${{action.ident}}(Addr addr) @@ -1150,53 +1259,61 @@ $c_ident::${{action.ident}}(Addr addr) ${{action["c_code"]}} } -''') +""" + ) for func in self.functions: code(func.generateCode()) # Function for functional writes to messages buffered in the controller - code(''' + code( + """ int $c_ident::functionalWriteBuffers(PacketPtr& pkt) { int num_functional_writes = 0; -''') +""" + ) for var in self.objects: vtype = var.type if vtype.isBuffer: vid = "m_%s_ptr" % var.ident - code('num_functional_writes += $vid->functionalWrite(pkt);') + code("num_functional_writes += $vid->functionalWrite(pkt);") for var in self.config_parameters: vtype = var.type_ast.type if vtype.isBuffer: vid = "m_%s_ptr" % var.ident - code('num_functional_writes += $vid->functionalWrite(pkt);') + code("num_functional_writes += $vid->functionalWrite(pkt);") - code(''' + code( + """ return num_functional_writes; } -''') +""" + ) # Function for functional reads to messages buffered in the controller - code(''' + code( + """ bool $c_ident::functionalReadBuffers(PacketPtr& pkt) { -''') +""" + ) for var in self.objects: vtype = var.type if vtype.isBuffer: vid = "m_%s_ptr" % var.ident - code('if ($vid->functionalRead(pkt)) return true;') + code("if ($vid->functionalRead(pkt)) return true;") for var in self.config_parameters: 
vtype = var.type_ast.type if vtype.isBuffer: vid = "m_%s_ptr" % var.ident - code('if ($vid->functionalRead(pkt)) return true;') + code("if ($vid->functionalRead(pkt)) return true;") - code(''' + code( + """ return false; } @@ -1204,31 +1321,34 @@ bool $c_ident::functionalReadBuffers(PacketPtr& pkt, WriteMask &mask) { bool read = false; -''') +""" + ) for var in self.objects: vtype = var.type if vtype.isBuffer: vid = "m_%s_ptr" % var.ident - code('if ($vid->functionalRead(pkt, mask)) read = true;') + code("if ($vid->functionalRead(pkt, mask)) read = true;") for var in self.config_parameters: vtype = var.type_ast.type if vtype.isBuffer: vid = "m_%s_ptr" % var.ident - code('if ($vid->functionalRead(pkt, mask)) read = true;') + code("if ($vid->functionalRead(pkt, mask)) read = true;") - code(''' + code( + """ return read; } } // namespace ruby } // namespace gem5 -''') +""" + ) code.write(path, "%s.cc" % c_ident) def printCWakeup(self, path, includes): - '''Output the wakeup loop for the events''' + """Output the wakeup loop for the events""" code = self.symtab.codeFormatter() ident = self.ident @@ -1237,7 +1357,8 @@ $c_ident::functionalReadBuffers(PacketPtr& pkt, WriteMask &mask) if len(self.request_types) == 0: outputRequest_types = False - code(''' + code( + """ // ${ident}: ${{self.short}} #include @@ -1248,34 +1369,39 @@ $c_ident::functionalReadBuffers(PacketPtr& pkt, WriteMask &mask) #include "base/logging.hh" -''') +""" + ) # We have to sort self.debug_flags in order to produce deterministic # output and avoid unnecessary rebuilds of the generated files. 
for f in sorted(self.debug_flags): code('#include "debug/${{f}}.hh"') - code(''' + code( + """ #include "mem/ruby/protocol/${ident}_Controller.hh" #include "mem/ruby/protocol/${ident}_Event.hh" #include "mem/ruby/protocol/${ident}_State.hh" -''') +""" + ) if outputRequest_types: code('''#include "mem/ruby/protocol/${ident}_RequestType.hh"''') - code(''' + code( + """ #include "mem/ruby/protocol/Types.hh" #include "mem/ruby/system/RubySystem.hh" -''') - +""" + ) for include_path in includes: code('#include "${{include_path}}"') port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident) - code(''' + code( + """ namespace gem5 { @@ -1303,7 +1429,8 @@ ${ident}_Controller::wakeup() scheduleEvent(Cycles(1)); break; } -''') +""" + ) code.indent() code.indent() @@ -1312,35 +1439,40 @@ ${ident}_Controller::wakeup() # for port in self.in_ports: code.indent() - code('// ${ident}InPort $port') + code("// ${ident}InPort $port") if "rank" in port.pairs: code('m_cur_in_port = ${{port.pairs["rank"]}};') else: - code('m_cur_in_port = 0;') + code("m_cur_in_port = 0;") if port in port_to_buf_map: - code('try {') + code("try {") code.indent() code('${{port["c_code_in_port"]}}') if port in port_to_buf_map: code.dedent() - code(''' + code( + """ } catch (const RejectException & e) { rejected[${{port_to_buf_map[port]}}]++; } -''') +""" + ) code.dedent() - code('') + code("") code.dedent() code.dedent() - code(''' + code( + """ // If we got this far, we have nothing left todo or something went - // wrong''') + // wrong""" + ) for buf_name, ports in in_msg_bufs.items(): if len(ports) > 1: # only produce checks when a buffer is shared by multiple ports - code(''' + code( + """ if (${{buf_name}}->isReady(clockEdge()) && rejected[${{port_to_buf_map[ports[0]]}}] == ${{len(ports)}}) { // no port claimed the message on the top of this buffer @@ -1351,25 +1483,29 @@ ${ident}_Controller::wakeup() "the incoming message type.\\n", Cycles(1)); } -''') - code(''' +""" + ) + code( + """ break; 
} } } // namespace ruby } // namespace gem5 -''') +""" + ) code.write(path, "%s_Wakeup.cc" % self.ident) def printCSwitch(self, path): - '''Output switch statement for transition table''' + """Output switch statement for transition table""" code = self.symtab.codeFormatter() ident = self.ident - code(''' + code( + """ // ${ident}: ${{self.short}} #include @@ -1397,31 +1533,41 @@ namespace ruby TransitionResult ${ident}_Controller::doTransition(${ident}_Event event, -''') +""" + ) if self.EntryType != None: - code(''' + code( + """ ${{self.EntryType.c_ident}}* m_cache_entry_ptr, -''') +""" + ) if self.TBEType != None: - code(''' + code( + """ ${{self.TBEType.c_ident}}* m_tbe_ptr, -''') - code(''' +""" + ) + code( + """ Addr addr) { -''') +""" + ) code.indent() if self.TBEType != None and self.EntryType != None: - code('${ident}_State state = getState(m_tbe_ptr, m_cache_entry_ptr, addr);') + code( + "${ident}_State state = getState(m_tbe_ptr, m_cache_entry_ptr, addr);" + ) elif self.TBEType != None: - code('${ident}_State state = getState(m_tbe_ptr, addr);') + code("${ident}_State state = getState(m_tbe_ptr, addr);") elif self.EntryType != None: - code('${ident}_State state = getState(m_cache_entry_ptr, addr);') + code("${ident}_State state = getState(m_cache_entry_ptr, addr);") else: - code('${ident}_State state = getState(addr);') + code("${ident}_State state = getState(addr);") - code(''' + code( + """ ${ident}_State next_state = state; DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %#x\\n", @@ -1429,19 +1575,27 @@ DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %#x\\n", ${ident}_Event_to_string(event), addr); TransitionResult result = -''') +""" + ) if self.TBEType != None and self.EntryType != None: - code('doTransitionWorker(event, state, next_state, m_tbe_ptr, m_cache_entry_ptr, addr);') + code( + "doTransitionWorker(event, state, next_state, m_tbe_ptr, m_cache_entry_ptr, addr);" + ) elif self.TBEType != None: - 
code('doTransitionWorker(event, state, next_state, m_tbe_ptr, addr);') + code( + "doTransitionWorker(event, state, next_state, m_tbe_ptr, addr);" + ) elif self.EntryType != None: - code('doTransitionWorker(event, state, next_state, m_cache_entry_ptr, addr);') + code( + "doTransitionWorker(event, state, next_state, m_cache_entry_ptr, addr);" + ) else: - code('doTransitionWorker(event, state, next_state, addr);') + code("doTransitionWorker(event, state, next_state, addr);") port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident) - code(''' + code( + """ if (result == TransitionResult_Valid) { DPRINTF(RubyGenerated, "next_state: %s\\n", @@ -1456,21 +1610,23 @@ if (result == TransitionResult_Valid) { printAddress(addr), GET_TRANSITION_COMMENT()); CLEAR_TRANSITION_COMMENT(); -''') +""" + ) if self.TBEType != None and self.EntryType != None: - code('setState(m_tbe_ptr, m_cache_entry_ptr, addr, next_state);') - code('setAccessPermission(m_cache_entry_ptr, addr, next_state);') + code("setState(m_tbe_ptr, m_cache_entry_ptr, addr, next_state);") + code("setAccessPermission(m_cache_entry_ptr, addr, next_state);") elif self.TBEType != None: - code('setState(m_tbe_ptr, addr, next_state);') - code('setAccessPermission(addr, next_state);') + code("setState(m_tbe_ptr, addr, next_state);") + code("setAccessPermission(addr, next_state);") elif self.EntryType != None: - code('setState(m_cache_entry_ptr, addr, next_state);') - code('setAccessPermission(m_cache_entry_ptr, addr, next_state);') + code("setState(m_cache_entry_ptr, addr, next_state);") + code("setAccessPermission(m_cache_entry_ptr, addr, next_state);") else: - code('setState(addr, next_state);') - code('setAccessPermission(addr, next_state);') + code("setState(addr, next_state);") + code("setAccessPermission(addr, next_state);") - code(''' + code( + """ } else if (result == TransitionResult_ResourceStall) { DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\\n", curTick(), m_version, "${ident}", @@ 
-1489,39 +1645,52 @@ if (result == TransitionResult_Valid) { } return result; -''') +""" + ) code.dedent() - code(''' + code( + """ } TransitionResult ${ident}_Controller::doTransitionWorker(${ident}_Event event, ${ident}_State state, ${ident}_State& next_state, -''') +""" + ) if self.TBEType != None: - code(''' + code( + """ ${{self.TBEType.c_ident}}*& m_tbe_ptr, -''') +""" + ) if self.EntryType != None: - code(''' + code( + """ ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, -''') - code(''' +""" + ) + code( + """ Addr addr) { m_curTransitionEvent = event; m_curTransitionNextState = next_state; switch(HASH_FUN(state, event)) { -''') +""" + ) # This map will allow suppress generating duplicate code cases = OrderedDict() for trans in self.transitions: - case_string = "%s_State_%s, %s_Event_%s" % \ - (self.ident, trans.state.ident, self.ident, trans.event.ident) + case_string = "%s_State_%s, %s_Event_%s" % ( + self.ident, + trans.state.ident, + self.ident, + trans.event.ident, + ) case = self.symtab.codeFormatter() # Only set next_state if it changes @@ -1533,12 +1702,16 @@ ${ident}_Controller::doTransitionWorker(${ident}_Event event, # is determined before any actions of the transition # execute, and therefore the next state calculation cannot # depend on any of the transitionactions. 
- case('next_state = getNextState(addr); ' - 'm_curTransitionNextState = next_state;') + case( + "next_state = getNextState(addr); " + "m_curTransitionNextState = next_state;" + ) else: ns_ident = trans.nextState.ident - case('next_state = ${ident}_State_${ns_ident}; ' - 'm_curTransitionNextState = next_state;') + case( + "next_state = ${ident}_State_${ns_ident}; " + "m_curTransitionNextState = next_state;" + ) actions = trans.actions request_types = trans.request_types @@ -1546,20 +1719,26 @@ ${ident}_Controller::doTransitionWorker(${ident}_Event event, # Check for resources case_sorter = [] res = trans.resources - for key,val in res.items(): - val = ''' + for key, val in res.items(): + val = """ if (!%s.areNSlotsAvailable(%s, clockEdge())) return TransitionResult_ResourceStall; -''' % (key.code, val) +""" % ( + key.code, + val, + ) case_sorter.append(val) # Check all of the request_types for resource constraints for request_type in request_types: - val = ''' + val = """ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { return TransitionResult_ResourceStall; } -''' % (self.ident, request_type.ident) +""" % ( + self.ident, + request_type.ident, + ) case_sorter.append(val) # Emit the code sequences in a sorted order. 
This makes the @@ -1570,7 +1749,9 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { # Record access types for this transition for request_type in request_types: - case('recordRequestType(${ident}_RequestType_${{request_type.ident}}, addr);') + case( + "recordRequestType(${ident}_RequestType_${{request_type.ident}}, addr);" + ) # Figure out if we stall stall = False @@ -1580,21 +1761,23 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { break if stall: - case('return TransitionResult_ProtocolStall;') + case("return TransitionResult_ProtocolStall;") else: if self.TBEType != None and self.EntryType != None: for action in actions: - case('${{action.ident}}(m_tbe_ptr, m_cache_entry_ptr, addr);') + case( + "${{action.ident}}(m_tbe_ptr, m_cache_entry_ptr, addr);" + ) elif self.TBEType != None: for action in actions: - case('${{action.ident}}(m_tbe_ptr, addr);') + case("${{action.ident}}(m_tbe_ptr, addr);") elif self.EntryType != None: for action in actions: - case('${{action.ident}}(m_cache_entry_ptr, addr);') + case("${{action.ident}}(m_cache_entry_ptr, addr);") else: for action in actions: - case('${{action.ident}}(addr);') - case('return TransitionResult_Valid;') + case("${{action.ident}}(addr);") + case("return TransitionResult_Valid;") case = str(case) @@ -1606,14 +1789,15 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { # Walk through all of the unique code blocks and spit out the # corresponding case statement elements - for case,transitions in cases.items(): + for case, transitions in cases.items(): # Iterative over all the multiple transitions that share # the same code for trans in transitions: - code(' case HASH_FUN($trans):') - code(' $case\n') + code(" case HASH_FUN($trans):") + code(" $case\n") - code(''' + code( + """ default: panic("Invalid transition\\n" "%s time: %d addr: %#x event: %s state: %s\\n", @@ -1625,21 +1809,23 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { } // namespace ruby } // namespace gem5 -''') 
+""" + ) code.write(path, "%s_Transitions.cc" % self.ident) - # ************************** # ******* HTML Files ******* # ************************** def frameRef(self, click_href, click_target, over_href, over_num, text): code = self.symtab.codeFormatter(fix_newlines=False) - code(""" ${{html.formatShorthand(text)}} - """) + """ + ) return str(code) def writeHTMLFiles(self, path): @@ -1671,39 +1857,45 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { def printHTMLTransitions(self, path, active_state): code = self.symtab.codeFormatter() - code(''' + code( + """

${{html.formatShorthand(self.short)}}: -''') +""" + ) code.indent() - for i,machine in enumerate(self.symtab.getAllType(StateMachine)): + for i, machine in enumerate(self.symtab.getAllType(StateMachine)): mid = machine.ident if i != 0: extra = " - " else: extra = "" if machine == self: - code('$extra$mid') + code("$extra$mid") else: - code('$extra$mid') + code( + '$extra$mid' + ) code.dedent() - code(""" + code( + """

-""") +""" + ) for event in self.events.values(): href = "%s_Event_%s.html" % (self.ident, event.ident) ref = self.frameRef(href, "Status", href, "1", event.short) - code('') + code("") - code('') + code("") # -- Body of table for state in self.states.values(): # -- Each row @@ -1716,14 +1908,16 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { over = "%s_State_%s.html" % (self.ident, state.ident) text = html.formatShorthand(state.short) ref = self.frameRef(click, "Table", over, "1", state.short) - code(''' + code( + """ -''') +""" + ) # -- One column for each event for event in self.events.values(): - trans = self.table.get((state,event), None) + trans = self.table.get((state, event), None) if trans is None: # This is the no transition case if state == active_state: @@ -1731,7 +1925,7 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { else: color = "lightgrey" - code('') + code("") continue next = trans.nextState @@ -1739,8 +1933,10 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { # -- Get the actions for action in trans.actions: - if action.ident == "z_stall" or \ - action.ident == "zz_recycleMandatoryQueue": + if ( + action.ident == "z_stall" + or action.ident == "zz_recycleMandatoryQueue" + ): stall_action = True # -- Print out "actions/next-state" @@ -1757,15 +1953,16 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { else: color = "white" - code(' -''') - code(''' +""" + ) + code( + """ -''') +""" + ) for event in self.events.values(): href = "%s_Event_%s.html" % (self.ident, event.ident) ref = self.frameRef(href, "Status", href, "1", event.short) - code('') - code(''' + code("") + code( + """
$ref$ref
$ref  ') + code("") for action in trans.actions: href = "%s_action_%s.html" % (self.ident, action.ident) - ref = self.frameRef(href, "Status", href, "1", - action.short) - code(' $ref') + ref = self.frameRef( + href, "Status", href, "1", action.short + ) + code(" $ref") if next != state: if trans.actions: - code('/') + code("/") click = "%s_table_%s.html" % (self.ident, next.ident) over = "%s_State_%s.html" % (self.ident, next.ident) ref = self.frameRef(click, "Table", over, "1", next.short) @@ -1781,26 +1978,31 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { click = "%s_table_%s.html" % (self.ident, state.ident) over = "%s_State_%s.html" % (self.ident, state.ident) ref = self.frameRef(click, "Table", over, "1", state.short) - code(''' + code( + """ $ref
$ref$ref
-''') - +""" + ) if active_state: name = "%s_table_%s.html" % (self.ident, active_state.ident) @@ -1808,4 +2010,5 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) { name = "%s_table.html" % self.ident code.write(path, name) -__all__ = [ "StateMachine" ] + +__all__ = ["StateMachine"] diff --git a/src/mem/slicc/symbols/Symbol.py b/src/mem/slicc/symbols/Symbol.py index 268ec0420e..cd8f6b9a29 100644 --- a/src/mem/slicc/symbols/Symbol.py +++ b/src/mem/slicc/symbols/Symbol.py @@ -1,3 +1,15 @@ +# Copyright (c) 2022 Arm Limited +# All rights reserved. +# +# The license below extends only to copyright in the software and shall +# not be construed as granting a license to any other intellectual +# property including but not limited to intellectual property relating +# to a hardware implementation of the functionality of the software +# licensed hereunder. You may use the software subject to the license +# terms below provided that you ensure that this notice is replicated +# unmodified and in its entirety in all distributions of the software, +# modified or unmodified, in source code or in binary form. +# # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. 
@@ -27,15 +39,20 @@ from slicc.util import PairContainer + class Symbol(PairContainer): def __init__(self, symtab, ident, location, pairs=None): super().__init__() from slicc.util import Location from slicc.symbols import SymbolTable - if not isinstance(symtab, SymbolTable): raise AttributeError - if not isinstance(ident, str): raise AttributeError - if not isinstance(location, Location): raise AttributeError + + if not isinstance(symtab, SymbolTable): + raise AttributeError + if not isinstance(ident, str): + raise AttributeError + if not isinstance(location, Location): + raise AttributeError self.symtab = symtab self.ident = ident @@ -54,8 +71,12 @@ class Symbol(PairContainer): def __setitem__(self, key, value): if key in self.pairs: - self.warning("Pair key '%s' re-defined. new: '%s' old: '%s'", - key, value, self.pairs[key]) + self.warning( + "Pair key '%s' re-defined. new: '%s' old: '%s'", + key, + value, + self.pairs[key], + ) super().__setitem__(key, value) @property @@ -64,7 +85,11 @@ class Symbol(PairContainer): @property def desc(self): - return self["desc"] + # Allow Symbols with no description: return an empty string. + if "desc" not in self: + return "" + else: + return self["desc"] def error(self, message, *args): self.location.error(message, *args) @@ -75,4 +100,5 @@ class Symbol(PairContainer): def writeHTMLFiles(self, path): pass -__all__ = [ "Symbol" ] + +__all__ = ["Symbol"] diff --git a/src/mem/slicc/symbols/SymbolTable.py b/src/mem/slicc/symbols/SymbolTable.py index f9f7a1041e..4b06be5c4a 100644 --- a/src/mem/slicc/symbols/SymbolTable.py +++ b/src/mem/slicc/symbols/SymbolTable.py @@ -32,6 +32,7 @@ from slicc.symbols.StateMachine import StateMachine from slicc.symbols.Type import Type from slicc.util import Location + def makeDir(path): """Make a directory if it doesn't exist. 
If the path does exist, ensure that it is a directory""" @@ -41,12 +42,13 @@ def makeDir(path): else: os.mkdir(path) + class SymbolTable(object): def __init__(self, slicc): self.slicc = slicc self.sym_vec = [] - self.sym_map_vec = [ {} ] + self.sym_map_vec = [{}] self.machine_components = {} pairs = {} @@ -57,7 +59,7 @@ class SymbolTable(object): self.newSymbol(void) def __repr__(self): - return "[SymbolTable]" # FIXME + return "[SymbolTable]" # FIXME def codeFormatter(self, *args, **kwargs): return self.slicc.codeFormatter(*args, **kwargs) @@ -88,8 +90,8 @@ class SymbolTable(object): if types is not None: if not isinstance(symbol, types): - continue # there could be a name clash with other symbol - # so rather than producing an error, keep trying + continue # there could be a name clash with other symbol + # so rather than producing an error, keep trying return symbol @@ -158,7 +160,8 @@ class SymbolTable(object): name = "empty.html" code = self.codeFormatter() - code(''' + code( + """ $path @@ -168,7 +171,8 @@ class SymbolTable(object): -''') +""" + ) code.write(path, "index.html") code = self.codeFormatter() @@ -178,4 +182,5 @@ class SymbolTable(object): for symbol in self.sym_vec: symbol.writeHTMLFiles(path) -__all__ = [ "SymbolTable" ] + +__all__ = ["SymbolTable"] diff --git a/src/mem/slicc/symbols/Transition.py b/src/mem/slicc/symbols/Transition.py index bfa721c120..478f28c74c 100644 --- a/src/mem/slicc/symbols/Transition.py +++ b/src/mem/slicc/symbols/Transition.py @@ -28,45 +28,61 @@ from slicc.symbols.Symbol import Symbol from slicc.symbols.State import WildcardState + class Transition(Symbol): - def __init__(self, table, machine, state, event, nextState, actions, - request_types, location): + def __init__( + self, + table, + machine, + state, + event, + nextState, + actions, + request_types, + location, + ): ident = "%s|%s" % (state, event) super().__init__(table, ident, location) self.state = machine.states[state] self.event = machine.events[event] - if 
nextState == '*': + if nextState == "*": # check to make sure there is a getNextState function declared found = False for func in machine.functions: - if func.c_ident == 'getNextState_Addr': + if func.c_ident == "getNextState_Addr": found = True break if not found: - fatal("Machine uses a wildcard transition without getNextState defined") - self.nextState = WildcardState(machine.symtab, - '*', location) + fatal( + "Machine uses a wildcard transition without getNextState defined" + ) + self.nextState = WildcardState(machine.symtab, "*", location) else: self.nextState = machine.states[nextState] - self.actions = [ machine.actions[a] for a in actions ] - self.request_types = [ machine.request_types[s] for s in request_types ] + self.actions = [machine.actions[a] for a in actions] + self.request_types = [machine.request_types[s] for s in request_types] self.resources = {} for action in self.actions: - for var,value in action.resources.items(): + for var, value in action.resources.items(): num = int(value) if var in self.resources: num += int(value) self.resources[var] = str(num) def __repr__(self): - return "[Transition: (%r, %r) -> %r, %r]" % \ - (self.state, self.event, self.nextState, self.actions) + return "[Transition: (%r, %r) -> %r, %r]" % ( + self.state, + self.event, + self.nextState, + self.actions, + ) def getActionShorthands(self): assert self.actions - return ''.join(a.short for a in self.actions) + return "".join(a.short for a in self.actions) -__all__ = [ "Transition" ] + +__all__ = ["Transition"] diff --git a/src/mem/slicc/symbols/Type.py b/src/mem/slicc/symbols/Type.py index a87e6b51d0..c51902667b 100644 --- a/src/mem/slicc/symbols/Type.py +++ b/src/mem/slicc/symbols/Type.py @@ -43,21 +43,25 @@ from slicc.util import PairContainer from slicc.symbols.Symbol import Symbol from slicc.symbols.Var import Var + class DataMember(Var): - def __init__(self, symtab, ident, location, type, code, pairs, - machine, init_code): + def __init__( + self, symtab, ident, 
location, type, code, pairs, machine, init_code + ): super().__init__(symtab, ident, location, type, code, pairs, machine) self.init_code = init_code self.real_c_type = self.type.c_ident if "template" in pairs: self.real_c_type += pairs["template"] + class Enumeration(PairContainer): def __init__(self, ident, pairs): super().__init__(pairs) self.ident = ident self.primary = False + class Type(Symbol): def __init__(self, table, ident, location, pairs, machine=None): super().__init__(table, ident, location, pairs) @@ -98,9 +102,9 @@ class Type(Symbol): if self.ident == "Prefetcher": self["prefetcher"] = "yes" - self.isMachineType = (ident == "MachineType") + self.isMachineType = ident == "MachineType" - self.isStateDecl = ("state_decl" in self) + self.isStateDecl = "state_decl" in self self.statePermPairs = [] self.data_members = OrderedDict() @@ -114,24 +118,31 @@ class Type(Symbol): @property def isMessage(self): return "message" in self + @property def isBuffer(self): return "buffer" in self + @property def isInPort(self): return "inport" in self + @property def isOutPort(self): return "outport" in self + @property def isEnumeration(self): return "enumeration" in self + @property def isExternal(self): return "external" in self + @property def isGlobal(self): return "global" in self + @property def isInterface(self): return "interface" in self @@ -141,8 +152,16 @@ class Type(Symbol): if ident in self.data_members: return False - member = DataMember(self.symtab, ident, self.location, type, - "m_%s" % ident, pairs, None, init_code) + member = DataMember( + self.symtab, + ident, + self.location, + type, + "m_%s" % ident, + pairs, + None, + init_code, + ) self.data_members[ident] = member self.symtab.registerSym(ident, member) @@ -152,10 +171,10 @@ class Type(Symbol): return self.data_members[ident].type def methodId(self, name, param_type_vec): - return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ]) + return "_".join([name] + [pt.c_ident for pt in 
param_type_vec]) def methodIdAbstract(self, name, param_type_vec): - return '_'.join([name] + [ pt.abstract_ident for pt in param_type_vec ]) + return "_".join([name] + [pt.abstract_ident for pt in param_type_vec]) def statePermPairAdd(self, state_name, perm_name): self.statePermPairs.append([state_name, perm_name]) @@ -202,7 +221,8 @@ class Type(Symbol): def printTypeHH(self, path): code = self.symtab.codeFormatter() - code(''' + code( + """ #ifndef __${{self.c_ident}}_HH__ #define __${{self.c_ident}}_HH__ @@ -210,7 +230,8 @@ class Type(Symbol): #include "mem/ruby/slicc_interface/RubySlicc_Util.hh" -''') +""" + ) for dm in self.data_members.values(): if not dm.type.isPrimitive: @@ -221,7 +242,8 @@ class Type(Symbol): code('#include "mem/ruby/protocol/$0.hh"', self["interface"]) parent = " : public %s" % self["interface"] - code(''' + code( + """ namespace gem5 { @@ -232,12 +254,14 @@ $klass ${{self.c_ident}}$parent { public: ${{self.c_ident}} -''', klass="class") +""", + klass="class", + ) if self.isMessage: - code('(Tick curTime) : %s(curTime) {' % self["interface"]) + code("(Tick curTime) : %s(curTime) {" % self["interface"]) else: - code('()\n\t\t{') + code("()\n\t\t{") code.indent() if not self.isGlobal: @@ -246,35 +270,39 @@ $klass ${{self.c_ident}}$parent ident = dm.ident if "default" in dm: # look for default value - code('m_$ident = ${{dm["default"]}}; // default for this field') + code( + 'm_$ident = ${{dm["default"]}}; // default for this field' + ) elif "default" in dm.type: # Look for the type default tid = dm.real_c_type code('m_$ident = ${{dm.type["default"]}};') - code(' // default value of $tid') + code(" // default value of $tid") else: - code('// m_$ident has no default') + code("// m_$ident has no default") code.dedent() - code('}') + code("}") # ******** Copy constructor ******** - code('${{self.c_ident}}(const ${{self.c_ident}}&) = default;') + code("${{self.c_ident}}(const ${{self.c_ident}}&) = default;") # ******** Assignment operator 
******** - code('${{self.c_ident}}') - code('&operator=(const ${{self.c_ident}}&) = default;') + code("${{self.c_ident}}") + code("&operator=(const ${{self.c_ident}}&) = default;") # ******** Full init constructor ******** if not self.isGlobal: - params = [ 'const %s& local_%s' % (dm.real_c_type, dm.ident) \ - for dm in self.data_members.values() ] - params = ', '.join(params) + params = [ + "const %s& local_%s" % (dm.real_c_type, dm.ident) + for dm in self.data_members.values() + ] + params = ", ".join(params) if self.isMessage: params = "const Tick curTime, " + params - code('${{self.c_ident}}($params)') + code("${{self.c_ident}}($params)") # Call superclass constructor if "interface" in self: @@ -283,37 +311,42 @@ $klass ${{self.c_ident}}$parent else: code(' : ${{self["interface"]}}()') - code('{') + code("{") code.indent() for dm in self.data_members.values(): - code('m_${{dm.ident}} = local_${{dm.ident}};') + code("m_${{dm.ident}} = local_${{dm.ident}};") code.dedent() - code('}') + code("}") # create a clone member if self.isMessage: - code(''' + code( + """ MsgPtr clone() const { return std::shared_ptr(new ${{self.c_ident}}(*this)); } -''') +""" + ) else: - code(''' + code( + """ ${{self.c_ident}}* clone() const { return new ${{self.c_ident}}(*this); } -''') +""" + ) if not self.isGlobal: # const Get methods for each field - code('// Const accessors methods for each field') + code("// Const accessors methods for each field") for dm in self.data_members.values(): - code(''' + code( + """ /** \\brief Const accessor method for ${{dm.ident}} field. * \\return ${{dm.ident}} field */ @@ -322,12 +355,14 @@ get${{dm.ident}}() const { return m_${{dm.ident}}; } -''') +""" + ) # Non-const Get methods for each field - code('// Non const Accessors methods for each field') + code("// Non const Accessors methods for each field") for dm in self.data_members.values(): - code(''' + code( + """ /** \\brief Non-const accessor method for ${{dm.ident}} field. 
* \\return ${{dm.ident}} field */ @@ -336,23 +371,26 @@ get${{dm.ident}}() { return m_${{dm.ident}}; } -''') +""" + ) - #Set methods for each field - code('// Mutator methods for each field') + # Set methods for each field + code("// Mutator methods for each field") for dm in self.data_members.values(): - code(''' + code( + """ /** \\brief Mutator method for ${{dm.ident}} field */ void set${{dm.ident}}(const ${{dm.real_c_type}}& local_${{dm.ident}}) { m_${{dm.ident}} = local_${{dm.ident}}; } -''') +""" + ) - code('void print(std::ostream& out) const;') + code("void print(std::ostream& out) const;") code.dedent() - code(' //private:') + code(" //private:") code.indent() # Data members for each field @@ -374,18 +412,19 @@ set${{dm.ident}}(const ${{dm.real_c_type}}& local_${{dm.ident}}) if "desc" in dm: code('/** ${{dm["desc"]}} */') - code('$const${{dm.real_c_type}} m_${{dm.ident}}$init;') + code("$const${{dm.real_c_type}} m_${{dm.ident}}$init;") # Prototypes for methods defined for the Type for item in self.methods: proto = self.methods[item].prototype if proto: - code('$proto') + code("$proto") code.dedent() - code('};') + code("};") - code(''' + code( + """ inline ::std::ostream& operator<<(::std::ostream& out, const ${{self.c_ident}}& obj) { @@ -398,14 +437,16 @@ operator<<(::std::ostream& out, const ${{self.c_ident}}& obj) } // namespace gem5 #endif // __${{self.c_ident}}_HH__ -''') +""" + ) code.write(path, "%s.hh" % self.c_ident) def printTypeCC(self, path): code = self.symtab.codeFormatter() - code(''' + code( + """ #include #include @@ -423,67 +464,79 @@ void ${{self.c_ident}}::print(std::ostream& out) const { out << "[${{self.c_ident}}: "; -''') +""" + ) # For each field code.indent() for dm in self.data_members.values(): if dm.type.c_ident == "Addr": - code(''' -out << "${{dm.ident}} = " << printAddress(m_${{dm.ident}}) << " ";''') + code( + """ +out << "${{dm.ident}} = " << printAddress(m_${{dm.ident}}) << " ";""" + ) else: - code('out << "${{dm.ident}} = 
" << m_${{dm.ident}} << " ";''') + code('out << "${{dm.ident}} = " << m_${{dm.ident}} << " ";' "") code.dedent() # Trailer - code(''' + code( + """ out << "]"; -}''') +}""" + ) # print the code for the methods in the type for item in self.methods: code(self.methods[item].generateCode()) - code(''' + code( + """ } // namespace ruby } // namespace gem5 -''') +""" + ) code.write(path, "%s.cc" % self.c_ident) def printEnumHH(self, path): code = self.symtab.codeFormatter() - code(''' + code( + """ #ifndef __${{self.c_ident}}_HH__ #define __${{self.c_ident}}_HH__ #include #include -''') +""" + ) if self.isStateDecl: code('#include "mem/ruby/protocol/AccessPermission.hh"') if self.isMachineType: - code('#include ') + code("#include ") code('#include "base/logging.hh"') code('#include "mem/ruby/common/Address.hh"') code('#include "mem/ruby/common/TypeDefines.hh"') - code(''' + code( + """ namespace gem5 { namespace ruby { -''') +""" + ) if self.isMachineType: - code('struct MachineID;') + code("struct MachineID;") - code(''' + code( + """ // Class definition /** \\enum ${{self.c_ident}} @@ -491,19 +544,21 @@ namespace ruby */ enum ${{self.c_ident}} { ${{self.c_ident}}_FIRST, -''') +""" + ) code.indent() # For each field - for i,(ident,enum) in enumerate(self.enums.items()): + for i, (ident, enum) in enumerate(self.enums.items()): desc = enum.get("desc", "No description avaliable") if i == 0: - init = ' = %s_FIRST' % self.c_ident + init = " = %s_FIRST" % self.c_ident else: - init = '' - code('${{self.c_ident}}_${{enum.ident}}$init, /**< $desc */') + init = "" + code("${{self.c_ident}}_${{enum.ident}}$init, /**< $desc */") code.dedent() - code(''' + code( + """ ${{self.c_ident}}_NUM }; @@ -515,42 +570,52 @@ ${{self.c_ident}} string_to_${{self.c_ident}}(const ::std::string& str); // Code to increment an enumeration type ${{self.c_ident}} &operator++(${{self.c_ident}} &e); -''') +""" + ) # MachineType hack used to set the base component id for each Machine if 
self.isMachineType: - code(''' + code( + """ int ${{self.c_ident}}_base_level(const ${{self.c_ident}}& obj); MachineType ${{self.c_ident}}_from_base_level(int); int ${{self.c_ident}}_base_number(const ${{self.c_ident}}& obj); int ${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj); -''') +""" + ) for enum in self.enums.values(): - code(''' + code( + """ MachineID get${{enum.ident}}MachineID(NodeID RubyNode); -''') +""" + ) if self.isStateDecl: - code(''' + code( + """ // Code to convert the current state to an access permission AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj); -''') +""" + ) - code(''' + code( + """ ::std::ostream& operator<<(::std::ostream& out, const ${{self.c_ident}}& obj); } // namespace ruby } // namespace gem5 -''') +""" + ) if self.isMachineType: - code(''' + code( + """ // define a hash function for the MachineType class namespace std { @@ -565,18 +630,22 @@ struct hash }; } -''') +""" + ) # Trailer - code(''' + code( + """ #endif // __${{self.c_ident}}_HH__ -''') +""" + ) code.write(path, "%s.hh" % self.c_ident) def printEnumCC(self, path): code = self.symtab.codeFormatter() - code(''' + code( + """ #include #include #include @@ -584,10 +653,12 @@ struct hash #include "base/logging.hh" #include "mem/ruby/protocol/${{self.c_ident}}.hh" -''') +""" + ) if self.isStateDecl: - code(''' + code( + """ namespace gem5 { @@ -598,14 +669,16 @@ namespace ruby AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj) { switch(obj) { -''') +""" + ) # For each case code.indent() for statePerm in self.statePermPairs: - code(' case ${{self.c_ident}}_${{statePerm[0]}}:') - code(' return AccessPermission_${{statePerm[1]}};') + code(" case ${{self.c_ident}}_${{statePerm[0]}}:") + code(" return AccessPermission_${{statePerm[1]}};") code.dedent() - code (''' + code( + """ default: panic("Unknown state access permission converstion for ${{self.c_ident}}"); } @@ -616,16 +689,20 @@ AccessPermission 
${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj) } // namespace ruby } // namespace gem5 -''') +""" + ) if self.isMachineType: for enum in self.enums.values(): if enum.primary: - code('#include "mem/ruby/protocol/${{enum.ident}}' - '_Controller.hh"') + code( + '#include "mem/ruby/protocol/${{enum.ident}}' + '_Controller.hh"' + ) code('#include "mem/ruby/common/MachineID.hh"') - code(''' + code( + """ namespace gem5 { @@ -646,17 +723,19 @@ std::string ${{self.c_ident}}_to_string(const ${{self.c_ident}}& obj) { switch(obj) { -''') +""" + ) # For each field code.indent() for enum in self.enums.values(): - code(' case ${{self.c_ident}}_${{enum.ident}}:') + code(" case ${{self.c_ident}}_${{enum.ident}}:") code(' return "${{enum.ident}}";') code.dedent() # Trailer - code(''' + code( + """ default: panic("Invalid range for type ${{self.c_ident}}"); } @@ -668,18 +747,20 @@ ${{self.c_ident}}_to_string(const ${{self.c_ident}}& obj) ${{self.c_ident}} string_to_${{self.c_ident}}(const std::string& str) { -''') +""" + ) # For each field start = "" code.indent() for enum in self.enums.values(): code('${start}if (str == "${{enum.ident}}") {') - code(' return ${{self.c_ident}}_${{enum.ident}};') + code(" return ${{self.c_ident}}_${{enum.ident}};") start = "} else " code.dedent() - code(''' + code( + """ } else { panic("Invalid string conversion for %s, type ${{self.c_ident}}", str); } @@ -692,12 +773,14 @@ operator++(${{self.c_ident}}& e) assert(e < ${{self.c_ident}}_NUM); return e = ${{self.c_ident}}(e+1); } -''') +""" + ) # MachineType hack used to set the base level and number of # components for each Machine if self.isMachineType: - code(''' + code( + """ /** \\brief returns the base vector index for each machine type to be * used by NetDest * @@ -708,17 +791,19 @@ int ${{self.c_ident}}_base_level(const ${{self.c_ident}}& obj) { switch(obj) { -''') +""" + ) # For each field code.indent() - for i,enum in enumerate(self.enums.values()): - code(' case 
${{self.c_ident}}_${{enum.ident}}:') - code(' return $i;') + for i, enum in enumerate(self.enums.values()): + code(" case ${{self.c_ident}}_${{enum.ident}}:") + code(" return $i;") code.dedent() # total num - code(''' + code( + """ case ${{self.c_ident}}_NUM: return ${{len(self.enums)}}; @@ -737,17 +822,19 @@ MachineType ${{self.c_ident}}_from_base_level(int type) { switch(type) { -''') +""" + ) # For each field code.indent() - for i,enum in enumerate(self.enums.values()): - code(' case $i:') - code(' return ${{self.c_ident}}_${{enum.ident}};') + for i, enum in enumerate(self.enums.values()): + code(" case $i:") + code(" return ${{self.c_ident}}_${{enum.ident}};") code.dedent() # Trailer - code(''' + code( + """ default: panic("Invalid range for type ${{self.c_ident}}"); } @@ -763,23 +850,27 @@ ${{self.c_ident}}_base_number(const ${{self.c_ident}}& obj) { int base = 0; switch(obj) { -''') +""" + ) # For each field code.indent() - code(' case ${{self.c_ident}}_NUM:') + code(" case ${{self.c_ident}}_NUM:") for enum in reversed(list(self.enums.values())): # Check if there is a defined machine with this type if enum.primary: - code(' base += ${{enum.ident}}_Controller::getNumControllers();') + code( + " base += ${{enum.ident}}_Controller::getNumControllers();" + ) else: - code(' base += 0;') - code(' [[fallthrough]];') - code(' case ${{self.c_ident}}_${{enum.ident}}:') - code(' break;') + code(" base += 0;") + code(" [[fallthrough]];") + code(" case ${{self.c_ident}}_${{enum.ident}}:") + code(" break;") code.dedent() - code(''' + code( + """ default: panic("Invalid range for type ${{self.c_ident}}"); } @@ -794,18 +885,22 @@ int ${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj) { switch(obj) { -''') +""" + ) # For each field for enum in self.enums.values(): - code('case ${{self.c_ident}}_${{enum.ident}}:') + code("case ${{self.c_ident}}_${{enum.ident}}:") if enum.primary: - code('return ${{enum.ident}}_Controller::getNumControllers();') + code( + "return 
${{enum.ident}}_Controller::getNumControllers();" + ) else: - code('return 0;') + code("return 0;") # total num - code(''' + code( + """ case ${{self.c_ident}}_NUM: default: panic("Invalid range for type ${{self.c_ident}}"); @@ -813,10 +908,12 @@ ${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj) // Appease the compiler since this function has a return value return -1; } -''') +""" + ) for enum in self.enums.values(): - code(''' + code( + """ MachineID get${{enum.ident}}MachineID(NodeID RubyNode) @@ -824,14 +921,18 @@ get${{enum.ident}}MachineID(NodeID RubyNode) MachineID mach = {MachineType_${{enum.ident}}, RubyNode}; return mach; } -''') +""" + ) - code(''' + code( + """ } // namespace ruby } // namespace gem5 -''') +""" + ) # Write the file code.write(path, "%s.cc" % self.c_ident) -__all__ = [ "Type" ] + +__all__ = ["Type"] diff --git a/src/mem/slicc/symbols/Var.py b/src/mem/slicc/symbols/Var.py index 6d9a2cf9d8..3b8a538a23 100644 --- a/src/mem/slicc/symbols/Var.py +++ b/src/mem/slicc/symbols/Var.py @@ -27,9 +27,11 @@ from slicc.symbols.Symbol import Symbol + class Var(Symbol): - def __init__(self, symtab, ident, location, type, code, pairs, - machine=None): + def __init__( + self, symtab, ident, location, type, code, pairs, machine=None + ): super().__init__(symtab, ident, location, pairs) self.machine = machine @@ -42,4 +44,5 @@ class Var(Symbol): def writeCodeFiles(self, path, includes): pass -__all__ = [ "Var" ] + +__all__ = ["Var"] diff --git a/src/mem/slicc/util.py b/src/mem/slicc/util.py index ace879e331..07b5ba6ab2 100644 --- a/src/mem/slicc/util.py +++ b/src/mem/slicc/util.py @@ -27,6 +27,7 @@ import os import sys + class PairContainer(object): def __init__(self, pairs=None): self.pairs = {} @@ -45,27 +46,30 @@ class PairContainer(object): def get(self, item, failobj=None): return self.pairs.get(item, failobj) + class Location(object): def __init__(self, filename, lineno, no_warning=False): if not isinstance(filename, str): raise AttributeError( 
- "filename must be a string, found {}".format(type(filename))) + "filename must be a string, found {}".format(type(filename)) + ) if not isinstance(lineno, int): raise AttributeError( - "filename must be an integer, found {}".format(type(lineno))) + "filename must be an integer, found {}".format(type(lineno)) + ) self.filename = filename self.lineno = lineno self.no_warning = no_warning def __str__(self): - return '%s:%d' % (os.path.basename(self.filename), self.lineno) + return "%s:%d" % (os.path.basename(self.filename), self.lineno) def warning(self, message, *args): if self.no_warning: return if args: message = message % args - #raise Exception, "%s: Warning: %s" % (self, message) + # raise Exception, "%s: Warning: %s" % (self, message) print("%s: Warning: %s" % (self, message), file=sys.stderr) def error(self, message, *args): @@ -74,4 +78,5 @@ class Location(object): raise Exception("{}: Error: {}".format(self, message)) sys.exit("\n%s: Error: %s" % (self, message)) -__all__ = [ 'PairContainer', 'Location' ] + +__all__ = ["PairContainer", "Location"] diff --git a/src/mem/snoop_filter.cc b/src/mem/snoop_filter.cc index e2568b66bf..273c087887 100644 --- a/src/mem/snoop_filter.cc +++ b/src/mem/snoop_filter.cc @@ -162,11 +162,9 @@ SnoopFilter::finishRequest(bool will_retry, Addr addr, bool is_secure) if (reqLookupResult.it != cachedLocations.end()) { // since we rely on the caller, do a basic check to ensure // that finishRequest is being called following lookupRequest - Addr line_addr = (addr & ~(Addr(linesize - 1))); - if (is_secure) { - line_addr |= LineSecure; - } - assert(reqLookupResult.it->first == line_addr); + assert(reqLookupResult.it->first == \ + (is_secure ? 
((addr & ~(Addr(linesize - 1))) | LineSecure) : \ + (addr & ~(Addr(linesize - 1))))); if (will_retry) { SnoopItem retry_item = reqLookupResult.retryItem; // Undo any changes made in lookupRequest to the snoop filter diff --git a/src/mem/thread_bridge.cc b/src/mem/thread_bridge.cc new file mode 100644 index 0000000000..3f76ef49b3 --- /dev/null +++ b/src/mem/thread_bridge.cc @@ -0,0 +1,121 @@ +/* + * Copyright 2022 Google, LLC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "mem/thread_bridge.hh" + +#include "base/trace.hh" +#include "sim/eventq.hh" + +namespace gem5 +{ + +ThreadBridge::ThreadBridge(const ThreadBridgeParams &p) + : SimObject(p), in_port_("in_port", *this), out_port_("out_port", *this) +{ +} + +ThreadBridge::IncomingPort::IncomingPort(const std::string &name, + ThreadBridge &device) + : ResponsePort(name, &device), device_(device) +{ +} + +AddrRangeList +ThreadBridge::IncomingPort::getAddrRanges() const +{ + return device_.out_port_.getAddrRanges(); +} + +// TimingResponseProtocol +bool +ThreadBridge::IncomingPort::recvTimingReq(PacketPtr pkt) +{ + panic("ThreadBridge only supports atomic/functional access."); +} +void +ThreadBridge::IncomingPort::recvRespRetry() +{ + panic("ThreadBridge only supports atomic/functional access."); +} + +// AtomicResponseProtocol +Tick +ThreadBridge::IncomingPort::recvAtomicBackdoor(PacketPtr pkt, + MemBackdoorPtr &backdoor) +{ + panic("ThreadBridge only supports atomic/functional access."); +} +Tick +ThreadBridge::IncomingPort::recvAtomic(PacketPtr pkt) +{ + EventQueue::ScopedMigration migrate(device_.eventQueue()); + return device_.out_port_.sendAtomic(pkt); +} + +// FunctionalResponseProtocol +void +ThreadBridge::IncomingPort::recvFunctional(PacketPtr pkt) +{ + EventQueue::ScopedMigration migrate(device_.eventQueue()); + device_.out_port_.sendFunctional(pkt); +} + +ThreadBridge::OutgoingPort::OutgoingPort(const std::string &name, + ThreadBridge &device) + : RequestPort(name, &device), device_(device) +{ +} + +void +ThreadBridge::OutgoingPort::recvRangeChange() +{ + device_.in_port_.sendRangeChange(); +} + +// TimingRequestProtocol +bool +ThreadBridge::OutgoingPort::recvTimingResp(PacketPtr pkt) +{ + panic("ThreadBridge only supports atomic/functional access."); +} +void +ThreadBridge::OutgoingPort::recvReqRetry() +{ + panic("ThreadBridge only supports atomic/functional access."); +} + +Port & +ThreadBridge::getPort(const std::string &if_name, PortID idx) +{ + if 
(if_name == "in_port") + return in_port_; + if (if_name == "out_port") + return out_port_; + return SimObject::getPort(if_name, idx); +} + +} // namespace gem5 diff --git a/src/mem/thread_bridge.hh b/src/mem/thread_bridge.hh new file mode 100644 index 0000000000..28c959193c --- /dev/null +++ b/src/mem/thread_bridge.hh @@ -0,0 +1,88 @@ +/* + * Copyright 2022 Google, LLC. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __MEM_THREAD_BRIDGE_HH__ +#define __MEM_THREAD_BRIDGE_HH__ + +#include "mem/port.hh" +#include "params/ThreadBridge.hh" +#include "sim/sim_object.hh" + +namespace gem5 +{ + +class ThreadBridge : public SimObject +{ + public: + explicit ThreadBridge(const ThreadBridgeParams &p); + + Port &getPort(const std::string &if_name, + PortID idx = InvalidPortID) override; + + private: + class IncomingPort : public ResponsePort + { + public: + IncomingPort(const std::string &name, ThreadBridge &device); + AddrRangeList getAddrRanges() const override; + + // TimingResponseProtocol + bool recvTimingReq(PacketPtr pkt) override; + void recvRespRetry() override; + + // AtomicResponseProtocol + Tick recvAtomicBackdoor(PacketPtr pkt, + MemBackdoorPtr &backdoor) override; + Tick recvAtomic(PacketPtr pkt) override; + + // FunctionalResponseProtocol + void recvFunctional(PacketPtr pkt) override; + + private: + ThreadBridge &device_; + }; + + class OutgoingPort : public RequestPort + { + public: + OutgoingPort(const std::string &name, ThreadBridge &device); + void recvRangeChange() override; + + // TimingRequestProtocol + bool recvTimingResp(PacketPtr pkt) override; + void recvReqRetry() override; + + private: + ThreadBridge &device_; + }; + + IncomingPort in_port_; + OutgoingPort out_port_; +}; + +} // namespace gem5 +#endif // __MEM_THREAD_BRIDGE_HH__ diff --git a/src/proto/inst.proto b/src/proto/inst.proto index 00b9579c54..fadb82952b 100644 --- a/src/proto/inst.proto +++ b/src/proto/inst.proto @@ -112,4 +112,3 @@ message Inst { } repeated MemAccess mem_access = 8; } - diff --git a/src/python/SConscript b/src/python/SConscript index b595ba91bd..e7e464e2df 100644 --- a/src/python/SConscript +++ b/src/python/SConscript @@ -89,6 +89,9 @@ PySource('gem5.components.cachehierarchies.classic', PySource('gem5.components.cachehierarchies.classic', 'gem5/components/cachehierarchies/classic/' 'private_l1_private_l2_cache_hierarchy.py') 
+PySource('gem5.components.cachehierarchies.classic', + 'gem5/components/cachehierarchies/classic/' + 'private_l1_shared_l2_cache_hierarchy.py') PySource('gem5.components.cachehierarchies.classic.caches', 'gem5/components/cachehierarchies/classic/caches/__init__.py') PySource('gem5.components.cachehierarchies.classic.caches', @@ -148,6 +151,7 @@ PySource('gem5.components.memory', 'gem5/components/memory/simple.py') PySource('gem5.components.memory', 'gem5/components/memory/memory.py') PySource('gem5.components.memory', 'gem5/components/memory/single_channel.py') PySource('gem5.components.memory', 'gem5/components/memory/multi_channel.py') +PySource('gem5.components.memory', 'gem5/components/memory/hbm.py') PySource('gem5.components.memory.dram_interfaces', 'gem5/components/memory/dram_interfaces/__init__.py') PySource('gem5.components.memory.dram_interfaces', @@ -174,6 +178,8 @@ PySource('gem5.components.processors', 'gem5/components/processors/abstract_core.py') PySource('gem5.components.processors', 'gem5/components/processors/abstract_generator_core.py') +PySource('gem5.components.processors', + 'gem5/components/processors/abstract_generator.py') PySource('gem5.components.processors', 'gem5/components/processors/abstract_processor.py') PySource('gem5.components.processors', @@ -200,23 +206,51 @@ PySource('gem5.components.processors', 'gem5/components/processors/random_generator.py') PySource('gem5.components.processors', 'gem5/components/processors/simple_core.py') +PySource('gem5.components.processors', + 'gem5/components/processors/base_cpu_core.py') PySource('gem5.components.processors', 'gem5/components/processors/simple_processor.py') +PySource('gem5.components.processors', + 'gem5/components/processors/base_cpu_processor.py') PySource('gem5.components.processors', 'gem5/components/processors/simple_switchable_processor.py') PySource('gem5.components.processors', 'gem5/components/processors/switchable_processor.py') +PySource('gem5.utils', 
'gem5/utils/simpoint.py') +PySource('gem5.components.processors', + 'gem5/components/processors/traffic_generator_core.py') +PySource('gem5.components.processors', + 'gem5/components/processors/traffic_generator.py') PySource('gem5.prebuilt', 'gem5/prebuilt/__init__.py') PySource('gem5.prebuilt.demo', 'gem5/prebuilt/demo/__init__.py') PySource('gem5.prebuilt.demo', 'gem5/prebuilt/demo/x86_demo_board.py') +PySource('gem5.prebuilt.riscvmatched', + 'gem5/prebuilt/riscvmatched/__init__.py') +PySource('gem5.prebuilt.riscvmatched', + 'gem5/prebuilt/riscvmatched/riscvmatched_board.py') +PySource('gem5.prebuilt.riscvmatched', + 'gem5/prebuilt/riscvmatched/riscvmatched_cache.py') +PySource('gem5.prebuilt.riscvmatched', + 'gem5/prebuilt/riscvmatched/riscvmatched_processor.py') +PySource('gem5.prebuilt.riscvmatched', + 'gem5/prebuilt/riscvmatched/riscvmatched_core.py') PySource('gem5.resources', 'gem5/resources/__init__.py') PySource('gem5.resources', 'gem5/resources/downloader.py') PySource('gem5.resources', 'gem5/resources/md5_utils.py') PySource('gem5.resources', 'gem5/resources/resource.py') +PySource('gem5.resources', 'gem5/resources/workload.py') PySource('gem5.utils', 'gem5/utils/__init__.py') PySource('gem5.utils', 'gem5/utils/filelock.py') PySource('gem5.utils', 'gem5/utils/override.py') PySource('gem5.utils', 'gem5/utils/requires.py') +PySource('gem5.utils.multiprocessing', + 'gem5/utils/multiprocessing/__init__.py') +PySource('gem5.utils.multiprocessing', + 'gem5/utils/multiprocessing/_command_line.py') +PySource('gem5.utils.multiprocessing', + 'gem5/utils/multiprocessing/context.py') +PySource('gem5.utils.multiprocessing', + 'gem5/utils/multiprocessing/popen_spawn_gem5.py') PySource('', 'importer.py') PySource('m5', 'm5/__init__.py') @@ -251,7 +285,8 @@ PySource('m5.ext.pyfdt', 'm5/ext/pyfdt/pyfdt.py') PySource('m5.ext.pyfdt', 'm5/ext/pyfdt/__init__.py') PySource('m5.ext.pystats', 'm5/ext/pystats/__init__.py') -PySource('m5.ext.pystats', 
'm5/ext/pystats/jsonserializable.py') +PySource('m5.ext.pystats', 'm5/ext/pystats/serializable_stat.py') +PySource('m5.ext.pystats', 'm5/ext/pystats/abstract_stat.py') PySource('m5.ext.pystats', 'm5/ext/pystats/group.py') PySource('m5.ext.pystats', 'm5/ext/pystats/simstat.py') PySource('m5.ext.pystats', 'm5/ext/pystats/statistic.py') diff --git a/src/python/gem5/coherence_protocol.py b/src/python/gem5/coherence_protocol.py index 275a304a36..1ae45fc190 100644 --- a/src/python/gem5/coherence_protocol.py +++ b/src/python/gem5/coherence_protocol.py @@ -42,4 +42,4 @@ class CoherenceProtocol(Enum): MOESI_AMD_BASE = 8 MI_EXAMPLE = 9 GPU_VIPER = 10 - CHI = 11 \ No newline at end of file + CHI = 11 diff --git a/src/python/gem5/components/boards/abstract_board.py b/src/python/gem5/components/boards/abstract_board.py index 3067f7d0af..4ea8866009 100644 --- a/src/python/gem5/components/boards/abstract_board.py +++ b/src/python/gem5/components/boards/abstract_board.py @@ -25,10 +25,13 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from abc import ABCMeta, abstractmethod +import inspect from .mem_mode import MemMode, mem_mode_to_string +from ...resources.workload import AbstractWorkload from m5.objects import ( + AddrRange, System, Port, IOXBar, @@ -37,7 +40,7 @@ from m5.objects import ( VoltageDomain, ) -from typing import List +from typing import List, Optional, Sequence, Tuple class AbstractBoard: @@ -66,13 +69,15 @@ class AbstractBoard: clk_freq: str, processor: "AbstractProcessor", memory: "AbstractMemorySystem", - cache_hierarchy: "AbstractCacheHierarchy", + cache_hierarchy: Optional["AbstractCacheHierarchy"], ) -> None: """ :param clk_freq: The clock frequency for this board. :param processor: The processor for this board. :param memory: The memory for this board. - :param cache_hierarchy: The Cachie Hierarchy for this board. + :param cache_hierarchy: The Cache Hierarchy for this board. + In some boards caches can be optional. 
If so, + that board must override `_connect_things`. """ if not isinstance(self, System): @@ -86,7 +91,9 @@ class AbstractBoard: # Set the processor, memory, and cache hierarchy. self.processor = processor self.memory = memory - self.cache_hierarchy = cache_hierarchy + self._cache_hierarchy = cache_hierarchy + if cache_hierarchy is not None: + self.cache_hierarchy = cache_hierarchy # This variable determines whether the board is to be executed in # full-system or syscall-emulation mode. This is set when the workload @@ -94,14 +101,20 @@ class AbstractBoard: # determined by which kind of workload is set. self._is_fs = None + # This variable is used to record the checkpoint directory which is + # set when declaring the board's workload and then used by the + # Simulator module. + self._checkpoint = None + # Setup the board and memory system's memory ranges. self._setup_memory_ranges() # Setup board properties unique to the board being constructed. self._setup_board() - # Connect the memory, processor, and cache hierarchy. - self._connect_things() + # A private variable to record whether `_connect_things` has been + # been called. + self._connect_things_called = False def get_processor(self) -> "AbstractProcessor": """Get the processor connected to the board. @@ -117,12 +130,20 @@ class AbstractBoard: """ return self.memory - def get_cache_hierarchy(self) -> "AbstractCacheHierarchy": + def get_mem_ports(self) -> Sequence[Tuple[AddrRange, Port]]: + """Get the memory ports exposed on this board + + Note: The ports should be returned such that the address ranges are + in ascending order. + """ + return self.get_memory().get_mem_ports() + + def get_cache_hierarchy(self) -> Optional["AbstractCacheHierarchy"]: """Get the cache hierarchy connected to the board. :returns: The cache hierarchy. """ - return self.cache_hierarchy + return self._cache_hierarchy def get_cache_line_size(self) -> int: """Get the size of the cache line. 
@@ -168,13 +189,46 @@ class AbstractBoard: This function is used by the Simulator module to setup the simulation correctly. """ - if self._is_fs == None: - raise Exception("The workload for this board not yet to be set. " - "Whether the board is to be executed in FS or SE " - "mode is determined by which 'set workload' " - "function is run.") + if self._is_fs == None: + raise Exception( + "The workload for this board not yet to be set. " + "Whether the board is to be executed in FS or SE " + "mode is determined by which 'set workload' " + "function is run." + ) return self._is_fs + def set_workload(self, workload: AbstractWorkload) -> None: + """ + Set the workload for this board to run. + + This function will take the workload specified and run the correct + workload function (e.g., `set_kernel_disk_workload`) with the correct + parameters + + :params workload: The workload to be set to this board. + """ + + try: + func = getattr(self, workload.get_function_str()) + except AttributeError: + raise Exception( + "This board does not support this workload type. " + f"This board does not contain the necessary " + f"`{workload.get_function_str()}` function" + ) + + func_signature = inspect.signature(func) + for param_name in workload.get_parameters().keys(): + if param_name not in func_signature.parameters.keys(): + raise Exception( + "Workload specifies non-existent parameter " + f"`{param_name}` for function " + f"`{workload.get_function_str()}` " + ) + + func(**workload.get_parameters()) + @abstractmethod def _setup_board(self) -> None: """ @@ -288,13 +342,68 @@ class AbstractBoard: * The processor is incorporated after the cache hierarchy due to a bug noted here: https://gem5.atlassian.net/browse/GEM5-1113. Until this bug is fixed, this ordering must be maintained. + * Once this function is called `_connect_things_called` *must* be set + to `True`. """ + if self._connect_things_called: + raise Exception( + "The `_connect_things` function has already been called." 
+ ) + # Incorporate the memory into the motherboard. self.get_memory().incorporate_memory(self) # Incorporate the cache hierarchy for the motherboard. - self.get_cache_hierarchy().incorporate_cache(self) + if self.get_cache_hierarchy(): + self.get_cache_hierarchy().incorporate_cache(self) # Incorporate the processor into the motherboard. self.get_processor().incorporate_processor(self) + + self._connect_things_called = True + + def _post_instantiate(self): + """Called to set up anything needed after m5.instantiate""" + self.get_processor()._post_instantiate() + if self.get_cache_hierarchy(): + self.get_cache_hierarchy()._post_instantiate() + self.get_memory()._post_instantiate() + + def _pre_instantiate(self): + """To be called immediately before m5.instantiate. This is where + `_connect_things` is executed by default.""" + + # Connect the memory, processor, and cache hierarchy. + self._connect_things() + + def _connect_things_check(self): + """ + Here we check that connect things has been called and throw an + Exception if it has not. + + Since v22.1 `_connect_things` function has + been moved from the AbstractBoard constructor to the + `_pre_instantation` function. Users who have used the gem5 stdlib + components (i.e., boards which inherit from AbstractBoard) and the + Simulator module should notice no change. Those who do not use the + Simulator module and instead called `m5.instantiate` directly must + call `AbstractBoard._pre_instantation` prior so `_connect_things` is + called. In order to avoid confusion, this check has been incorporated + and the Exception thrown explains the fix needed to convert old scripts + to function with v22.1. + + This function is called in `AbstractSystemBoard.createCCObject` and + ArmBoard.createCCObject`. Both these functions override + `SimObject.createCCObject`. We can not do that here as AbstractBoard + does not inherit form System. 
+ """ + if not self._connect_things_called: + raise Exception( + """ +AbstractBoard's `_connect_things` function has not been called. This is likely +due to not running a board outside of the gem5 Standard Library Simulator +module. If this is the case, this can be resolved by calling +`._pre_instantiate()` prior to `m5.instantiate()`. +""" + ) diff --git a/src/python/gem5/components/boards/abstract_system_board.py b/src/python/gem5/components/boards/abstract_system_board.py index 463a5b6f12..2812b37260 100644 --- a/src/python/gem5/components/boards/abstract_system_board.py +++ b/src/python/gem5/components/boards/abstract_system_board.py @@ -27,8 +27,10 @@ from abc import ABCMeta from .abstract_board import AbstractBoard +from ...utils.override import overrides + +from m5.objects import System, SimObject -from m5.objects import System class AbstractSystemBoard(System, AbstractBoard): @@ -37,6 +39,7 @@ class AbstractSystemBoard(System, AbstractBoard): """ __metaclass__ = ABCMeta + def __init__( self, clk_freq: str, @@ -51,4 +54,13 @@ class AbstractSystemBoard(System, AbstractBoard): processor=processor, memory=memory, cache_hierarchy=cache_hierarchy, - ) \ No newline at end of file + ) + + @overrides(SimObject) + def createCCObject(self): + """We override this function as it is called in `m5.instantiate`. This + means we can insert a check to ensure the `_connect_things` function + has been run. 
+ """ + super()._connect_things_check() + super().createCCObject() diff --git a/src/python/gem5/components/boards/arm_board.py b/src/python/gem5/components/boards/arm_board.py index 84346584b0..7936c0c25e 100644 --- a/src/python/gem5/components/boards/arm_board.py +++ b/src/python/gem5/components/boards/arm_board.py @@ -44,16 +44,16 @@ from m5.objects import ( ArmDefaultRelease, VExpress_GEM5_Base, VExpress_GEM5_Foundation, + SimObject, ) import os import m5 from abc import ABCMeta from ...isas import ISA -from typing import List -from m5.util import fatal from ...utils.requires import requires from ...utils.override import overrides +from typing import List, Sequence, Tuple from .abstract_board import AbstractBoard from ...resources.resource import AbstractResource from .kernel_disk_workload import KernelDiskWorkload @@ -62,6 +62,7 @@ from ..processors.abstract_processor import AbstractProcessor from ..memory.abstract_memory_system import AbstractMemorySystem from ..cachehierarchies.abstract_cache_hierarchy import AbstractCacheHierarchy + class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): """ A board capable of full system simulation for ARM instructions. It is based @@ -73,10 +74,9 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): Versatile(TM) Express family of boards. **Limitations** - * The board currently does not support ruby caches. * stage2 walker ports are ignored. - * This version does not support SECURITY extension. 
""" + __metaclass__ = ABCMeta def __init__( @@ -86,43 +86,31 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): memory: AbstractMemorySystem, cache_hierarchy: AbstractCacheHierarchy, platform: VExpress_GEM5_Base = VExpress_GEM5_Foundation(), - release: ArmRelease = ArmDefaultRelease() + release: ArmRelease = ArmDefaultRelease(), ) -> None: + + # The platform and the clk has to be set before calling the super class + self._platform = platform + self._clk_freq = clk_freq + super().__init__() AbstractBoard.__init__( self, - clk_freq = clk_freq, - processor = processor, - memory = memory, - cache_hierarchy = cache_hierarchy, + clk_freq=clk_freq, + processor=processor, + memory=memory, + cache_hierarchy=cache_hierarchy, ) # This board requires ARM ISA to work. - - requires(isa_required = ISA.ARM) - - # Setting the voltage domain here. - - self.voltage_domain = self.clk_domain.voltage_domain + requires(isa_required=ISA.ARM) # Setting up ARM release here. We use the ARM default release, which # corresponds to an ARMv8 system. - self.release = release - # RealView sets up most of the on-chip and off-chip devices and GIC - # for the ARM board. These devices' iformation is also used to - # generate the dtb file. - - self._setup_realview(platform) - - # ArmBoard's memory can only be setup once realview is initialized. - - self._setup_arm_memory_ranges() - # Setting multi_proc of ArmSystem by counting the number of processors. - - if processor.get_num_cores() != 1: + if processor.get_num_cores() == 1: self.multi_proc = False else: self.multi_proc = True @@ -132,117 +120,149 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): # This board is expected to run full-system simulation. # Loading ArmFsLinux() from `src/arch/arm/ArmFsWorkload.py` - self.workload = ArmFsLinux() # We are fixing the following variable for the ArmSystem to work. The # security extension is checked while generating the dtb file in - # realview. 
This board does not have security extention enabled. - + # realview. This board does not have security extension enabled. self._have_psci = False # highest_el_is_64 is set to True. True if the register width of the # highest implemented exception level is 64 bits. - self.highest_el_is_64 = True # Setting up the voltage and the clock domain here for the ARM board. # The ArmSystem/RealView expects voltage_domain to be a parameter. # The voltage and the clock frequency are taken from the devices.py - # file from configs/example/arm - + # file from configs/example/arm. We set the clock to the same frequency + # as the user specified in the config script. self.voltage_domain = VoltageDomain(voltage="1.0V") self.clk_domain = SrcClockDomain( - clock="1GHz", voltage_domain=self.voltage_domain + clock=self._clk_freq, voltage_domain=self.voltage_domain ) # The ARM board supports both Terminal and VncServer. - self.terminal = Terminal() self.vncserver = VncServer() # Incoherent I/O Bus - self.iobus = IOXBar() self.iobus.badaddr_responder = BadAddr() self.iobus.default = self.iobus.badaddr_responder.pio - def _setup_io_devices(self) -> None: - """ - This method connects the I/O devices to the I/O bus. - """ + # We now need to setup the dma_ports. + self._dma_ports = None - # We setup the iobridge for the ARM Board. The default - # cache_hierarchy's NoCache class has an iobridge has a latency of - # 10. We are using an iobridge with latency = 50ns, taken from the - # configs/example/arm/devices.py + # RealView sets up most of the on-chip and off-chip devices and GIC + # for the ARM board. These devices' information is also used to + # generate the dtb file. We then connect the I/O devices to the + # I/O bus. 
+ self._setup_io_devices() - self.iobridge = Bridge(delay="50ns") - self.iobridge.mem_side_port = self.iobus.cpu_side_ports - self.iobridge.cpu_side_port = ( - self.cache_hierarchy.get_mem_side_port() - ) + # Once the realview is setup, we can continue setting up the memory + # ranges. ArmBoard's memory can only be setup once realview is + # initialized. + memory = self.get_memory() + mem_size = memory.get_size() - # We either have iocache or dmabridge depending upon the - # cache_hierarchy. If we have "NoCache", then we use the dmabridge. - # Otherwise, we use the iocache on the board. - - if isinstance(self.cache_hierarchy, NoCache) is False: - - # The ArmBoard does not support ruby caches. - - if self.get_cache_hierarchy().is_ruby(): - fatal("Ruby caches are not supported by the ArmBoard.") - - # The classic caches are setup in the _setup_io_cache() method, - # defined under the cachehierarchy class. Verified it with both - # PrivateL1PrivateL2CacheHierarchy and PrivateL1CacheHierarchy - # classes. - - else: - - # This corresponds to a machine without caches. We have a DMA - # beidge in this case. Parameters of this bridge are also taken - # from the common/example/arm/devices.py file. - - self.dmabridge = Bridge( - delay="50ns", ranges=self.mem_ranges + # The following code is taken from configs/example/arm/devices.py. It + # sets up all the memory ranges for the board. 
+ self.mem_ranges = [] + success = False + for mem_range in self.realview._mem_regions: + size_in_range = min(mem_size, mem_range.size()) + self.mem_ranges.append( + AddrRange(start=mem_range.start, size=size_in_range) ) - self.dmabridge.mem_side_port = self.get_dma_ports()[0] - self.dmabridge.cpu_side_port = self.get_dma_ports()[1] + mem_size -= size_in_range + if mem_size == 0: + success = True + break - self.realview.attachOnChipIO( - self.cache_hierarchy.membus, self.iobridge - ) - self.realview.attachIO(self.iobus) + if success: + memory.set_memory_range(self.mem_ranges) + else: + raise ValueError("Memory size too big for platform capabilities") - def _setup_realview(self, platform) -> None: + # The PCI Devices. PCI devices can be added via the `_add_pci_device` + # function. + self._pci_devices = [] + + def _setup_io_devices(self) -> None: """ - Notes: - The ARM Board has realview platform. Most of the on-chip and - off-chip devices are setup by the RealView platform. Currently, there - are 5 different types of realview platforms supported by the ArmBoard. - - :param platform: the user can specify the platform while instantiating - an ArmBoard object. + This method first sets up the platform. ARM uses `realview` platform. + Most of the on-chip and off-chip devices are setup by the realview + platform. Once realview is setup, we connect the I/O devices to the + I/O bus. """ # Currently, the ArmBoard supports VExpress_GEM5_V1, # VExpress_GEM5_V1_HDLcd and VExpress_GEM5_Foundation. # VExpress_GEM5_V2 and VExpress_GEM5_V2_HDLcd are not supported by the # ArmBoard. - - self.realview = platform + self.realview = self._platform # We need to setup the global interrupt controller (GIC) addr for the # realview system. - if hasattr(self.realview.gic, "cpu_addr"): self.gic_cpu_addr = self.realview.gic.cpu_addr - def _setup_io_cache(self): - pass + # IO devices has to setup before incorporating the caches in the case + # of ruby caches. 
Otherwise the DMA controllers are incorrectly + # created. The IO device has to be attached first. This is done in the + # realview class. + if self.get_cache_hierarchy().is_ruby(): + + # All the on-chip devices are attached in this method. + self.realview.attachOnChipIO( + self.iobus, + dma_ports=self.get_dma_ports(), + mem_ports=self.get_memory().get_mem_ports(), + ) + self.realview.attachIO(self.iobus, dma_ports=self.get_dma_ports()) + + else: + # We either have iocache or dmabridge depending upon the + # cache_hierarchy. If we have "NoCache", then we use the dmabridge. + # Otherwise, we use the iocache on the board. + + # We setup the iobridge for the ARM Board. The default + # cache_hierarchy's NoCache class has an iobridge has a latency + # of 10. We are using an iobridge with latency = 50ns, taken + # from the configs/example/arm/devices.py. + self.iobridge = Bridge(delay="50ns") + self.iobridge.mem_side_port = self.iobus.cpu_side_ports + self.iobridge.cpu_side_port = ( + self.cache_hierarchy.get_mem_side_port() + ) + + if isinstance(self.cache_hierarchy, NoCache) is True: + # This corresponds to a machine without caches. We have a DMA + # bridge in this case. Parameters of this bridge are also taken + # from the common/example/arm/devices.py file. + self.dmabridge = Bridge(delay="50ns", ranges=self.mem_ranges) + self.dmabridge.mem_side_port = ( + self.cache_hierarchy.get_cpu_side_port() + ) + self.dmabridge.cpu_side_port = self.iobus.mem_side_ports + + # The classic caches are setup in the _setup_io_cache() method + # defined under the cachehierarchy class. Verified it with both + # PrivateL1PrivateL2CacheHierarchy and PrivateL1CacheHierarchy + # classes. 
+ self.realview.attachOnChipIO( + self.cache_hierarchy.membus, self.iobridge + ) + self.realview.attachIO(self.iobus) + + @overrides(AbstractBoard) + def get_mem_ports(self) -> Sequence[Tuple[AddrRange, Port]]: + all_ports = [ + (self.realview.bootmem.range, self.realview.bootmem.port), + ] + self.get_memory().get_mem_ports() + + return all_ports @overrides(AbstractBoard) def has_io_bus(self) -> bool: @@ -250,10 +270,20 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): @overrides(AbstractBoard) def get_io_bus(self) -> IOXBar: - return [self.iobus.cpu_side_ports, self.iobus.mem_side_ports] + return self.iobus @overrides(AbstractBoard) def has_coherent_io(self) -> bool: + # The setup of the caches gets a little tricky here. We need to + # override the default cache_hierarchy.iobridge due to different delay + # values (see method _setup_io_devices()). One way to do it would be to + # prevent creating cache_hierarchy.iobridge altogether. We trick + # NoCache() to assume that this board has no coherent_io and we we + # simply setup our own iobridge in the _setup_io_devices() method. + if isinstance(self.cache_hierarchy, NoCache): + return False + # In all other cases, we use the default values setup in the + # respective cache hierarchy class. return True @overrides(AbstractBoard) @@ -264,20 +294,65 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): def has_dma_ports(self) -> bool: return True - def _setup_coherent_io_bridge(self, board: AbstractBoard) -> None: - pass - @overrides(AbstractBoard) def get_dma_ports(self) -> List[Port]: - return [ - self.cache_hierarchy.get_cpu_side_port(), - self.iobus.mem_side_ports - ] + # The DMA ports differ depending upon the cache hierarchy. The method + # self.set_dma_ports takes care of that. In the case of ruby caches, + # this method should initially return an empty list. 
+ if self.cache_hierarchy.is_ruby(): + if self._dma_ports is None: + self._dma_ports = [] + + # _dma_ports should always be empty for classic caches. + return self._dma_ports @overrides(AbstractBoard) def connect_system_port(self, port: Port) -> None: self.system_port = port + @overrides(AbstractBoard) + def _pre_instantiate(self): + super()._pre_instantiate() + + # Add the PCI devices. + self.pci_devices = self._pci_devices + + # The workload needs to know the dtb_file. + self.workload.dtb_filename = self._get_dtb_filename() + + # Calling generateDtb from class ArmSystem to add memory information to + # the dtb file. + self.generateDtb(self._get_dtb_filename()) + + # Finally we need to setup the bootloader for the ArmBoard. An ARM + # system requires three inputs to simulate a full system: a disk image, + # the kernel file and the bootloader file(s). + self.realview.setupBootLoader( + self, self._get_dtb_filename(), self._bootloader + ) + + def _get_dtb_filename(self) -> str: + """Returns the dtb file location. + + **Note**: This may be the _expected_ file location when generated. A + file may not exist at this location when this function is called.""" + + return os.path.join(m5.options.outdir, "device.dtb") + + def _add_pci_device(self, pci_device: PciVirtIO) -> None: + """Attaches the PCI Device to the board. All devices will be added to + `self.pci_device` as a pre-instantiation setup. + + :param pci_device: The PCI Device to add. + """ + self._pci_devices.append(pci_device) + + # For every PCI device, we need to get its dma_port so that we + # can setup dma_controllers correctly. + self.realview.attachPciDevice( + pci_device, self.iobus, dma_ports=self.get_dma_ports() + ) + @overrides(KernelDiskWorkload) def get_disk_device(self): return "/dev/vda" @@ -285,89 +360,27 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): @overrides(KernelDiskWorkload) def _add_disk_to_board(self, disk_image: AbstractResource): - # We define the image. 
- - image = CowDiskImage( - child=RawDiskImage(read_only=True), read_only=False + self._image = CowDiskImage( + child=RawDiskImage( + read_only=True, image_file=disk_image.get_local_path() + ), + read_only=False, ) - self.pci_devices = [PciVirtIO(vio=VirtIOBlock(image=image))] - self.realview.attachPciDevice( - self.pci_devices[0], self.iobus - ) - - # Now that the disk and workload are set, we can generate the device - # tree file. We will generate the dtb file everytime the board is - # boot-up. - - image.child.image_file = disk_image.get_local_path() - - # _setup_io_devices needs to be implemented. - - self._setup_io_devices() - - # Specifying the dtb file location to the workload. - - self.workload.dtb_filename = os.path.join( - m5.options.outdir, "device.dtb" - ) - - # Calling generateDtb from class ArmSystem to add memory information to - # the dtb file. - - self.generateDtb(self.workload.dtb_filename) - - # Finally we need to setup the bootloader for the ArmBoard. An ARM - # system requires three inputs to simulate a full system: a disk image, - # the kernel file and the bootloader file(s). - - self.realview.setupBootLoader( - self, self.workload.dtb_filename, self._bootloader) - - def _get_memory_ranges(self, mem_size) -> list: - """ - This method is taken from configs/example/arm/devices.py. It sets up - all the memory ranges for the board. - """ - mem_ranges = [] - - for mem_range in self.realview._mem_regions: - size_in_range = min(mem_size, mem_range.size()) - mem_ranges.append( - AddrRange(start = mem_range.start, size = size_in_range) - ) - - mem_size -= size_in_range - if mem_size == 0: - return mem_ranges - - raise ValueError("Memory size too big for platform capabilities") + self._add_pci_device(PciVirtIO(vio=VirtIOBlock(image=self._image))) @overrides(AbstractBoard) def _setup_memory_ranges(self) -> None: """ - The ArmBoard's memory can only be setup after realview is setup. 
Once - realview is initialized, we call _setup_arm_memory_ranges() to - correctly setup the memory ranges. + The ArmBoard's memory can only be setup after realview is setup. We set + this up in the `_setup_board` function. """ pass - def _setup_arm_memory_ranges(self) -> None: - - # We setup the memory here. The memory size is specified in the run - # script that the user uses. - - memory = self.get_memory() - mem_size = memory.get_size() - - self.mem_ranges = self._get_memory_ranges(mem_size) - memory.set_memory_range(self.mem_ranges) - @overrides(KernelDiskWorkload) def get_default_kernel_args(self) -> List[str]: # The default kernel string is taken from the devices.py file. - return [ "console=ttyAMA0", "lpj=19988480", @@ -376,3 +389,12 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload): "rw", "mem=%s" % self.get_memory().get_size(), ] + + @overrides(SimObject) + def createCCObject(self): + """We override this function as it is called in `m5.instantiate`. This + means we can insert a check to ensure the `_connect_things` function + has been run. + """ + super()._connect_things_check() + super().createCCObject() diff --git a/src/python/gem5/components/boards/experimental/lupv_board.py b/src/python/gem5/components/boards/experimental/lupv_board.py index 59eedd87c5..5624712ca8 100644 --- a/src/python/gem5/components/boards/experimental/lupv_board.py +++ b/src/python/gem5/components/boards/experimental/lupv_board.py @@ -72,6 +72,7 @@ from m5.util.fdthelper import ( FdtState, ) + class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): """ A board capable of full system simulation for RISC-V. @@ -94,9 +95,11 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): raise EnvironmentError("RiscvBoard is not compatible with Ruby") if processor.get_isa() != ISA.RISCV: - raise Exception("The LupvBoard requires a processor using the " + raise Exception( + "The LupvBoard requires a processor using the " "RISCV ISA. 
Current processor " - f"ISA: '{processor.get_isa().name}'.") + f"ISA: '{processor.get_isa().name}'." + ) super().__init__(clk_freq, processor, memory, cache_hierarchy) @@ -107,76 +110,77 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): # Initialize all the devices that we want to use on this board # Interrupt IDS for PIC Device - self._excep_code = { 'INT_SOFT_SUPER': 1, 'INT_TIMER_SUPER': 5, - 'INT_TIMER_MACHINE': 7, 'INT_EXT_SUPER': 9, - 'INT_EXT_MACHINE': 10 } - self._int_ids = { 'TTY': 1, 'BLK': 2, 'RNG': 3} + self._excep_code = { + "INT_SOFT_SUPER": 1, + "INT_SOFT_MACHINE": 3, + "INT_TIMER_SUPER": 5, + "INT_TIMER_MACHINE": 7, + "INT_EXT_SUPER": 9, + "INT_EXT_MACHINE": 11, + } + self._int_ids = {"TTY": 0, "BLK": 1, "RNG": 2} # CLINT self.clint = Clint(pio_addr=0x2000000) # PLIC - self.pic = Plic(pio_addr=0xc000000) + self.pic = Plic(pio_addr=0xC000000) # LUPIO IPI self.lupio_ipi = LupioIPI( pio_addr=0x20001000, - int_type=self._excep_code['INT_SOFT_SUPER'], - num_threads = self.processor.get_num_cores() + int_type=self._excep_code["INT_SOFT_SUPER"], + num_threads=self.processor.get_num_cores(), ) # LUPIO PIC self.lupio_pic = LupioPIC( pio_addr=0x20002000, - int_type = self._excep_code['INT_EXT_SUPER'], - num_threads = self.processor.get_num_cores() + int_type=self._excep_code["INT_EXT_SUPER"], + num_threads=self.processor.get_num_cores(), ) - #LupV Platform - self.lupv = LupV( - pic = self.lupio_pic, - uart_int_id = self._int_ids['TTY'] - ) + # LupV Platform + self.lupv = LupV(pic=self.lupio_pic, uart_int_id=self._int_ids["TTY"]) # LUPIO BLK self.lupio_blk = LupioBLK( pio_addr=0x20000000, - platform = self.lupv, - int_id = self._int_ids['BLK'] + platform=self.lupv, + int_id=self._int_ids["BLK"], ) # LUPIO RNG self.lupio_rng = LupioRNG( - pio_addr=0x20005000, - platform = self.lupv, - int_id = self._int_ids['RNG'] + pio_addr=0x20003000, + platform=self.lupv, + int_id=self._int_ids["RNG"], ) # LUPIO RTC self.lupio_rtc = LupioRTC(pio_addr=0x20004000) - 
#LUPIO SYS - self.lupio_sys = LupioSYS(pio_addr= 0x20003000) - + # LUPIO SYS + self.lupio_sys = LupioSYS(pio_addr=0x20005000) # LUPIO TMR self.lupio_tmr = LupioTMR( pio_addr=0x20006000, - int_type = self._excep_code['INT_TIMER_SUPER'], - num_threads = self.processor.get_num_cores() + int_type=self._excep_code["INT_TIMER_SUPER"], + num_threads=self.processor.get_num_cores(), ) # LUPIO TTY self.lupio_tty = LupioTTY( pio_addr=0x20007000, - platform = self.lupv, - int_id = self._int_ids['TTY'] + platform=self.lupv, + int_id=self._int_ids["TTY"], ) self.terminal = Terminal() pic_srcs = [ - self._int_ids['TTY'], - self._int_ids['BLK'], - self._int_ids['RNG'] + self._int_ids["TTY"], + self._int_ids["BLK"], + self._int_ids["RNG"], ] # Set the number of sources to the PIC as 0 because we've removed the @@ -205,14 +209,14 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): self.pic, self.lupio_ipi, self.lupio_pic, - self.lupio_tmr + self.lupio_tmr, ] self._off_chip_devices = [ self.lupio_blk, self.lupio_tty, self.lupio_sys, self.lupio_rng, - self.lupio_rtc + self.lupio_rtc, ] def _setup_io_devices(self) -> None: @@ -286,7 +290,8 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): root = FdtNode("/") root.append(state.addrCellsProperty()) root.append(state.sizeCellsProperty()) - root.appendCompatible(["riscv-virtio"]) + root.appendCompatible(["luplab,lupv"]) + for mem_range in self.mem_ranges: node = FdtNode("memory@%x" % int(mem_range.start)) node.append(FdtPropertyStrings("device_type", ["memory"])) @@ -313,7 +318,7 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): node.append(FdtPropertyWords("reg", state.CPUAddrCells(i))) node.append(FdtPropertyStrings("mmu-type", "riscv,sv48")) node.append(FdtPropertyStrings("status", "okay")) - node.append(FdtPropertyStrings("riscv,isa", "rv64imafdc")) + node.append(FdtPropertyStrings("riscv,isa", "rv64imafdcsu")) # TODO: Should probably get this from the core. 
freq = self.clk_domain.clock[0].frequency node.appendCompatible(["riscv"]) @@ -341,13 +346,16 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): clint_node = clint.generateBasicPioDeviceNode( soc_state, "clint", clint.pio_addr, clint.pio_size ) + + clint_node.append(FdtPropertyStrings("status", "disable")) + int_extended = list() for i, core in enumerate(self.get_processor().get_cores()): phandle = state.phandle(f"cpu@{i}.int_state") int_extended.append(phandle) - int_extended.append(0x3) + int_extended.append(self._excep_code["INT_SOFT_MACHINE"]) int_extended.append(phandle) - int_extended.append(0x7) + int_extended.append(self._excep_code["INT_TIMER_MACHINE"]) clint_node.append( FdtPropertyWords("interrupts-extended", int_extended) ) @@ -365,18 +373,19 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): # LupioTMR lupio_tmr = self.lupio_tmr - lupio_tmr_node = lupio_tmr.generateBasicPioDeviceNode(soc_state, - "lupio-tmr", lupio_tmr.pio_addr, - lupio_tmr.pio_size) + lupio_tmr_node = lupio_tmr.generateBasicPioDeviceNode( + soc_state, "lupio-tmr", lupio_tmr.pio_addr, lupio_tmr.pio_size + ) int_state = FdtState(addr_cells=0, interrupt_cells=1) lupio_tmr_node.append(FdtPropertyWords("clocks", [clk_phandle])) int_extended = list() for i, core in enumerate(self.get_processor().get_cores()): phandle = state.phandle(f"cpu@{i}.int_state") int_extended.append(phandle) - int_extended.append(self._excep_code['INT_TIMER_SUPER']) + int_extended.append(self._excep_code["INT_TIMER_SUPER"]) lupio_tmr_node.append( - FdtPropertyWords("interrupts-extended", int_extended)) + FdtPropertyWords("interrupts-extended", int_extended) + ) lupio_tmr_node.appendCompatible(["lupio,tmr"]) soc_node.append(lupio_tmr_node) @@ -386,8 +395,9 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): soc_state, "plic", plic.pio_addr, plic.pio_size ) - int_state = FdtState(addr_cells=0, interrupt_cells=1) - plic_node.append(int_state.addrCellsProperty()) + 
plic_node.append(FdtPropertyStrings("status", "disable")) + + int_state = FdtState(interrupt_cells=1) plic_node.append(int_state.interruptCellsProperty()) phandle = int_state.phandle(plic) @@ -398,9 +408,7 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): for i, core in enumerate(self.get_processor().get_cores()): phandle = state.phandle(f"cpu@{i}.int_state") int_extended.append(phandle) - int_extended.append(0xB) - int_extended.append(phandle) - int_extended.append(0x9) + int_extended.append(self._excep_code["INT_EXT_MACHINE"]) plic_node.append(FdtPropertyWords("interrupts-extended", int_extended)) plic_node.append(FdtProperty("interrupt-controller")) @@ -410,25 +418,26 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): # LupioIPI Device lupio_ipi = self.lupio_ipi - lupio_ipi_node = lupio_ipi.generateBasicPioDeviceNode(soc_state, - "lupio-ipi", lupio_ipi.pio_addr, - lupio_ipi.pio_size) + lupio_ipi_node = lupio_ipi.generateBasicPioDeviceNode( + soc_state, "lupio-ipi", lupio_ipi.pio_addr, lupio_ipi.pio_size + ) int_extended = list() for i, core in enumerate(self.get_processor().get_cores()): phandle = state.phandle(f"cpu@{i}.int_state") int_extended.append(phandle) - int_extended.append(self._excep_code['INT_SOFT_SUPER']) + int_extended.append(self._excep_code["INT_SOFT_SUPER"]) lupio_ipi_node.append( - FdtPropertyWords("interrupts-extended", int_extended)) + FdtPropertyWords("interrupts-extended", int_extended) + ) lupio_ipi_node.append(FdtProperty("interrupt-controller")) lupio_ipi_node.appendCompatible(["lupio,ipi"]) soc_node.append(lupio_ipi_node) # LupioPIC Device lupio_pic = self.lupio_pic - lupio_pic_node = lupio_pic.generateBasicPioDeviceNode(soc_state, - "lupio-pic", lupio_pic.pio_addr, - lupio_pic.pio_size) + lupio_pic_node = lupio_pic.generateBasicPioDeviceNode( + soc_state, "lupio-pic", lupio_pic.pio_addr, lupio_pic.pio_size + ) int_state = FdtState(interrupt_cells=1) lupio_pic_node.append(int_state.interruptCellsProperty()) phandle 
= state.phandle(lupio_pic) @@ -437,44 +446,47 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): for i, core in enumerate(self.get_processor().get_cores()): phandle = state.phandle(f"cpu@{i}.int_state") int_extended.append(phandle) - int_extended.append(self._excep_code['INT_EXT_SUPER']) + int_extended.append(self._excep_code["INT_EXT_SUPER"]) lupio_pic_node.append( - FdtPropertyWords("interrupts-extended", int_extended)) + FdtPropertyWords("interrupts-extended", int_extended) + ) lupio_pic_node.append(FdtProperty("interrupt-controller")) lupio_pic_node.appendCompatible(["lupio,pic"]) soc_node.append(lupio_pic_node) # LupioBLK Device lupio_blk = self.lupio_blk - lupio_blk_node = lupio_blk.generateBasicPioDeviceNode(soc_state, - "lupio-blk", lupio_blk.pio_addr, - lupio_blk.pio_size) + lupio_blk_node = lupio_blk.generateBasicPioDeviceNode( + soc_state, "lupio-blk", lupio_blk.pio_addr, lupio_blk.pio_size + ) lupio_blk_node.appendCompatible(["lupio,blk"]) lupio_blk_node.append( - FdtPropertyWords("interrupts", - [self.lupio_blk.int_id])) - lupio_blk_node.append( - FdtPropertyWords("interrupt-parent", - state.phandle(self.lupio_pic))) + FdtPropertyWords( + "interrupts-extended", + [state.phandle(self.lupio_pic), self.lupio_blk.int_id], + ) + ) soc_node.append(lupio_blk_node) # LupioRNG Device lupio_rng = self.lupio_rng - lupio_rng_node = lupio_rng.generateBasicPioDeviceNode(soc_state, - "lupio-rng", lupio_rng.pio_addr,lupio_rng.pio_size) + lupio_rng_node = lupio_rng.generateBasicPioDeviceNode( + soc_state, "lupio-rng", lupio_rng.pio_addr, lupio_rng.pio_size + ) lupio_rng_node.appendCompatible(["lupio,rng"]) lupio_rng_node.append( - FdtPropertyWords("interrupts", - [self.lupio_rng.int_id])) - lupio_rng_node.append( - FdtPropertyWords("interrupt-parent", - state.phandle(self.lupio_pic))) + FdtPropertyWords( + "interrupts-extended", + [state.phandle(self.lupio_pic), self.lupio_rng.int_id], + ) + ) soc_node.append(lupio_rng_node) - #LupioSYS Device + # LupioSYS 
Device lupio_sys = self.lupio_sys - lupio_sys_node = lupio_sys.generateBasicPioDeviceNode(soc_state, - "lupio-sys", lupio_sys.pio_addr, lupio_sys.pio_size) + lupio_sys_node = lupio_sys.generateBasicPioDeviceNode( + soc_state, "lupio-sys", lupio_sys.pio_addr, lupio_sys.pio_size + ) lupio_sys_node.appendCompatible(["syscon"]) sys_phandle = state.phandle(self.lupio_sys) lupio_sys_node.append(FdtPropertyWords("phandle", [sys_phandle])) @@ -504,15 +516,16 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): # LupioTTY Device lupio_tty = self.lupio_tty - lupio_tty_node = lupio_tty.generateBasicPioDeviceNode(soc_state, - "lupio-tty", lupio_tty.pio_addr, lupio_tty.pio_size) + lupio_tty_node = lupio_tty.generateBasicPioDeviceNode( + soc_state, "lupio-tty", lupio_tty.pio_addr, lupio_tty.pio_size + ) lupio_tty_node.appendCompatible(["lupio,tty"]) lupio_tty_node.append( - FdtPropertyWords("interrupts", - [self.lupio_tty.int_id])) - lupio_tty_node.append( - FdtPropertyWords("interrupt-parent", - state.phandle(self.lupio_pic))) + FdtPropertyWords( + "interrupts-extended", + [state.phandle(self.lupio_pic), self.lupio_tty.int_id], + ) + ) soc_node.append(lupio_tty_node) root.append(soc_node) @@ -523,7 +536,7 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): @overrides(KernelDiskWorkload) def get_default_kernel_args(self) -> List[str]: - return ["earlycon console=ttyLIO0", "root={root_value}", "ro"] + return ["console=ttyLIO0", "root={root_value}", "rw"] @overrides(KernelDiskWorkload) def get_disk_device(self) -> str: @@ -534,10 +547,9 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload): # Note: This must be called after set_workload because it looks for an # attribute named "disk" and connects - # Set the disk image for the block device to use + # Set the disk image for the block device to use image = CowDiskImage( - child=RawDiskImage(read_only=True), - read_only=False + child=RawDiskImage(read_only=True), read_only=False ) image.child.image_file = 
disk_image.get_local_path() self.lupio_blk.image = image diff --git a/src/python/gem5/components/boards/kernel_disk_workload.py b/src/python/gem5/components/boards/kernel_disk_workload.py index 23824d140e..29d38baa7b 100644 --- a/src/python/gem5/components/boards/kernel_disk_workload.py +++ b/src/python/gem5/components/boards/kernel_disk_workload.py @@ -29,11 +29,13 @@ from abc import abstractmethod from .abstract_board import AbstractBoard from ...resources.resource import AbstractResource -from typing import List, Optional +from typing import List, Optional, Union import os +from pathlib import Path import m5 + class KernelDiskWorkload: """ The purpose of this abstract class is to enable a full-system boot @@ -139,6 +141,7 @@ class KernelDiskWorkload: readfile_contents: Optional[str] = None, kernel_args: Optional[List[str]] = None, exit_on_work_items: bool = True, + checkpoint: Optional[Union[Path, AbstractResource]] = None, ) -> None: """ This function allows the setting of a full-system run with a Kernel @@ -158,11 +161,13 @@ class KernelDiskWorkload: passed to the kernel. By default set to `get_default_kernel_args()`. :param exit_on_work_items: Whether the simulation should exit on work items. True by default. + :param checkpoint: The checkpoint directory. Used to restore the + simulation to that checkpoint. """ # We assume this this is in a multiple-inheritance setup with an # Abstract board. This function will not work otherwise. - assert(isinstance(self,AbstractBoard)) + assert isinstance(self, AbstractBoard) # If we are setting a workload of this type, we need to run as a # full-system simulation. @@ -201,3 +206,17 @@ class KernelDiskWorkload: # Set whether to exit on work items. self.exit_on_work_items = exit_on_work_items + + # Here we set `self._checkpoint_dir`. This is then used by the + # Simulator module to setup checkpoints. 
+ if checkpoint: + if isinstance(checkpoint, Path): + self._checkpoint = checkpoint + elif isinstance(checkpoint, AbstractResource): + self._checkpoint = Path(checkpoint.get_local_path()) + else: + # The checkpoint_dir must be None, Path, Or AbstractResource. + raise Exception( + "Checkpoints must be passed as a Path or an " + "AbstractResource." + ) diff --git a/src/python/gem5/components/boards/riscv_board.py b/src/python/gem5/components/boards/riscv_board.py index 9f8b1c02c8..15ec57af69 100644 --- a/src/python/gem5/components/boards/riscv_board.py +++ b/src/python/gem5/components/boards/riscv_board.py @@ -93,9 +93,11 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): super().__init__(clk_freq, processor, memory, cache_hierarchy) if processor.get_isa() != ISA.RISCV: - raise Exception("The RISCVBoard requires a processor using the" + raise Exception( + "The RISCVBoard requires a processor using the" "RISCV ISA. Current processor ISA: " - f"'{processor.get_isa().name}'.") + f"'{processor.get_isa().name}'." 
+ ) @overrides(AbstractSystemBoard) def _setup_board(self) -> None: @@ -141,16 +143,17 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): def _setup_io_devices(self) -> None: """Connect the I/O devices to the I/O bus""" - #Add PCI + # Add PCI self.platform.pci_host.pio = self.iobus.mem_side_ports - #Add Ethernet card - self.ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0, - InterruptLine=1, InterruptPin=1) + # Add Ethernet card + self.ethernet = IGbE_e1000( + pci_bus=0, pci_dev=0, pci_func=0, InterruptLine=1, InterruptPin=1 + ) self.ethernet.host = self.platform.pci_host - self.ethernet.pio = self.iobus.mem_side_ports - self.ethernet.dma = self.iobus.cpu_side_ports + self.ethernet.pio = self.iobus.mem_side_ports + self.ethernet.dma = self.iobus.cpu_side_ports if self.get_cache_hierarchy().is_ruby(): for device in self._off_chip_devices + self._on_chip_devices: @@ -172,10 +175,10 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): for dev in self._off_chip_devices ] - #PCI - self.bridge.ranges.append(AddrRange(0x2F000000, size='16MB')) - self.bridge.ranges.append(AddrRange(0x30000000, size='256MB')) - self.bridge.ranges.append(AddrRange(0x40000000, size='512MB')) + # PCI + self.bridge.ranges.append(AddrRange(0x2F000000, size="16MB")) + self.bridge.ranges.append(AddrRange(0x30000000, size="256MB")) + self.bridge.ranges.append(AddrRange(0x40000000, size="512MB")) def _setup_pma(self) -> None: """Set the PMA devices on each core""" @@ -185,10 +188,10 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): for dev in self._on_chip_devices + self._off_chip_devices ] - #PCI - uncacheable_range.append(AddrRange(0x2F000000, size='16MB')) - uncacheable_range.append(AddrRange(0x30000000, size='256MB')) - uncacheable_range.append(AddrRange(0x40000000, size='512MB')) + # PCI + uncacheable_range.append(AddrRange(0x2F000000, size="16MB")) + uncacheable_range.append(AddrRange(0x30000000, size="256MB")) + 
uncacheable_range.append(AddrRange(0x40000000, size="512MB")) # TODO: Not sure if this should be done per-core like in the example for cpu in self.get_processor().get_cores(): @@ -263,7 +266,7 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): cpus_node.append(cpus_state.sizeCellsProperty()) # Used by the CLINT driver to set the timer frequency. Value taken from # RISC-V kernel docs (Note: freedom-u540 is actually 1MHz) - cpus_node.append(FdtPropertyWords("timebase-frequency", [10000000])) + cpus_node.append(FdtPropertyWords("timebase-frequency", [100000000])) for i, core in enumerate(self.get_processor().get_cores()): node = FdtNode(f"cpu@{i}") @@ -346,8 +349,9 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): soc_node.append(plic_node) # PCI - pci_state = FdtState(addr_cells=3, size_cells=2, - cpu_cells=1, interrupt_cells=1) + pci_state = FdtState( + addr_cells=3, size_cells=2, cpu_cells=1, interrupt_cells=1 + ) pci_node = FdtNode("pci") if int(self.platform.pci_host.conf_device_bits) == 8: @@ -364,9 +368,13 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): pci_node.append(pci_state.sizeCellsProperty()) pci_node.append(pci_state.interruptCellsProperty()) # PCI address for CPU - pci_node.append(FdtPropertyWords("reg", - soc_state.addrCells(self.platform.pci_host.conf_base) + - soc_state.sizeCells(self.platform.pci_host.conf_size) )) + pci_node.append( + FdtPropertyWords( + "reg", + soc_state.addrCells(self.platform.pci_host.conf_base) + + soc_state.sizeCells(self.platform.pci_host.conf_size), + ) + ) # Ranges mapping # For now some of this is hard coded, because the PCI module does not @@ -382,18 +390,19 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): # AXI memory address range ranges += self.platform.pci_host.pciFdtAddr(space=2, addr=0) ranges += soc_state.addrCells(self.platform.pci_host.pci_mem_base) - ranges += pci_state.sizeCells(0x40000000) # Fixed size + ranges += pci_state.sizeCells(0x40000000) # Fixed size 
pci_node.append(FdtPropertyWords("ranges", ranges)) # Interrupt mapping plic_handle = int_state.phandle(plic) - int_base = self.platform.pci_host.int_base + int_base = self.platform.pci_host.int_base interrupts = [] for i in range(int(self.platform.pci_host.int_count)): - interrupts += self.platform.pci_host.pciFdtAddr(device=i, - addr=0) + [int(i) + 1, plic_handle, int(int_base) + i] + interrupts += self.platform.pci_host.pciFdtAddr( + device=i, addr=0 + ) + [int(i) + 1, plic_handle, int(int_base) + i] pci_node.append(FdtPropertyWords("interrupt-map", interrupts)) @@ -401,8 +410,9 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): if int_count & (int_count - 1): fatal("PCI interrupt count should be power of 2") - intmask = self.platform.pci_host.pciFdtAddr(device=int_count - 1, - addr=0) + [0x0] + intmask = self.platform.pci_host.pciFdtAddr( + device=int_count - 1, addr=0 + ) + [0x0] pci_node.append(FdtPropertyWords("interrupt-map-mask", intmask)) if self.platform.pci_host._dma_coherent: @@ -484,4 +494,4 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload): @overrides(KernelDiskWorkload) def get_default_kernel_args(self) -> List[str]: - return ["console=ttyS0", "root={root_value}", "ro"] + return ["console=ttyS0", "root={root_value}", "rw"] diff --git a/src/python/gem5/components/boards/se_binary_workload.py b/src/python/gem5/components/boards/se_binary_workload.py index 6ff81e7481..8ec112ee13 100644 --- a/src/python/gem5/components/boards/se_binary_workload.py +++ b/src/python/gem5/components/boards/se_binary_workload.py @@ -26,9 +26,15 @@ from .abstract_board import AbstractBoard from ...resources.resource import AbstractResource +from gem5.utils.simpoint import SimPoint from m5.objects import SEWorkload, Process +from typing import Optional, List, Union +from m5.util import warn +from pathlib import Path + + class SEBinaryWorkload: """ This class is used to enable simple Syscall-Execution (SE) mode execution @@ -37,38 +43,129 @@ class 
SEBinaryWorkload: For this to function correctly the SEBinaryWorkload class should be added as a superclass to a board (i.e., something that inherits from AbstractBoard). + + **Important Notes:** At present this implementation is limited. A single + process is added to all cores as the workload. Therefore, despite allowing + for multi-core setups, multi-program workloads are not presently supported. """ def set_se_binary_workload( self, binary: AbstractResource, - exit_on_work_items: bool = True + exit_on_work_items: bool = True, + stdin_file: Optional[AbstractResource] = None, + stdout_file: Optional[Path] = None, + stderr_file: Optional[Path] = None, + arguments: List[str] = [], + checkpoint: Optional[Union[Path, AbstractResource]] = None, ) -> None: """Set up the system to run a specific binary. **Limitations** - * Only supports single threaded applications + * Only supports single threaded applications. * Dynamically linked executables are partially supported when the host ISA and the simulated ISA are the same. :param binary: The resource encapsulating the binary to be run. :param exit_on_work_items: Whether the simulation should exit on work items. True by default. + :param stdin_file: The input file for the binary + :param arguments: The input arguments for the binary + :param checkpoint: The checkpoint directory. Used to restore the + simulation to that checkpoint. """ # We assume this this is in a multiple-inheritance setup with an # Abstract board. This function will not work otherwise. - assert(isinstance(self,AbstractBoard)) + assert isinstance(self, AbstractBoard) # If we are setting a workload of this type, we need to run as a # SE-mode simulation. 
self._set_fullsystem(False) - self.workload = SEWorkload.init_compatible(binary.get_local_path()) + binary_path = binary.get_local_path() + self.workload = SEWorkload.init_compatible(binary_path) process = Process() - process.cmd = [binary.get_local_path()] - self.get_processor().get_cores()[0].set_workload(process) + process.executable = binary_path + process.cmd = [binary_path] + arguments + if stdin_file is not None: + process.input = stdin_file.get_local_path() + if stdout_file is not None: + process.output = stdout_file.as_posix() + if stderr_file is not None: + process.errout = stderr_file.as_posix() + + for core in self.get_processor().get_cores(): + core.set_workload(process) # Set whether to exit on work items for the se_workload self.exit_on_work_items = exit_on_work_items + + # Here we set `self._checkpoint_dir`. This is then used by the + # Simulator module to setup checkpoints. + if checkpoint: + if isinstance(checkpoint, Path): + self._checkpoint = checkpoint + elif isinstance(checkpoint, AbstractResource): + self._checkpoint_dir = Path(checkpoint.get_local_path()) + else: + raise Exception( + "The checkpoint_dir must be None, Path, or " + "AbstractResource." + ) + + def set_se_simpoint_workload( + self, + binary: AbstractResource, + arguments: List[str] = [], + simpoint: Union[AbstractResource, SimPoint] = None, + checkpoint: Optional[Union[Path, AbstractResource]] = None, + ) -> None: + """Set up the system to run a SimPoint workload. + + **Limitations** + * Only supports single threaded applications. + * Dynamically linked executables are partially supported when the host + ISA and the simulated ISA are the same. + + **Warning:** SimPoints only works with one core + + :param binary: The resource encapsulating the binary to be run. 
+ :param arguments: The input arguments for the binary + :param simpoint: The SimPoint object or Resource that contains the list of + SimPoints starting instructions, the list of weights, and the SimPoints + interval + :param checkpoint: The checkpoint directory. Used to restore the + simulation to that checkpoint. + """ + + # convert input to SimPoint if necessary + if isinstance(simpoint, AbstractResource): + self._simpoint_object = SimPoint(simpoint) + else: + assert isinstance(simpoint, SimPoint) + self._simpoint_object = simpoint + + if self.get_processor().get_num_cores() > 1: + warn("SimPoints only works with one core") + self.get_processor().get_cores()[0]._set_simpoint( + inst_starts=self._simpoint_object.get_simpoint_start_insts(), + board_initialized=False, + ) + + # Call set_se_binary_workload after SimPoint setup is complete + self.set_se_binary_workload( + binary=binary, + arguments=arguments, + checkpoint=checkpoint, + ) + + def get_simpoint(self) -> SimPoint: + """ + Returns the SimPoint object set. If no SimPoint object has been set an + exception is thrown. + """ + if getattr(self, "_simpoint_object", None): + return self._simpoint_object + raise Exception("This board does not have a simpoint set.") diff --git a/src/python/gem5/components/boards/simple_board.py b/src/python/gem5/components/boards/simple_board.py index 532475d8aa..2e4122e061 100644 --- a/src/python/gem5/components/boards/simple_board.py +++ b/src/python/gem5/components/boards/simple_board.py @@ -24,11 +24,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from m5.objects import ( - AddrRange, - IOXBar, - Port, -) +from m5.objects import AddrRange, IOXBar, Port from .abstract_system_board import AbstractSystemBoard from .se_binary_workload import SEBinaryWorkload diff --git a/src/python/gem5/components/boards/test_board.py b/src/python/gem5/components/boards/test_board.py index 7031e0e0d7..dea5adab56 100644 --- a/src/python/gem5/components/boards/test_board.py +++ b/src/python/gem5/components/boards/test_board.py @@ -24,21 +24,17 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from m5.objects import ( - Port, - IOXBar, - AddrRange, -) +from m5.objects import Port, IOXBar, AddrRange -from .mem_mode import MemMode, mem_mode_to_string from ...utils.override import overrides +from .abstract_board import AbstractBoard from .abstract_system_board import AbstractSystemBoard -from ..processors.abstract_processor import AbstractProcessor +from ..processors.abstract_generator import AbstractGenerator from ..memory.abstract_memory_system import AbstractMemorySystem from ..cachehierarchies.abstract_cache_hierarchy import AbstractCacheHierarchy -from typing import List +from typing import List, Optional class TestBoard(AbstractSystemBoard): @@ -47,21 +43,27 @@ class TestBoard(AbstractSystemBoard): architecture. To work as a traffic generator board, pass a generator as a processor. + + This board does not require a cache hierarchy (it can be none) in which + case the processor (generator) will be directly connected to the memory. + The clock frequency is only used if there is a cache hierarchy or when + using the GUPS generators. 
""" def __init__( self, clk_freq: str, - processor: AbstractProcessor, + generator: AbstractGenerator, memory: AbstractMemorySystem, - cache_hierarchy: AbstractCacheHierarchy, + cache_hierarchy: Optional[AbstractCacheHierarchy], ): super().__init__( - clk_freq=clk_freq, - processor=processor, + clk_freq=clk_freq, # Only used if cache hierarchy or GUPS-gen + processor=generator, memory=memory, cache_hierarchy=cache_hierarchy, ) + self._set_fullsystem(False) @overrides(AbstractSystemBoard) def _setup_board(self) -> None: @@ -112,3 +114,16 @@ class TestBoard(AbstractSystemBoard): @overrides(AbstractSystemBoard) def has_dma_ports(self) -> bool: return False + + @overrides(AbstractBoard) + def _connect_things(self) -> None: + super()._connect_things() + + if not self.get_cache_hierarchy(): + # If we have no caches, then there must be a one-to-one + # connection between the generators and the memories. + assert len(self.get_processor().get_cores()) == 1 + assert len(self.get_memory().get_mem_ports()) == 1 + self.get_processor().get_cores()[0].connect_dcache( + self.get_memory().get_mem_ports()[0][1] + ) diff --git a/src/python/gem5/components/boards/x86_board.py b/src/python/gem5/components/boards/x86_board.py index 6761bdb3fa..04fec617c1 100644 --- a/src/python/gem5/components/boards/x86_board.py +++ b/src/python/gem5/components/boards/x86_board.py @@ -85,8 +85,10 @@ class X86Board(AbstractSystemBoard, KernelDiskWorkload): ) if self.get_processor().get_isa() != ISA.X86: - raise Exception("The X86Board requires a processor using the X86 " - f"ISA. Current processor ISA: '{processor.get_isa().name}'.") + raise Exception( + "The X86Board requires a processor using the X86 " + f"ISA. Current processor ISA: '{processor.get_isa().name}'." + ) @overrides(AbstractSystemBoard) def _setup_board(self) -> None: @@ -100,10 +102,10 @@ class X86Board(AbstractSystemBoard, KernelDiskWorkload): # Set up all of the I/O. 
self._setup_io_devices() - self.m5ops_base = 0xffff0000 + self.m5ops_base = 0xFFFF0000 def _setup_io_devices(self): - """ Sets up the x86 IO devices. + """Sets up the x86 IO devices. Note: This is mostly copy-paste from prior X86 FS setups. Some of it may not be documented and there may be bugs. diff --git a/src/python/gem5/components/cachehierarchies/abstract_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/abstract_cache_hierarchy.py index 5f6096e8ff..8d59a383f1 100644 --- a/src/python/gem5/components/cachehierarchies/abstract_cache_hierarchy.py +++ b/src/python/gem5/components/cachehierarchies/abstract_cache_hierarchy.py @@ -70,3 +70,7 @@ class AbstractCacheHierarchy(SubSystem): :returns: True if the cache hierarchy is ruby. Otherwise False. """ raise NotImplementedError + + def _post_instantiate(self): + """Called to set up anything needed after m5.instantiate""" + pass diff --git a/src/python/gem5/components/cachehierarchies/chi/nodes/abstract_node.py b/src/python/gem5/components/cachehierarchies/chi/nodes/abstract_node.py index e8797b6de8..9853174464 100644 --- a/src/python/gem5/components/cachehierarchies/chi/nodes/abstract_node.py +++ b/src/python/gem5/components/cachehierarchies/chi/nodes/abstract_node.py @@ -33,24 +33,29 @@ from m5.objects import Cache_Controller, MessageBuffer, RubyNetwork import math + class TriggerMessageBuffer(MessageBuffer): - ''' + """ MessageBuffer for triggering internal controller events. These buffers should not be affected by the Ruby tester randomization and allow poping messages enqueued in the same cycle. - ''' - randomization = 'disabled' + """ + + randomization = "disabled" allow_zero_latency = True + class OrderedTriggerMessageBuffer(TriggerMessageBuffer): ordered = True + class AbstractNode(Cache_Controller): """A node is the abstract unit for caches in the CHI protocol. You can extend the AbstractNode to create caches (private or shared) and directories with or without data caches. 
""" + _version = 0 @classmethod @@ -72,7 +77,7 @@ class AbstractNode(Cache_Controller): # triggers. To limit the controller performance, tweak other # params such as: input port buffer size, cache banks, and output # port latency - self.transitions_per_cycle = 128 + self.transitions_per_cycle = 1024 # This should be set to true in the data cache controller to enable # timeouts on unique lines when a store conditional fails self.sc_lock_enabled = False @@ -84,21 +89,10 @@ class AbstractNode(Cache_Controller): def getBlockSizeBits(self): bits = int(math.log(self._cache_line_size, 2)) - if 2 ** bits != self._cache_line_size.value: + if 2**bits != self._cache_line_size.value: raise Exception("Cache line size not a power of 2!") return bits - def sendEvicts(self, core: AbstractCore, target_isa: ISA): - """True if the CPU model or ISA requires sending evictions from caches - to the CPU. Scenarios warrant forwarding evictions to the CPU: - 1. The O3 model must keep the LSQ coherent with the caches - 2. The x86 mwait instruction is built on top of coherence - 3. The local exclusive monitor in ARM systems - """ - if core.get_type() is CPUTypes.O3 or target_isa in (ISA.X86, ISA.ARM): - return True - return False - def connectQueues(self, network: RubyNetwork): """Connect all of the queues for this controller. This may be extended in subclasses. 
@@ -128,5 +122,3 @@ class AbstractNode(Cache_Controller): self.rspIn.in_port = network.out_port self.snpIn.in_port = network.out_port self.datIn.in_port = network.out_port - - diff --git a/src/python/gem5/components/cachehierarchies/chi/nodes/directory.py b/src/python/gem5/components/cachehierarchies/chi/nodes/directory.py index 8f1275c9c9..3488435d56 100644 --- a/src/python/gem5/components/cachehierarchies/chi/nodes/directory.py +++ b/src/python/gem5/components/cachehierarchies/chi/nodes/directory.py @@ -26,12 +26,8 @@ from .abstract_node import AbstractNode -from m5.objects import ( - ClockDomain, - NULL, - RubyCache, - RubyNetwork, -) +from m5.objects import ClockDomain, NULL, RubyCache, RubyNetwork + class SimpleDirectory(AbstractNode): """A directory or home node (HNF) @@ -39,6 +35,7 @@ class SimpleDirectory(AbstractNode): This simple directory has no cache. It forwards all requests as directly as possible. """ + def __init__( self, network: RubyNetwork, @@ -49,10 +46,7 @@ class SimpleDirectory(AbstractNode): # Dummy cache self.cache = RubyCache( - dataAccessLatency = 0, - tagAccessLatency = 1, - size = "128", - assoc = 1 + dataAccessLatency=0, tagAccessLatency=1, size="128", assoc=1 ) self.clk_domain = clk_domain @@ -87,4 +81,6 @@ class SimpleDirectory(AbstractNode): self.number_of_TBEs = 32 self.number_of_repl_TBEs = 32 self.number_of_snoop_TBEs = 1 + self.number_of_DVM_TBEs = 1 # should not receive any dvm + self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm self.unify_repl_TBEs = False diff --git a/src/python/gem5/components/cachehierarchies/chi/nodes/dma_requestor.py b/src/python/gem5/components/cachehierarchies/chi/nodes/dma_requestor.py index 7543e06d5a..ccac6cae91 100644 --- a/src/python/gem5/components/cachehierarchies/chi/nodes/dma_requestor.py +++ b/src/python/gem5/components/cachehierarchies/chi/nodes/dma_requestor.py @@ -29,26 +29,16 @@ from gem5.isas import ISA from .abstract_node import AbstractNode -from m5.objects import ( - 
ClockDomain, - RubyCache, -) +from m5.objects import ClockDomain, RubyCache + class DMARequestor(AbstractNode): - def __init__( - self, - network, - cache_line_size, - clk_domain: ClockDomain, - ): + def __init__(self, network, cache_line_size, clk_domain: ClockDomain): super().__init__(network, cache_line_size) # Dummy cache self.cache = RubyCache( - dataAccessLatency = 0, - tagAccessLatency = 1, - size = "128", - assoc = 1 + dataAccessLatency=0, tagAccessLatency=1, size="128", assoc=1 ) self.clk_domain = clk_domain @@ -76,5 +66,7 @@ class DMARequestor(AbstractNode): # Some reasonable default TBE params self.number_of_TBEs = 16 self.number_of_repl_TBEs = 1 - self.number_of_snoop_TBEs = 1 # Should never receive snoops + self.number_of_snoop_TBEs = 1 # Should never receive snoops + self.number_of_DVM_TBEs = 1 # should not receive any dvm + self.number_of_DVM_snoop_TBEs = 1 # should not receive any dvm self.unify_repl_TBEs = False diff --git a/src/python/gem5/components/cachehierarchies/chi/nodes/memory_controller.py b/src/python/gem5/components/cachehierarchies/chi/nodes/memory_controller.py index cf7d660d7b..e7cbafefb2 100644 --- a/src/python/gem5/components/cachehierarchies/chi/nodes/memory_controller.py +++ b/src/python/gem5/components/cachehierarchies/chi/nodes/memory_controller.py @@ -36,9 +36,20 @@ from m5.objects import ( from .abstract_node import TriggerMessageBuffer -class MemoryController(Memory_Controller): - """A controller that connects to memory + +class MemCtrlMessageBuffer(MessageBuffer): """ + MessageBuffer exchanging messages with the memory + These buffers should also not be affected by the Ruby tester randomization. 
+ """ + + randomization = "disabled" + ordered = True + + +class MemoryController(Memory_Controller): + """A controller that connects to memory""" + _version = 0 @classmethod @@ -47,10 +58,7 @@ class MemoryController(Memory_Controller): return cls._version - 1 def __init__( - self, - network: RubyNetwork, - ranges: List[AddrRange], - port: Port + self, network: RubyNetwork, ranges: List[AddrRange], port: Port ): super().__init__() @@ -64,10 +72,17 @@ class MemoryController(Memory_Controller): def connectQueues(self, network): self.triggerQueue = TriggerMessageBuffer() - self.responseFromMemory = MessageBuffer() - self.requestToMemory = MessageBuffer(ordered = True) + self.responseFromMemory = MemCtrlMessageBuffer() + self.requestToMemory = MemCtrlMessageBuffer() self.reqRdy = TriggerMessageBuffer() + # The Memory_Controller implementation deallocates the TBE for + # write requests when they are queue up to memory. The size of this + # buffer must be limited to prevent unlimited outstanding writes. 
+ self.requestToMemory.buffer_size = ( + int(self.to_memory_controller_latency) + 1 + ) + self.reqOut = MessageBuffer() self.rspOut = MessageBuffer() self.snpOut = MessageBuffer() diff --git a/src/python/gem5/components/cachehierarchies/chi/nodes/private_l1_moesi_cache.py b/src/python/gem5/components/cachehierarchies/chi/nodes/private_l1_moesi_cache.py index 664522eacf..3e38c9038f 100644 --- a/src/python/gem5/components/cachehierarchies/chi/nodes/private_l1_moesi_cache.py +++ b/src/python/gem5/components/cachehierarchies/chi/nodes/private_l1_moesi_cache.py @@ -29,11 +29,7 @@ from gem5.isas import ISA from .abstract_node import AbstractNode -from m5.objects import ( - ClockDomain, - RubyCache, - RubyNetwork, -) +from m5.objects import ClockDomain, RubyCache, RubyNetwork class PrivateL1MOESICache(AbstractNode): @@ -54,7 +50,7 @@ class PrivateL1MOESICache(AbstractNode): ) self.clk_domain = clk_domain - self.send_evictions = self.sendEvicts(core=core, target_isa=target_isa) + self.send_evictions = core.requires_send_evicts() self.use_prefetcher = False # Only applies to home nodes @@ -69,7 +65,7 @@ class PrivateL1MOESICache(AbstractNode): self.alloc_on_readshared = True self.alloc_on_readunique = True self.alloc_on_readonce = True - self.alloc_on_writeback = False # Should never happen in an L1 + self.alloc_on_writeback = False # Should never happen in an L1 self.dealloc_on_unique = False self.dealloc_on_shared = False self.dealloc_backinv_unique = True @@ -78,4 +74,6 @@ class PrivateL1MOESICache(AbstractNode): self.number_of_TBEs = 16 self.number_of_repl_TBEs = 16 self.number_of_snoop_TBEs = 4 + self.number_of_DVM_TBEs = 16 + self.number_of_DVM_snoop_TBEs = 4 self.unify_repl_TBEs = False diff --git a/src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py index 58dc780be6..9c91e05ac1 100644 --- 
a/src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py +++ b/src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py @@ -28,8 +28,9 @@ from itertools import chain from typing import List from m5.objects.SubSystem import SubSystem -from gem5.components.cachehierarchies.ruby.abstract_ruby_cache_hierarchy \ - import AbstractRubyCacheHierarchy +from gem5.components.cachehierarchies.ruby.abstract_ruby_cache_hierarchy import ( + AbstractRubyCacheHierarchy, +) from gem5.components.cachehierarchies.abstract_cache_hierarchy import ( AbstractCacheHierarchy, ) @@ -49,12 +50,7 @@ from .nodes.dma_requestor import DMARequestor from .nodes.directory import SimpleDirectory from .nodes.memory_controller import MemoryController -from m5.objects import ( - NULL, - RubySystem, - RubySequencer, - RubyPortProxy, -) +from m5.objects import NULL, RubySystem, RubySequencer, RubyPortProxy class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy): @@ -113,14 +109,15 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy): # Create the DMA Controllers, if required. 
if board.has_dma_ports(): self.dma_controllers = self._create_dma_controllers(board) - self.ruby_system.num_of_sequencers = len(self.core_clusters) * 2 \ - + len(self.dma_controllers) + self.ruby_system.num_of_sequencers = len( + self.core_clusters + ) * 2 + len(self.dma_controllers) else: self.ruby_system.num_of_sequencers = len(self.core_clusters) * 2 self.ruby_system.network.connectControllers( list( - chain.from_iterable( # Grab the controllers from each cluster + chain.from_iterable( # Grab the controllers from each cluster [ (cluster.dcache, cluster.icache) for cluster in self.core_clusters @@ -139,10 +136,8 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy): self.ruby_system.sys_port_proxy = RubyPortProxy() board.connect_system_port(self.ruby_system.sys_port_proxy.in_ports) - def _create_core_cluster(self, - core: AbstractCore, - core_num: int, - board: AbstractBoard + def _create_core_cluster( + self, core: AbstractCore, core_num: int, board: AbstractBoard ) -> SubSystem: """Given the core and the core number this function creates a cluster for the core with a split I/D cache @@ -168,9 +163,7 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy): ) cluster.icache.sequencer = RubySequencer( - version=core_num, - dcache=NULL, - clk_domain=cluster.icache.clk_domain, + version=core_num, dcache=NULL, clk_domain=cluster.icache.clk_domain ) cluster.dcache.sequencer = RubySequencer( version=core_num, @@ -206,23 +199,17 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy): return cluster def _create_memory_controllers( - self, - board: AbstractBoard + self, board: AbstractBoard ) -> List[MemoryController]: memory_controllers = [] - for rng, port in board.get_memory().get_mem_ports(): - mc = MemoryController( - self.ruby_system.network, - rng, - port, - ) + for rng, port in board.get_mem_ports(): + mc = MemoryController(self.ruby_system.network, rng, port) mc.ruby_system = self.ruby_system memory_controllers.append(mc) return 
memory_controllers def _create_dma_controllers( - self, - board: AbstractBoard + self, board: AbstractBoard ) -> List[DMARequestor]: dma_controllers = [] for i, port in enumerate(board.get_dma_ports()): @@ -232,10 +219,7 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy): board.get_clock_domain(), ) version = len(board.get_processor().get_cores()) + i - ctrl.sequencer = RubySequencer( - version=version, - in_ports=port - ) + ctrl.sequencer = RubySequencer(version=version, in_ports=port) ctrl.sequencer.dcache = NULL ctrl.ruby_system = self.ruby_system diff --git a/src/python/gem5/components/cachehierarchies/classic/caches/l1dcache.py b/src/python/gem5/components/cachehierarchies/classic/caches/l1dcache.py index 003a56b4fe..da4a4ead9b 100644 --- a/src/python/gem5/components/cachehierarchies/classic/caches/l1dcache.py +++ b/src/python/gem5/components/cachehierarchies/classic/caches/l1dcache.py @@ -34,6 +34,9 @@ from typing import Type class L1DCache(Cache): """ A simple L1 data cache with default values. + + If the cache has a mostly exclusive downstream cache, writeback_clean + should be set to True. """ def __init__( @@ -45,7 +48,7 @@ class L1DCache(Cache): response_latency: int = 1, mshrs: int = 16, tgts_per_mshr: int = 20, - writeback_clean: bool = True, + writeback_clean: bool = False, PrefetcherCls: Type[BasePrefetcher] = StridePrefetcher, ): super().__init__() diff --git a/src/python/gem5/components/cachehierarchies/classic/caches/l1icache.py b/src/python/gem5/components/cachehierarchies/classic/caches/l1icache.py index a83df10312..f1ac89cf1d 100644 --- a/src/python/gem5/components/cachehierarchies/classic/caches/l1icache.py +++ b/src/python/gem5/components/cachehierarchies/classic/caches/l1icache.py @@ -34,6 +34,9 @@ from .....utils.override import * class L1ICache(Cache): """ A simple L1 instruction cache with default values. 
+ + If the cache does not have a downstream cache or the downstream cache + is mostly inclusive as usual, writeback_clean should be set to False. """ def __init__( diff --git a/src/python/gem5/components/cachehierarchies/classic/caches/l2cache.py b/src/python/gem5/components/cachehierarchies/classic/caches/l2cache.py index 43c18718de..86b69855b6 100644 --- a/src/python/gem5/components/cachehierarchies/classic/caches/l2cache.py +++ b/src/python/gem5/components/cachehierarchies/classic/caches/l2cache.py @@ -26,7 +26,7 @@ from .....utils.override import * -from m5.objects import Cache, BasePrefetcher, StridePrefetcher +from m5.objects import Cache, Clusivity, BasePrefetcher, StridePrefetcher from typing import Type @@ -45,7 +45,8 @@ class L2Cache(Cache): response_latency: int = 1, mshrs: int = 20, tgts_per_mshr: int = 12, - writeback_clean: bool = True, + writeback_clean: bool = False, + clusivity: Clusivity = "mostly_incl", PrefetcherCls: Type[BasePrefetcher] = StridePrefetcher, ): super().__init__() @@ -57,4 +58,5 @@ class L2Cache(Cache): self.mshrs = mshrs self.tgts_per_mshr = tgts_per_mshr self.writeback_clean = writeback_clean + self.clusivity = clusivity self.prefetcher = PrefetcherCls() diff --git a/src/python/gem5/components/cachehierarchies/classic/caches/mmu_cache.py b/src/python/gem5/components/cachehierarchies/classic/caches/mmu_cache.py index 7f65e73251..a6eb43cfb4 100644 --- a/src/python/gem5/components/cachehierarchies/classic/caches/mmu_cache.py +++ b/src/python/gem5/components/cachehierarchies/classic/caches/mmu_cache.py @@ -28,9 +28,13 @@ from .....utils.override import * from m5.objects import Cache, BasePrefetcher, StridePrefetcher + class MMUCache(Cache): """ A simple Memory Management Unit (MMU) cache with default values. + + If the cache does not have a downstream cache or the downstream cache + is mostly inclusive as usual, writeback_clean should be set to False. 
""" def __init__( diff --git a/src/python/gem5/components/cachehierarchies/classic/no_cache.py b/src/python/gem5/components/cachehierarchies/classic/no_cache.py index 7e8d314499..f3bbdcdf74 100644 --- a/src/python/gem5/components/cachehierarchies/classic/no_cache.py +++ b/src/python/gem5/components/cachehierarchies/classic/no_cache.py @@ -70,6 +70,9 @@ class NoCache(AbstractClassicCacheHierarchy): membus = SystemXBar(width=64) membus.badaddr_responder = BadAddr() membus.default = membus.badaddr_responder.pio + # the max. routing table size needs to be set + # to a higher value for HBM2 stack + membus.max_routing_table_size = 2048 return membus def __init__( diff --git a/src/python/gem5/components/cachehierarchies/classic/private_l1_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/classic/private_l1_cache_hierarchy.py index c2755fedc1..dc44c9e016 100644 --- a/src/python/gem5/components/cachehierarchies/classic/private_l1_cache_hierarchy.py +++ b/src/python/gem5/components/cachehierarchies/classic/private_l1_cache_hierarchy.py @@ -36,6 +36,7 @@ from m5.objects import Cache, BaseXBar, SystemXBar, BadAddr, Port from ....utils.override import * + class PrivateL1CacheHierarchy(AbstractClassicCacheHierarchy): """ A cache setup where each core has a private L1 data and instruction Cache. 
@@ -103,11 +104,13 @@ class PrivateL1CacheHierarchy(AbstractClassicCacheHierarchy): ] # ITLB Page walk caches self.iptw_caches = [ - MMUCache(size="8KiB") for _ in range(board.get_processor().get_num_cores()) + MMUCache(size="8KiB") + for _ in range(board.get_processor().get_num_cores()) ] # DTLB Page walk caches self.dptw_caches = [ - MMUCache(size="8KiB") for _ in range(board.get_processor().get_num_cores()) + MMUCache(size="8KiB") + for _ in range(board.get_processor().get_num_cores()) ] if board.has_coherent_io(): diff --git a/src/python/gem5/components/cachehierarchies/classic/private_l1_private_l2_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/classic/private_l1_private_l2_cache_hierarchy.py index ff66b30182..f10828b9c2 100644 --- a/src/python/gem5/components/cachehierarchies/classic/private_l1_private_l2_cache_hierarchy.py +++ b/src/python/gem5/components/cachehierarchies/classic/private_l1_private_l2_cache_hierarchy.py @@ -37,6 +37,7 @@ from m5.objects import Cache, L2XBar, BaseXBar, SystemXBar, BadAddr, Port from ....utils.override import * + class PrivateL1PrivateL2CacheHierarchy( AbstractClassicCacheHierarchy, AbstractTwoLevelCacheHierarchy ): @@ -134,12 +135,12 @@ class PrivateL1PrivateL2CacheHierarchy( ] # ITLB Page walk caches self.iptw_caches = [ - MMUCache(size='8KiB') + MMUCache(size="8KiB") for _ in range(board.get_processor().get_num_cores()) ] # DTLB Page walk caches self.dptw_caches = [ - MMUCache(size='8KiB') + MMUCache(size="8KiB") for _ in range(board.get_processor().get_num_cores()) ] diff --git a/src/python/gem5/components/cachehierarchies/classic/private_l1_shared_l2_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/classic/private_l1_shared_l2_cache_hierarchy.py new file mode 100644 index 0000000000..602c99c686 --- /dev/null +++ b/src/python/gem5/components/cachehierarchies/classic/private_l1_shared_l2_cache_hierarchy.py @@ -0,0 +1,182 @@ +# Copyright (c) 2022 The Regents of the Yonsei University +# All 
rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from ..abstract_cache_hierarchy import AbstractCacheHierarchy +from .abstract_classic_cache_hierarchy import AbstractClassicCacheHierarchy +from ..abstract_two_level_cache_hierarchy import AbstractTwoLevelCacheHierarchy +from .caches.l1dcache import L1DCache +from .caches.l1icache import L1ICache +from .caches.l2cache import L2Cache +from .caches.mmu_cache import MMUCache +from ...boards.abstract_board import AbstractBoard +from ....isas import ISA +from m5.objects import Cache, L2XBar, BaseXBar, SystemXBar, BadAddr, Port + +from ....utils.override import * + + +class PrivateL1SharedL2CacheHierarchy( + AbstractClassicCacheHierarchy, AbstractTwoLevelCacheHierarchy +): + """ + A cache setup where each core has a private L1 Data and Instruction Cache, + and a L2 cache is shared with all cores. The shared L2 cache is mostly + inclusive with respect to the split I/D L1 and MMU caches. + """ + + @staticmethod + def _get_default_membus() -> SystemXBar: + """ + A method used to obtain the default memory bus of 64 bit in width for + the PrivateL1SharedL2 CacheHierarchy. + + :returns: The default memory bus for the PrivateL1SharedL2 + CacheHierarchy. + + :rtype: SystemXBar + """ + membus = SystemXBar(width=64) + membus.badaddr_responder = BadAddr() + membus.default = membus.badaddr_responder.pio + return membus + + def __init__( + self, + l1d_size: str, + l1i_size: str, + l2_size: str, + l1d_assoc: int = 8, + l1i_assoc: int = 8, + l2_assoc: int = 16, + membus: BaseXBar = _get_default_membus.__func__(), + ) -> None: + """ + :param l1d_size: The size of the L1 Data Cache (e.g., "32kB"). + :param l1i_size: The size of the L1 Instruction Cache (e.g., "32kB"). + :param l2_size: The size of the L2 Cache (e.g., "256kB"). + :param l1d_assoc: The associativity of the L1 Data Cache. + :param l1i_assoc: The associativity of the L1 Instruction Cache. + :param l2_assoc: The associativity of the L2 Cache. + :param membus: The memory bus. 
This parameter is optional and + will default to a 64 bit width SystemXBar if not specified. + """ + + AbstractClassicCacheHierarchy.__init__(self=self) + AbstractTwoLevelCacheHierarchy.__init__( + self, + l1i_size=l1i_size, + l1i_assoc=l1i_assoc, + l1d_size=l1d_size, + l1d_assoc=l1d_assoc, + l2_size=l2_size, + l2_assoc=l2_assoc, + ) + + self.membus = membus + + @overrides(AbstractClassicCacheHierarchy) + def get_mem_side_port(self) -> Port: + return self.membus.mem_side_ports + + @overrides(AbstractClassicCacheHierarchy) + def get_cpu_side_port(self) -> Port: + return self.membus.cpu_side_ports + + @overrides(AbstractCacheHierarchy) + def incorporate_cache(self, board: AbstractBoard) -> None: + + # Set up the system port for functional access from the simulator. + board.connect_system_port(self.membus.cpu_side_ports) + + for cntr in board.get_memory().get_memory_controllers(): + cntr.port = self.membus.mem_side_ports + + self.l1icaches = [ + L1ICache( + size=self._l1i_size, + assoc=self._l1i_assoc, + writeback_clean=False, + ) + for i in range(board.get_processor().get_num_cores()) + ] + self.l1dcaches = [ + L1DCache(size=self._l1d_size, assoc=self._l1d_assoc) + for i in range(board.get_processor().get_num_cores()) + ] + self.l2bus = L2XBar() + self.l2cache = L2Cache(size=self._l2_size, assoc=self._l2_assoc) + # ITLB Page walk caches + self.iptw_caches = [ + MMUCache(size="8KiB", writeback_clean=False) + for _ in range(board.get_processor().get_num_cores()) + ] + # DTLB Page walk caches + self.dptw_caches = [ + MMUCache(size="8KiB", writeback_clean=False) + for _ in range(board.get_processor().get_num_cores()) + ] + + if board.has_coherent_io(): + self._setup_io_cache(board) + + for i, cpu in enumerate(board.get_processor().get_cores()): + + cpu.connect_icache(self.l1icaches[i].cpu_side) + cpu.connect_dcache(self.l1dcaches[i].cpu_side) + + self.l1icaches[i].mem_side = self.l2bus.cpu_side_ports + self.l1dcaches[i].mem_side = self.l2bus.cpu_side_ports + 
self.iptw_caches[i].mem_side = self.l2bus.cpu_side_ports + self.dptw_caches[i].mem_side = self.l2bus.cpu_side_ports + + cpu.connect_walker_ports( + self.iptw_caches[i].cpu_side, self.dptw_caches[i].cpu_side + ) + + if board.get_processor().get_isa() == ISA.X86: + int_req_port = self.membus.mem_side_ports + int_resp_port = self.membus.cpu_side_ports + cpu.connect_interrupt(int_req_port, int_resp_port) + else: + cpu.connect_interrupt() + + self.l2bus.mem_side_ports = self.l2cache.cpu_side + self.membus.cpu_side_ports = self.l2cache.mem_side + + def _setup_io_cache(self, board: AbstractBoard) -> None: + """Create a cache for coherent I/O connections""" + self.iocache = Cache( + assoc=8, + tag_latency=50, + data_latency=50, + response_latency=50, + mshrs=20, + size="1kB", + tgts_per_mshr=12, + addr_ranges=board.mem_ranges, + ) + self.iocache.mem_side = self.membus.cpu_side_ports + self.iocache.cpu_side = board.get_mem_side_coherent_io_port() diff --git a/src/python/gem5/components/cachehierarchies/ruby/caches/abstract_l1_cache.py b/src/python/gem5/components/cachehierarchies/ruby/caches/abstract_l1_cache.py index 7f014dc382..683d69584c 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/caches/abstract_l1_cache.py +++ b/src/python/gem5/components/cachehierarchies/ruby/caches/abstract_l1_cache.py @@ -55,21 +55,10 @@ class AbstractL1Cache(L1Cache_Controller): def getBlockSizeBits(self): bits = int(math.log(self._cache_line_size, 2)) - if 2 ** bits != self._cache_line_size.value: + if 2**bits != self._cache_line_size.value: raise Exception("Cache line size not a power of 2!") return bits - def sendEvicts(self, core: AbstractCore, target_isa: ISA): - """True if the CPU model or ISA requires sending evictions from caches - to the CPU. Two scenarios warrant forwarding evictions to the CPU: - 1. The O3 model must keep the LSQ coherent with the caches - 2. The x86 mwait instruction is built on top of coherence - 3. 
The local exclusive monitor in ARM systems - """ - if core.get_type() is CPUTypes.O3 or target_isa in (ISA.X86, ISA.ARM): - return True - return False - @abstractmethod def connectQueues(self, network): """Connect all of the queues for this controller.""" diff --git a/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/directory.py b/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/directory.py index 990b0bf66a..cd4f166fed 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/directory.py +++ b/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/directory.py @@ -27,10 +27,7 @@ from ......utils.override import overrides from ..abstract_directory import AbstractDirectory -from m5.objects import ( - MessageBuffer, - RubyDirectoryMemory, -) +from m5.objects import MessageBuffer, RubyDirectoryMemory class Directory(AbstractDirectory): diff --git a/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/l1_cache.py b/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/l1_cache.py index 7f6269d7e0..0e0e333da9 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/l1_cache.py +++ b/src/python/gem5/components/cachehierarchies/ruby/caches/mesi_two_level/l1_cache.py @@ -29,12 +29,7 @@ from ......isas import ISA from ..abstract_l1_cache import AbstractL1Cache from ......utils.override import * -from m5.objects import ( - MessageBuffer, - RubyPrefetcher, - RubyCache, - ClockDomain, -) +from m5.objects import MessageBuffer, RubyPrefetcher, RubyCache, ClockDomain import math @@ -74,7 +69,7 @@ class L1Cache(AbstractL1Cache): self.l2_select_num_bits = int(math.log(num_l2Caches, 2)) self.clk_domain = clk_domain self.prefetcher = RubyPrefetcher() - self.send_evictions = self.sendEvicts(core=core, target_isa=target_isa) + self.send_evictions = core.requires_send_evicts() self.transitions_per_cycle = 4 self.enable_prefetch = False diff 
--git a/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/directory.py b/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/directory.py index 23216ec887..e74772cc18 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/directory.py +++ b/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/directory.py @@ -28,10 +28,7 @@ from ..abstract_directory import AbstractDirectory from ......utils.override import overrides -from m5.objects import ( - MessageBuffer, - RubyDirectoryMemory, -) +from m5.objects import MessageBuffer, RubyDirectoryMemory class Directory(AbstractDirectory): diff --git a/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/l1_cache.py b/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/l1_cache.py index 62adfb7917..1368b92bfc 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/l1_cache.py +++ b/src/python/gem5/components/cachehierarchies/ruby/caches/mi_example/l1_cache.py @@ -29,11 +29,7 @@ from .....processors.abstract_core import AbstractCore from ......isas import ISA from ..abstract_l1_cache import AbstractL1Cache -from m5.objects import ( - MessageBuffer, - RubyCache, - ClockDomain, -) +from m5.objects import MessageBuffer, RubyCache, ClockDomain class L1Cache(AbstractL1Cache): @@ -54,7 +50,7 @@ class L1Cache(AbstractL1Cache): ) self.clk_domain = clk_domain - self.send_evictions = self.sendEvicts(core=core, target_isa=target_isa) + self.send_evictions = core.requires_send_evicts() @overrides(AbstractL1Cache) def connectQueues(self, network): diff --git a/src/python/gem5/components/cachehierarchies/ruby/mesi_two_level_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/ruby/mesi_two_level_cache_hierarchy.py index cd3ea7fece..82089a5bdc 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/mesi_two_level_cache_hierarchy.py +++ 
b/src/python/gem5/components/cachehierarchies/ruby/mesi_two_level_cache_hierarchy.py @@ -38,12 +38,7 @@ from .caches.mesi_two_level.l2_cache import L2Cache from .caches.mesi_two_level.directory import Directory from .caches.mesi_two_level.dma_controller import DMAController -from m5.objects import ( - RubySystem, - RubySequencer, - DMASequencer, - RubyPortProxy, -) +from m5.objects import RubySystem, RubySequencer, DMASequencer, RubyPortProxy class MESITwoLevelCacheHierarchy( @@ -110,9 +105,7 @@ class MESITwoLevelCacheHierarchy( ) cache.sequencer = RubySequencer( - version=i, - dcache=cache.L1Dcache, - clk_domain=cache.clk_domain, + version=i, dcache=cache.L1Dcache, clk_domain=cache.clk_domain ) if board.has_io_bus(): @@ -154,7 +147,7 @@ class MESITwoLevelCacheHierarchy( self._directory_controllers = [ Directory(self.ruby_system.network, cache_line_size, range, port) - for range, port in board.get_memory().get_mem_ports() + for range, port in board.get_mem_ports() ] # TODO: Make this prettier: The problem is not being able to proxy # the ruby system correctly diff --git a/src/python/gem5/components/cachehierarchies/ruby/mi_example_cache_hierarchy.py b/src/python/gem5/components/cachehierarchies/ruby/mi_example_cache_hierarchy.py index 352bf00d07..5955ad3b20 100644 --- a/src/python/gem5/components/cachehierarchies/ruby/mi_example_cache_hierarchy.py +++ b/src/python/gem5/components/cachehierarchies/ruby/mi_example_cache_hierarchy.py @@ -37,12 +37,7 @@ from ....utils.override import overrides from ....utils.requires import requires -from m5.objects import ( - RubySystem, - RubySequencer, - DMASequencer, - RubyPortProxy, -) +from m5.objects import RubySystem, RubySequencer, DMASequencer, RubyPortProxy class MIExampleCacheHierarchy(AbstractRubyCacheHierarchy): @@ -51,11 +46,7 @@ class MIExampleCacheHierarchy(AbstractRubyCacheHierarchy): simple point-to-point topology. 
""" - def __init__( - self, - size: str, - assoc: str, - ): + def __init__(self, size: str, assoc: str): """ :param size: The size of each cache in the heirarchy. :param assoc: The associativity of each cache. @@ -127,7 +118,7 @@ class MIExampleCacheHierarchy(AbstractRubyCacheHierarchy): # Create the directory controllers self._directory_controllers = [] - for range, port in board.get_memory().get_mem_ports(): + for range, port in board.get_mem_ports(): dir = Directory( self.ruby_system.network, board.get_cache_line_size(), diff --git a/src/python/gem5/components/memory/__init__.py b/src/python/gem5/components/memory/__init__.py index 5e16865649..78aa4b8e01 100644 --- a/src/python/gem5/components/memory/__init__.py +++ b/src/python/gem5/components/memory/__init__.py @@ -32,5 +32,5 @@ from .single_channel import SingleChannelLPDDR3_1600 from .multi_channel import DualChannelDDR3_1600 from .multi_channel import DualChannelDDR3_2133 from .multi_channel import DualChannelDDR4_2400 -from .multi_channel import HBM2Stack from .multi_channel import DualChannelLPDDR3_1600 +from .hbm import HBM2Stack diff --git a/src/python/gem5/components/memory/abstract_memory_system.py b/src/python/gem5/components/memory/abstract_memory_system.py index 27bc152304..cfbf6ac01c 100644 --- a/src/python/gem5/components/memory/abstract_memory_system.py +++ b/src/python/gem5/components/memory/abstract_memory_system.py @@ -71,3 +71,7 @@ class AbstractMemorySystem(SubSystem): will be raised. 
""" raise NotImplementedError + + def _post_instantiate(self) -> None: + """Called to set up anything needed after m5.instantiate""" + pass diff --git a/src/python/gem5/components/memory/dram_interfaces/hbm.py b/src/python/gem5/components/memory/dram_interfaces/hbm.py index 994265d708..5063c4d9e1 100644 --- a/src/python/gem5/components/memory/dram_interfaces/hbm.py +++ b/src/python/gem5/components/memory/dram_interfaces/hbm.py @@ -38,7 +38,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Interfaces for LPDDR5 memory devices +"""Interfaces for HBM memory devices These memory "interfaces" contain the timing,energy,etc parameters for each memory type and are usually based on datasheets for the memory devices. @@ -194,3 +194,85 @@ class HBM_1000_4H_1x64(HBM_1000_4H_1x128): # self refresh exit time tXS = "65ns" + + +# A single HBM2 x64 interface (tested with HBMCtrl in gem5) +# to be used as a single pseudo channel. The timings are based +# on HBM gen2 specifications. 4H stack, 8Gb per die and total capacity +# of 4GiB. 
+class HBM_2000_4H_1x64(DRAMInterface): + + # 64-bit interface for a single pseudo channel + device_bus_width = 64 + + # HBM2 supports BL4 + burst_length = 4 + + # size of channel in bytes, 4H stack of 8Gb dies is 4GiB per stack; + # with 16 pseudo channels, 256MiB per pseudo channel + device_size = "256MiB" + + device_rowbuffer_size = "1KiB" + + # 1x128 configuration + devices_per_rank = 1 + + ranks_per_channel = 1 + + banks_per_rank = 16 + bank_groups_per_rank = 4 + + # 1000 MHz for 2Gbps DDR data rate + tCK = "1ns" + + tRP = "14ns" + + tCCD_L = "3ns" + + tRCD = "12ns" + tRCD_WR = "6ns" + tCL = "18ns" + tCWL = "7ns" + tRAS = "28ns" + + # BL4 in pseudo channel mode + # DDR @ 1000 MHz means 4 * 1ns / 2 = 2ns + tBURST = "2ns" + + # value for 2Gb device from JEDEC spec + tRFC = "220ns" + + # value for 2Gb device from JEDEC spec + tREFI = "3.9us" + + tWR = "14ns" + tRTP = "5ns" + tWTR = "4ns" + tWTR_L = "9ns" + tRTW = "18ns" + + # tAAD from RBus + tAAD = "1ns" + + # single rank device, set to 0 + tCS = "0ns" + + tRRD = "4ns" + tRRD_L = "6ns" + + # for a single pseudo channel + tXAW = "16ns" + activation_limit = 4 + + # 4tCK + tXP = "8ns" + + # start with tRFC + tXP -> 160ns + 8ns = 168ns + tXS = "216ns" + + page_policy = "close_adaptive" + + read_buffer_size = 64 + write_buffer_size = 64 + + two_cycle_activate = True diff --git a/src/python/gem5/components/memory/hbm.py b/src/python/gem5/components/memory/hbm.py new file mode 100644 index 0000000000..35497c2f89 --- /dev/null +++ b/src/python/gem5/components/memory/hbm.py @@ -0,0 +1,165 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" HBM2 memory system using HBMCtrl +""" + +from .memory import ChanneledMemory +from .abstract_memory_system import AbstractMemorySystem +from math import log +from ...utils.override import overrides +from m5.objects import AddrRange, DRAMInterface, HBMCtrl, Port +from typing import Type, Optional, Union, Sequence, Tuple +from .memory import _try_convert +from .dram_interfaces.hbm import HBM_2000_4H_1x64 + + +class HighBandwidthMemory(ChanneledMemory): + """ + This class extends ChanneledMemory and can be used to create HBM based + memory system where a single physical channel contains two pseudo channels. + This is supposed to be used with the HBMCtrl and two dram (HBM2) interfaces + per channel. + """ + + def __init__( + self, + dram_interface_class: Type[DRAMInterface], + num_channels: Union[int, str], + interleaving_size: Union[int, str], + size: Optional[str] = None, + addr_mapping: Optional[str] = None, + ) -> None: + """ + :param dram_interface_class: The DRAM interface type to create with + this memory controller + :param num_channels: The number of channels that needs to be + simulated + :param size: Optionally specify the size of the DRAM controller's + address space. By default, it starts at 0 and ends at the size of + the DRAM device specified + :param addr_mapping: Defines the address mapping scheme to be used. + If None, it is defaulted to addr_mapping from dram_interface_class. + :param interleaving_size: Defines the interleaving size of the multi- + channel memory system. By default, it is equivalent to the atom + size, i.e., 64. 
+ """ + super().__init__( + dram_interface_class, + num_channels, + interleaving_size, + size, + addr_mapping, + ) + + _num_channels = _try_convert(num_channels, int) + + @overrides(ChanneledMemory) + def _create_mem_interfaces_controller(self): + self._dram = [ + self._dram_class(addr_mapping=self._addr_mapping) + for _ in range(self._num_channels) + ] + self._dram_2 = [ + self._dram_class(addr_mapping=self._addr_mapping) + for _ in range(self._num_channels) + ] + + self.mem_ctrl = [ + HBMCtrl( + dram=self._dram[i], + dram_2=self._dram_2[i], + disable_sanity_check=True, + ) + for i in range(self._num_channels) + ] + + @overrides(ChanneledMemory) + def _interleave_addresses(self): + if self._addr_mapping == "RoRaBaChCo": + rowbuffer_size = ( + self._dram_class.device_rowbuffer_size.value + * self._dram_class.devices_per_rank.value + ) + intlv_low_bit = log(rowbuffer_size, 2) + elif self._addr_mapping in ["RoRaBaCoCh", "RoCoRaBaCh"]: + intlv_low_bit = log(self._intlv_size, 2) + else: + raise ValueError( + "Only these address mappings are supported: " + "RoRaBaChCo, RoRaBaCoCh, RoCoRaBaCh" + ) + + intlv_bits = log(self._num_channels, 2) + mask_list = [] + + for ib in range(int(intlv_bits)): + mask_list.append(1 << int(ib + intlv_low_bit)) + + # for interleaving across pseudo channels (at 64B currently) + mask_list.insert(0, 1 << 6) + for i, ctrl in enumerate(self.mem_ctrl): + ctrl.partitioned_q = False + ctrl.dram.range = AddrRange( + start=self._mem_range.start, + size=self._mem_range.size(), + masks=mask_list, + intlvMatch=(i << 1) | 0, + ) + ctrl.dram_2.range = AddrRange( + start=self._mem_range.start, + size=self._mem_range.size(), + masks=mask_list, + intlvMatch=(i << 1) | 1, + ) + + @overrides(ChanneledMemory) + def get_mem_ports(self) -> Sequence[Tuple[AddrRange, Port]]: + + intlv_bits = log(self._num_channels, 2) + mask_list = [] + + for ib in range(int(intlv_bits)): + mask_list.append(1 << int(ib + log(self._intlv_size, 2))) + addr_ranges = [] + for i in 
range(len(self.mem_ctrl)): + addr_ranges.append( + AddrRange( + start=self._mem_range.start, + size=self._mem_range.size(), + masks=mask_list, + intlvMatch=i, + ) + ) + return [ + (addr_ranges[i], ctrl.port) for i, ctrl in enumerate(self.mem_ctrl) + ] + + +def HBM2Stack( + size: Optional[str] = "4GiB", +) -> AbstractMemorySystem: + return HighBandwidthMemory(HBM_2000_4H_1x64, 8, 128, size=size) diff --git a/src/python/gem5/components/memory/memory.py b/src/python/gem5/components/memory/memory.py index 929a6ae8d8..e7e6cf46e3 100644 --- a/src/python/gem5/components/memory/memory.py +++ b/src/python/gem5/components/memory/memory.py @@ -42,19 +42,22 @@ def _try_convert(val, cls): except: raise Exception(f"Could not convert {val} to {cls}") + def _isPow2(num): log_num = int(log(num, 2)) - if 2 ** log_num != num: + if 2**log_num != num: return False else: return True + class ChanneledMemory(AbstractMemorySystem): """A class to implement multi-channel memory system This class can take a DRAM Interface as a parameter to model a multi channel DDR DRAM memory system. 
""" + def __init__( self, dram_interface_class: Type[DRAMInterface], @@ -104,12 +107,16 @@ class ChanneledMemory(AbstractMemorySystem): else: self._size = self._get_dram_size(num_channels, self._dram_class) + self._create_mem_interfaces_controller() + + def _create_mem_interfaces_controller(self): self._dram = [ self._dram_class(addr_mapping=self._addr_mapping) - for _ in range(num_channels) + for _ in range(self._num_channels) ] + self.mem_ctrl = [ - MemCtrl(dram=self._dram[i]) for i in range(num_channels) + MemCtrl(dram=self._dram[i]) for i in range(self._num_channels) ] def _get_dram_size(self, num_channels: int, dram: DRAMInterface) -> int: @@ -181,5 +188,3 @@ class ChanneledMemory(AbstractMemorySystem): ) self._mem_range = ranges[0] self._interleave_addresses() - - diff --git a/src/python/gem5/components/memory/multi_channel.py b/src/python/gem5/components/memory/multi_channel.py index d54347a271..1f14190c97 100644 --- a/src/python/gem5/components/memory/multi_channel.py +++ b/src/python/gem5/components/memory/multi_channel.py @@ -40,12 +40,8 @@ def DualChannelDDR3_1600( """ A dual channel memory system using DDR3_1600_8x8 based DIMM """ - return ChanneledMemory( - DDR3_1600_8x8, - 2, - 64, - size=size, - ) + return ChanneledMemory(DDR3_1600_8x8, 2, 64, size=size) + def DualChannelDDR3_2133( size: Optional[str] = None, @@ -53,12 +49,8 @@ def DualChannelDDR3_2133( """ A dual channel memory system using DDR3_2133_8x8 based DIMM """ - return ChanneledMemory( - DDR3_2133_8x8, - 2, - 64, - size=size, - ) + return ChanneledMemory(DDR3_2133_8x8, 2, 64, size=size) + def DualChannelDDR4_2400( size: Optional[str] = None, @@ -66,31 +58,10 @@ def DualChannelDDR4_2400( """ A dual channel memory system using DDR4_2400_8x8 based DIMM """ - return ChanneledMemory( - DDR4_2400_8x8, - 2, - 64, - size=size, - ) + return ChanneledMemory(DDR4_2400_8x8, 2, 64, size=size) + def DualChannelLPDDR3_1600( size: Optional[str] = None, ) -> AbstractMemorySystem: - return ChanneledMemory( - 
LPDDR3_1600_1x32, - 2, - 64, - size=size, - ) - -def HBM2Stack( - size: Optional[str] = None, -) -> AbstractMemorySystem: - if not size: - size = "4GiB" - return ChanneledMemory( - HBM_1000_4H_1x64, - 16, - 64, - size=size, - ) + return ChanneledMemory(LPDDR3_1600_1x32, 2, 64, size=size) diff --git a/src/python/gem5/components/memory/simple.py b/src/python/gem5/components/memory/simple.py index 80b123851a..b650a68ba4 100644 --- a/src/python/gem5/components/memory/simple.py +++ b/src/python/gem5/components/memory/simple.py @@ -34,6 +34,7 @@ from ..boards.abstract_board import AbstractBoard from .abstract_memory_system import AbstractMemorySystem from m5.objects import AddrRange, MemCtrl, Port, SimpleMemory + class SingleChannelSimpleMemory(AbstractMemorySystem): """A class to implement single channel memory system using SimpleMemory diff --git a/src/python/gem5/components/memory/single_channel.py b/src/python/gem5/components/memory/single_channel.py index 57ce2323d3..43aab45d76 100644 --- a/src/python/gem5/components/memory/single_channel.py +++ b/src/python/gem5/components/memory/single_channel.py @@ -41,12 +41,8 @@ def SingleChannelDDR3_1600( """ A single channel memory system using DDR3_1600_8x8 based DIMM """ - return ChanneledMemory( - DDR3_1600_8x8, - 1, - 64, - size=size, - ) + return ChanneledMemory(DDR3_1600_8x8, 1, 64, size=size) + def SingleChannelDDR3_2133( size: Optional[str] = None, @@ -54,12 +50,8 @@ def SingleChannelDDR3_2133( """ A single channel memory system using DDR3_2133_8x8 based DIMM """ - return ChanneledMemory( - DDR3_2133_8x8, - 1, - 64, - size=size, - ) + return ChanneledMemory(DDR3_2133_8x8, 1, 64, size=size) + def SingleChannelDDR4_2400( size: Optional[str] = None, @@ -67,31 +59,18 @@ def SingleChannelDDR4_2400( """ A single channel memory system using DDR4_2400_8x8 based DIMM """ - return ChanneledMemory( - DDR4_2400_8x8, - 1, - 64, - size=size, - ) + return ChanneledMemory(DDR4_2400_8x8, 1, 64, size=size) + def 
SingleChannelLPDDR3_1600( size: Optional[str] = None, ) -> AbstractMemorySystem: - return ChanneledMemory( - LPDDR3_1600_1x32, - 1, - 64, - size=size, - ) + return ChanneledMemory(LPDDR3_1600_1x32, 1, 64, size=size) + def SingleChannelHBM( size: Optional[str] = None, ) -> AbstractMemorySystem: if not size: size = "256MiB" - return ChanneledMemory( - HBM_1000_4H_1x128, - 1, - 64, - size=size - ) + return ChanneledMemory(HBM_1000_4H_1x128, 1, 64, size=size) diff --git a/src/python/gem5/components/processors/abstract_core.py b/src/python/gem5/components/processors/abstract_core.py index 32f597d8e2..58296bca3b 100644 --- a/src/python/gem5/components/processors/abstract_core.py +++ b/src/python/gem5/components/processors/abstract_core.py @@ -25,32 +25,42 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from abc import ABCMeta, abstractmethod -from typing import Optional -import importlib -import platform +from typing import Optional, List -from .cpu_types import CPUTypes from ...isas import ISA -from ...utils.requires import requires from m5.objects import BaseMMU, Port, SubSystem + class AbstractCore(SubSystem): __metaclass__ = ABCMeta - def __init__(self, cpu_type: CPUTypes): + def __init__(self): super().__init__() - if cpu_type == CPUTypes.KVM: - requires(kvm_required=True) - self._cpu_type = cpu_type - - def get_type(self) -> CPUTypes: - return self._cpu_type @abstractmethod def get_isa(self) -> ISA: raise NotImplementedError + @abstractmethod + def requires_send_evicts(self) -> bool: + """True if the CPU model or ISA requires sending evictions from caches + to the CPU. Scenarios warrant forwarding evictions to the CPU: + 1. The O3 model must keep the LSQ coherent with the caches + 2. The x86 mwait instruction is built on top of coherence + 3. The local exclusive monitor in ARM systems + """ + return False + + @abstractmethod + def is_kvm_core(self) -> bool: + """ + KVM cores need setup differently than other cores. 
Frequently it's + useful to know whether a core is a KVM core or not. This function helps + with this. + """ + raise NotImplementedError + @abstractmethod def connect_icache(self, port: Port) -> None: """ @@ -92,10 +102,11 @@ class AbstractCore(SubSystem): @abstractmethod def connect_interrupt( - self, interrupt_requestor: Optional[Port] = None, - interrupt_responce: Optional[Port] = None + self, + interrupt_requestor: Optional[Port] = None, + interrupt_responce: Optional[Port] = None, ) -> None: - """ Connect the core interrupts to the interrupt controller + """Connect the core interrupts to the interrupt controller This function is usually called from the cache hierarchy since the optional ports can be implemented as cache ports. @@ -104,82 +115,43 @@ class AbstractCore(SubSystem): @abstractmethod def get_mmu(self) -> BaseMMU: - """ Return the MMU for this core. + """Return the MMU for this core. This is used in the board to setup system-specific MMU settings. """ raise NotImplementedError - @classmethod - def cpu_simobject_factory(cls, cpu_type: CPUTypes, isa: ISA, core_id: int): + @abstractmethod + def _set_simpoint( + self, inst_starts: List[int], board_initialized: bool + ) -> None: + """Schedule simpoint exit events for the core. + + This is used to raise SIMPOINT_BEGIN exit events in the gem5 standard + library. This is called through the set_workload functions and should + not be called directly. Duplicate instruction counts in the inst_starts list will not + be scheduled. + + :param inst_starts: a list of SimPoints starting instructions + :param board_initialized: True if the board has already been + initialized, otherwise False. This parameter is necessary as simpoints + are setup differently dependent on this. """ - A factory used to return the SimObject core object given the cpu type, - and ISA target. An exception will be thrown if there is an - incompatibility. 
+ raise NotImplementedError("This core type does not support simpoints") - :param cpu_type: The target CPU type. - :param isa: The target ISA. - :param core_id: The id of the core to be returned. + @abstractmethod + def _set_inst_stop_any_thread( + self, inst: int, board_initialized: bool + ) -> None: + """Schedule an exit event when any thread in this core reaches the + given number of instructions. This is called through the simulator + module and should not be called directly. + + This is used to raise MAX_INSTS exit event in the gem5 standard library + + :param inst: a number of instructions + :param board_initialized: True if the board has already been + initialized, otherwise False. This parameter is necessary as the + instruction stop is setup differently dependent on this. """ - requires(isa_required=isa) - - _isa_string_map = { - ISA.X86 : "X86", - ISA.ARM : "Arm", - ISA.RISCV : "Riscv", - ISA.SPARC : "Sparc", - ISA.POWER : "Power", - ISA.MIPS : "Mips", - } - - _cpu_types_string_map = { - CPUTypes.ATOMIC : "AtomicSimpleCPU", - CPUTypes.O3 : "O3CPU", - CPUTypes.TIMING : "TimingSimpleCPU", - CPUTypes.KVM : "KvmCPU", - CPUTypes.MINOR : "MinorCPU", - } - - if isa not in _isa_string_map: - raise NotImplementedError(f"ISA '{isa.name}' does not have an" - "entry in `AbstractCore.cpu_simobject_factory._isa_string_map`" - ) - - if cpu_type not in _cpu_types_string_map: - raise NotImplementedError(f"CPUType '{cpu_type.name}' " - "does not have an entry in " - "`AbstractCore.cpu_simobject_factory._cpu_types_string_map`" - ) - - if cpu_type == CPUTypes.KVM: - # For some reason, the KVM CPU is under "m5.objects" not the - # "m5.objects.{ISA}CPU". - module_str = f"m5.objects" - else: - module_str = f"m5.objects.{_isa_string_map[isa]}CPU" - - # GEM5 compiles two versions of KVM for ARM depending upon the host CPU - # : ArmKvmCPU and ArmV8KvmCPU for 32 bit (Armv7l) and 64 bit (Armv8) - # respectively. 
- - if isa.name == "ARM" and \ - cpu_type == CPUTypes.KVM and \ - platform.architecture()[0] == "64bit": - cpu_class_str = f"{_isa_string_map[isa]}V8"\ - f"{_cpu_types_string_map[cpu_type]}" - else: - cpu_class_str = f"{_isa_string_map[isa]}"\ - f"{_cpu_types_string_map[cpu_type]}" - - try: - to_return_cls = getattr(importlib.import_module(module_str), - cpu_class_str - ) - except ImportError: - raise Exception( - f"Cannot find CPU type '{cpu_type.name}' for '{isa.name}' " - "ISA. Please ensure you have compiled the correct version of " - "gem5." - ) - - return to_return_cls(cpu_id=core_id) + raise NotImplementedError("This core type does not support MAX_INSTS") diff --git a/src/python/gem5/components/processors/abstract_generator.py b/src/python/gem5/components/processors/abstract_generator.py new file mode 100644 index 0000000000..ff5387dd14 --- /dev/null +++ b/src/python/gem5/components/processors/abstract_generator.py @@ -0,0 +1,70 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from abc import abstractmethod +from ...utils.override import overrides +from ..boards.mem_mode import MemMode +from .abstract_generator_core import AbstractGeneratorCore + +from .abstract_processor import AbstractProcessor +from ..boards.abstract_board import AbstractBoard + +from typing import List + + +class AbstractGenerator(AbstractProcessor): + """The abstract generator + It defines the external interface of every generator component. + """ + + def __init__(self, cores: List[AbstractGeneratorCore]) -> None: + """ + Create a list of AbstractGeneratorCore (which is an AbstractCore), + to pass to the constructor of the AbstractProcessor. Due to the + different prototypes for the constructor of different generator types + inputs are noted as *args. This way the abstract method _create_cores + could be called without AbstractGenerator having to know what the + prototype for the constructor of the inheriting class is. It also + limits the _create_cores function to only using positional arguments. + keyword (optional arguments) are still allowable in the constructor of + the inheriting classes. 
+ """ + super().__init__(cores=cores) + + @overrides(AbstractProcessor) + def incorporate_processor(self, board: AbstractBoard) -> None: + board.set_mem_mode(MemMode.TIMING) + + @abstractmethod + def start_traffic(self) -> None: + """ + Depending on what the internal generator core for inheriting classes is + this method needs to be implemented in detail or implmeneted as pass. + """ + raise NotImplementedError + + def _post_instantiate(self) -> None: + self.start_traffic() diff --git a/src/python/gem5/components/processors/abstract_generator_core.py b/src/python/gem5/components/processors/abstract_generator_core.py index 48a4e818f0..b49e86ee19 100644 --- a/src/python/gem5/components/processors/abstract_generator_core.py +++ b/src/python/gem5/components/processors/abstract_generator_core.py @@ -25,13 +25,12 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from abc import abstractmethod from m5.objects import Port, PortTerminator from ...utils.override import overrides -from .cpu_types import CPUTypes from .abstract_core import AbstractCore from ...isas import ISA -from ...utils.requires import requires from typing import Optional @@ -48,14 +47,16 @@ class AbstractGeneratorCore(AbstractCore): def __init__(self): """ - Create an AbstractCore with the CPUType of Timing. Also, setup a - dummy generator object to connect to icache + Create an AbstractCore. Also, setup a dummy generator object to connect + to icache. """ - # TODO: Remove the CPU Type parameter. This not needed. - # Jira issue here: https://gem5.atlassian.net/browse/GEM5-1031 - super().__init__(CPUTypes.TIMING) + super().__init__() self.port_end = PortTerminator() + @overrides(AbstractCore) + def is_kvm_core(self) -> bool: + return False + @overrides(AbstractCore) def get_isa(self) -> ISA: return ISA.NULL @@ -100,3 +101,12 @@ class AbstractGeneratorCore(AbstractCore): connect them to walker ports. Just pass here. 
""" pass + + @abstractmethod + def start_traffic(self): + """ + External interface to start generating the trace of addresses. + Depending on what SimObject is wrapped by this component this method + might need be implemented. + """ + raise NotImplementedError diff --git a/src/python/gem5/components/processors/abstract_processor.py b/src/python/gem5/components/processors/abstract_processor.py index e6f6395acc..a0f8b5cf44 100644 --- a/src/python/gem5/components/processors/abstract_processor.py +++ b/src/python/gem5/components/processors/abstract_processor.py @@ -34,27 +34,38 @@ from m5.objects import SubSystem from ..boards.abstract_board import AbstractBoard from ...isas import ISA -from typing import List +from typing import List, Optional class AbstractProcessor(SubSystem): __metaclass__ = ABCMeta - def __init__(self, cores: List[AbstractCore]) -> None: + def __init__( + self, + cores: Optional[List[AbstractCore]] = None, + isa: ISA = ISA.NULL, + ) -> None: + """Set the cores on the processor + Cores are optional for some processor types. If a processor does not + set the cores here, it must override `get_num_cores` and `get_cores` + """ super().__init__() - assert len(cores) > 0 - # In the stdlib we assume the system processor conforms to a single - # ISA target. - assert len(set(core.get_isa() for core in cores)) == 1 - self._isa = cores[0].get_isa() - - self.cores = cores + if cores: + # In the stdlib we assume the system processor conforms to a single + # ISA target. 
+ assert len(set(core.get_isa() for core in cores)) == 1 + self.cores = cores + self._isa = cores[0].get_isa() + else: + self._isa = isa def get_num_cores(self) -> int: + assert getattr(self, "cores") return len(self.cores) def get_cores(self) -> List[AbstractCore]: + assert getattr(self, "cores") return self.cores def get_isa(self) -> ISA: @@ -63,3 +74,7 @@ class AbstractProcessor(SubSystem): @abstractmethod def incorporate_processor(self, board: AbstractBoard) -> None: raise NotImplementedError + + def _post_instantiate(self) -> None: + """Called to set up anything needed after m5.instantiate""" + pass diff --git a/src/python/gem5/components/processors/base_cpu_core.py b/src/python/gem5/components/processors/base_cpu_core.py new file mode 100644 index 0000000000..631fd0ad0e --- /dev/null +++ b/src/python/gem5/components/processors/base_cpu_core.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from typing import Optional, List +from ...utils.requires import requires +from .abstract_core import AbstractCore + +from ...isas import ISA +from ...runtime import get_runtime_isa +from ...utils.override import overrides +from ...utils.requires import requires + +from m5.objects import BaseMMU, Port, BaseCPU, Process + + +class BaseCPUCore(AbstractCore): + """ + An stdlib AbstractCore subclass which wraps a BaseCPU SimObject type. + """ + + def __init__(self, core: BaseCPU, isa: Optional[ISA] = None): + super().__init__() + + # There is some annoying redundancy here. The BaseCPU type already + # defines the ISA, so here we are defining it twice. However, there + # currently isn't a good way to get the ISA from the BaseCPU Type. + if isa: + requires(isa_required=isa) + self._isa = isa + else: + self._isa = get_runtime_isa() + + self.core = core + self.core.createThreads() + + def get_simobject(self) -> BaseCPU: + return self.core + + @overrides(AbstractCore) + def requires_send_evicts(self) -> bool: + if self.get_isa() in (ISA.ARM, ISA.X86): + # * The x86 `mwait`` instruction is built on top of coherence, + # therefore evictions must be sent from cache to the CPU Core. + # + # * The local exclusive monitor in ARM systems requires the sending + # of evictions from cache to the CPU Core. + return True + + # The O3 model must keep the LSQ coherent with the caches. 
+ # The code below will check to see if the current base CPU is of the O3 + # type for the current ISA target (a bit ugly but it works). + + try: + from m5.objects import BaseO3CPU + + return isinstance(self.get_simobject(), BaseO3CPU) + except ImportError: + # If, for whatever reason, the BaseO3CPU is not importable, then + # the current core cannot be an an O3 CPU. We therefore return + # False. + return False + + @overrides(AbstractCore) + def is_kvm_core(self) -> bool: + + try: + from m5.objects import BaseKvmCPU + + return isinstance(self.core, BaseKvmCPU) + except ImportError: + # If importing BaseKvmCPU throws an exception then it's because + # it's not compiled into the binary. If this is the case then this + # can't be a KVM core. + return False + + def get_isa(self) -> ISA: + return self._isa + + @overrides(AbstractCore) + def connect_icache(self, port: Port) -> None: + self.core.icache_port = port + + @overrides(AbstractCore) + def connect_dcache(self, port: Port) -> None: + self.core.dcache_port = port + + @overrides(AbstractCore) + def connect_walker_ports(self, port1: Port, port2: Port) -> None: + if self.get_isa() == ISA.ARM: + + # Unlike X86 and RISCV MMU, the ARM MMU has two L1 TLB walker ports + # named `walker` and `stage2_walker` for both data and instruction. + # The gem5 standard library currently supports one TLB walker port + # per cache level. Therefore, we are explicitly setting the walker + # ports and not setting the stage2_walker ports for ARM systems. 
+ + self.core.mmu.itb_walker.port = port1 + self.core.mmu.dtb_walker.port = port2 + else: + self.core.mmu.connectWalkerPorts(port1, port2) + + @overrides(AbstractCore) + def set_workload(self, process: Process) -> None: + self.core.workload = process + + @overrides(AbstractCore) + def set_switched_out(self, value: bool) -> None: + self.core.switched_out = value + + @overrides(AbstractCore) + def connect_interrupt( + self, + interrupt_requestor: Optional[Port] = None, + interrupt_responce: Optional[Port] = None, + ) -> None: + + # TODO: This model assumes that we will only create an interrupt + # controller as we require it. Not sure how true this is in all cases. + self.core.createInterruptController() + + if self.get_isa().value == ISA.X86.value: + if interrupt_requestor != None: + self.core.interrupts[0].pio = interrupt_requestor + self.core.interrupts[0].int_responder = interrupt_requestor + if interrupt_responce != None: + self.core.interrupts[0].int_requestor = interrupt_responce + + @overrides(AbstractCore) + def get_mmu(self) -> BaseMMU: + return self.core.mmu + + @overrides(AbstractCore) + def _set_simpoint( + self, inst_starts: List[int], board_initialized: bool + ) -> None: + if board_initialized: + self.core.scheduleSimpointsInstStop(sorted(set(inst_starts))) + else: + self.core.simpoint_start_insts = sorted(set(inst_starts)) + + @overrides(AbstractCore) + def _set_inst_stop_any_thread( + self, inst: int, board_initialized: bool + ) -> None: + if board_initialized: + self.core.scheduleInstStopAnyThread(inst) + else: + self.core.max_insts_any_thread = inst diff --git a/src/python/gem5/components/processors/base_cpu_processor.py b/src/python/gem5/components/processors/base_cpu_processor.py new file mode 100644 index 0000000000..d311a0fdc0 --- /dev/null +++ b/src/python/gem5/components/processors/base_cpu_processor.py @@ -0,0 +1,103 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +from .base_cpu_core import BaseCPUCore +from ..boards.mem_mode import MemMode +from ...utils.override import overrides +from ..boards.mem_mode import MemMode +from .abstract_processor import AbstractProcessor +from ..boards.abstract_board import AbstractBoard + +from typing import List + +from m5.util import warn +from m5.objects import ( + BaseO3CPU, + BaseMinorCPU, + BaseAtomicSimpleCPU, + BaseNonCachingSimpleCPU, + BaseTimingSimpleCPU, +) + + +class BaseCPUProcessor(AbstractProcessor): + """ + A processor constructed from a List of BaseCPUCores. + + This gives gem5 stdlib users a way to create processors containing BaseCPU + SimObjects. While SimpleProcessor does this by-proxy (the user simply + specifies the desires CPUType and ISA and the correct BaseCPU + instantiation is chosen), this Processor allows a more raw passing + of BaseCPU objects. + + Disclaimer + ---------- + + Multiple cores comprising of different BaseCPU types has not been tested + and is not officially supported. 
+ """ + + def __init__(self, cores: List[BaseCPUCore]): + super().__init__(cores=cores) + + if any(core.is_kvm_core() for core in self.get_cores()): + from m5.objects import KvmVM + + self.kvm_vm = KvmVM() + + @overrides(AbstractProcessor) + def incorporate_processor(self, board: AbstractBoard) -> None: + + if any(core.is_kvm_core() for core in self.get_cores()): + board.kvm_vm = self.kvm_vm + # To get the KVM CPUs to run on different host CPUs + # Specify a different event queue for each CPU + for i, core in enumerate(self.cores): + for obj in core.get_simobject().descendants(): + obj.eventq_index = 0 + core.get_simobject().eventq_index = i + 1 + board.set_mem_mode(MemMode.ATOMIC_NONCACHING) + elif isinstance( + self.cores[0].get_simobject(), + (BaseTimingSimpleCPU, BaseO3CPU, BaseMinorCPU), + ): + board.set_mem_mode(MemMode.TIMING) + elif isinstance( + self.cores[0].get_simobject(), BaseNonCachingSimpleCPU + ): + board.set_mem_mode(MemMode.ATOMIC_NONCACHING) + elif isinstance(self.cores[0].get_simobject(), BaseAtomicSimpleCPU): + if board.get_cache_hierarchy().is_ruby(): + warn( + "Using an atomic core with Ruby will result in " + "'atomic_noncaching' memory mode. This will skip caching " + "completely." + ) + else: + board.set_mem_mode(MemMode.ATOMIC) + else: + raise NotImplementedError diff --git a/src/python/gem5/components/processors/complex_generator.py b/src/python/gem5/components/processors/complex_generator.py index 2101e23ee3..b113640ae7 100644 --- a/src/python/gem5/components/processors/complex_generator.py +++ b/src/python/gem5/components/processors/complex_generator.py @@ -25,18 +25,15 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from ...utils.override import overrides -from ..boards.mem_mode import MemMode from .complex_generator_core import ComplexGeneratorCore +from .abstract_generator import AbstractGenerator -from .abstract_processor import AbstractProcessor -from ..boards.abstract_board import AbstractBoard +from typing import Iterator, List, Any -class ComplexGenerator(AbstractProcessor): +class ComplexGenerator(AbstractGenerator): def __init__(self, num_cores: int = 1) -> None: - super().__init__( - cores=[ComplexGeneratorCore() for i in range(num_cores)] - ) + super().__init__(cores=self._create_cores(num_cores=num_cores)) """The complex generator This class defines an external interface to create a list of complex @@ -45,9 +42,11 @@ class ComplexGenerator(AbstractProcessor): :param num_cores: The number of complex generator cores to create. """ - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - board.set_mem_mode(MemMode.TIMING) + def _create_cores(self, num_cores: int) -> List[ComplexGeneratorCore]: + """ + Create a list of ComplexGeneratorCore. + """ + return [ComplexGeneratorCore() for _ in range(num_cores)] def add_linear( self, @@ -127,6 +126,19 @@ class ComplexGenerator(AbstractProcessor): data_limit, ) + def set_traffic_from_python_generator( + self, generator: Iterator[Any] + ) -> None: + """ + Sets the traffic pattern defined by generator argument. + + :param generator: A python generator object that creates traffic + patterns through calls to methods of PyTrafficGen. + """ + for core in self.cores: + core.set_traffic_from_python_generator(generator) + + @overrides(AbstractGenerator) def start_traffic(self) -> None: """ This function will start the traffic at the top of the traffic list. 
It diff --git a/src/python/gem5/components/processors/complex_generator_core.py b/src/python/gem5/components/processors/complex_generator_core.py index 583b318341..92f62ded09 100644 --- a/src/python/gem5/components/processors/complex_generator_core.py +++ b/src/python/gem5/components/processors/complex_generator_core.py @@ -24,6 +24,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from typing import Iterator, Any from m5.ticks import fromSeconds from m5.util.convert import toLatency, toMemoryBandwidth from m5.objects import PyTrafficGen, Port @@ -178,6 +179,7 @@ class ComplexGeneratorCore(AbstractGeneratorCore): self._traffic_params = self._traffic_params + [param] self._traffic_set = False + @overrides(AbstractGeneratorCore) def start_traffic(self) -> None: """ This function first checks if there are any pending traffics that @@ -239,6 +241,25 @@ class ComplexGeneratorCore(AbstractGeneratorCore): self._traffic_set = True + def set_traffic_from_python_generator( + self, python_generator: Iterator[Any] + ) -> None: + """ + Function to set the traffic from a user defined python generator. + The generator should only only assume one input argument (positional) + for the actual PyTrafficGen object to create the traffic. This is possible + either through using a generator with hardcoded parameters in the + function calls to PyTrafficGen methods or by compiling a flexible + python generator into a generator object with only one + input argument (positional) using functools.partial. + + :param generator: A python generator object that creates traffic + patterns through calls to methods of PyTrafficGen. 
+ """ + if not self._traffic_set: + self._set_traffic() + self._traffic.append(python_generator(self.generator)) + def _create_linear_traffic( self, duration: str, diff --git a/src/python/gem5/components/processors/cpu_types.py b/src/python/gem5/components/processors/cpu_types.py index 969b7a1bd1..e12eb99816 100644 --- a/src/python/gem5/components/processors/cpu_types.py +++ b/src/python/gem5/components/processors/cpu_types.py @@ -24,10 +24,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from ..boards.mem_mode import MemMode + from enum import Enum from typing import Set import os + class CPUTypes(Enum): ATOMIC = "atomic" KVM = "kvm" @@ -35,12 +38,14 @@ class CPUTypes(Enum): TIMING = "timing" MINOR = "minor" -def get_cpu_types_str_set() -> Set[CPUTypes]: + +def get_cpu_types_str_set() -> Set[str]: """ Returns a set of all the CPU types as strings. """ return {cpu_type.value for cpu_type in CPUTypes} + def get_cpu_type_from_str(input: str) -> CPUTypes: """ Will return the correct enum given the input string. This is matched on @@ -57,7 +62,7 @@ def get_cpu_type_from_str(input: str) -> CPUTypes: if input.lower() == cpu_type.value: return cpu_type - valid_cpu_types_list_str =str() + valid_cpu_types_list_str = str() for cpu_type_str in get_cpu_types_str_set(): valid_cpu_types_list_str += f"{os.linesep}{cpu_type_str}" @@ -65,3 +70,21 @@ def get_cpu_type_from_str(input: str) -> CPUTypes: f"CPU type '{input}' does not correspond to a known CPU type. " f"Known CPU Types:{valid_cpu_types_list_str}" ) + + +def get_mem_mode(input: CPUTypes) -> MemMode: + """ + Returns the correct memory mode to be set for a given CPUType. + + :param input: The CPUType to check. 
+ """ + + cpu_mem_mode_map = { + CPUTypes.TIMING: MemMode.TIMING, + CPUTypes.O3: MemMode.TIMING, + CPUTypes.MINOR: MemMode.TIMING, + CPUTypes.KVM: MemMode.ATOMIC_NONCACHING, + CPUTypes.ATOMIC: MemMode.ATOMIC, + } + + return cpu_mem_mode_map[input] diff --git a/src/python/gem5/components/processors/gups_generator.py b/src/python/gem5/components/processors/gups_generator.py index 41d181222d..76ea9e64b4 100644 --- a/src/python/gem5/components/processors/gups_generator.py +++ b/src/python/gem5/components/processors/gups_generator.py @@ -26,16 +26,15 @@ from typing import Optional + from m5.objects import Addr from ...utils.override import overrides -from ..boards.mem_mode import MemMode -from .abstract_processor import AbstractProcessor -from ..boards.abstract_board import AbstractBoard +from .abstract_generator import AbstractGenerator from .gups_generator_core import GUPSGeneratorCore -class GUPSGenerator(AbstractProcessor): +class GUPSGenerator(AbstractGenerator): def __init__( self, start_addr: Addr, @@ -68,12 +67,10 @@ class GUPSGenerator(AbstractProcessor): ] ) - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - board.set_mem_mode(MemMode.TIMING) - + @overrides(AbstractGenerator) def start_traffic(self): - # This function should be implemented so that GUPSGenerator could be - # used in the same scripts that use LinearGenerator, RandomGenerator, - # and ComplexGenrator + """ + Since GUPSGeneratorCore does not need a call to start_traffic to + start generation. This function is just pass. 
+ """ pass diff --git a/src/python/gem5/components/processors/gups_generator_core.py b/src/python/gem5/components/processors/gups_generator_core.py index f315b8bf1e..0090c72847 100644 --- a/src/python/gem5/components/processors/gups_generator_core.py +++ b/src/python/gem5/components/processors/gups_generator_core.py @@ -27,6 +27,7 @@ from typing import Optional from ...utils.override import overrides +from .abstract_core import AbstractCore from .abstract_generator_core import AbstractGeneratorCore from m5.objects import Port, GUPSGen, Addr, SrcClockDomain, VoltageDomain @@ -44,18 +45,14 @@ class GUPSGeneratorCore(AbstractGeneratorCore): """ super().__init__() self.generator = GUPSGen( - start_addr=start_addr, - mem_size=mem_size, - update_limit=update_limit, - ) + start_addr=start_addr, mem_size=mem_size, update_limit=update_limit + ) if clk_freq: clock_domain = SrcClockDomain( clock=clk_freq, voltage_domain=VoltageDomain() ) self.generator.clk_domain = clock_domain - - - @overrides(AbstractGeneratorCore) + @overrides(AbstractCore) def connect_dcache(self, port: Port) -> None: self.generator.port = port diff --git a/src/python/gem5/components/processors/gups_generator_ep.py b/src/python/gem5/components/processors/gups_generator_ep.py index da27494f56..68c9dcea04 100644 --- a/src/python/gem5/components/processors/gups_generator_ep.py +++ b/src/python/gem5/components/processors/gups_generator_ep.py @@ -27,14 +27,13 @@ from typing import Optional from m5.objects import Addr -from ..boards.mem_mode import MemMode from ...utils.override import overrides from m5.util.convert import toMemorySize -from .abstract_processor import AbstractProcessor -from ..boards.abstract_board import AbstractBoard +from .abstract_generator import AbstractGenerator from .gups_generator_core import GUPSGeneratorCore -class GUPSGeneratorEP(AbstractProcessor): + +class GUPSGeneratorEP(AbstractGenerator): def __init__( self, num_cores: int, @@ -72,7 +71,7 @@ class 
GUPSGeneratorEP(AbstractProcessor): start_addr: Addr, mem_size: str, update_limit: int, - clk_freq: Optional[str], + clk_freq: str, ): """ Helper function to create cores. @@ -85,17 +84,15 @@ class GUPSGeneratorEP(AbstractProcessor): start_addr=start_addr + i * chunk_size, mem_size=table_size, update_limit=update_limit, - clk_freq=clk_freq + clk_freq=clk_freq, ) for i in range(num_cores) ] - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - board.set_mem_mode(MemMode.TIMING) - + @overrides(AbstractGenerator) def start_traffic(self): - # This function should be implemented so that GUPSGeneratorEP could be - # used in the same scripts that use LinearGenerator, RandomGenerator, - # and ComplexGenrator + """ + Since GUPSGeneratorCore does not need a call to start_traffic to + start generation. This function is just pass. + """ pass diff --git a/src/python/gem5/components/processors/gups_generator_par.py b/src/python/gem5/components/processors/gups_generator_par.py index 12e8d4cafd..5f6485b9e2 100644 --- a/src/python/gem5/components/processors/gups_generator_par.py +++ b/src/python/gem5/components/processors/gups_generator_par.py @@ -30,10 +30,12 @@ from m5.objects import Addr from ...utils.override import overrides from ..boards.mem_mode import MemMode -from .abstract_processor import AbstractProcessor +from .abstract_generator import AbstractGenerator from ..boards.abstract_board import AbstractBoard from .gups_generator_core import GUPSGeneratorCore -class GUPSGeneratorPAR(AbstractProcessor): + + +class GUPSGeneratorPAR(AbstractGenerator): def __init__( self, num_cores: int, @@ -71,11 +73,8 @@ class GUPSGeneratorPAR(AbstractProcessor): start_addr: Addr, mem_size: str, update_limit: int, - clk_freq: Optional[str], + clk_freq: str, ): - """ - Helper function to create cores. 
- """ return [ GUPSGeneratorCore( start_addr=start_addr, @@ -86,12 +85,10 @@ class GUPSGeneratorPAR(AbstractProcessor): for _ in range(num_cores) ] - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - board.set_mem_mode(MemMode.TIMING) - + @overrides(AbstractGenerator) def start_traffic(self): - # This function should be implemented so that GUPSGeneratorPAR could be - # used in the same scripts that use LinearGenerator, RandomGenerator, - # and ComplexGenrator + """ + Since GUPSGeneratorCore does not need a call to start_traffic to + start generation. This function is just pass. + """ pass diff --git a/src/python/gem5/components/processors/linear_generator.py b/src/python/gem5/components/processors/linear_generator.py index 7bdc5ed1af..90fe62e7d6 100644 --- a/src/python/gem5/components/processors/linear_generator.py +++ b/src/python/gem5/components/processors/linear_generator.py @@ -25,16 +25,13 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from ...utils.override import overrides -from ..boards.mem_mode import MemMode from .linear_generator_core import LinearGeneratorCore - -from .abstract_processor import AbstractProcessor -from ..boards.abstract_board import AbstractBoard +from .abstract_generator import AbstractGenerator from typing import List -class LinearGenerator(AbstractProcessor): +class LinearGenerator(AbstractGenerator): def __init__( self, num_cores: int = 1, @@ -81,14 +78,14 @@ class LinearGenerator(AbstractProcessor): def _create_cores( self, - num_cores, - duration, - rate, - block_size, - min_addr, - max_addr, - rd_perc, - data_limit, + num_cores: int, + duration: str, + rate: str, + block_size: int, + min_addr: int, + max_addr: int, + rd_perc: int, + data_limit: int, ) -> List[LinearGeneratorCore]: """ The helper function to create the cores for the generator, it will use @@ -104,16 +101,10 @@ class LinearGenerator(AbstractProcessor): rd_perc=rd_perc, data_limit=data_limit, ) - for i in range(num_cores) + for _ in range(num_cores) ] - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - board.set_mem_mode(MemMode.TIMING) - + @overrides(AbstractGenerator) def start_traffic(self) -> None: - """ - This function will start the assigned traffic to this generator. 
- """ for core in self.cores: core.start_traffic() diff --git a/src/python/gem5/components/processors/linear_generator_core.py b/src/python/gem5/components/processors/linear_generator_core.py index e6bf441015..b91b44d6aa 100644 --- a/src/python/gem5/components/processors/linear_generator_core.py +++ b/src/python/gem5/components/processors/linear_generator_core.py @@ -112,10 +112,7 @@ class LinearGeneratorCore(AbstractGeneratorCore): ) yield self.generator.createExit(0) + @overrides(AbstractGeneratorCore) def start_traffic(self) -> None: - """ - A call to this function will start generating the traffic, this call - should happen before m5.simulate() and after m5.instantiate() - """ self._set_traffic() self.generator.start(self._traffic) diff --git a/src/python/gem5/components/processors/random_generator.py b/src/python/gem5/components/processors/random_generator.py index 17c3ce3c17..ca7ed98f89 100644 --- a/src/python/gem5/components/processors/random_generator.py +++ b/src/python/gem5/components/processors/random_generator.py @@ -28,13 +28,13 @@ from ...utils.override import overrides from ..boards.mem_mode import MemMode from .random_generator_core import RandomGeneratorCore -from .abstract_processor import AbstractProcessor +from .abstract_generator import AbstractGenerator from ..boards.abstract_board import AbstractBoard from typing import List -class RandomGenerator(AbstractProcessor): +class RandomGenerator(AbstractGenerator): def __init__( self, num_cores: int = 1, @@ -81,14 +81,14 @@ class RandomGenerator(AbstractProcessor): def _create_cores( self, - num_cores, - duration, - rate, - block_size, - min_addr, - max_addr, - rd_perc, - data_limit, + num_cores: int, + duration: str, + rate: str, + block_size: int, + min_addr: int, + max_addr: int, + rd_perc: int, + data_limit: int, ) -> List[RandomGeneratorCore]: """ The helper function to create the cores for the generator, it will use @@ -104,13 +104,10 @@ class RandomGenerator(AbstractProcessor): 
rd_perc=rd_perc, data_limit=data_limit, ) - for i in range(num_cores) + for _ in range(num_cores) ] - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - board.set_mem_mode(MemMode.TIMING) - + @overrides(AbstractGenerator) def start_traffic(self) -> None: """ This function will start the assigned traffic to this generator. diff --git a/src/python/gem5/components/processors/random_generator_core.py b/src/python/gem5/components/processors/random_generator_core.py index 219040c864..b5aced620d 100644 --- a/src/python/gem5/components/processors/random_generator_core.py +++ b/src/python/gem5/components/processors/random_generator_core.py @@ -112,10 +112,7 @@ class RandomGeneratorCore(AbstractGeneratorCore): ) yield self.generator.createExit(0) + @overrides(AbstractGeneratorCore) def start_traffic(self) -> None: - """ - A call to this function will start generating the traffic, this call - should happen before m5.simulate() and after m5.instantiate(). - """ self._set_traffic() self.generator.start(self._traffic) diff --git a/src/python/gem5/components/processors/simple_core.py b/src/python/gem5/components/processors/simple_core.py index 1fa2f68f91..15e15dc0cf 100644 --- a/src/python/gem5/components/processors/simple_core.py +++ b/src/python/gem5/components/processors/simple_core.py @@ -26,97 +26,124 @@ from typing import Optional from ...utils.requires import requires -from ..processors.abstract_core import AbstractCore - +from .base_cpu_core import BaseCPUCore from .cpu_types import CPUTypes from ...isas import ISA +from ...utils.requires import requires from ...runtime import get_runtime_isa -from ...utils.override import overrides - -from m5.objects import ( - BaseMMU, - Port, - BaseCPU, - Process, -) +import importlib +import platform -class SimpleCore(AbstractCore): +class SimpleCore(BaseCPUCore): + """ + A SimpleCore instantiates a core based on the CPUType enum pass. 
The + SimpleCore creates a single SimObject of that type. + """ def __init__( - self, - cpu_type: CPUTypes, - core_id: int, - isa: Optional[ISA]= None + self, cpu_type: CPUTypes, core_id: int, isa: Optional[ISA] = None ): - super().__init__(cpu_type=cpu_type) + + # If the ISA is not specified, we infer it via the `get_runtime_isa` + # function. if isa: requires(isa_required=isa) - self._isa = isa + isa = isa else: - self._isa = get_runtime_isa() - self.core = AbstractCore.cpu_simobject_factory( - isa=self._isa, - cpu_type=cpu_type, - core_id=core_id + isa = get_runtime_isa() + + super().__init__( + core=SimpleCore.cpu_simobject_factory( + isa=isa, cpu_type=cpu_type, core_id=core_id + ), + isa=isa, ) - self.core.createThreads() - def get_simobject(self) -> BaseCPU: - return self.core + self._cpu_type = cpu_type - @overrides(AbstractCore) - def get_isa(self) -> ISA: - return self._isa + def get_type(self) -> CPUTypes: + return self._cpu_type - @overrides(AbstractCore) - def connect_icache(self, port: Port) -> None: - self.core.icache_port = port + @classmethod + def cpu_simobject_factory(cls, cpu_type: CPUTypes, isa: ISA, core_id: int): + """ + A factory used to return the SimObject core object given the cpu type, + and ISA target. An exception will be thrown if there is an + incompatibility. - @overrides(AbstractCore) - def connect_dcache(self, port: Port) -> None: - self.core.dcache_port = port + :param cpu_type: The target CPU type. + :param isa: The target ISA. + :param core_id: The id of the core to be returned. + """ - @overrides(AbstractCore) - def connect_walker_ports(self, port1: Port, port2: Port) -> None: - if self.get_isa() == ISA.ARM: + assert isa is not None + requires(isa_required=isa) - # Unlike X86 and RISCV MMU, the ARM MMU has two L1 TLB walker ports - # named `walker` and `stage2_walker` for both data and instruction. - # The gem5 standard library currently supports one TLB walker port - # per cache level. 
Therefore, we are explicitly setting the walker - # ports and not setting the stage2_walker ports for ARM systems. + _isa_string_map = { + ISA.X86: "X86", + ISA.ARM: "Arm", + ISA.RISCV: "Riscv", + ISA.SPARC: "Sparc", + ISA.POWER: "Power", + ISA.MIPS: "Mips", + } - self.core.mmu.itb_walker.port = port1 - self.core.mmu.dtb_walker.port = port2 + _cpu_types_string_map = { + CPUTypes.ATOMIC: "AtomicSimpleCPU", + CPUTypes.O3: "O3CPU", + CPUTypes.TIMING: "TimingSimpleCPU", + CPUTypes.KVM: "KvmCPU", + CPUTypes.MINOR: "MinorCPU", + } + + if isa not in _isa_string_map: + raise NotImplementedError( + f"ISA '{isa.name}' does not have an" + "entry in `AbstractCore.cpu_simobject_factory._isa_string_map`" + ) + + if cpu_type not in _cpu_types_string_map: + raise NotImplementedError( + f"CPUType '{cpu_type.name}' " + "does not have an entry in " + "`AbstractCore.cpu_simobject_factory._cpu_types_string_map`" + ) + + if cpu_type == CPUTypes.KVM: + # For some reason, the KVM CPU is under "m5.objects" not the + # "m5.objects.{ISA}CPU". + module_str = f"m5.objects" else: - self.core.mmu.connectWalkerPorts(port1, port2) + module_str = f"m5.objects.{_isa_string_map[isa]}CPU" - @overrides(AbstractCore) - def set_workload(self, process: Process) -> None: - self.core.workload = process + # GEM5 compiles two versions of KVM for ARM depending upon the host CPU + # : ArmKvmCPU and ArmV8KvmCPU for 32 bit (Armv7l) and 64 bit (Armv8) + # respectively. 
- @overrides(AbstractCore) - def set_switched_out(self, value: bool) -> None: - self.core.switched_out = value + if ( + isa.name == "ARM" + and cpu_type == CPUTypes.KVM + and platform.architecture()[0] == "64bit" + ): + cpu_class_str = ( + f"{_isa_string_map[isa]}V8" + f"{_cpu_types_string_map[cpu_type]}" + ) + else: + cpu_class_str = ( + f"{_isa_string_map[isa]}" f"{_cpu_types_string_map[cpu_type]}" + ) - @overrides(AbstractCore) - def connect_interrupt( - self, interrupt_requestor: Optional[Port] = None, - interrupt_responce: Optional[Port] = None - ) -> None: + try: + to_return_cls = getattr( + importlib.import_module(module_str), cpu_class_str + ) + except ImportError: + raise Exception( + f"Cannot find CPU type '{cpu_type.name}' for '{isa.name}' " + "ISA. Please ensure you have compiled the correct version of " + "gem5." + ) - # TODO: This model assumes that we will only create an interrupt - # controller as we require it. Not sure how true this is in all cases. - self.core.createInterruptController() - - if self.get_isa() == ISA.X86: - if interrupt_requestor != None: - self.core.interrupts[0].pio = interrupt_requestor - self.core.interrupts[0].int_responder = interrupt_requestor - if interrupt_responce != None: - self.core.interrupts[0].int_requestor = interrupt_responce - - @overrides(AbstractCore) - def get_mmu(self) -> BaseMMU: - return self.core.mmu + return to_return_cls(cpu_id=core_id) diff --git a/src/python/gem5/components/processors/simple_processor.py b/src/python/gem5/components/processors/simple_processor.py index 3c9c5c84df..510e37df0e 100644 --- a/src/python/gem5/components/processors/simple_processor.py +++ b/src/python/gem5/components/processors/simple_processor.py @@ -25,34 +25,27 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from ...utils.override import overrides -from ..boards.mem_mode import MemMode +from m5.util import warn +from .base_cpu_processor import BaseCPUProcessor from ..processors.simple_core import SimpleCore -from m5.util import warn - -from .abstract_processor import AbstractProcessor from .cpu_types import CPUTypes from ...isas import ISA -from ..boards.abstract_board import AbstractBoard from typing import Optional -class SimpleProcessor(AbstractProcessor): +class SimpleProcessor(BaseCPUProcessor): """ - A SimpeProcessor contains a number of cores of a a single CPUType. + A SimpleProcessor contains a number of cores of SimpleCore objects of the + same CPUType. """ def __init__( - self, - cpu_type: CPUTypes, - num_cores: int, - isa: Optional[ISA] = None, + self, cpu_type: CPUTypes, num_cores: int, isa: Optional[ISA] = None ) -> None: """ - param cpu_type: The CPU type for each type in the processor. -: + :param cpu_type: The CPU type for each type in the processor. :param num_cores: The number of CPU cores in the processor. :param isa: The ISA of the processor. This argument is optional. If not @@ -61,57 +54,17 @@ class SimpleProcessor(AbstractProcessor): recommended you explicitly set your ISA via SimpleProcessor construction. """ - super().__init__( - cores=self._create_cores( - cpu_type=cpu_type, - num_cores=num_cores, - isa = isa, + if not isa: + warn( + "An ISA for the SimpleProcessor was not set. This will " + "result in usage of `runtime.get_runtime_isa` to obtain the " + "ISA. This function is deprecated and will be removed in " + "future releases of gem5. Please explicitly state the ISA " + "via the processor constructor." 
) - ) - - self._cpu_type = cpu_type - if self._cpu_type == CPUTypes.KVM: - from m5.objects import KvmVM - - self.kvm_vm = KvmVM() - - def _create_cores( - self, - cpu_type: CPUTypes, - num_cores: int, - isa: Optional[ISA] - ): - return [ - SimpleCore(cpu_type=cpu_type, core_id=i, isa=isa,) \ + super().__init__( + cores=[ + SimpleCore(cpu_type=cpu_type, core_id=i, isa=isa) for i in range(num_cores) - ] - - @overrides(AbstractProcessor) - def incorporate_processor(self, board: AbstractBoard) -> None: - if self._cpu_type == CPUTypes.KVM: - board.kvm_vm = self.kvm_vm - - # Set the memory mode. - if self._cpu_type in (CPUTypes.TIMING, CPUTypes.O3, CPUTypes.MINOR): - board.set_mem_mode(MemMode.TIMING) - elif self._cpu_type == CPUTypes.KVM: - board.set_mem_mode(MemMode.ATOMIC_NONCACHING) - elif self._cpu_type == CPUTypes.ATOMIC: - if board.get_cache_hierarchy().is_ruby(): - warn( - "Using an atomic core with Ruby will result in " - "'atomic_noncaching' memory mode. This will skip caching " - "completely." 
- ) - else: - board.set_mem_mode(MemMode.ATOMIC) - else: - raise NotImplementedError - - if self._cpu_type == CPUTypes.KVM: - # To get the KVM CPUs to run on different host CPUs - # Specify a different event queue for each CPU - for i, core in enumerate(self.cores): - for obj in core.get_simobject().descendants(): - obj.eventq_index = 0 - core.get_simobject().eventq_index = i + 1 + ] + ) diff --git a/src/python/gem5/components/processors/simple_switchable_processor.py b/src/python/gem5/components/processors/simple_switchable_processor.py index 52ba013306..56603fa98b 100644 --- a/src/python/gem5/components/processors/simple_switchable_processor.py +++ b/src/python/gem5/components/processors/simple_switchable_processor.py @@ -27,9 +27,10 @@ from ..boards.mem_mode import MemMode from ..boards.abstract_board import AbstractBoard from ..processors.simple_core import SimpleCore -from ..processors.cpu_types import CPUTypes +from ..processors.cpu_types import CPUTypes, get_mem_mode from .switchable_processor import SwitchableProcessor from ...isas import ISA +from m5.util import warn from ...utils.override import * @@ -52,9 +53,9 @@ class SimpleSwitchableProcessor(SwitchableProcessor): isa: Optional[ISA] = None, ) -> None: """ - param starting_core_type: The CPU type for each type in the processor + :param starting_core_type: The CPU type for each type in the processor to start with (i.e., when the simulation has just started). -: + :param switch_core_types: The CPU type for each core, to be switched to.. @@ -65,6 +66,15 @@ class SimpleSwitchableProcessor(SwitchableProcessor): construction. """ + if not isa: + warn( + "An ISA for the SimpleSwitchableProcessor was not set. This " + "will result in usage of `runtime.get_runtime_isa` to obtain " + "the ISA. This function is deprecated and will be removed in " + "future releases of gem5. Please explicitly state the ISA " + "via the processor constructor." 
+ ) + if num_cores <= 0: raise AssertionError("Number of cores must be a positive integer!") @@ -72,14 +82,7 @@ class SimpleSwitchableProcessor(SwitchableProcessor): self._switch_key = "switch" self._current_is_start = True - if starting_core_type in (CPUTypes.TIMING, CPUTypes.O3): - self._mem_mode = MemMode.TIMING - elif starting_core_type == CPUTypes.KVM: - self._mem_mode = MemMode.ATOMIC_NONCACHING - elif starting_core_type == CPUTypes.ATOMIC: - self._mem_mode = MemMode.ATOMIC - else: - raise NotImplementedError + self._mem_mode = get_mem_mode(starting_core_type) switchable_cores = { self._start_key: [ @@ -93,8 +96,7 @@ class SimpleSwitchableProcessor(SwitchableProcessor): } super().__init__( - switchable_cores=switchable_cores, - starting_cores=self._start_key, + switchable_cores=switchable_cores, starting_cores=self._start_key ) @overrides(SwitchableProcessor) diff --git a/src/python/gem5/components/processors/switchable_processor.py b/src/python/gem5/components/processors/switchable_processor.py index 4be3c74e71..20754fbf73 100644 --- a/src/python/gem5/components/processors/switchable_processor.py +++ b/src/python/gem5/components/processors/switchable_processor.py @@ -31,7 +31,7 @@ from .cpu_types import CPUTypes import m5 -from typing import Dict, Any, List +from typing import Dict, List from .abstract_processor import AbstractProcessor from ..boards.abstract_board import AbstractBoard @@ -41,7 +41,7 @@ from ...utils.override import * class SwitchableProcessor(AbstractProcessor): """ This class can be used to setup a switchable processor/processors on a - system. + system using SimpleCores. Though this class can be used directly, it is best inherited from. See "SimpleSwitchableCPU" for an example of this. 
@@ -49,8 +49,8 @@ class SwitchableProcessor(AbstractProcessor): def __init__( self, - switchable_cores: Dict[Any, List[SimpleCore]], - starting_cores: Any, + switchable_cores: Dict[str, List[SimpleCore]], + starting_cores: str, ) -> None: if starting_cores not in switchable_cores.keys(): @@ -62,28 +62,27 @@ class SwitchableProcessor(AbstractProcessor): self._current_cores = switchable_cores[starting_cores] self._switchable_cores = switchable_cores - all_cores = [] - for core_list in self._switchable_cores.values(): + # In the stdlib we assume the system processor conforms to a single + # ISA target. + assert len(set(core.get_isa() for core in self._current_cores)) == 1 + super().__init__(isa=self._current_cores[0].get_isa()) + + for name, core_list in self._switchable_cores.items(): + # Use the names from the user as the member variables + # This makes the stats print more nicely. + setattr(self, name, core_list) for core in core_list: core.set_switched_out(core not in self._current_cores) - all_cores.append(core) - self._prepare_kvm = CPUTypes.KVM in [ - core.get_type() for core in all_cores - ] + self._prepare_kvm = any( + core.is_kvm_core() for core in self._all_cores() + ) if self._prepare_kvm: - if all_cores[0].get_type() != CPUTypes.KVM: - raise Exception( - "When using KVM, the switchable processor must start " - "with the KVM cores." 
- ) from m5.objects import KvmVM self.kvm_vm = KvmVM() - super().__init__(cores=all_cores) - @overrides(AbstractProcessor) def incorporate_processor(self, board: AbstractBoard) -> None: @@ -94,12 +93,10 @@ class SwitchableProcessor(AbstractProcessor): self._board = board if self._prepare_kvm: - board.kvm_vm = self.kvm_vm - # To get the KVM CPUs to run on different host CPUs # Specify a different event queue for each CPU kvm_cores = [ - core for core in self.cores if core.get_type() == CPUTypes.KVM + core for core in self._all_cores() if core.is_kvm_core() ] for i, core in enumerate(kvm_cores): for obj in core.get_simobject().descendants(): @@ -116,7 +113,12 @@ class SwitchableProcessor(AbstractProcessor): def get_cores(self) -> List[AbstractCore]: return self._current_cores - def switch_to_processor(self, switchable_core_key: Any): + def _all_cores(self): + for core_list in self._switchable_cores.values(): + for core in core_list: + yield core + + def switch_to_processor(self, switchable_core_key: str): # Run various checks. if not hasattr(self, "_board"): @@ -150,8 +152,7 @@ class SwitchableProcessor(AbstractProcessor): # Switch the CPUs m5.switchCpus( - self._board, - list(zip(current_core_simobj, to_switch_simobj)), + self._board, list(zip(current_core_simobj, to_switch_simobj)) ) # Ensure the current processor is updated. diff --git a/src/python/gem5/components/processors/traffic_generator.py b/src/python/gem5/components/processors/traffic_generator.py new file mode 100644 index 0000000000..b4c400a64a --- /dev/null +++ b/src/python/gem5/components/processors/traffic_generator.py @@ -0,0 +1,67 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from ...utils.override import overrides +from .traffic_generator_core import TrafficGeneratorCore + +from .abstract_generator import AbstractGenerator + +from typing import List + + +class TrafficGenerator(AbstractGenerator): + def __init__( + self, + config_file_list: List[str], + ) -> None: + super().__init__( + cores=self._create_cores(config_file_list=config_file_list) + ) + """The traffic generator + + This class defines an external interface to create a list of traffic + generator cores that could replace the processing cores in a board. + + :param config_file_list: A list containing the path to configuration + file each describing the traffic pattern that should be created by + each core of the generator. + """ + + def _create_cores( + self, config_file_list: List[str] + ) -> List[TrafficGeneratorCore]: + """ + The helper function to create the cores for the generator, it will use + the same inputs as the constructor function. + """ + return [ + TrafficGeneratorCore(config_file) + for config_file in config_file_list + ] + + @overrides(AbstractGenerator) + def start_traffic(self) -> None: + pass diff --git a/src/python/gem5/components/processors/traffic_generator_core.py b/src/python/gem5/components/processors/traffic_generator_core.py new file mode 100644 index 0000000000..d542352481 --- /dev/null +++ b/src/python/gem5/components/processors/traffic_generator_core.py @@ -0,0 +1,55 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +from m5.objects import Port, TrafficGen + +from .abstract_core import AbstractCore +from .abstract_generator_core import AbstractGeneratorCore +from ...utils.override import overrides + + +class TrafficGeneratorCore(AbstractGeneratorCore): + """The traffic generator core interface. + + This class defines the interface for a generator core that will create + a compound traffic specified by the parameters below. It uses + TrafficGen to create the traffic. 
+ + :param config_file: path to the configuration file specifying the + pattern of traffic. + """ + + def __init__(self, config_file: str): + """ + Create a TrafficGen SimObject as the core of this component. + """ + super().__init__() + self.generator = TrafficGen(config_file=config_file) + + @overrides(AbstractCore) + def connect_dcache(self, port: Port) -> None: + self.generator.port = port diff --git a/src/python/gem5/isas.py b/src/python/gem5/isas.py index c8b2a38312..84f02b87e7 100644 --- a/src/python/gem5/isas.py +++ b/src/python/gem5/isas.py @@ -32,6 +32,7 @@ import os from enum import Enum from typing import Set + class ISA(Enum): """ The ISA Enums which may be used in the gem5 stdlib to specify ISAs. @@ -46,6 +47,7 @@ class ISA(Enum): ... ``` """ + X86 = "x86" RISCV = "riscv" ARM = "arm" @@ -54,12 +56,14 @@ class ISA(Enum): SPARC = "sparc" NULL = "null" -def get_isas_str_set() -> Set[ISA]: + +def get_isas_str_set() -> Set[str]: """ Returns a set of all the ISA as strings. """ return {isa.value for isa in ISA} + def get_isa_from_str(input: str) -> ISA: """ Will return the correct enum given the input string. 
This is matched on @@ -76,8 +80,8 @@ def get_isa_from_str(input: str) -> ISA: if input.lower() == isa.value: return isa - valid_isas_str_list =str() - for isa_str in get_isa_from_str(): + valid_isas_str_list = str() + for isa_str in get_isas_str_set(): valid_isas_str_list += f"{os.linesep}{isa_str}" raise Exception( diff --git a/src/python/gem5/prebuilt/demo/x86_demo_board.py b/src/python/gem5/prebuilt/demo/x86_demo_board.py index e83fe3a340..eb38bb3e95 100644 --- a/src/python/gem5/prebuilt/demo/x86_demo_board.py +++ b/src/python/gem5/prebuilt/demo/x86_demo_board.py @@ -30,8 +30,9 @@ from ...components.processors.cpu_types import CPUTypes from ...components.boards.x86_board import X86Board from ...components.memory.single_channel import SingleChannelDDR3_1600 from ...components.processors.simple_processor import SimpleProcessor -from ...components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy \ - import MESITwoLevelCacheHierarchy +from ...components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( + MESITwoLevelCacheHierarchy, +) from ...coherence_protocol import CoherenceProtocol from ...isas import ISA from ...utils.requires import requires @@ -70,15 +71,15 @@ class X86DemoBoard(X86Board): coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL, ) - warn("The X86DemoBoard is solely for demonstration purposes. " - "This board is not known to be be representative of any " - "real-world system. Use with caution.") + warn( + "The X86DemoBoard is solely for demonstration purposes. " + "This board is not known to be be representative of any " + "real-world system. Use with caution." 
+ ) memory = SingleChannelDDR3_1600(size="2GB") processor = SimpleProcessor( - cpu_type=CPUTypes.TIMING, - isa=ISA.X86, - num_cores=4 + cpu_type=CPUTypes.TIMING, isa=ISA.X86, num_cores=4 ) cache_hierarchy = MESITwoLevelCacheHierarchy( l1d_size="32kB", diff --git a/ext/googletest/googlemock/scripts/generator/cpp/__init__.py b/src/python/gem5/prebuilt/riscvmatched/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from ext/googletest/googlemock/scripts/generator/cpp/__init__.py rename to src/python/gem5/prebuilt/riscvmatched/__init__.py diff --git a/src/python/gem5/prebuilt/riscvmatched/riscvmatched_board.py b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_board.py new file mode 100644 index 0000000000..4148c0a061 --- /dev/null +++ b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_board.py @@ -0,0 +1,592 @@ +# Copyright (c) 2022 The Regents of the University of California +# Copyright (c) 2022 EXAscale Performance SYStems (EXAPSYS) +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import re + +from typing import List, Optional + +from gem5.utils.override import overrides +from gem5.components.boards.abstract_system_board import AbstractSystemBoard +from gem5.components.boards.kernel_disk_workload import KernelDiskWorkload +from gem5.components.boards.se_binary_workload import SEBinaryWorkload +from gem5.resources.resource import AbstractResource +from gem5.components.memory import SingleChannelDDR4_2400 +from gem5.utils.requires import requires +from gem5.isas import ISA +from .riscvmatched_cache import RISCVMatchedCacheHierarchy +from .riscvmatched_processor import U74Processor +from gem5.isas import ISA + +import m5 + +from m5.objects import ( + BadAddr, + Bridge, + PMAChecker, + RiscvLinux, + AddrRange, + IOXBar, + RiscvRTC, + HiFive, + IGbE_e1000, + CowDiskImage, + RawDiskImage, + RiscvMmioVirtIO, + VirtIOBlock, + VirtIORng, + Frequency, + Port, +) + +from m5.util.fdthelper import ( + Fdt, + FdtNode, + FdtProperty, + FdtPropertyStrings, + FdtPropertyWords, + FdtState, +) + + +def U74Memory(): + """ + Memory for the U74 board. + DDR4 Subsystem with 16GB of memory. + Starts at 0x80000000. + Details at: Section 23, page 195 of the datasheet. 
+ + return: ChanneledMemory + """ + memory = SingleChannelDDR4_2400("16GB") + memory.set_memory_range( + [AddrRange(start=0x80000000, size=memory.get_size())] + ) + return memory + + +class RISCVMatchedBoard( + AbstractSystemBoard, KernelDiskWorkload, SEBinaryWorkload +): + """ + A board capable of full system simulation for RISC-V + + At a high-level, this is based on the HiFive Unmatched board from SiFive. + Based on : src/python/gem5/components/boards/riscv_board.py + + This board assumes that you will be booting Linux for fullsystem emulation. + + The frequency of the RTC for the system is set to 1MHz. + Details can be found on page 77, section 7.1 of the datasheet. + + Datasheet for inbuilt params can be found here: https://sifive.cdn.prismic.io/sifive/1a82e600-1f93-4f41-b2d8-86ed8b16acba_fu740-c000-manual-v1p6.pdf + """ + + def __init__( + self, + clk_freq: str = "1.2GHz", + l2_size: str = "2MB", + is_fs: bool = False, + ) -> None: + """ + + :param clk_freq: The clock frequency of the system, + default: 1.2GHz + :param l2_size: The size of the L2 cache, + default: 2MB + :param is_fs: Whether the system is a full system or not, + default: False (SE Mode) + + """ + requires(isa_required=ISA.RISCV) + self._fs = is_fs + + cache_hierarchy = RISCVMatchedCacheHierarchy(l2_size=l2_size) + + memory = U74Memory() + + processor = U74Processor(is_fs=is_fs) + super().__init__( + clk_freq=clk_freq, # real system is 1.0 to 1.5 GHz + processor=processor, + memory=memory, + cache_hierarchy=cache_hierarchy, + ) + + @overrides(AbstractSystemBoard) + def _setup_board(self) -> None: + if self._fs: + self.workload = RiscvLinux() + + # Contains a CLINT, PLIC, UART, and some functions for the dtb, etc. + self.platform = HiFive() + # Note: This only works with single threaded cores. 
+ self.platform.plic.n_contexts = self.processor.get_num_cores() * 2 + self.platform.attachPlic() + self.platform.clint.num_threads = self.processor.get_num_cores() + + # Add the RTC + self.platform.rtc = RiscvRTC( + frequency=Frequency("100MHz") + ) # page 77, section 7.1 + self.platform.clint.int_pin = self.platform.rtc.int_pin + + # Incoherent I/O bus + self.iobus = IOXBar() + self.iobus.badaddr_responder = BadAddr() + self.iobus.default = self.iobus.badaddr_responder.pio + + # The virtio disk + self.disk = RiscvMmioVirtIO( + vio=VirtIOBlock(), + interrupt_id=0x8, + pio_size=4096, + pio_addr=0x10008000, + ) + + # The virtio rng + self.rng = RiscvMmioVirtIO( + vio=VirtIORng(), + interrupt_id=0x8, + pio_size=4096, + pio_addr=0x10007000, + ) + + # Note: This overrides the platform's code because the platform isn't + # general enough. + self._on_chip_devices = [self.platform.clint, self.platform.plic] + self._off_chip_devices = [self.platform.uart, self.disk, self.rng] + + else: + pass + + def _setup_io_devices(self) -> None: + """Connect the I/O devices to the I/O bus in FS mode.""" + if self._fs: + # Add PCI + self.platform.pci_host.pio = self.iobus.mem_side_ports + + # Add Ethernet card + self.ethernet = IGbE_e1000( + pci_bus=0, + pci_dev=0, + pci_func=0, + InterruptLine=1, + InterruptPin=1, + ) + + self.ethernet.host = self.platform.pci_host + self.ethernet.pio = self.iobus.mem_side_ports + self.ethernet.dma = self.iobus.cpu_side_ports + + if self.get_cache_hierarchy().is_ruby(): + for device in self._off_chip_devices + self._on_chip_devices: + device.pio = self.iobus.mem_side_ports + + else: + for device in self._off_chip_devices: + device.pio = self.iobus.mem_side_ports + for device in self._on_chip_devices: + device.pio = self.get_cache_hierarchy().get_mem_side_port() + + self.bridge = Bridge(delay="10ns") + self.bridge.mem_side_port = self.iobus.cpu_side_ports + self.bridge.cpu_side_port = ( + self.get_cache_hierarchy().get_mem_side_port() + ) + 
self.bridge.ranges = [ + AddrRange(dev.pio_addr, size=dev.pio_size) + for dev in self._off_chip_devices + ] + + # PCI + self.bridge.ranges.append(AddrRange(0x2F000000, size="16MB")) + self.bridge.ranges.append(AddrRange(0x30000000, size="256MB")) + self.bridge.ranges.append(AddrRange(0x40000000, size="512MB")) + + def _setup_pma(self) -> None: + """Set the PMA devices on each core""" + + uncacheable_range = [ + AddrRange(dev.pio_addr, size=dev.pio_size) + for dev in self._on_chip_devices + self._off_chip_devices + ] + + # PCI + uncacheable_range.append(AddrRange(0x2F000000, size="16MB")) + uncacheable_range.append(AddrRange(0x30000000, size="256MB")) + uncacheable_range.append(AddrRange(0x40000000, size="512MB")) + + # TODO: Not sure if this should be done per-core like in the example + for cpu in self.get_processor().get_cores(): + cpu.get_mmu().pma_checker = PMAChecker( + uncacheable=uncacheable_range + ) + + @overrides(AbstractSystemBoard) + def has_dma_ports(self) -> bool: + return False + + @overrides(AbstractSystemBoard) + def get_dma_ports(self) -> List[Port]: + raise NotImplementedError( + "RISCVBoard does not have DMA Ports. " + "Use `has_dma_ports()` to check this." + ) + + @overrides(AbstractSystemBoard) + def has_io_bus(self) -> bool: + return self._fs + + @overrides(AbstractSystemBoard) + def get_io_bus(self) -> IOXBar: + if self._fs: + return self.iobus + else: + raise NotImplementedError( + "HiFiveBoard does not have an IO bus. " + "Use `has_io_bus()` to check this." + ) + + @overrides(AbstractSystemBoard) + def has_coherent_io(self) -> bool: + return self._fs + + @overrides(AbstractSystemBoard) + def get_mem_side_coherent_io_port(self) -> Port: + if self._fs: + return self.iobus.mem_side_ports + else: + raise NotImplementedError( + "HiFiveBoard does not have any I/O ports. Use has_coherent_io to " + "check this." + ) + + @overrides(AbstractSystemBoard) + def _setup_memory_ranges(self): + """ + Starting range for the DDR memory is 0x80000000. 
+ + Details can be found on page 201, section 23.2.3 of the datasheet. + + """ + if self._fs: + memory = self.get_memory() + mem_size = memory.get_size() + self.mem_ranges = [AddrRange(start=0x80000000, size=mem_size)] + memory.set_memory_range(self.mem_ranges) + else: + memory = self.get_memory() + # The SE board just has one memory range that is the size of the + # memory. + self.mem_ranges = [AddrRange(memory.get_size())] + memory.set_memory_range(self.mem_ranges) + + def generate_device_tree(self, outdir: str) -> None: + """Creates the dtb and dts files. + + Creates two files in the outdir: 'device.dtb' and 'device.dts' + + :param outdir: Directory to output the files + """ + + state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1) + root = FdtNode("/") + root.append(state.addrCellsProperty()) + root.append(state.sizeCellsProperty()) + root.appendCompatible(["riscv-virtio"]) + + for mem_range in self.mem_ranges: + node = FdtNode("memory@%x" % int(mem_range.start)) + node.append(FdtPropertyStrings("device_type", ["memory"])) + node.append( + FdtPropertyWords( + "reg", + state.addrCells(mem_range.start) + + state.sizeCells(mem_range.size()), + ) + ) + root.append(node) + + # See Documentation/devicetree/bindings/riscv/cpus.txt for details. + cpus_node = FdtNode("cpus") + cpus_state = FdtState(addr_cells=1, size_cells=0) + cpus_node.append(cpus_state.addrCellsProperty()) + cpus_node.append(cpus_state.sizeCellsProperty()) + # Used by the CLINT driver to set the timer frequency. 
Value taken from + # RISC-V kernel docs (Note: freedom-u540 is actually 1MHz) + cpus_node.append(FdtPropertyWords("timebase-frequency", [100000000])) + + for i, core in enumerate(self.get_processor().get_cores()): + node = FdtNode(f"cpu@{i}") + node.append(FdtPropertyStrings("device_type", "cpu")) + node.append(FdtPropertyWords("reg", state.CPUAddrCells(i))) + node.append(FdtPropertyStrings("mmu-type", "riscv,sv48")) + node.append(FdtPropertyStrings("status", "okay")) + node.append(FdtPropertyStrings("riscv,isa", "rv64imafdc")) + freq = self.clk_domain.clock[0].frequency + node.append(FdtPropertyWords("clock-frequency", freq)) + node.appendCompatible(["riscv"]) + int_phandle = state.phandle(f"cpu@{i}.int_state") + node.appendPhandle(f"cpu@{i}") + + int_node = FdtNode("interrupt-controller") + int_state = FdtState(interrupt_cells=1) + int_phandle = int_state.phandle(f"cpu@{i}.int_state") + int_node.append(int_state.interruptCellsProperty()) + int_node.append(FdtProperty("interrupt-controller")) + int_node.appendCompatible("riscv,cpu-intc") + int_node.append(FdtPropertyWords("phandle", [int_phandle])) + + node.append(int_node) + cpus_node.append(node) + + root.append(cpus_node) + + soc_node = FdtNode("soc") + soc_state = FdtState(addr_cells=2, size_cells=2) + soc_node.append(soc_state.addrCellsProperty()) + soc_node.append(soc_state.sizeCellsProperty()) + soc_node.append(FdtProperty("ranges")) + soc_node.appendCompatible(["simple-bus"]) + + # CLINT node + clint = self.platform.clint + clint_node = clint.generateBasicPioDeviceNode( + soc_state, "clint", clint.pio_addr, clint.pio_size + ) + int_extended = list() + for i, core in enumerate(self.get_processor().get_cores()): + phandle = soc_state.phandle(f"cpu@{i}.int_state") + int_extended.append(phandle) + int_extended.append(0x3) + int_extended.append(phandle) + int_extended.append(0x7) + clint_node.append( + FdtPropertyWords("interrupts-extended", int_extended) + ) + clint_node.appendCompatible(["riscv,clint0"]) + 
soc_node.append(clint_node) + + # PLIC node + plic = self.platform.plic + plic_node = plic.generateBasicPioDeviceNode( + soc_state, "plic", plic.pio_addr, plic.pio_size + ) + + int_state = FdtState(addr_cells=0, interrupt_cells=1) + plic_node.append(int_state.addrCellsProperty()) + plic_node.append(int_state.interruptCellsProperty()) + + phandle = int_state.phandle(plic) + plic_node.append(FdtPropertyWords("phandle", [phandle])) + plic_node.append(FdtPropertyWords("riscv,ndev", [plic.n_src - 1])) + + int_extended = list() + for i, core in enumerate(self.get_processor().get_cores()): + phandle = state.phandle(f"cpu@{i}.int_state") + int_extended.append(phandle) + int_extended.append(0xB) + int_extended.append(phandle) + int_extended.append(0x9) + + plic_node.append(FdtPropertyWords("interrupts-extended", int_extended)) + plic_node.append(FdtProperty("interrupt-controller")) + plic_node.appendCompatible(["riscv,plic0"]) + + soc_node.append(plic_node) + + # PCI + pci_state = FdtState( + addr_cells=3, size_cells=2, cpu_cells=1, interrupt_cells=1 + ) + pci_node = FdtNode("pci") + + if int(self.platform.pci_host.conf_device_bits) == 8: + pci_node.appendCompatible("pci-host-cam-generic") + elif int(self.platform.pci_host.conf_device_bits) == 12: + pci_node.appendCompatible("pci-host-ecam-generic") + else: + m5.fatal("No compatibility string for the set conf_device_width") + + pci_node.append(FdtPropertyStrings("device_type", ["pci"])) + + # Cell sizes of child nodes/peripherals + pci_node.append(pci_state.addrCellsProperty()) + pci_node.append(pci_state.sizeCellsProperty()) + pci_node.append(pci_state.interruptCellsProperty()) + # PCI address for CPU + pci_node.append( + FdtPropertyWords( + "reg", + soc_state.addrCells(self.platform.pci_host.conf_base) + + soc_state.sizeCells(self.platform.pci_host.conf_size), + ) + ) + + # Ranges mapping + # For now some of this is hard coded, because the PCI module does not + # have a proper full understanding of the memory map, but 
adapting the
+        # PCI module is beyond the scope of what I'm trying to do here.
+        # Values are taken from the ARM VExpress_GEM5_V1 platform.
+        ranges = []
+        # Pio address range
+        ranges += self.platform.pci_host.pciFdtAddr(space=1, addr=0)
+        ranges += soc_state.addrCells(self.platform.pci_host.pci_pio_base)
+        ranges += pci_state.sizeCells(0x10000)  # Fixed size
+
+        # AXI memory address range
+        ranges += self.platform.pci_host.pciFdtAddr(space=2, addr=0)
+        ranges += soc_state.addrCells(self.platform.pci_host.pci_mem_base)
+        ranges += pci_state.sizeCells(0x40000000)  # Fixed size
+        pci_node.append(FdtPropertyWords("ranges", ranges))
+
+        # Interrupt mapping
+        plic_handle = int_state.phandle(plic)
+        int_base = self.platform.pci_host.int_base
+
+        interrupts = []
+
+        for i in range(int(self.platform.pci_host.int_count)):
+            interrupts += self.platform.pci_host.pciFdtAddr(
+                device=i, addr=0
+            ) + [int(i) + 1, plic_handle, int(int_base) + i]
+
+        pci_node.append(FdtPropertyWords("interrupt-map", interrupts))
+
+        int_count = int(self.platform.pci_host.int_count)
+        if int_count & (int_count - 1):
+            m5.fatal("PCI interrupt count should be power of 2")
+
+        intmask = self.platform.pci_host.pciFdtAddr(
+            device=int_count - 1, addr=0
+        ) + [0x0]
+        pci_node.append(FdtPropertyWords("interrupt-map-mask", intmask))
+
+        if self.platform.pci_host._dma_coherent:
+            pci_node.append(FdtProperty("dma-coherent"))
+
+        soc_node.append(pci_node)
+
+        # UART node
+        uart = self.platform.uart
+        uart_node = uart.generateBasicPioDeviceNode(
+            soc_state, "uart", uart.pio_addr, uart.pio_size
+        )
+        uart_node.append(
+            FdtPropertyWords("interrupts", [self.platform.uart_int_id])
+        )
+        uart_node.append(FdtPropertyWords("clock-frequency", [0x384000]))
+        uart_node.append(
+            FdtPropertyWords("interrupt-parent", soc_state.phandle(plic))
+        )
+        uart_node.appendCompatible(["ns8250"])
+        soc_node.append(uart_node)
+
+        # VirtIO MMIO disk node
+        disk = self.disk
+        disk_node = disk.generateBasicPioDeviceNode(
+            soc_state, 
"virtio_mmio", disk.pio_addr, disk.pio_size + ) + disk_node.append(FdtPropertyWords("interrupts", [disk.interrupt_id])) + disk_node.append( + FdtPropertyWords("interrupt-parent", soc_state.phandle(plic)) + ) + disk_node.appendCompatible(["virtio,mmio"]) + soc_node.append(disk_node) + + # VirtIO MMIO rng node + rng = self.rng + rng_node = rng.generateBasicPioDeviceNode( + soc_state, "virtio_mmio", rng.pio_addr, rng.pio_size + ) + rng_node.append(FdtPropertyWords("interrupts", [rng.interrupt_id])) + rng_node.append( + FdtPropertyWords("interrupt-parent", soc_state.phandle(plic)) + ) + rng_node.appendCompatible(["virtio,mmio"]) + soc_node.append(rng_node) + + root.append(soc_node) + + fdt = Fdt() + fdt.add_rootnode(root) + fdt.writeDtsFile(os.path.join(outdir, "device.dts")) + fdt.writeDtbFile(os.path.join(outdir, "device.dtb")) + + @overrides(KernelDiskWorkload) + def get_disk_device(self): + return "/dev/vda" + + @overrides(KernelDiskWorkload) + def _add_disk_to_board(self, disk_image: AbstractResource): + image = CowDiskImage( + child=RawDiskImage(read_only=True), read_only=False + ) + image.child.image_file = disk_image.get_local_path() + self.disk.vio.image = image + + # Note: The below is a bit of a hack. We need to wait to generate the + # device tree until after the disk is set up. Now that the disk and + # workload are set, we can generate the device tree file. 
+ self._setup_io_devices() + self._setup_pma() + + # Default DTB address if bbl is built with --with-dts option + self.workload.dtb_addr = 0x87E00000 + + self.generate_device_tree(m5.options.outdir) + self.workload.dtb_filename = os.path.join( + m5.options.outdir, "device.dtb" + ) + + @overrides(KernelDiskWorkload) + def get_default_kernel_args(self) -> List[str]: + return ["console=ttyS0", "root={root_value}", "rw"] + + @overrides(KernelDiskWorkload) + def set_kernel_disk_workload( + self, + kernel: AbstractResource, + disk_image: AbstractResource, + bootloader: Optional[AbstractResource] = None, + readfile: Optional[str] = None, + readfile_contents: Optional[str] = None, + kernel_args: Optional[List[str]] = None, + exit_on_work_items: bool = True, + ) -> None: + self.workload = RiscvLinux() + KernelDiskWorkload.set_kernel_disk_workload( + self=self, + kernel=kernel, + disk_image=disk_image, + bootloader=bootloader, + readfile=readfile, + readfile_contents=readfile_contents, + kernel_args=kernel_args, + exit_on_work_items=exit_on_work_items, + ) diff --git a/src/python/gem5/prebuilt/riscvmatched/riscvmatched_cache.py b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_cache.py new file mode 100644 index 0000000000..dc66af354b --- /dev/null +++ b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_cache.py @@ -0,0 +1,173 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from gem5.components.cachehierarchies.abstract_cache_hierarchy import ( + AbstractCacheHierarchy, +) +from gem5.components.cachehierarchies.classic.abstract_classic_cache_hierarchy import ( + AbstractClassicCacheHierarchy, +) +from gem5.components.cachehierarchies.abstract_two_level_cache_hierarchy import ( + AbstractTwoLevelCacheHierarchy, +) +from gem5.components.cachehierarchies.classic.caches.l1dcache import L1DCache +from gem5.components.cachehierarchies.classic.caches.l1icache import L1ICache +from gem5.components.cachehierarchies.classic.caches.l2cache import L2Cache +from gem5.components.cachehierarchies.classic.caches.mmu_cache import MMUCache +from gem5.components.boards.abstract_board import AbstractBoard +from gem5.isas import ISA +from m5.objects import Cache, L2XBar, BaseXBar, SystemXBar, BadAddr, Port + +from gem5.utils.override import * + + +class RISCVMatchedCacheHierarchy( + AbstractClassicCacheHierarchy, AbstractTwoLevelCacheHierarchy +): + """ + + A cache setup where each core has a private L1 Data and Instruction Cache, + and a private L2 cache. + The HiFive board has a partially inclusive cache hierarchy, hence this hierarchy is chosen. + The details of the cache hierarchy are in Table 7, page 36 of the datasheet. + + - L1 Instruction Cache: + - 32 KiB 4-way set associative + - L1 Data Cache + - 32 KiB 8-way set associative + - L2 Cache + - 2 MiB 16-way set associative + + """ + + def __init__( + self, + l2_size: str, + ) -> None: + """ + :param l2_size: The size of the L2 Cache (e.g., "256kB"). 
+ :type l2_size: str + """ + AbstractClassicCacheHierarchy.__init__(self=self) + AbstractTwoLevelCacheHierarchy.__init__( + self, + l1i_size="32kB", + l1i_assoc=4, + l1d_size="32kB", + l1d_assoc=8, + l2_size=l2_size, + l2_assoc=16, + ) + + self.membus = SystemXBar(width=64) + self.membus.badaddr_responder = BadAddr() + self.membus.default = self.membus.badaddr_responder.pio + + @overrides(AbstractClassicCacheHierarchy) + def get_mem_side_port(self) -> Port: + return self.membus.mem_side_ports + + @overrides(AbstractClassicCacheHierarchy) + def get_cpu_side_port(self) -> Port: + return self.membus.cpu_side_ports + + @overrides(AbstractCacheHierarchy) + def incorporate_cache(self, board: AbstractBoard) -> None: + + # Set up the system port for functional access from the simulator. + board.connect_system_port(self.membus.cpu_side_ports) + + for cntr in board.get_memory().get_memory_controllers(): + cntr.port = self.membus.mem_side_ports + + self.l1icaches = [ + L1ICache(size=self._l1i_size, assoc=self._l1i_assoc) + for i in range(board.get_processor().get_num_cores()) + ] + self.l1dcaches = [ + L1DCache(size=self._l1d_size, assoc=self._l1d_assoc) + for i in range(board.get_processor().get_num_cores()) + ] + self.l2buses = [ + L2XBar() for i in range(board.get_processor().get_num_cores()) + ] + self.l2caches = [ + L2Cache(size=self._l2_size, assoc=self._l2_assoc) + for i in range(board.get_processor().get_num_cores()) + ] + # ITLB Page walk caches + self.iptw_caches = [ + MMUCache(size="4KiB") + for _ in range(board.get_processor().get_num_cores()) + ] + # DTLB Page walk caches + self.dptw_caches = [ + MMUCache(size="4KiB") + for _ in range(board.get_processor().get_num_cores()) + ] + + if board.has_coherent_io(): + self._setup_io_cache(board) + + for i, cpu in enumerate(board.get_processor().get_cores()): + + cpu.connect_icache(self.l1icaches[i].cpu_side) + cpu.connect_dcache(self.l1dcaches[i].cpu_side) + + self.l1icaches[i].mem_side = self.l2buses[i].cpu_side_ports + 
self.l1dcaches[i].mem_side = self.l2buses[i].cpu_side_ports + self.iptw_caches[i].mem_side = self.l2buses[i].cpu_side_ports + self.dptw_caches[i].mem_side = self.l2buses[i].cpu_side_ports + + self.l2buses[i].mem_side_ports = self.l2caches[i].cpu_side + + self.membus.cpu_side_ports = self.l2caches[i].mem_side + + cpu.connect_walker_ports( + self.iptw_caches[i].cpu_side, self.dptw_caches[i].cpu_side + ) + + if board.get_processor().get_isa() == ISA.X86: + int_req_port = self.membus.mem_side_ports + int_resp_port = self.membus.cpu_side_ports + cpu.connect_interrupt(int_req_port, int_resp_port) + else: + cpu.connect_interrupt() + + def _setup_io_cache(self, board: AbstractBoard) -> None: + """Create a cache for coherent I/O connections""" + self.iocache = Cache( + assoc=8, + tag_latency=50, + data_latency=50, + response_latency=50, + mshrs=20, + size="1kB", + tgts_per_mshr=12, + addr_ranges=board.mem_ranges, + ) + self.iocache.mem_side = self.membus.cpu_side_ports + self.iocache.cpu_side = board.get_mem_side_coherent_io_port() diff --git a/src/python/gem5/prebuilt/riscvmatched/riscvmatched_core.py b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_core.py new file mode 100644 index 0000000000..0b4375ce8d --- /dev/null +++ b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_core.py @@ -0,0 +1,168 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from typing import Optional +from gem5.utils.requires import requires +from gem5.components.processors.base_cpu_core import BaseCPUCore +from gem5.components.processors.cpu_types import CPUTypes +from gem5.isas import ISA +from gem5.utils.override import overrides +from m5.objects.RiscvCPU import RiscvMinorCPU +from m5.objects import ( + BaseMMU, + Port, + BaseCPU, + Process, +) +from m5.objects.BaseMinorCPU import * +from gem5.isas import ISA + + +class U74IntFU(MinorDefaultIntFU): + opLat = 1 + + +class U74IntMulFU(MinorDefaultIntMulFU): + opLat = 3 + + +class U74IntDivFU(MinorDefaultIntDivFU): + opLat = 6 + + +class U74FloatSimdFU(MinorDefaultFloatSimdFU): + pass + + +class U74PredFU(MinorDefaultPredFU): + pass + + +class U74MemFU(MinorDefaultMemFU): + opLat = 3 + + +class U74MiscFU(MinorDefaultMiscFU): + pass + + +class U74FUPool(MinorFUPool): + funcUnits = [ + U74IntFU(), + U74IntFU(), + U74IntMulFU(), + U74IntDivFU(), + U74FloatSimdFU(), + U74PredFU(), + U74MemFU(), + U74MiscFU(), + ] + + +class U74BP(TournamentBP): + BTBEntries = 16 + RASSize = 6 + localHistoryTableSize = 4096 # is 3.6 KiB but gem5 requires power of 2 + + indirectBranchPred = SimpleIndirectPredictor() + indirectBranchPred.indirectSets = 8 + + +class U74CPU(RiscvMinorCPU): + """ + The fetch, decode, and execute stage parameters from the ARM HPI CPU + This information about the CPU can be found on page 15 of + gem5_rsk_gem5-21.2.pdf at https://github.com/arm-university/arm-gem5-rsk + + The only parameter that is changed is the decodeToExecuteForwardDelay. + This is changed from 1 to 2 to avoid a PMC address fault. 
+ + """ + + # Fetch1 stage + fetch1LineSnapWidth = 0 + fetch1LineWidth = 0 + fetch1FetchLimit = 1 + fetch1ToFetch2ForwardDelay = 1 + fetch1ToFetch2BackwardDelay = 1 + + # Fetch2 stage + fetch2InputBufferSize = 2 + fetch2ToDecodeForwardDelay = 1 + fetch2CycleInput = True + + # Decode stage + decodeInputBufferSize = 3 + decodeToExecuteForwardDelay = 2 + decodeInputWidth = 2 + decodeCycleInput = True + + # Execute stage + executeInputWidth = 2 + executeCycleInput = True + executeIssueLimit = 2 + executeMemoryIssueLimit = 1 + executeCommitLimit = 2 + executeMemoryCommitLimit = 1 + executeInputBufferSize = 7 + executeMaxAccessesInMemory = 2 + executeLSQMaxStoreBufferStoresPerCycle = 2 + executeLSQRequestsQueueSize = 1 + executeLSQTransfersQueueSize = 2 + executeLSQStoreBufferSize = 5 + executeBranchDelay = 1 + executeSetTraceTimeOnCommit = True + executeSetTraceTimeOnIssue = False + executeAllowEarlyMemoryIssue = True + enableIdling = True + + # Functional Units and Branch Prediction + executeFuncUnits = U74FUPool() + branchPred = U74BP() + + +class U74Core(BaseCPUCore): + """ + U74Core models the core of the HiFive Unmatched board. + The core has a single thread. + The latencies of the functional units are set to values found in Table 8 on page 40. + - IntFU: 1 cycle + - IntMulFU: 3 cycles + - IntDivFU: 6 cycles (NOTE: latency is variable, but is set to 6 cycles) + - MemFU: 3 cycles + The branch predictor is a TournamentBP, based on Section 4.2.5 on page 38. + - BTBEntries: 16 entries + - RASSize: 6 entries + - IndirectSets: 8 sets + - localHistoryTableSize: 4096 B + NOTE: The BHT of the HiFive Board is 3.6KiB but gem5 requires a power of 2, so the BHT is 4096B. 
+ """ + + def __init__( + self, + core_id, + ): + super().__init__(core=U74CPU(cpu_id=core_id), isa=ISA.RISCV) diff --git a/tests/gem5/test_build/test_build.py b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_processor.py similarity index 63% rename from tests/gem5/test_build/test_build.py rename to src/python/gem5/prebuilt/riscvmatched/riscvmatched_processor.py index 5be5d5d5d9..838f810073 100644 --- a/tests/gem5/test_build/test_build.py +++ b/src/python/gem5/prebuilt/riscvmatched/riscvmatched_processor.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 The Regents of the University of California. +# Copyright (c) 2022 The Regents of the University of California # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -24,30 +24,32 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -''' -Test file for simply building gem5 -''' -import re -import os -from testlib import * +from gem5.utils.override import overrides +from gem5.components.boards.mem_mode import MemMode -common_isas = [constants.vega_x86_tag, constants.arm_tag, constants.riscv_tag] -skipped_isas = {constants.null_tag, constants.all_compiled_tag} +from m5.util import warn -for isa in constants.supported_isas: - if isa in skipped_isas: continue +from gem5.components.processors.base_cpu_processor import BaseCPUProcessor +from gem5.components.processors.cpu_types import CPUTypes +from gem5.components.boards.abstract_board import AbstractBoard +from .riscvmatched_core import U74Core - for variant in constants.supported_variants: - if isa in common_isas: - length = constants.quick_tag + +class U74Processor(BaseCPUProcessor): + """ + A U74Processor contains a number of cores of U74Core. 
+ """ + + def __init__( + self, + is_fs: bool, + ) -> None: + self._cpu_type = CPUTypes.MINOR + super().__init__(cores=self._create_cores(is_fs)) + + def _create_cores(self, is_fs: bool): + if is_fs: + num_cores = 4 else: - length = constants.long_tag - - tags = [isa, length, variant] - - name = 'build-{isa}-{var}'.format(isa=isa, var=variant) - fixture = Gem5Fixture(isa, variant) - - function = TestFunction(lambda fixtures: True, name, - fixtures=[fixture]) - TestSuite(name=name, tests=[function], tags=tags) + num_cores = 1 + return [U74Core(core_id=i) for i in range(num_cores)] diff --git a/src/python/gem5/resources/downloader.py b/src/python/gem5/resources/downloader.py index 3481c9d10e..f619b9771d 100644 --- a/src/python/gem5/resources/downloader.py +++ b/src/python/gem5/resources/downloader.py @@ -39,7 +39,7 @@ from pathlib import Path import tarfile from tempfile import gettempdir from urllib.error import HTTPError -from typing import List, Dict +from typing import List, Dict, Set, Optional from .md5_utils import md5_file, md5_dir @@ -50,15 +50,18 @@ This Python module contains functions used to download, list, and obtain information about resources from resources.gem5.org. """ + def _resources_json_version_required() -> str: """ Specifies the version of resources.json to obtain. """ - return "22.0" + return "22.1" + def _get_resources_json_uri() -> str: return "https://resources.gem5.org/resources.json" + def _url_validator(url): try: result = urllib.parse.urlparse(url) @@ -66,8 +69,9 @@ def _url_validator(url): except: return False + def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict: - ''' + """ Returns a resource JSON, in the form of a Python Dict. The location of the JSON must be specified. @@ -78,7 +82,7 @@ def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict: :param use_caching: True if a cached file is to be used (up to an hour), otherwise the file will be retrieved from the URL regardless. 
True by default. Only valid in cases where a URL is passed. - ''' + """ # If a local valid path is passed, just load it. if Path(path).is_file(): @@ -116,9 +120,12 @@ def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict: # time of the file. This is the most portable solution as other ideas, # like "file creation time", are not always the same concept between # operating systems. - if not use_caching or not os.path.exists(download_path) or \ - (time.time() - os.path.getmtime(download_path)) > 3600: - _download(path, download_path) + if ( + not use_caching + or not os.path.exists(download_path) + or (time.time() - os.path.getmtime(download_path)) > 3600 + ): + _download(path, download_path) with open(download_path) as f: file_contents = f.read() @@ -134,6 +141,7 @@ def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict: return to_return + def _get_resources_json() -> Dict: """ Gets the Resources JSON. @@ -142,7 +150,7 @@ def _get_resources_json() -> Dict: """ path = os.getenv("GEM5_RESOURCE_JSON", _get_resources_json_uri()) - to_return = _get_resources_json_at_path(path = path) + to_return = _get_resources_json_at_path(path=path) # If the current version pulled is not correct, look up the # "previous-versions" field to find the correct one. @@ -150,17 +158,18 @@ def _get_resources_json() -> Dict: if to_return["version"] != version: if version in to_return["previous-versions"].keys(): to_return = _get_resources_json_at_path( - path = to_return["previous-versions"][version] + path=to_return["previous-versions"][version] ) else: # This should never happen, but we thrown an exception to explain # that we can't find the version. raise Exception( f"Version '{version}' of resources.json cannot be found." - ) + ) return to_return + def _get_url_base() -> str: """ Obtains the "url_base" string from the resources.json file. 
@@ -173,20 +182,27 @@ def _get_url_base() -> str: return "" -def _get_resources(resources_group: Dict) -> Dict[str, Dict]: +def _get_resources( + valid_types: Set[str], resources_group: Optional[Dict] = None +) -> Dict[str, Dict]: """ - A recursive function to get all the resources. + A recursive function to get all the workload/resource of the specified type + in the resources.json file. - :returns: A dictionary of resource names to the resource JSON objects. + :param valid_types: The type to return (i.e., "resource" or "workload). + :param resource_group: Used for recursion: The current resource group being + iterated through. + + :returns: A dictionary of artifact names to the resource JSON objects. """ + if resources_group is None: + resources_group = _get_resources_json()["resources"] + to_return = {} for resource in resources_group: - # 'artifact' is the old naming, we keep it here for - # backwards compatibility, but it can be removed with time: - # https://gem5-review.googlesource.com/c/public/gem5-resources/+/51169. - if resource["type"] == "artifact" or resource["type"] == "resource": - # If the type is "resource" then we add it directly to the map + if resource["type"] in valid_types: + # If the type is valid then we add it directly to the map # after a check that the name is unique. if resource["name"] in to_return.keys(): raise Exception( @@ -198,7 +214,9 @@ def _get_resources(resources_group: Dict) -> Dict[str, Dict]: elif resource["type"] == "group": # If it's a group we get recursive. We then check to see if there # are any duplication of keys. 
- new_map = _get_resources(resource["contents"]) + new_map = _get_resources( + valid_types=valid_types, resources_group=resource["contents"] + ) intersection = set(new_map.keys()).intersection(to_return.keys()) if len(intersection) > 0: # Note: if this error is received it's likely an error with @@ -210,18 +228,11 @@ def _get_resources(resources_group: Dict) -> Dict[str, Dict]: ) ) to_return.update(new_map) - else: - raise Exception( - "Error: Unknown type '{}'.".format(resource["type"]) - ) return to_return -def _download( - url: str, - download_to: str, - max_attempts: int = 6, -) -> None: + +def _download(url: str, download_to: str, max_attempts: int = 6) -> None: """ Downloads a file. @@ -240,7 +251,6 @@ def _download( # TODO: This whole setup will only work for single files we can get via # wget. We also need to support git clones going forward. - attempt = 0 while True: # The loop will be broken on a successful download, via a `return`, or @@ -248,7 +258,33 @@ def _download( # number of download attempts has been reached or if a HTTP status code # other than 408, 429, or 5xx is received. try: - urllib.request.urlretrieve(url, download_to) + # check to see if user requests a proxy connection + use_proxy = os.getenv("GEM5_USE_PROXY") + if use_proxy: + # If the "use_proxy" variable is specified we setup a socks5 + # connection. 
+ + import socks + import socket + import ssl + + IP_ADDR, host_port = use_proxy.split(":") + PORT = int(host_port) + socks.set_default_proxy(socks.SOCKS5, IP_ADDR, PORT) + socket.socket = socks.socksocket + + # base SSL context for https connection + ctx = ssl.create_default_context() + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + + # get the file as a bytes blob + request = urllib.request.Request(url) + with urllib.request.urlopen(request, context=ctx) as fr: + with open(download_to, "wb") as fw: + fw.write(fr.read()) + else: + urllib.request.urlretrieve(url, download_to) return except HTTPError as e: # If the error code retrieved is retryable, we retry using a @@ -263,10 +299,22 @@ def _download( "not be retrieved. HTTP Status Code retrieved: " f"{e.code}" ) - time.sleep((2 ** attempt) + random.uniform(0, 1)) + time.sleep((2**attempt) + random.uniform(0, 1)) else: raise e - + except ValueError as e: + raise Exception( + "Environment variable GEM5_USE_PROXY is set to " + f"'{use_proxy}'. The expected form is " + ":'." + ) + except ImportError as e: + raise Exception( + "An import error has occurred. This is likely due " + "the Python SOCKS client module not being " + "installed. It can be installed with " + "`pip install PySocks`." + ) def list_resources() -> List[str]: @@ -275,7 +323,26 @@ def list_resources() -> List[str]: :returns: A list of resources by name. """ - return _get_resources(_get_resources_json()["resources"]).keys() + return _get_resources(valid_types={"resource"}).keys() + + +def get_workload_json_obj(workload_name: str) -> Dict: + """ + Get a JSON object of a specified workload. + + :param workload_name: The name of the workload. + + :raises Exception: An exception is raised if the specified workload does + not exit. 
+ """ + workload_map = _get_resources(valid_types={"workload"}) + + if workload_name not in workload_map: + raise Exception( + f"Error: Workload with name {workload_name} does not exist" + ) + + return workload_map[workload_name] def get_resources_json_obj(resource_name: str) -> Dict: @@ -289,7 +356,7 @@ def get_resources_json_obj(resource_name: str) -> Dict: :raises Exception: An exception is raised if the specified resources does not exist. """ - resource_map = _get_resources(_get_resources_json()["resources"]) + resource_map = _get_resources(valid_types={"resource"}) if resource_name not in resource_map: raise Exception( @@ -377,14 +444,16 @@ def get_resource( else: raise Exception( "The resource.json entry for '{}' has a value for the " - "'is_zipped' field which is neither a string or a boolean." - .format( + "'is_zipped' field which is neither a string or a boolean.".format( resource_name ) ) - run_tar_extract = untar and "is_tar_archive" in resource_json and \ - resource_json["is_tar_archive"] + run_tar_extract = ( + untar + and "is_tar_archive" in resource_json + and resource_json["is_tar_archive"] + ) tar_extension = ".tar" if run_tar_extract: @@ -397,8 +466,7 @@ def get_resource( # TODO: Might be nice to have some kind of download status bar here. # TODO: There might be a case where this should be silenced. print( - "Resource '{}' was not found locally. Downloading to '{}'..." - .format( + "Resource '{}' was not found locally. 
Downloading to '{}'...".format( resource_name, download_dest ) ) @@ -416,7 +484,7 @@ def get_resource( resource_name, download_dest ) ) - unzip_to = download_dest[:-len(zip_extension)] + unzip_to = download_dest[: -len(zip_extension)] with gzip.open(download_dest, "rb") as f: with open(unzip_to, "wb") as o: shutil.copyfileobj(f, o) @@ -431,7 +499,30 @@ def get_resource( f"Unpacking the the resource '{resource_name}' " f"('{download_dest}')" ) - unpack_to = download_dest[:-len(tar_extension)] + unpack_to = download_dest[: -len(tar_extension)] with tarfile.open(download_dest) as f: - f.extractall(unpack_to) + + def is_within_directory(directory, target): + + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + + return prefix == abs_directory + + def safe_extract( + tar, path=".", members=None, *, numeric_owner=False + ): + + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise Exception( + "Attempted Path Traversal in Tar File" + ) + + tar.extractall(path, members, numeric_owner=numeric_owner) + + safe_extract(f, unpack_to) os.remove(download_dest) diff --git a/src/python/gem5/resources/md5_utils.py b/src/python/gem5/resources/md5_utils.py index b98a81e947..d7212ab83f 100644 --- a/src/python/gem5/resources/md5_utils.py +++ b/src/python/gem5/resources/md5_utils.py @@ -28,14 +28,16 @@ from pathlib import Path import hashlib from _hashlib import HASH as Hash -def _md5_update_from_file(filename: Path, hash: Hash) -> Hash: + +def _md5_update_from_file(filename: Path, hash: Hash) -> Hash: assert filename.is_file() with open(str(filename), "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash.update(chunk) return hash -def _md5_update_from_dir(directory: Path, hash: Hash) -> Hash: + +def _md5_update_from_dir(directory: Path, hash: Hash) -> Hash: assert directory.is_dir() for path in 
sorted(directory.iterdir(), key=lambda p: str(p).lower()): hash.update(path.name.encode()) @@ -45,6 +47,7 @@ def _md5_update_from_dir(directory: Path, hash: Hash) -> Hash: hash = _md5_update_from_dir(path, hash) return hash + def md5(path: Path) -> str: """ Gets the md5 value of a file or directory. `md5_file` is used if the path @@ -60,7 +63,8 @@ def md5(path: Path) -> str: else: raise Exception(f"Path '{path}' is not a valid file or directory.") -def md5_file(filename: Path) -> str: + +def md5_file(filename: Path) -> str: """ Gives the md5 hash of a file @@ -68,6 +72,7 @@ def md5_file(filename: Path) -> str: """ return str(_md5_update_from_file(filename, hashlib.md5()).hexdigest()) + def md5_dir(directory: Path) -> str: """ Gives the md5 value of a directory. diff --git a/src/python/gem5/resources/resource.py b/src/python/gem5/resources/resource.py index f72215a49a..1f7305def7 100644 --- a/src/python/gem5/resources/resource.py +++ b/src/python/gem5/resources/resource.py @@ -80,6 +80,7 @@ class CustomResource(AbstractResource): """ super().__init__(local_path=local_path, metadata=metadata) + class CustomDiskImageResource(CustomResource): """ A custom disk image gem5 resource. 
It can be used to specify a custom, @@ -111,6 +112,7 @@ class CustomDiskImageResource(CustomResource): super().__init__(local_path=local_path, metadata=metadata) + class Resource(AbstractResource): """ An official gem5 resources as hosted within our gem5 resources repository @@ -163,15 +165,14 @@ class Resource(AbstractResource): to_path = os.path.join(resource_directory, resource_name) super().__init__( - local_path=to_path, - metadata=get_resources_json_obj(resource_name)) + local_path=to_path, metadata=get_resources_json_obj(resource_name) + ) get_resource( resource_name=resource_name, to_path=to_path, - download_md5_mismatch=download_md5_mismatch + download_md5_mismatch=download_md5_mismatch, ) - def _get_default_resource_dir(cls) -> str: """ Obtain the default gem5 resources directory on the host system. This @@ -188,14 +189,16 @@ class Resource(AbstractResource): ] for path in test_list: - if os.path.exists(path): # If the path already exists... - if os.path.isdir(path): # Check to see the path is a directory. - return path # If so, the path is valid and can be used. - else: # If the path does not exist, try to create it. + if os.path.exists(path): # If the path already exists... + if os.path.isdir( + path + ): # Check to see the path is a directory. + return path # If so, the path is valid and can be used. + else: # If the path does not exist, try to create it. try: os.makedirs(path, exist_ok=False) return path except OSError: - continue # If the path cannot be created, then try another. + continue # If the path cannot be created, then try another. raise Exception("Cannot find a valid location to download resources") diff --git a/src/python/gem5/resources/workload.py b/src/python/gem5/resources/workload.py new file mode 100644 index 0000000000..2ae89655e8 --- /dev/null +++ b/src/python/gem5/resources/workload.py @@ -0,0 +1,221 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from .downloader import get_workload_json_obj +from .resource import Resource + +from typing import Dict, Any, Optional + + +class AbstractWorkload: + """ + Workloads contain information needed to build a workload. + + A workload specifies a function and its parameters to run on a board to + set a workload. Workload's are passed to board via the `AbstractBoard`'s + `set_workload` function. + + The `AbstractBoard` has a `set_workload` function which accepts an + AbstractWorkload. 
The `set_workload` function uses the `get_function_str` + to determine which function should be called on the board and the + `get_parameters` function specifies the parameters to be passed. + + Example + ------- + + ```py + workload = CustomWorkload( + function = "set_se_binary_workload", + parameters = { + "binary" : Resource("x86-print-this"), + "arguments" : ["hello", 6] + }, + ) + + board.set_workload(workload) + ``` + + The above is the equivalent of: + + ```py + board.set_se_binary_workload( + binary = Resource("x86-print-this"), + arguments = ["hello", 6], + ) + ``` + + Notes + ----- + This class should not be used directly. Please use `Workload` or + `CustomWorkload`. + """ + + def __init__(self, function: str, parameters: Dict[str, Any]) -> None: + self._func = function + self._params = parameters + + def get_function_str(self) -> str: + """ + Returns the name of the workload function to be run. + + This function is called via the AbstractBoard's `set_workload` + function. The parameters from the `get_parameters` function are passed + to this function. + """ + return self._func + + def get_parameters(self) -> Dict[str, Any]: + """ + Returns a dictionary mapping the workload parameters to their values. + + These parameters are passed to the function specified by + `get_function_str` via the AbstractBoard's `set_workload` function. + """ + return self._params + + def set_parameter(self, parameter: str, value: Any) -> None: + """ + Used to set or override a workload parameter + + :param parameter: The parameter of the function to set. + :param value: The value to set to the parameter. + """ + self._params[parameter] = value + + +class CustomWorkload(AbstractWorkload): + """ + A workload specified locally (i.e., not via gem5-resources as with the + `Workload` class). Here the user specifies the function and the parameters + to be passed. 
+ + Usage + ----- + + ```py + workload = CustomWorkload( + function = "set_se_binary_workload", + parameters = { + "binary" : Resource("x86-print-this"), + "arguments" : ["hello", 6] + }, + ) + + board.set_workload(workload) + ``` + """ + + def __init__(self, function: str, parameters: Dict[str, Any]) -> None: + super().__init__(function=function, parameters=parameters) + + +class Workload(AbstractWorkload): + """ + The `Workload` class loads a workload's information from gem5-resources + based on a name/id passed via the constructor. + + Usage + ----- + + ```py + # Determine what workload we want to run. + workload = Workload("example-workload-id") + + # Optionally we can override a parameter in the workload. In this example + # we are going to run this workload with a difference kernel. + workload.set_parameter("kernel", Resource("arm64-linux-kernel-4.14.134")) + + # We then set this workload to the board. + board.set_workload(workload) + ``` + + """ + + def __init__( + self, workload_name: str, resource_directory: Optional[str] = None + ) -> None: + """ + This constructor will load the workload details from the workload with + the given name/id. + + This function assumes the dictionary returned by the downloader's + `get_workload_json_obj` is a dictionary. 
An example of the schema is + shown below: + + ```json + { + "type" : "workload", + "name" : "x86-ubuntu-18.04-echo-hello", + "documentation" : "Description of workload here", + "function" : "set_kernel_disk_workload", + "resources" : { + "kernel" : "x86-linux-kernel-5.4.49", + "disk_image" : "x86-ubuntu-18.04-img" + }, + "additional_params" : { + "readfile_contents" : "m5_exit; echo 'hello'; m5_exit" + } + } + ``` + + This resource will result in the equivalent of the following action + being taken: + + ```python + board.set_kernel_disk_workload( + kernel = Resource("x86-linux-kernel-5.4.49"), + disk_image = Resource("x86-ubuntu-18.04-img"), + readfile_contents = "m5_exit; echo 'hello'; m5_exit", + ) + ``` + + :param workload_name: The name of the workload in the resources.json + file to be loaded. + :param resource_directory: An optional parameter that specifies where + any resources should be download and accessed from. If None, a default + location will be used. None by default. + """ + workload_json = get_workload_json_obj(workload_name=workload_name) + + func = workload_json["function"] + assert isinstance(func, str) + + params = {} + if "resources" in workload_json: + for key in workload_json["resources"].keys(): + assert isinstance(key, str) + value = workload_json["resources"][key] + assert isinstance(value, str) + params[key] = Resource( + value, resource_directory=resource_directory + ) + + if "additional_params" in workload_json: + for key in workload_json["additional_params"]: + assert isinstance(key, str) + params[key] = workload_json["additional_params"][key] + + super().__init__(function=func, parameters=params) diff --git a/src/python/gem5/runtime.py b/src/python/gem5/runtime.py index 623228cd18..6eed62a9da 100644 --- a/src/python/gem5/runtime.py +++ b/src/python/gem5/runtime.py @@ -35,6 +35,7 @@ from .isas import ISA, get_isa_from_str, get_isas_str_set from .coherence_protocol import CoherenceProtocol from typing import Set + def 
get_supported_isas() -> Set[ISA]: """ Returns the set of all the ISAs compiled into the current binary. @@ -45,13 +46,12 @@ def get_supported_isas() -> Set[ISA]: supported_isas.add(get_isa_from_str(buildEnv["TARGET_ISA"])) for key in get_isas_str_set(): - if f"USE_{key.upper()}_ISA" in buildEnv: + if buildEnv.get(f"USE_{key.upper()}_ISA", False): supported_isas.add(get_isa_from_str(key)) return supported_isas - def get_runtime_isa() -> ISA: """ Returns a single target ISA at runtime. @@ -68,8 +68,10 @@ def get_runtime_isa() -> ISA: :returns: The target ISA. """ - warn("The `get_runtime_isa` function is deprecated. Please migrate away " - "from using this function.") + warn( + "The `get_runtime_isa` function is deprecated. Please migrate away " + "from using this function." + ) if "TARGET_ISA" in buildEnv.keys(): return get_isa_from_str(buildEnv["TARGET_ISA"]) @@ -79,9 +81,12 @@ def get_runtime_isa() -> ISA: if len(supported_isas) == 1: return next(iter(supported_isas)) - raise Exception("Cannot determine the the runtime ISA. Either the " - "'TARGET_ISA' parameter must be set or the binary only " - "compiled to one ISA.") + raise Exception( + "Cannot determine the the runtime ISA. Either the " + "'TARGET_ISA' parameter must be set or the binary only " + "compiled to one ISA." + ) + def get_runtime_coherence_protocol() -> CoherenceProtocol: """Gets the cache coherence protocol. diff --git a/src/python/gem5/simulate/exit_event.py b/src/python/gem5/simulate/exit_event.py index 6dafc75a38..1e14fdd11a 100644 --- a/src/python/gem5/simulate/exit_event.py +++ b/src/python/gem5/simulate/exit_event.py @@ -42,10 +42,13 @@ class ExitEvent(Enum): SWITCHCPU = "switchcpu" # An exit needed to switch CPU cores. FAIL = "fail" # An exit because the simulation has failed. CHECKPOINT = "checkpoint" # An exit to load a checkpoint. - MAX_TICK = "max tick" # An exit due to a maximum tick value being met. 
- USER_INTERRUPT = ( # An exit due to a user interrupt (e.g., cntr + c) + SCHEDULED_TICK = "scheduled tick exit" + MAX_TICK = "max tick" # An exit due to a maximum tick value being met. + USER_INTERRUPT = ( # An exit due to a user interrupt (e.g., cntr + c) "user interupt" ) + SIMPOINT_BEGIN = "simpoint begins" + MAX_INSTS = "number of instructions reached" @classmethod def translate_exit_status(cls, exit_string: str) -> "ExitEvent": @@ -73,6 +76,8 @@ class ExitEvent(Enum): return ExitEvent.EXIT elif exit_string == "simulate() limit reached": return ExitEvent.MAX_TICK + elif exit_string == "Tick exit reached": + return ExitEvent.SCHEDULED_TICK elif exit_string == "switchcpu": return ExitEvent.SWITCHCPU elif exit_string == "m5_fail instruction encountered": @@ -81,6 +86,16 @@ class ExitEvent(Enum): return ExitEvent.CHECKPOINT elif exit_string == "user interrupt received": return ExitEvent.USER_INTERRUPT + elif exit_string == "simpoint starting point found": + return ExitEvent.SIMPOINT_BEGIN + elif exit_string == "a thread reached the max instruction count": + return ExitEvent.MAX_INSTS + elif exit_string.endswith("will terminate the simulation.\n"): + # This is for the traffic generator exit event + return ExitEvent.EXIT + elif exit_string.endswith("is finished updating the memory.\n"): + # This is for the gups generator exit event + return ExitEvent.EXIT raise NotImplementedError( "Exit event '{}' not implemented".format(exit_string) ) diff --git a/src/python/gem5/simulate/exit_event_generators.py b/src/python/gem5/simulate/exit_event_generators.py index 011bca6278..d6732bb49d 100644 --- a/src/python/gem5/simulate/exit_event_generators.py +++ b/src/python/gem5/simulate/exit_event_generators.py @@ -24,16 +24,36 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+from typing import Generator, Optional import m5.stats from ..components.processors.abstract_processor import AbstractProcessor from ..components.processors.switchable_processor import SwitchableProcessor +from ..utils.simpoint import SimPoint +from m5.util import warn +from pathlib import Path """ In this package we store generators for simulation exit events. """ -def default_exit_generator(): +def warn_default_decorator(gen: Generator, type: str, effect: str): + """A decortator for generators which will print a warning that it is a + default generator. + """ + + def wrapped_generator(*args, **kw_args): + warn( + f"No behavior was set by the user for {type}." + f" Default behavior is {effect}." + ) + for value in gen(*args, **kw_args): + yield value + + return wrapped_generator + + +def exit_generator(): """ A default generator for an exit event. It will return True, indicating that the Simulator run loop should exit. @@ -42,7 +62,7 @@ def default_exit_generator(): yield True -def default_switch_generator(processor: AbstractProcessor): +def switch_generator(processor: AbstractProcessor): """ A default generator for a switch exit event. If the processor is a SwitchableProcessor, this generator will switch it. Otherwise nothing will @@ -56,21 +76,94 @@ def default_switch_generator(processor: AbstractProcessor): yield False -def default_workbegin_generator(): +def dump_reset_generator(): """ - A default generator for a workbegin exit event. It will reset the - simulation statistics. + A generator for doing statstic dump and reset. It will reset the simulation + statistics and then dump simulation statistics. + The Simulation run loop will continue after executing the behavior of the + generator. + """ + while True: + m5.stats.dump() + m5.stats.reset() + yield False + + +def save_checkpoint_generator(checkpoint_dir: Optional[Path] = None): + """ + A generator for taking a checkpoint. It will take a checkpoint with the + input path and the current simulation Ticks. 
+ The Simulation run loop will continue after executing the behavior of the + generator. + """ + if not checkpoint_dir: + from m5 import options + + checkpoint_dir = Path(options.outdir) + while True: + m5.checkpoint((checkpoint_dir / f"cpt.{str(m5.curTick())}").as_posix()) + yield False + + +def reset_stats_generator(): + """ + This generator resets the stats every time it is called. It does not dump + the stats before resetting them. """ while True: m5.stats.reset() yield False -def default_workend_generator(): +def dump_stats_generator(): """ - A default generator for a workend exit event. It will dump the simulation - statistics. + This generator dumps the stats every time it is called. """ while True: m5.stats.dump() yield False + + +def skip_generator(): + """ + This generator does nothing when on the exit event. + The simulation will continue after this generator. + """ + while True: + yield False + + +def simpoints_save_checkpoint_generator( + checkpoint_dir: Path, simpoint: SimPoint +): + """ + A generator for taking multiple checkpoints for SimPoints. It will save the + checkpoints in the checkpoint_dir path with the SimPoints' index. + The Simulation run loop will continue after executing the behavior of the + generator until all the SimPoints in the simpoint_list has taken a + checkpoint. + """ + simpoint_list = simpoint.get_simpoint_start_insts() + count = 0 + last_start = -1 + while True: + m5.checkpoint((checkpoint_dir / f"cpt.SimPoint{count}").as_posix()) + last_start = simpoint_list[count] + count += 1 + # When the next SimPoint starting instruction is the same as the last + # one, it will take a checkpoint for it with index+1. Because of there + # are cases that the warmup length is larger than multiple SimPoints + # starting instructions, then they might cause duplicates in the + # simpoint_start_ints. 
+ while ( + count < len(simpoint_list) and last_start == simpoint_list[count] + ): + m5.checkpoint((checkpoint_dir / f"cpt.SimPoint{count}").as_posix()) + last_start = simpoint_list[count] + count += 1 + # When there are remaining SimPoints in the list, let the Simulation + # loop continues, otherwise, exit the Simulation loop. + if count < len(simpoint_list): + yield False + else: + yield True diff --git a/src/python/gem5/simulate/simulator.py b/src/python/gem5/simulate/simulator.py index 9c035890d4..e27679a996 100644 --- a/src/python/gem5/simulate/simulator.py +++ b/src/python/gem5/simulate/simulator.py @@ -27,23 +27,26 @@ import m5 import m5.ticks from m5.stats import addStatVisitor -from m5.stats.gem5stats import get_simstat +from m5.ext.pystats.simstat import SimStat from m5.objects import Root from m5.util import warn import os +import sys from pathlib import Path from typing import Optional, List, Tuple, Dict, Generator, Union from .exit_event_generators import ( - default_exit_generator, - default_switch_generator, - default_workbegin_generator, - default_workend_generator, + warn_default_decorator, + exit_generator, + switch_generator, + save_checkpoint_generator, + reset_stats_generator, + dump_stats_generator, ) from .exit_event import ExitEvent from ..components.boards.abstract_board import AbstractBoard -from ..components.processors.cpu_types import CPUTypes +from ..components.processors.switchable_processor import SwitchableProcessor class Simulator: @@ -69,12 +72,22 @@ class Simulator: This will run a simulation and execute default behavior for exit events. """ + # Here we declare the modules which should not be imported into any gem5 + # standard library run. The key is the module (e.g, + # "import common.Options") and the value is the reason, which will be + # output in the case this module is imported. + # This is checked with the `run` function is executed. 
+ _banned_modules = { + "common.Options": "The options provided by 'Options' are not " + "compatible with the gem5 standard library.", + } + def __init__( self, board: AbstractBoard, full_system: Optional[bool] = None, on_exit_event: Optional[ - Dict[Union[str, ExitEvent], Generator[Optional[bool], None, None]] + Dict[ExitEvent, Generator[Optional[bool], None, None]] ] = None, expected_execution_order: Optional[List[ExitEvent]] = None, checkpoint_path: Optional[Path] = None, @@ -95,7 +108,9 @@ class Simulator: events is valid. :param checkpoint_path: An optional parameter specifying the directory of the checkpoint to instantiate from. When the path is None, no - checkpoint will be loaded. By default, the path is None. + checkpoint will be loaded. By default, the path is None. **This + parameter is deprecated. Please set the checkpoint when setting the + board's workload**. `on_exit_event` usage notes --------------------------- @@ -134,14 +149,17 @@ class Simulator: Each exit event has a default behavior if none is specified by the user. These are as follows: - * ExitEvent.EXIT: default_exit_list - * ExitEvent.CHECKPOINT: default_exit_list - * ExitEvent.FAIL : default_exit_list - * ExitEvent.SWITCHCPU: default_switch_list - * ExitEvent.WORKBEGIN: default_workbegin_list - * ExitEvent.WORKEND: default_workend_list - * ExitEvent.USER_INTERRUPT: default_exit_generator - * ExitEvent.MAX_TICK: default_exit_generator() + * ExitEvent.EXIT: exit simulation + * ExitEvent.CHECKPOINT: take a checkpoint + * ExitEvent.FAIL : exit simulation + * ExitEvent.SWITCHCPU: call `switch` on the processor + * ExitEvent.WORKBEGIN: reset stats + * ExitEvent.WORKEND: exit simulation + * ExitEvent.USER_INTERRUPT: exit simulation + * ExitEvent.MAX_TICK: exit simulation + * ExitEvent.SCHEDULED_TICK: exit simulation + * ExitEvent.SIMPOINT_BEGIN: reset stats + * ExitEvent.MAX_INSTS: exit simulation These generators can be found in the `exit_event_generator.py` module. 
@@ -156,17 +174,41 @@ class Simulator: # We specify a dictionary here outlining the default behavior for each # exit event. Each exit event is mapped to a generator. self._default_on_exit_dict = { - ExitEvent.EXIT: default_exit_generator(), - # TODO: Something else should be done here for CHECKPOINT - ExitEvent.CHECKPOINT: default_exit_generator(), - ExitEvent.FAIL: default_exit_generator(), - ExitEvent.SWITCHCPU: default_switch_generator( - processor=board.get_processor() - ), - ExitEvent.WORKBEGIN: default_workbegin_generator(), - ExitEvent.WORKEND: default_workend_generator(), - ExitEvent.USER_INTERRUPT: default_exit_generator(), - ExitEvent.MAX_TICK: default_exit_generator(), + ExitEvent.EXIT: exit_generator(), + ExitEvent.CHECKPOINT: warn_default_decorator( + save_checkpoint_generator, + "checkpoint", + "creating a checkpoint and continuing", + )(), + ExitEvent.FAIL: exit_generator(), + ExitEvent.SWITCHCPU: warn_default_decorator( + switch_generator, + "switch CPU", + "switching the CPU type of the processor and continuing", + )(processor=board.get_processor()), + ExitEvent.WORKBEGIN: warn_default_decorator( + reset_stats_generator, + "work begin", + "resetting the stats and continuing", + )(), + ExitEvent.WORKEND: warn_default_decorator( + dump_stats_generator, + "work end", + "dumping the stats and continuing", + )(), + ExitEvent.USER_INTERRUPT: exit_generator(), + ExitEvent.MAX_TICK: exit_generator(), + ExitEvent.SCHEDULED_TICK: exit_generator(), + ExitEvent.SIMPOINT_BEGIN: warn_default_decorator( + reset_stats_generator, + "simpoint begin", + "resetting the stats and continuing", + )(), + ExitEvent.MAX_INSTS: warn_default_decorator( + exit_generator, + "max instructions", + "exiting the simulation", + )(), } if on_exit_event: @@ -183,23 +225,70 @@ class Simulator: self._last_exit_event = None self._exit_event_count = 0 + if checkpoint_path: + warn( + "Setting the checkpoint path via the Simulator constructor is " + "deprecated and will be removed in future 
releases of gem5. " + "Please set this through via the appropriate workload " + "function (i.e., `set_se_binary_workload` or " + "`set_kernel_disk_workload`). If both are set the workload " + "function set takes precedence." + ) + self._checkpoint_path = checkpoint_path + def schedule_simpoint(self, simpoint_start_insts: List[int]) -> None: + """ + Schedule SIMPOINT_BEGIN exit events + + **Warning:** SimPoints only work with one core + + :param simpoint_start_insts: a list of number of instructions + indicating the starting point of the simpoints + """ + if self._board.get_processor().get_num_cores() > 1: + warn("SimPoints only work with one core") + self._board.get_processor().get_cores()[0].set_simpoint( + simpoint_start_insts, self._instantiated + ) + + def schedule_max_insts(self, inst: int) -> None: + """ + Schedule a MAX_INSTS exit event when any thread in any core reaches the + given number of instructions. + + :param insts: a number of instructions to run to. + """ + for core in self._board.get_processor().get_cores(): + core._set_inst_stop_any_thread(inst, self._instantiated) + def get_stats(self) -> Dict: """ Obtain the current simulation statistics as a Dictionary, conforming to a JSON-style schema. - **Warning:** Will throw an Exception if called before `run()`. The - board must be initialized before obtaining statistics + :raises Exception: An exception is raised if this function is called + before `run()`. The board must be initialized before obtaining + statistics. + """ + + return self.get_simstats().to_json() + + def get_simstats(self) -> SimStat: + """ + Obtains the SimStat of the current simulation. + + :raises Exception: An exception is raised if this function is called + before `run()`. The board must be initialized before obtaining + statistics. """ if not self._instantiated: raise Exception( - "Cannot obtain simulation statistics prior to inialization." + "Cannot obtain simulation statistics prior to initialization." 
) - return get_simstat(self._root).to_json() + return m5.stats.gem5stats.get_simstat(self._root) def add_text_stats_output(self, path: str) -> None: """ @@ -210,9 +299,19 @@ class Simulator: :param path: That path in which the file should be output to. """ - if not os.is_path_exists_or_creatable(path): + path_path = Path(path) + parent = path_path.parent + + if ( + not parent.is_dir() + or not os.access(parent, os.W_OK) + or ( + path_path.exists() + and (path_path.is_dir() or not os.access(path_path, os.W_OK)) + ) + ): raise Exception( - f"Path '{path}' is is not a valid text stats output location." + f"Specified text stats output path '{path}' is invalid." ) addStatVisitor(path) @@ -224,9 +323,19 @@ class Simulator: :param path: That path in which the JSON should be output to. """ - if not os.is_path_exists_or_creatable(path): + path_path = Path(path) + parent = path_path.parent + + if ( + not parent.is_dir() + or not os.access(parent, os.W_OK) + or ( + path_path.exists() + and (path_path.is_dir() or not os.access(path_path, os.W_OK)) + ) + ): raise Exception( - f"Path '{path}' is is not a valid JSON output location." + f"Specified json stats output path '{path}' is invalid." ) addStatVisitor(f"json://{path}") @@ -272,6 +381,11 @@ class Simulator: """ if not self._instantiated: + + # Before anything else we run the AbstractBoard's + # `_pre_instantiate` function. + self._board._pre_instantiate() + root = Root( full_system=self._full_system if self._full_system is not None @@ -283,19 +397,42 @@ class Simulator: # (for example, in `get_stats()`). self._root = root - if CPUTypes.KVM in [ - core.get_type() - for core in self._board.get_processor().get_cores() - ]: + # The following is a bit of a hack. If a simulation is to use a KVM + # core then the `sim_quantum` value must be set. However, in the + # case of using a SwitchableProcessor the KVM cores may be + # switched out and therefore not accessible via `get_cores()`. 
+ # This is the reason for the `isinstance` check. + # + # We cannot set the `sim_quantum` value in every simulation as + # setting it causes the scheduling of exits to be off by the + # `sim_quantum` value (something necessary if we are using KVM + # cores). Ergo we only set the value of KVM cores are present. + # + # There is still a bug here in that if the user is switching to and + # from KVM and non-KVM cores via the SwitchableProcessor then the + # scheduling of exits for the non-KVM cores will be incorrect. This + # will be fixed at a later date. + processor = self._board.processor + if any(core.is_kvm_core() for core in processor.get_cores()) or ( + isinstance(processor, SwitchableProcessor) + and any(core.is_kvm_core() for core in processor._all_cores()) + ): m5.ticks.fixGlobalFrequency() root.sim_quantum = m5.ticks.fromSeconds(0.001) # m5.instantiate() takes a parameter specifying the path to the # checkpoint directory. If the parameter is None, no checkpoint # will be restored. - m5.instantiate(self._checkpoint_path) + if self._board._checkpoint: + m5.instantiate(self._board._checkpoint.as_posix()) + else: + m5.instantiate(self._checkpoint_path) self._instantiated = True + # Let the board know that instantiate has been called so it can do + # any final things. + self._board._post_instantiate() + def run(self, max_ticks: int = m5.MaxTick) -> None: """ This function will start or continue the simulator run and handle exit @@ -304,9 +441,18 @@ class Simulator: :param max_ticks: The maximum number of ticks to execute per simulation run. If this max_ticks value is met, a MAX_TICK exit event is received, if another simulation exit event is met the tick count is - reset. This is the **maximum number of ticks per simululation run**. + reset. This is the **maximum number of ticks per simulation run**. """ + # Check to ensure no banned module has been imported. 
+ for banned_module in self._banned_modules.keys(): + if banned_module in sys.modules: + raise Exception( + f"The banned module '{banned_module}' has been included. " + "Please do not use this in your simulations. " + f"Reason: {self._banned_modules[banned_module]}" + ) + # We instantiate the board if it has not already been instantiated. self._instantiate() @@ -372,4 +518,3 @@ class Simulator: will be saved. """ m5.checkpoint(str(checkpoint_dir)) - diff --git a/src/python/gem5/utils/multiprocessing/README.md b/src/python/gem5/utils/multiprocessing/README.md new file mode 100644 index 0000000000..da2116c44c --- /dev/null +++ b/src/python/gem5/utils/multiprocessing/README.md @@ -0,0 +1,71 @@ +# gem5's wrapper around python multiprocessing + +This module wraps python's multiprocessing module so that it works with gem5. +The multiprocessing module creates new python processes, but there is no way to customize the way these processes are created. +This wrapper extends the python multiprocessing to support passing new arguments to the python (or gem5 in this case) executable when a new process is created. + +This code replicates some of the multiprocessing module implementation from the python standard library in gem5. +The goal of this code is to enable users to use a *single* set of python scripts to run and analyze a suite of gem5 simulations. + +We must reimplement some of the multiprocessing module because it is not flexible enough to allow for customized command line parameter to the "python" executable (gem5 in our case). +To get around this, I extended the Process and context objects to be gem5 specific. + +The next steps is to wrap the Process and Pool types with gem5-specific versions that will improve their usability for our needs. +With this changeset, these objects are usable, but it will require significant user effort to reach the goal of running/analyzing many different gem5 simulations. 
+ +## Example use + +test.py: + +```python +from gem5.utils.multiprocessing import Process, Pool +from sim import info, run_sim +if __name__ == '__m5_main__' or __name__ == '__main__': + info('main line') + p1 = Process(target=run_sim, args=('bob',)) + p2 = Process(target=run_sim, args=('jane',)) + p1.start() + p2.start() + p2.join() + p1.join() + with Pool(processes=4, maxtasksperchild=1) as pool: + pool.map(run_sim, range(10)) +``` + +sim.py: + +```python +import os +def info(title): + print(title) + print('module name:', __name__) + print('parent process:', os.getppid()) + print('process id:', os.getpid()) +def run_sim(name): + info('function g') + from gem5.prebuilt.demo.x86_demo_board import X86DemoBoard + from gem5.resources.resource import Resource + from gem5.simulate.simulator import Simulator + board = X86DemoBoard() + board.set_kernel_disk_workload( + kernel=Resource("x86-linux-kernel-5.4.49"), + disk_image=Resource("x86-ubuntu-18.04-img"), + ) + simulator = Simulator(board=board) + simulator.run(max_ticks=10000000) +``` + +Then, you can run `gem5 test.py`. +This will execute `run_sim` 12 times. +The first two will run in parallel, then the last 10 will run in parallel with up to 4 running at once. + +## Limitations + +- This only supports the spawn context. This is important because we need a fresh gem5 process for every subprocess. +- When using `Pool`, the `maxtasksperchild` must be 1. +- Process synchronization (queues, pipes, etc.) hasn't been tested +- Functions that are used to execute in the subprocess must be imported from another module. In other words, we cannot pickle functions in the main/runner module. + +## Implementation notes + +- The `_start_method` must be `None` for the `Spawn_gem5Process` class. Otherwise, in `_bootstrap` in the `BaseProcess` it will try to force the `_start_method` to be gem5-specific, which the `multiprocessing` module doesn't understand. 
diff --git a/src/arch/power/O3CPU.py b/src/python/gem5/utils/multiprocessing/__init__.py similarity index 87% rename from src/arch/power/O3CPU.py rename to src/python/gem5/utils/multiprocessing/__init__.py index fdb63edc9e..680aeac314 100644 --- a/src/arch/power/O3CPU.py +++ b/src/python/gem5/utils/multiprocessing/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2021 Google, Inc. +# Copyright (c) 2022 The Regents of The University of California +# All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -23,9 +24,10 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from m5.objects.PowerCPU import PowerO3CPU +from .context import Process -O3CPU = PowerO3CPU +from .context import gem5Context -# Deprecated -DerivO3CPU = O3CPU +Pool = gem5Context().Pool + +__all__ = ["Process", "Pool"] diff --git a/src/python/gem5/utils/multiprocessing/_command_line.py b/src/python/gem5/utils/multiprocessing/_command_line.py new file mode 100644 index 0000000000..f68277540d --- /dev/null +++ b/src/python/gem5/utils/multiprocessing/_command_line.py @@ -0,0 +1,101 @@ +# Copyright (c) 2022 The Regents of The University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This file contains extensions of the multiprocessing module to be used with gem5. +Specifically, it contains the code to produce the command line for spawned processes. +Some code inspired by the Python standard library implementation of the +multiprocessing module (i.e., cpython/Lib/multiprocessing/). 
+""" + +import sys +from multiprocessing import spawn, util + + +def _gem5_args_for_multiprocessing(name): + from m5 import options + + # Options that are disallowed with multiprocessing + disallowed = [ + options.build_info, + options.copyright, + options.readme, + options.interactive, + options.pdb, + options.verbose, + options.debug_break, + options.debug_help, + options.debug_flags, + options.debug_start, + options.debug_end, + options.debug_ignore, + options.list_sim_objects, + ] + if any(disallowed): + raise Exception( + f"Disallowed option for multiprocessing. " + f"See {__file__} for details." + ) + + # Options not forwarded: + # --allow-remote-connections, --listener-mode, --dump-config, --json-config + # --dot-config, --dot-dvfs-config, --debug-file, --remote-gdb-port, -c + + arguments = [ + f"--outdir={options.outdir}/{name}", + f"--stdout-file={options.stdout_file}", + f"--stderr-file={options.stderr_file}", + f"--stats-file={options.stats_file}", + ] + if options.redirect_stdout: + arguments.append("--redirect-stdout") + if options.redirect_stderr: + arguments.append("--redirect-stderr") + if options.silent_redirect: + arguments.append("--silent-redirect") + if options.path: + arguments.append(f"--path={':'.join(options.path)}") + if options.quiet: + arguments.append("--quiet") + + return arguments + + +def get_command_line(name, **kwds): + """ + Returns prefix of command line used for spawning a child process + """ + if getattr(sys, "frozen", False): + return [sys.executable, "--multiprocessing-fork"] + [ + "%s=%r" % item for item in kwds.items() + ] + else: + prog = "from multiprocessing.spawn import spawn_main; spawn_main(%s)" + prog %= ", ".join("%s=%r" % item for item in kwds.items()) + opts = util._args_from_interpreter_flags() + opts.extend(_gem5_args_for_multiprocessing(name)) + exe = spawn.get_executable() + return [exe] + opts + ["-c", prog, "--multiprocessing-fork"] diff --git a/src/python/gem5/utils/multiprocessing/context.py 
b/src/python/gem5/utils/multiprocessing/context.py new file mode 100644 index 0000000000..2108bc624c --- /dev/null +++ b/src/python/gem5/utils/multiprocessing/context.py @@ -0,0 +1,75 @@ +# Copyright (c) 2022 The Regents of The University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This file contains extensions of the multiprocessing module to be used with gem5 +Some code inspired by the Python standard library implementation of the +multiprocessing module (i.e., cpython/Lib/multiprocessing/). 
+""" + +from multiprocessing import context, process +from multiprocessing.context import DefaultContext + +# The `_start_method` must be `None` for the `Spawn_gem5Process` class. +# Otherwise, in `_bootstrap` in the `BaseProcess` it will try to force the +# `_start_method` to be gem5-specific, which the `multiprocessing` module +# doesn't understand. +class Spawn_gem5Process(process.BaseProcess): + _start_method = None + + @staticmethod + def _Popen(process_obj): + from .popen_spawn_gem5 import Popen + + return Popen(process_obj) + + +class Process(process.BaseProcess): + _start_method = None + + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + + +class gem5Context(context.BaseContext): + _name = "spawn_gem5" + Process = Spawn_gem5Process + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError("cannot find context for %r" % method) from None + ctx._check_available() + return ctx + + +_concrete_contexts = {"spawn_gem5": gem5Context()} + +_default_context = DefaultContext(_concrete_contexts["spawn_gem5"]) diff --git a/src/python/gem5/utils/multiprocessing/popen_spawn_gem5.py b/src/python/gem5/utils/multiprocessing/popen_spawn_gem5.py new file mode 100644 index 0000000000..13fb3362fc --- /dev/null +++ b/src/python/gem5/utils/multiprocessing/popen_spawn_gem5.py @@ -0,0 +1,95 @@ +# Copyright (c) 2022 The Regents of The University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This file contains extensions of the multiprocessing module to be used with gem5. +Specifically, it contains the code to spawn a new gem5 process with Popen. +Some code is from the Python standard library implementation of the +multiprocessing module (i.e., cpython/Lib/multiprocessing/). 
+""" + +import io +import os + +from multiprocessing.context import reduction, set_spawning_popen +from multiprocessing import popen_spawn_posix +from multiprocessing import spawn +from multiprocessing import util + +from ._command_line import get_command_line + +__all__ = ["Popen"] + + +class Popen(popen_spawn_posix.Popen): + method = "spawn_gem5" + + def __init__(self, process_obj): + super().__init__(process_obj) + + # Copyright (c) 2001-2022 Python Software Foundation; All Rights Reserved + # from cpython/Lib/multiprocessing/popen_spawn_posix.py + def _launch(self, process_obj): + from multiprocessing import resource_tracker + + tracker_fd = resource_tracker.getfd() + self._fds.append(tracker_fd) + prep_data = spawn.get_preparation_data(process_obj._name) + fp = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + finally: + set_spawning_popen(None) + + parent_r = child_w = child_r = parent_w = None + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + # Note: This next line is the only modification + cmd = get_command_line( + tracker_fd=tracker_fd, + pipe_handle=child_r, + name=process_obj.name, + ) + self._fds.extend([child_r, child_w]) + self.pid = util.spawnv_passfds( + spawn.get_executable(), cmd, self._fds + ) + self.sentinel = parent_r + with open(parent_w, "wb", closefd=False) as f: + f.write(fp.getbuffer()) + finally: + fds_to_close = [] + for fd in (parent_r, parent_w): + if fd is not None: + fds_to_close.append(fd) + self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) + + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) diff --git a/src/python/gem5/utils/requires.py b/src/python/gem5/utils/requires.py index f4322ca440..30a8ef4a8b 100644 --- a/src/python/gem5/utils/requires.py +++ b/src/python/gem5/utils/requires.py @@ -39,14 +39,14 @@ def _get_exception_str(msg: str): # stated. `inspect.stack()[1]` is the `requires` caller method. 
One above # this on the stack, `inspect.stack()[2]` should be where `requires` is # called. - if inspect.stack()[2].function == '': + if inspect.stack()[2].function == "": # If the caller is a Python module, we use the filename. This is for # the case where the `requires` function is called outside of a class. name = inspect.stack()[2].filename else: # Otherwise we assume the `requires` is being called by a class, in # which case we label the exception message with the class name. - name = inspect.stack()[2].frame.f_locals['self'].__class__.__name__ + name = inspect.stack()[2].frame.f_locals["self"].__class__.__name__ return "[{}] {}".format(name, msg) @@ -93,9 +93,10 @@ def requires( # why the enum did not compare correctly yielded no results. The following # code works, even though it is verbose and appears functionally equivalent # to the original code. - if isa_required != None and isa_required.value not in \ - (isa.value for isa in supported_isas): - msg=f"The required ISA is '{isa_required.name}'. Supported ISAs: " + if isa_required != None and isa_required.value not in ( + isa.value for isa in supported_isas + ): + msg = f"The required ISA is '{isa_required.name}'. Supported ISAs: " for isa in supported_isas: msg += f"{os.linesep}{isa.name}" raise Exception(_get_exception_str(msg=msg)) @@ -108,9 +109,9 @@ def requires( raise Exception( _get_exception_str( msg="The current coherence protocol is " - "'{}'. Required: '{}'".format( - runtime_coherence_protocol.name, - coherence_protocol_required.name, + "'{}'. 
Required: '{}'".format( + runtime_coherence_protocol.name, + coherence_protocol_required.name, ) ) ) @@ -118,6 +119,6 @@ def requires( if kvm_required and not kvm_available: raise Exception( _get_exception_str( - msg="KVM is required but is unavaiable on this system" + msg="KVM is required but is unavailable on this system" ) - ) \ No newline at end of file + ) diff --git a/src/python/gem5/utils/simpoint.py b/src/python/gem5/utils/simpoint.py new file mode 100644 index 0000000000..9e861cc0a5 --- /dev/null +++ b/src/python/gem5/utils/simpoint.py @@ -0,0 +1,185 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from m5.util import fatal +from pathlib import Path +from typing import List, Tuple +from gem5.resources.resource import Resource, CustomResource + + +class SimPoint: + """ + This SimPoint class is used to manage the information needed for SimPoints + in workload + + """ + + def __init__( + self, + simpoint_resource: CustomResource = None, + simpoint_interval: int = None, + simpoint_file_path: Path = None, + weight_file_path: Path = None, + simpoint_list: List[int] = None, + weight_list: List[int] = None, + warmup_interval: int = 0, + ) -> None: + """ + :param simpoint_interval: the length of each SimPoints interval + :param simpoint_file_path: the path to the SimPoints result file + generated by Simpoint3.2 or gem5 + :param weight_file_path: the path to the weight result file generated + by Simpoint3.2 or gem5 + + :param simpoint_list: a list of SimPoints starting instructions + :param weight_list: a list of SimPoints weights + :param warmup_interval: a number of instructions for warming up before + restoring a SimPoints checkpoint + + usage note + ----------- + Need to pass in the paths or the lists for the SimPoints and their + weights. If the paths are passed in, no actions will be done to the + list. + + When passing in simpoint_list and weight_list, passing in sorted lists + (sorted by SimPoints in ascending order) is strongly suggested. + The warmup_list only works correctly with sorted simpoint_list. 
+ """ + + # initalize input if you're passing in a CustomResource + if simpoint_resource is not None: + simpoint_directory = str(simpoint_resource.get_local_path()) + + simpoint_file_path = Path(simpoint_directory + "/simpoint.simpt") + weight_file_path = Path(simpoint_directory + "/simpoint.weight") + simpoint_interval = ( + simpoint_resource.get_metadata() + .get("additional_metadata") + .get("simpoint_interval") + ) + warmup_interval = ( + simpoint_resource.get_metadata() + .get("additional_metadata") + .get("warmup_interval") + ) + + self._simpoint_interval = simpoint_interval + + if simpoint_file_path is None or weight_file_path is None: + if simpoint_list is None or weight_list is None: + fatal( + "Please pass in file paths or lists for both simpoints " + "and weights." + ) + else: + self._simpoint_start_insts = list( + inst * simpoint_interval for inst in simpoint_list + ) + self._weight_list = weight_list + else: + # if passing in file paths then it calls the function to generate + # simpoint_start_insts and weight list from the files + ( + self._simpoint_start_insts, + self._weight_list, + ) = self.get_weights_and_simpoints_from_file( + simpoint_file_path, weight_file_path + ) + + if warmup_interval != 0: + self._warmup_list = self.set_warmup_intervals(warmup_interval) + else: + self._warmup_list = [0] * len(self._simpoint_start_insts) + + def get_weights_and_simpoints_from_file( + self, + simpoint_path: Path, + weight_path: Path, + ) -> Tuple[List[int], List[int]]: + """ + This function takes in file paths and outputs a list of SimPoints + instruction starts and a list of weights + """ + simpoint = [] + with open(simpoint_path) as simpoint_file, open( + weight_path + ) as weight_file: + while True: + line = simpoint_file.readline() + if not line: + break + interval = int(line.split(" ", 1)[0]) + line = weight_file.readline() + if not line: + fatal("not engough weights") + weight = float(line.split(" ", 1)[0]) + simpoint.append((interval, weight)) + 
simpoint.sort(key=lambda obj: obj[0]) + # use simpoint to sort + simpoint_start_insts = [] + weight_list = [] + for start, weight in simpoint: + simpoint_start_insts.append(start * self._simpoint_interval) + weight_list.append(weight) + return simpoint_start_insts, weight_list + + def set_warmup_intervals(self, warmup_interval: int) -> List[int]: + """ + This function takes the warmup_interval, fits it into the + _simpoint_start_insts, and outputs a list of warmup instruction lengths + for each SimPoint. + + The warmup instruction length is calculated using the starting + instruction of a SimPoint to minus the warmup_interval and the ending + instruction of the last SimPoint. If it is less than 0, then the warmup + instruction length is the gap between the starting instruction of a + SimPoint and the ending instruction of the last SimPoint. + """ + warmup_list = [] + for index, start_inst in enumerate(self._simpoint_start_insts): + warmup_inst = start_inst - warmup_interval + if warmup_inst < 0: + warmup_inst = start_inst + else: + warmup_inst = warmup_interval + warmup_list.append(warmup_inst) + # change the starting instruction of a SimPoint to include the + # warmup instruction length + self._simpoint_start_insts[index] = start_inst - warmup_inst + return warmup_list + + def get_simpoint_start_insts(self) -> List[int]: + return self._simpoint_start_insts + + def get_weight_list(self) -> List[float]: + return self._weight_list + + def get_simpoint_interval(self) -> int: + return self._simpoint_interval + + def get_warmup_list(self) -> List[int]: + return self._warmup_list diff --git a/src/python/importer.py b/src/python/importer.py index f75f95b675..3d3ee7c068 100644 --- a/src/python/importer.py +++ b/src/python/importer.py @@ -29,6 +29,7 @@ import importlib.abc import importlib.util import os + class ByteCodeLoader(importlib.abc.Loader): def __init__(self, code): super().__init__() @@ -37,14 +38,15 @@ class ByteCodeLoader(importlib.abc.Loader): def 
exec_module(self, module): exec(self.code, module.__dict__) + # Simple importer that allows python to import data from a dict of # code objects. The keys are the module path, and the items are the # filename and bytecode of the file. class CodeImporter(object): def __init__(self): self.modules = {} - override_var = os.environ.get('M5_OVERRIDE_PY_SOURCE', 'false') - self.override = (override_var.lower() in ('true', 'yes')) + override_var = os.environ.get("M5_OVERRIDE_PY_SOURCE", "false") + self.override = override_var.lower() in ("true", "yes") def add_module(self, abspath, modpath, code): if modpath in self.modules: @@ -59,18 +61,19 @@ class CodeImporter(object): abspath, code = self.modules[fullname] if self.override and os.path.exists(abspath): - src = open(abspath, 'r').read() - code = compile(src, abspath, 'exec') + src = open(abspath, "r").read() + code = compile(src, abspath, "exec") - is_package = (os.path.basename(abspath) == '__init__.py') + is_package = os.path.basename(abspath) == "__init__.py" spec = importlib.util.spec_from_loader( - name=fullname, loader=ByteCodeLoader(code), - is_package=is_package) + name=fullname, loader=ByteCodeLoader(code), is_package=is_package + ) spec.loader_state = self.modules.keys() return spec + # Create an importer and add it to the meta_path so future imports can # use it. There's currently nothing in the importer, but calls to # add_module can be used to add code. @@ -79,6 +82,7 @@ def install(): global add_module add_module = importer.add_module import sys + sys.meta_path.insert(0, importer) # Injected into this module's namespace by the c++ code that loads it. 
diff --git a/src/python/m5/SimObject.py b/src/python/m5/SimObject.py index 26147a1757..91cbbc59bd 100644 --- a/src/python/m5/SimObject.py +++ b/src/python/m5/SimObject.py @@ -46,6 +46,7 @@ import inspect import m5 from m5.util import * from m5.util.pybind import * + # Use the pyfdt and not the helper class, because the fdthelper # relies on the SimObject definition from m5.ext.pyfdt import pyfdt @@ -54,10 +55,16 @@ from m5.ext.pyfdt import pyfdt # load (when SimObject class references Param to create a class # variable, the 'name' param)... from m5.params import * + # There are a few things we need that aren't in params.__all__ since # normal users don't need them -from m5.params import ParamDesc, VectorParamDesc, \ - isNullPointer, SimObjectVector, Port +from m5.params import ( + ParamDesc, + VectorParamDesc, + isNullPointer, + SimObjectVector, + Port, +) from m5.proxy import * from m5.proxy import isproxy @@ -111,10 +118,12 @@ instanceDict = {} # Did any of the SimObjects lack a header file? noCxxHeader = False + def public_value(key, value): - return key.startswith('_') or \ - isinstance(value, (FunctionType, MethodType, ModuleType, - classmethod, type)) + return key.startswith("_") or isinstance( + value, (FunctionType, MethodType, ModuleType, classmethod, type) + ) + # The metaclass for SimObject. 
This class controls how new classes # that derive from SimObject are instantiated, and provides inherited @@ -123,19 +132,19 @@ def public_value(key, value): class MetaSimObject(type): # Attributes that can be set only at initialization time init_keywords = { - 'abstract' : bool, - 'cxx_class' : str, - 'cxx_type' : str, - 'cxx_header' : str, - 'type' : str, - 'cxx_base' : (str, type(None)), - 'cxx_extra_bases' : list, - 'cxx_exports' : list, - 'cxx_param_exports' : list, - 'cxx_template_params' : list, + "abstract": bool, + "cxx_class": str, + "cxx_type": str, + "cxx_header": str, + "type": str, + "cxx_base": (str, type(None)), + "cxx_extra_bases": list, + "cxx_exports": list, + "cxx_param_exports": list, + "cxx_template_params": list, } # Attributes that can be set any time - keywords = { 'check' : FunctionType } + keywords = {"check": FunctionType} # __new__ is called before __init__, and is where the statements # in the body of the class definition get loaded into the class's @@ -151,7 +160,7 @@ class MetaSimObject(type): cls_dict = {} value_dict = {} cxx_exports = [] - for key,val in dict.items(): + for key, val in dict.items(): try: cxx_exports.append(getattr(val, "__pybind")) except AttributeError: @@ -162,21 +171,21 @@ class MetaSimObject(type): else: # must be a param/port setting value_dict[key] = val - if 'abstract' not in value_dict: - value_dict['abstract'] = False - if 'cxx_extra_bases' not in value_dict: - value_dict['cxx_extra_bases'] = [] - if 'cxx_exports' not in value_dict: - value_dict['cxx_exports'] = cxx_exports + if "abstract" not in value_dict: + value_dict["abstract"] = False + if "cxx_extra_bases" not in value_dict: + value_dict["cxx_extra_bases"] = [] + if "cxx_exports" not in value_dict: + value_dict["cxx_exports"] = cxx_exports else: - value_dict['cxx_exports'] += cxx_exports - if 'cxx_param_exports' not in value_dict: - value_dict['cxx_param_exports'] = [] - if 'cxx_template_params' not in value_dict: - value_dict['cxx_template_params'] 
= [] - cls_dict['_value_dict'] = value_dict + value_dict["cxx_exports"] += cxx_exports + if "cxx_param_exports" not in value_dict: + value_dict["cxx_param_exports"] = [] + if "cxx_template_params" not in value_dict: + value_dict["cxx_template_params"] = [] + cls_dict["_value_dict"] = value_dict cls = super().__new__(mcls, name, bases, cls_dict) - if 'type' in value_dict: + if "type" in value_dict: allClasses[name] = cls return cls @@ -189,7 +198,7 @@ class MetaSimObject(type): # initialize required attributes # class-only attributes - cls._params = multidict() # param descriptions + cls._params = multidict() # param descriptions cls._ports = multidict() # port descriptions # Parameter names that are deprecated. Dict[str, DeprecatedParam] @@ -199,12 +208,12 @@ class MetaSimObject(type): cls._deprecated_params = multidict() # class or instance attributes - cls._values = multidict() # param values - cls._hr_values = multidict() # human readable param values - cls._children = multidict() # SimObject children - cls._port_refs = multidict() # port ref objects - cls._instantiated = False # really instantiated, cloned, or subclassed - cls._init_called = False # Used to check if __init__ overridden + cls._values = multidict() # param values + cls._hr_values = multidict() # human readable param values + cls._children = multidict() # SimObject children + cls._port_refs = multidict() # port ref objects + cls._instantiated = False # really instantiated, cloned, or subclassed + cls._init_called = False # Used to check if __init__ overridden # We don't support multiple inheritance of sim objects. If you want # to, you must fix multidict to deal with it properly. 
Non sim-objects @@ -215,7 +224,8 @@ class MetaSimObject(type): bTotal += 1 if bTotal > 1: raise TypeError( - "SimObjects do not support multiple inheritance") + "SimObjects do not support multiple inheritance" + ) base = bases[0] @@ -238,13 +248,13 @@ class MetaSimObject(type): cls._base = None # default keyword values - if 'type' in cls._value_dict: - if 'cxx_class' not in cls._value_dict: - cls._value_dict['cxx_class'] = cls._value_dict['type'] + if "type" in cls._value_dict: + if "cxx_class" not in cls._value_dict: + cls._value_dict["cxx_class"] = cls._value_dict["type"] - cls._value_dict['cxx_type'] = '%s *' % cls._value_dict['cxx_class'] + cls._value_dict["cxx_type"] = "%s *" % cls._value_dict["cxx_class"] - if 'cxx_header' not in cls._value_dict: + if "cxx_header" not in cls._value_dict: global noCxxHeader noCxxHeader = True warn("No header file specified for SimObject: %s", name) @@ -256,7 +266,7 @@ class MetaSimObject(type): # the class is defined, so we handle them here. The others # can be set later too, so just emulate that by calling # setattr(). 
- for key,val in cls._value_dict.items(): + for key, val in cls._value_dict.items(): # param descriptions if isinstance(val, ParamDesc): cls._new_param(key, val) @@ -284,29 +294,35 @@ class MetaSimObject(type): def _set_keyword(cls, keyword, val, kwtype): if not isinstance(val, kwtype): - raise TypeError('keyword %s has bad type %s (expecting %s)' % \ - (keyword, type(val), kwtype)) + raise TypeError( + "keyword %s has bad type %s (expecting %s)" + % (keyword, type(val), kwtype) + ) if isinstance(val, FunctionType): val = classmethod(val) type.__setattr__(cls, keyword, val) def _new_param(cls, name, pdesc): # each param desc should be uniquely assigned to one variable - assert(not hasattr(pdesc, 'name')) + assert not hasattr(pdesc, "name") pdesc.name = name cls._params[name] = pdesc - if hasattr(pdesc, 'default'): + if hasattr(pdesc, "default"): cls._set_param(name, pdesc.default, pdesc) def _set_param(cls, name, value, param): - assert(param.name == name) + assert param.name == name try: hr_value = value value = param.convert(value) except Exception as e: - msg = "%s\nError setting param %s.%s to %s\n" % \ - (e, cls.__name__, name, value) - e.args = (msg, ) + msg = "%s\nError setting param %s.%s to %s\n" % ( + e, + cls.__name__, + name, + value, + ) + e.args = (msg,) raise cls._values[name] = value # if param value is a SimObject, make it a child too, so that @@ -315,8 +331,9 @@ class MetaSimObject(type): cls._add_cls_child(name, value) # update human-readable values of the param if it has a literal # value and is not an object or proxy. 
- if not (isSimObjectOrVector(value) or\ - isinstance(value, m5.proxy.BaseProxy)): + if not ( + isSimObjectOrVector(value) or isinstance(value, m5.proxy.BaseProxy) + ): cls._hr_values[name] = hr_value def _add_cls_child(cls, name, child): @@ -331,7 +348,7 @@ class MetaSimObject(type): def _new_port(cls, name, port): # each port should be uniquely assigned to one variable - assert(not hasattr(port, 'name')) + assert not hasattr(port, "name") port.name = name cls._ports[name] = port @@ -352,11 +369,12 @@ class MetaSimObject(type): a runtime error. This will search both the current object and its parents. """ - for k,v in cls._value_dict.items(): + for k, v in cls._value_dict.items(): if v == value: - return k,v - raise RuntimeError("Cannot find parameter {} in parameter list" - .format(value)) + return k, v + raise RuntimeError( + "Cannot find parameter {} in parameter list".format(value) + ) # Set attribute (called on foo.attr = value when foo is an # instance of class cls). @@ -376,9 +394,10 @@ class MetaSimObject(type): if isSimObjectOrSequence(value) and cls._instantiated: raise RuntimeError( - "cannot set SimObject parameter '%s' after\n" \ - " class %s has been instantiated or subclassed" \ - % (attr, cls.__name__)) + "cannot set SimObject parameter '%s' after\n" + " class %s has been instantiated or subclassed" + % (attr, cls.__name__) + ) # check for param param = cls._params.get(attr) @@ -393,20 +412,21 @@ class MetaSimObject(type): # no valid assignment... 
raise exception raise AttributeError( - "Class %s has no parameter \'%s\'" % (cls.__name__, attr)) + "Class %s has no parameter '%s'" % (cls.__name__, attr) + ) def __getattr__(cls, attr): - if attr == 'cxx_class_path': - return cls.cxx_class.split('::') + if attr == "cxx_class_path": + return cls.cxx_class.split("::") - if attr == 'cxx_class_name': + if attr == "cxx_class_name": return cls.cxx_class_path[-1] - if attr == 'cxx_namespaces': + if attr == "cxx_namespaces": return cls.cxx_class_path[:-1] - if attr == 'pybind_class': - return '_COLONS_'.join(cls.cxx_class_path) + if attr == "pybind_class": + return "_COLONS_".join(cls.cxx_class_path) if attr in cls._values: return cls._values[attr] @@ -418,7 +438,8 @@ class MetaSimObject(type): return getattr(cls.getCCClass(), attr) except AttributeError: raise AttributeError( - "object '%s' has no attribute '%s'" % (cls.__name__, attr)) + "object '%s' has no attribute '%s'" % (cls.__name__, attr) + ) def __str__(cls): return cls.__name__ @@ -433,6 +454,7 @@ class MetaSimObject(type): def pybind_predecls(cls, code): code('#include "${{cls.cxx_header}}"') + # This *temporary* definition is required to support calls from the # SimObject class definition to the MetaSimObject methods (in # particular _set_param, which gets called for parameters with default @@ -442,6 +464,7 @@ class MetaSimObject(type): def isSimObjectOrVector(value): return False + def cxxMethod(*args, **kwargs): """Decorator to export C++ functions to Python""" @@ -454,17 +477,18 @@ def cxxMethod(*args, **kwargs): args, varargs, keywords, defaults = inspect.getargspec(func) if varargs or keywords: - raise ValueError("Wrapped methods must not contain variable " \ - "arguments") + raise ValueError( + "Wrapped methods must not contain variable " "arguments" + ) # Create tuples of (argument, default) if defaults: - args = args[:-len(defaults)] + \ - list(zip(args[-len(defaults):], defaults)) + args = args[: -len(defaults)] + list( + zip(args[-len(defaults) 
:], defaults) + ) # Don't include self in the argument list to PyBind args = args[1:] - @wraps(func) def cxx_call(self, *args, **kwargs): ccobj = self.getCCClass() if static else self.getCCObject() @@ -475,9 +499,13 @@ def cxxMethod(*args, **kwargs): return func(self, *args, **kwargs) f = py_call if override else cxx_call - f.__pybind = PyBindMethod(name, cxx_name=cxx_name, args=args, - return_value_policy=return_value_policy, - static=static) + f.__pybind = PyBindMethod( + name, + cxx_name=cxx_name, + args=args, + return_value_policy=return_value_policy, + static=static, + ) return f @@ -488,32 +516,35 @@ def cxxMethod(*args, **kwargs): else: raise TypeError("One argument and no kwargs, or only kwargs expected") + # This class holds information about each simobject parameter # that should be displayed on the command line for use in the # configuration system. class ParamInfo(object): - def __init__(self, type, desc, type_str, example, default_val, access_str): - self.type = type - self.desc = desc - self.type_str = type_str - self.example_str = example - self.default_val = default_val - # The string representation used to access this param through python. - # The method to access this parameter presented on the command line may - # be different, so this needs to be stored for later use. - self.access_str = access_str - self.created = True + def __init__(self, type, desc, type_str, example, default_val, access_str): + self.type = type + self.desc = desc + self.type_str = type_str + self.example_str = example + self.default_val = default_val + # The string representation used to access this param through python. + # The method to access this parameter presented on the command line may + # be different, so this needs to be stored for later use. + self.access_str = access_str + self.created = True + + # Make it so we can only set attributes at initialization time + # and effectively make this a const object. 
+ def __setattr__(self, name, value): + if not "created" in self.__dict__: + self.__dict__[name] = value - # Make it so we can only set attributes at initialization time - # and effectively make this a const object. - def __setattr__(self, name, value): - if not "created" in self.__dict__: - self.__dict__[name] = value class SimObjectCliWrapperException(Exception): def __init__(self, message): super().__init__(message) + class SimObjectCliWrapper(object): """ Wrapper class to restrict operations that may be done @@ -526,11 +557,12 @@ class SimObjectCliWrapper(object): """ def __init__(self, sim_objects): - self.__dict__['_sim_objects'] = list(sim_objects) + self.__dict__["_sim_objects"] = list(sim_objects) def __getattr__(self, key): - return SimObjectCliWrapper(sim_object._children[key] - for sim_object in self._sim_objects) + return SimObjectCliWrapper( + sim_object._children[key] for sim_object in self._sim_objects + ) def __setattr__(self, key, val): for sim_object in self._sim_objects: @@ -539,12 +571,13 @@ class SimObjectCliWrapper(object): setattr(sim_object, key, val) else: raise SimObjectCliWrapperException( - 'tried to set or unsettable' \ - 'object parameter: ' + key) + "tried to set or unsettable" "object parameter: " + key + ) else: raise SimObjectCliWrapperException( - 'tried to set or access non-existent' \ - 'object parameter: ' + key) + "tried to set or access non-existent" + "object parameter: " + key + ) def __getitem__(self, idx): """ @@ -559,8 +592,9 @@ class SimObjectCliWrapper(object): if isinstance(idx, int): _range = range(idx, idx + 1) elif not isinstance(idx, slice): - raise SimObjectCliWrapperException( \ - 'invalid index type: ' + repr(idx)) + raise SimObjectCliWrapperException( + "invalid index type: " + repr(idx) + ) for sim_object in self._sim_objects: if isinstance(idx, slice): _range = range(*idx.indices(len(sim_object))) @@ -570,18 +604,19 @@ class SimObjectCliWrapper(object): def __iter__(self): return iter(self._sim_objects) + # 
The SimObject class is the root of the special hierarchy. Most of # the code in this class deals with the configuration hierarchy itself # (parent/child node relationships). class SimObject(object, metaclass=MetaSimObject): # Specify metaclass. Any class inheriting from SimObject will # get this metaclass. - type = 'SimObject' + type = "SimObject" abstract = True cxx_header = "sim/sim_object.hh" - cxx_class = 'gem5::SimObject' - cxx_extra_bases = [ "Drainable", "Serializable", "statistics::Group" ] + cxx_class = "gem5::SimObject" + cxx_extra_bases = ["Drainable", "Serializable", "statistics::Group"] eventq_index = Param.UInt32(Parent.eventq_index, "Event Queue Index") cxx_exports = [ @@ -594,9 +629,7 @@ class SimObject(object, metaclass=MetaSimObject): PyBindMethod("startup"), ] - cxx_param_exports = [ - PyBindProperty("name"), - ] + cxx_param_exports = [PyBindProperty("name")] @cxxMethod def loadState(self, cp): @@ -607,8 +640,7 @@ class SimObject(object, metaclass=MetaSimObject): # generated as command line options for this simobject instance # by tracing all reachable params in the top level instance and # any children it contains. - def enumerateParams(self, flags_dict = {}, - cmd_line_str = "", access_str = ""): + def enumerateParams(self, flags_dict={}, cmd_line_str="", access_str=""): if hasattr(self, "_paramEnumed"): print("Cycle detected enumerating params") else: @@ -621,44 +653,58 @@ class SimObject(object, metaclass=MetaSimObject): if not isSimObjectVector(child): next_cmdline_str = next_cmdline_str + "." next_access_str = next_access_str + "." 
- flags_dict = child.enumerateParams(flags_dict, - next_cmdline_str, - next_access_str) + flags_dict = child.enumerateParams( + flags_dict, next_cmdline_str, next_access_str + ) # Go through the simple params in the simobject in this level # of the simobject hierarchy and save information about the # parameter to be used for generating and processing command line # options to the simulator to set these parameters. - for keys,values in self._params.items(): + for keys, values in self._params.items(): if values.isCmdLineSettable(): - type_str = '' + type_str = "" ex_str = values.example_str() ptype = None if isinstance(values, VectorParamDesc): - type_str = 'Vector_%s' % values.ptype_str + type_str = "Vector_%s" % values.ptype_str ptype = values else: - type_str = '%s' % values.ptype_str + type_str = "%s" % values.ptype_str ptype = values.ptype - if keys in self._hr_values\ - and keys in self._values\ - and not isinstance(self._values[keys], - m5.proxy.BaseProxy): + if ( + keys in self._hr_values + and keys in self._values + and not isinstance( + self._values[keys], m5.proxy.BaseProxy + ) + ): cmd_str = cmd_line_str + keys acc_str = access_str + keys - flags_dict[cmd_str] = ParamInfo(ptype, - self._params[keys].desc, type_str, ex_str, - values.pretty_print(self._hr_values[keys]), - acc_str) - elif not keys in self._hr_values\ - and not keys in self._values: + flags_dict[cmd_str] = ParamInfo( + ptype, + self._params[keys].desc, + type_str, + ex_str, + values.pretty_print(self._hr_values[keys]), + acc_str, + ) + elif ( + not keys in self._hr_values + and not keys in self._values + ): # Empty param cmd_str = cmd_line_str + keys acc_str = access_str + keys - flags_dict[cmd_str] = ParamInfo(ptype, - self._params[keys].desc, - type_str, ex_str, '', acc_str) + flags_dict[cmd_str] = ParamInfo( + ptype, + self._params[keys].desc, + type_str, + ex_str, + "", + acc_str, + ) return flags_dict @@ -670,8 +716,8 @@ class SimObject(object, metaclass=MetaSimObject): # the same 
original object, we end up with the corresponding # cloned references all pointing to the same cloned instance. def __init__(self, **kwargs): - ancestor = kwargs.get('_ancestor') - memo_dict = kwargs.get('_memo') + ancestor = kwargs.get("_ancestor") + memo_dict = kwargs.get("_memo") if memo_dict is None: # prepare to memoize any recursively instantiated objects memo_dict = {} @@ -688,16 +734,18 @@ class SimObject(object, metaclass=MetaSimObject): self._name = None self._ccObject = None # pointer to C++ object self._ccParams = None - self._instantiated = False # really "cloned" - self._init_called = True # Checked so subclasses don't forget __init__ + self._instantiated = False # really "cloned" + self._init_called = True # Checked so subclasses don't forget __init__ # Clone children specified at class level. No need for a # multidict here since we will be cloning everything. # Do children before parameter values so that children that # are also param values get cloned properly. self._children = {} - for key,val in ancestor._children.items(): - self.add_child(key, val(_memo=memo_dict)) + for key, val in ancestor._children.items(): + newval = val(_memo=memo_dict) + if not newval.has_parent(): + self.add_child(key, newval) # Inherit parameter values from class using multidict so # individual value settings can be overridden but we still @@ -705,7 +753,7 @@ class SimObject(object, metaclass=MetaSimObject): self._values = multidict(ancestor._values) self._hr_values = multidict(ancestor._hr_values) # clone SimObject-valued parameters - for key,val in ancestor._values.items(): + for key, val in ancestor._values.items(): val = tryAsSimObjectOrVector(val) if val is not None: self._values[key] = val(_memo=memo_dict) @@ -713,10 +761,10 @@ class SimObject(object, metaclass=MetaSimObject): # clone port references. no need to use a multidict here # since we will be creating new references for all ports. 
self._port_refs = {} - for key,val in ancestor._port_refs.items(): + for key, val in ancestor._port_refs.items(): self._port_refs[key] = val.clone(self, memo_dict) # apply attribute assignments from keyword args, if any - for key,val in kwargs.items(): + for key, val in kwargs.items(): setattr(self, key, val) def _check_init(self): @@ -724,8 +772,10 @@ class SimObject(object, metaclass=MetaSimObject): __init__ """ if not self._init_called: - raise RuntimeError(f"{str(self.__class__)} is missing a call " - "to super().__init__()") + raise RuntimeError( + f"{str(self.__class__)} is missing a call " + "to super().__init__()" + ) # "Clone" the current instance by creating another instance of # this instance's class, but that inherits its parameter values @@ -733,21 +783,23 @@ class SimObject(object, metaclass=MetaSimObject): # "deep copy" recursive clone, check the _memo dict to see if # we've already cloned this instance. def __call__(self, **kwargs): - memo_dict = kwargs.get('_memo') + memo_dict = kwargs.get("_memo") if memo_dict is None: # no memo_dict: must be top-level clone operation. # this is only allowed at the root of a hierarchy if self._parent: - raise RuntimeError("attempt to clone object %s " \ - "not at the root of a tree (parent = %s)" \ - % (self, self._parent)) + raise RuntimeError( + "attempt to clone object %s " + "not at the root of a tree (parent = %s)" + % (self, self._parent) + ) # create a new dict and use that. 
memo_dict = {} - kwargs['_memo'] = memo_dict + kwargs["_memo"] = memo_dict elif self in memo_dict: # clone already done & memoized return memo_dict[self] - return self.__class__(_ancestor = self, **kwargs) + return self.__class__(_ancestor=self, **kwargs) def _get_port_ref(self, attr): # Return reference that can be assigned to another port @@ -785,12 +837,16 @@ class SimObject(object, metaclass=MetaSimObject): if self._ccObject and hasattr(self._ccObject, attr): return getattr(self._ccObject, attr) - err_string = "object '%s' has no attribute '%s'" \ - % (self.__class__.__name__, attr) + err_string = "object '%s' has no attribute '%s'" % ( + self.__class__.__name__, + attr, + ) if not self._ccObject: - err_string += "\n (C++ object is not yet constructed," \ - " so wrapped C++ methods are unavailable.)" + err_string += ( + "\n (C++ object is not yet constructed," + " so wrapped C++ methods are unavailable.)" + ) raise AttributeError(err_string) @@ -798,7 +854,7 @@ class SimObject(object, metaclass=MetaSimObject): # instance of class cls). def __setattr__(self, attr, value): # normal processing for private attributes - if attr.startswith('_'): + if attr.startswith("_"): object.__setattr__(self, attr, value) return @@ -818,9 +874,13 @@ class SimObject(object, metaclass=MetaSimObject): hr_value = value value = param.convert(value) except Exception as e: - msg = "%s\nError setting param %s.%s to %s\n" % \ - (e, self.__class__.__name__, attr, value) - e.args = (msg, ) + msg = "%s\nError setting param %s.%s to %s\n" % ( + e, + self.__class__.__name__, + attr, + value, + ) + e.args = (msg,) raise self._values[attr] = value @@ -835,8 +895,10 @@ class SimObject(object, metaclass=MetaSimObject): # set the human-readable value dict if this is a param # with a literal value and is not being set as an object # or proxy. 
- if not (isSimObjectOrVector(value) or\ - isinstance(value, m5.proxy.BaseProxy)): + if not ( + isSimObjectOrVector(value) + or isinstance(value, m5.proxy.BaseProxy) + ): self._hr_values[attr] = hr_value return @@ -847,9 +909,9 @@ class SimObject(object, metaclass=MetaSimObject): return # no valid assignment... raise exception - raise AttributeError("Class %s has no parameter %s" \ - % (self.__class__.__name__, attr)) - + raise AttributeError( + "Class %s has no parameter %s" % (self.__class__.__name__, attr) + ) # this hack allows tacking a '[0]' onto parameters that may or may # not be vectors, and always getting the first element (e.g. cpus) @@ -899,8 +961,10 @@ class SimObject(object, metaclass=MetaSimObject): def add_child(self, name, child): child = coerceSimObjectOrVector(child) if child.has_parent(): - warn(f"{self}.{name} already has parent not resetting parent.\n" - f"\tNote: {name} is not a parameter of {type(self).__name__}") + warn( + f"{self}.{name} already has parent not resetting parent.\n" + f"\tNote: {name} is not a parameter of {type(self).__name__}" + ) warn(f"(Previously declared as {child._parent}.{name}") return if name in self._children: @@ -919,7 +983,7 @@ class SimObject(object, metaclass=MetaSimObject): # that when we instantiate all the parameter objects we're still # inside the configuration hierarchy. def adoptOrphanParams(self): - for key,val in self._values.items(): + for key, val in self._values.items(): if not isSimObjectVector(val) and isSimObjectSequence(val): # need to convert raw SimObject sequences to # SimObjectVector class so we can call has_parent() @@ -931,18 +995,18 @@ class SimObject(object, metaclass=MetaSimObject): def path(self): if not self._parent: - return '' % self.__class__ + return "" % self.__class__ elif isinstance(self._parent, MetaSimObject): return str(self.__class__) ppath = self._parent.path() - if ppath == 'root': + if ppath == "root": return self._name return ppath + "." 
+ self._name def path_list(self): if self._parent: - return self._parent.path_list() + [ self._name, ] + return self._parent.path_list() + [self._name] else: # Don't include the root node return [] @@ -963,23 +1027,25 @@ class SimObject(object, metaclass=MetaSimObject): found_obj = None for child in self._children.values(): visited = False - if hasattr(child, '_visited'): - visited = getattr(child, '_visited') + if hasattr(child, "_visited"): + visited = getattr(child, "_visited") if isinstance(child, ptype) and not visited: if found_obj != None and child != found_obj: raise AttributeError( - 'parent.any matched more than one: %s %s' % \ - (found_obj.path, child.path)) + "parent.any matched more than one: %s %s" + % (found_obj.path, child.path) + ) found_obj = child # search param space - for pname,pdesc in self._params.items(): + for pname, pdesc in self._params.items(): if issubclass(pdesc.ptype, ptype): match_obj = self._values[pname] if found_obj != None and found_obj != match_obj: raise AttributeError( - 'parent.any matched more than one: %s and %s' % \ - (found_obj.path, match_obj.path)) + "parent.any matched more than one: %s and %s" + % (found_obj.path, match_obj.path) + ) found_obj = match_obj return found_obj, found_obj != None @@ -994,22 +1060,25 @@ class SimObject(object, metaclass=MetaSimObject): children = [child] for child in children: - if isinstance(child, ptype) and not isproxy(child) and \ - not isNullPointer(child): + if ( + isinstance(child, ptype) + and not isproxy(child) + and not isNullPointer(child) + ): all[child] = True if isSimObject(child): # also add results from the child itself child_all, done = child.find_all(ptype) all.update(dict(zip(child_all, [done] * len(child_all)))) # search param space - for pname,pdesc in self._params.items(): + for pname, pdesc in self._params.items(): if issubclass(pdesc.ptype, ptype): match_obj = self._values[pname] if not isproxy(match_obj) and not isNullPointer(match_obj): all[match_obj] = True # Also 
make sure to sort the keys based on the objects' path to # ensure that the order is the same on all hosts - return sorted(all.keys(), key = lambda o: o.path()), True + return sorted(all.keys(), key=lambda o: o.path()), True def unproxy(self, base): return self @@ -1021,8 +1090,10 @@ class SimObject(object, metaclass=MetaSimObject): try: value = value.unproxy(self) except: - print("Error in unproxying param '%s' of %s" % - (param, self.path())) + print( + "Error in unproxying param '%s' of %s" + % (param, self.path()) + ) raise setattr(self, param, value) @@ -1036,40 +1107,46 @@ class SimObject(object, metaclass=MetaSimObject): port.unproxy(self) def print_ini(self, ini_file): - print('[' + self.path() + ']', file=ini_file) # .ini section header + print("[" + self.path() + "]", file=ini_file) # .ini section header instanceDict[self.path()] = self - if hasattr(self, 'type'): - print('type=%s' % self.type, file=ini_file) + if hasattr(self, "type"): + print("type=%s" % self.type, file=ini_file) if len(self._children.keys()): - print('children=%s' % - ' '.join(self._children[n].get_name() - for n in sorted(self._children.keys())), - file=ini_file) + print( + "children=%s" + % " ".join( + self._children[n].get_name() + for n in sorted(self._children.keys()) + ), + file=ini_file, + ) for param in sorted(self._params.keys()): value = self._values.get(param) if value != None: - print('%s=%s' % (param, self._values[param].ini_str()), - file=ini_file) + print( + "%s=%s" % (param, self._values[param].ini_str()), + file=ini_file, + ) for port_name in sorted(self._ports.keys()): port = self._port_refs.get(port_name, None) if port != None: - print('%s=%s' % (port_name, port.ini_str()), file=ini_file) + print("%s=%s" % (port_name, port.ini_str()), file=ini_file) - print(file=ini_file) # blank line between objects + print(file=ini_file) # blank line between objects # generate a tree of dictionaries expressing all the parameters in the # instantiated system for use by scripts that 
want to do power, thermal # visualization, and other similar tasks def get_config_as_dict(self): d = attrdict() - if hasattr(self, 'type'): + if hasattr(self, "type"): d.type = self.type - if hasattr(self, 'cxx_class'): + if hasattr(self, "cxx_class"): d.cxx_class = self.cxx_class # Add the name and path of this object to be able to link to # the stats @@ -1104,7 +1181,7 @@ class SimObject(object, metaclass=MetaSimObject): # Ensure that m5.internal.params is available. import m5.internal.params - cc_params_struct = getattr(m5.internal.params, '%sParams' % self.type) + cc_params_struct = getattr(m5.internal.params, "%sParams" % self.type) cc_params = cc_params_struct() cc_params.name = str(self) @@ -1113,8 +1190,11 @@ class SimObject(object, metaclass=MetaSimObject): for param in param_names: value = self._values.get(param) if value is None: - fatal("%s.%s without default or user set value", - self.path(), param) + fatal( + "%s.%s without default or user set value", + self.path(), + param, + ) value = value.getValue() if isinstance(self._params[param], VectorParamDesc): @@ -1140,8 +1220,11 @@ class SimObject(object, metaclass=MetaSimObject): port_count = len(port) else: port_count = 0 - setattr(cc_params, 'port_' + port_name + '_connection_count', - port_count) + setattr( + cc_params, + "port_" + port_name + "_connection_count", + port_count, + ) self._ccParams = cc_params return self._ccParams @@ -1160,8 +1243,9 @@ class SimObject(object, metaclass=MetaSimObject): params = self.getCCParams() self._ccObject = params.create() elif self._ccObject == -1: - raise RuntimeError("%s: Cycle found in configuration hierarchy." \ - % self.path()) + raise RuntimeError( + "%s: Cycle found in configuration hierarchy." 
% self.path() + ) return self._ccObject def descendants(self): @@ -1178,7 +1262,7 @@ class SimObject(object, metaclass=MetaSimObject): if self.abstract: fatal(f"Cannot instantiate an abstract SimObject ({self.path()})") self.getCCParams() - self.getCCObject() # force creation + self.getCCObject() # force creation def getValue(self): return self.getCCObject() @@ -1198,12 +1282,12 @@ class SimObject(object, metaclass=MetaSimObject): # Default function for generating the device structure. # Can be overloaded by the inheriting class def generateDeviceTree(self, state): - return # return without yielding anything + return # return without yielding anything yield # make this function a (null) generator def recurseDeviceTree(self, state): for child in self._children.values(): - for item in child: # For looping over SimObjectVectors + for item in child: # For looping over SimObjectVectors for dt in item.generateDeviceTree(state): yield dt @@ -1212,8 +1296,7 @@ class SimObject(object, metaclass=MetaSimObject): # in function 'apply_config' def _apply_config_get_dict(self): return { - child_name: SimObjectCliWrapper( - iter(self._children[child_name])) + child_name: SimObjectCliWrapper(iter(self._children[child_name])) for child_name in self._children } @@ -1243,20 +1326,25 @@ class SimObject(object, metaclass=MetaSimObject): d = self._apply_config_get_dict() return eval(simobj_path, d) + # Function to provide to C++ so it can look up instances based on paths def resolveSimObject(name): obj = instanceDict[name] return obj.getCCObject() + def isSimObject(value): return isinstance(value, SimObject) + def isSimObjectClass(value): return issubclass(value, SimObject) + def isSimObjectVector(value): return isinstance(value, SimObjectVector) + def isSimObjectSequence(value): if not isinstance(value, (list, tuple)) or len(value) == 0: return False @@ -1267,16 +1355,21 @@ def isSimObjectSequence(value): return True + def isSimObjectOrSequence(value): return isSimObject(value) or 
isSimObjectSequence(value) + def isRoot(obj): from m5.objects import Root + return obj and obj is Root.getInstance() + def isSimObjectOrVector(value): return isSimObject(value) or isSimObjectVector(value) + def tryAsSimObjectOrVector(value): if isSimObjectOrVector(value): return value @@ -1284,15 +1377,18 @@ def tryAsSimObjectOrVector(value): return SimObjectVector(value) return None + def coerceSimObjectOrVector(value): value = tryAsSimObjectOrVector(value) if value is None: raise TypeError("SimObject or SimObjectVector expected") return value + baseClasses = allClasses.copy() baseInstances = instanceDict.copy() + def clear(): global allClasses, instanceDict, noCxxHeader @@ -1300,12 +1396,8 @@ def clear(): instanceDict = baseInstances.copy() noCxxHeader = False + # __all__ defines the list of symbols that get exported when # 'from config import *' is invoked. Try to keep this reasonably # short to avoid polluting other namespaces. -__all__ = [ - 'SimObject', - 'cxxMethod', - 'PyBindMethod', - 'PyBindProperty', -] +__all__ = ["SimObject", "cxxMethod", "PyBindMethod", "PyBindProperty"] diff --git a/src/python/m5/__init__.py b/src/python/m5/__init__.py index 254d9a6822..f029adffdc 100644 --- a/src/python/m5/__init__.py +++ b/src/python/m5/__init__.py @@ -46,7 +46,8 @@ if in_gem5: from . import objects from . import params from . import stats - if defines.buildEnv['USE_SYSTEMC']: + + if defines.buildEnv["USE_SYSTEMC"]: from . import systemc from . import tlm from . 
import util @@ -54,4 +55,3 @@ if in_gem5: from .event import * from .main import main from .simulate import * - diff --git a/src/python/m5/debug.py b/src/python/m5/debug.py index 787a39ece7..70af2e0f3a 100644 --- a/src/python/m5/debug.py +++ b/src/python/m5/debug.py @@ -28,33 +28,40 @@ from collections.abc import Mapping import _m5.debug from _m5.debug import SimpleFlag, CompoundFlag -from _m5.debug import schedBreak, setRemoteGDBPort +from _m5.debug import schedBreak from m5.util import printList + def help(): sorted_flags = sorted(flags.items(), key=lambda kv: kv[0]) print("Base Flags:") - for name, flag in filter(lambda kv: isinstance(kv[1], SimpleFlag) - and not kv[1].isFormat, sorted_flags): + for name, flag in filter( + lambda kv: isinstance(kv[1], SimpleFlag) and not kv[1].isFormat, + sorted_flags, + ): print(" %s: %s" % (name, flag.desc)) print() print("Compound Flags:") - for name, flag in filter(lambda kv: isinstance(kv[1], CompoundFlag), - sorted_flags): + for name, flag in filter( + lambda kv: isinstance(kv[1], CompoundFlag), sorted_flags + ): print(" %s: %s" % (name, flag.desc)) # The list of kids for flag "All" is too long, so it is not printed if name != "All": - printList([ c.name for c in flag.kids() ], indent=8) + printList([c.name for c in flag.kids()], indent=8) else: print(" All Base Flags") print() print("Formatting Flags:") - for name, flag in filter(lambda kv: isinstance(kv[1], SimpleFlag) - and kv[1].isFormat, sorted_flags): + for name, flag in filter( + lambda kv: isinstance(kv[1], SimpleFlag) and kv[1].isFormat, + sorted_flags, + ): print(" %s: %s" % (name, flag.desc)) print() + class AllFlags(Mapping): def __init__(self): self._version = -1 @@ -98,4 +105,5 @@ class AllFlags(Mapping): self._update() return self._dict.items() + flags = AllFlags() diff --git a/src/python/m5/event.py b/src/python/m5/event.py index 67c9fc627c..707d65d63d 100644 --- a/src/python/m5/event.py +++ b/src/python/m5/event.py @@ -47,6 +47,7 @@ from _m5.event import 
getEventQueue, setEventQueue mainq = None + class EventWrapper(Event): """Helper class to wrap callable objects in an Event base class""" @@ -54,8 +55,9 @@ class EventWrapper(Event): super().__init__(**kwargs) if not callable(func): - raise RuntimeError("Can't wrap '%s', object is not callable" % \ - str(func)) + raise RuntimeError( + "Can't wrap '%s', object is not callable" % str(func) + ) self._func = func @@ -63,7 +65,7 @@ class EventWrapper(Event): self._func() def __str__(self): - return "EventWrapper(%s)" % (str(self._func), ) + return "EventWrapper(%s)" % (str(self._func),) class ProgressEvent(Event): @@ -74,7 +76,7 @@ class ProgressEvent(Event): self.eventq.schedule(self, m5.curTick() + self.period) def __call__(self): - print("Progress! Time now %fs" % (m5.curTick()/1e12)) + print("Progress! Time now %fs" % (m5.curTick() / 1e12)) self.eventq.schedule(self, m5.curTick() + self.period) @@ -83,5 +85,12 @@ def create(func, priority=Event.Default_Pri): return EventWrapper(func, priority=priority) -__all__ = [ 'Event', 'EventWrapper', 'ProgressEvent', 'SimExit', - 'mainq', 'create' ] + +__all__ = [ + "Event", + "EventWrapper", + "ProgressEvent", + "SimExit", + "mainq", + "create", +] diff --git a/src/python/m5/ext/__init__.py b/src/python/m5/ext/__init__.py index cdd1f4277f..0ce290d862 100644 --- a/src/python/m5/ext/__init__.py +++ b/src/python/m5/ext/__init__.py @@ -34,4 +34,3 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- diff --git a/src/python/m5/ext/pyfdt/pyfdt.py b/src/python/m5/ext/pyfdt/pyfdt.py index 3c78b036c4..191a57740d 100644 --- a/src/python/m5/ext/pyfdt/pyfdt.py +++ b/src/python/m5/ext/pyfdt/pyfdt.py @@ -28,14 +28,14 @@ import json from copy import deepcopy, copy from struct import Struct, unpack, pack -FDT_MAGIC = 0xd00dfeed +FDT_MAGIC = 0xD00DFEED FDT_BEGIN_NODE = 0x1 FDT_END_NODE = 0x2 FDT_PROP = 0x3 FDT_NOP = 0x4 FDT_END = 0x9 -INDENT = ' ' * 4 +INDENT = " " * 4 FDT_MAX_VERSION = 17 @@ -46,8 +46,7 @@ class FdtProperty(object): @staticmethod def __validate_dt_name(name): """Checks the name validity""" - return not any([True for char in name - if char not in string.printable]) + return not any([True for char in name if char not in string.printable]) def __init__(self, name): """Init with name""" @@ -65,26 +64,25 @@ class FdtProperty(object): def dts_represent(self, depth=0): """Get dts string representation""" - return INDENT*depth + self.name + ';' + return INDENT * depth + self.name + ";" def dtb_represent(self, string_store, pos=0, version=17): """Get blob representation""" # print "%x:%s" % (pos, self) - strpos = string_store.find(self.name+'\0') + strpos = string_store.find(self.name + "\0") if strpos < 0: strpos = len(string_store) - string_store += self.name+'\0' + string_store += self.name + "\0" pos += 12 - return (pack('>III', FDT_PROP, 0, strpos), - string_store, pos) + return (pack(">III", FDT_PROP, 0, strpos), string_store, pos) def json_represent(self, depth=0): """Ouput JSON""" - return '%s: null' % json.dumps(self.name) + return "%s: null" % json.dumps(self.name) def to_raw(self): """Return RAW value representation""" - return '' + return "" def __getitem__(self, value): """Returns No Items""" @@ -117,27 +115,30 @@ class FdtProperty(object): if not len(value): return None - #Needed for python 3 support: If a bytes object is passed, - #decode it with the ascii codec. If the decoding fails, assume - #it was not a string object. 
+ # Needed for python 3 support: If a bytes object is passed, + # decode it with the ascii codec. If the decoding fails, assume + # it was not a string object. try: - value = value.decode('ascii') + value = value.decode("ascii") except ValueError: return None - #Test both against string 0 and int 0 because of + # Test both against string 0 and int 0 because of # python2/3 compatibility - if value[-1] != '\0': + if value[-1] != "\0": return None while pos < end: posi = pos - while pos < end and value[pos] != '\0' \ - and value[pos] in string.printable \ - and value[pos] not in ('\r', '\n'): + while ( + pos < end + and value[pos] != "\0" + and value[pos] in string.printable + and value[pos] not in ("\r", "\n") + ): pos += 1 - if value[pos] != '\0' or pos == posi: + if value[pos] != "\0" or pos == posi: return None pos += 1 @@ -162,8 +163,7 @@ class FdtPropertyStrings(FdtProperty): @classmethod def __extract_prop_strings(cls, value): """Extract strings from raw_value""" - return [st for st in \ - value.decode('ascii').split('\0') if len(st)] + return [st for st in value.decode("ascii").split("\0") if len(st)] def __init__(self, name, strings): """Init with strings""" @@ -173,9 +173,13 @@ class FdtPropertyStrings(FdtProperty): for stri in strings: if len(stri) == 0: raise Exception("Invalid strings") - if any([True for char in stri - if char not in string.printable - or char in ('\r', '\n')]): + if any( + [ + True + for char in stri + if char not in string.printable or char in ("\r", "\n") + ] + ): raise Exception("Invalid chars in strings") self.strings = strings @@ -186,38 +190,43 @@ class FdtPropertyStrings(FdtProperty): def dts_represent(self, depth=0): """Get dts string representation""" - return INDENT*depth + self.name + ' = "' + \ - '", "'.join(self.strings) + '";' + return ( + INDENT * depth + + self.name + + ' = "' + + '", "'.join(self.strings) + + '";' + ) def dtb_represent(self, string_store, pos=0, version=17): """Get blob representation""" # print "%x:%s" 
% (pos, self) - blob = pack('') + blob = pack("") for chars in self.strings: - blob += chars.encode('ascii') + pack('b', 0) + blob += chars.encode("ascii") + pack("b", 0) blob_len = len(blob) - if version < 16 and (pos+12) % 8 != 0: - blob = pack('b', 0) * (8-((pos+12) % 8)) + blob + if version < 16 and (pos + 12) % 8 != 0: + blob = pack("b", 0) * (8 - ((pos + 12) % 8)) + blob if blob_len % 4: - blob += pack('b', 0) * (4-(blob_len % 4)) - strpos = string_store.find(self.name+'\0') + blob += pack("b", 0) * (4 - (blob_len % 4)) + strpos = string_store.find(self.name + "\0") if strpos < 0: strpos = len(string_store) - string_store += self.name+'\0' - blob = pack('>III', FDT_PROP, blob_len, strpos) + blob + string_store += self.name + "\0" + blob = pack(">III", FDT_PROP, blob_len, strpos) + blob pos += len(blob) return (blob, string_store, pos) def json_represent(self, depth=0): """Ouput JSON""" result = '%s: ["strings", ' % json.dumps(self.name) - result += ', '.join([json.dumps(stri) for stri in self.strings]) - result += ']' + result += ", ".join([json.dumps(stri) for stri in self.strings]) + result += "]" return result def to_raw(self): """Return RAW value representation""" - return ''.join([chars+'\0' for chars in self.strings]) + return "".join([chars + "\0" for chars in self.strings]) def __str__(self): """String representation""" @@ -244,6 +253,7 @@ class FdtPropertyStrings(FdtProperty): return False return True + class FdtPropertyWords(FdtProperty): """Property with words as value""" @@ -252,8 +262,13 @@ class FdtPropertyWords(FdtProperty): FdtProperty.__init__(self, name) for word in words: if not 0 <= word <= 4294967295: - raise Exception(("Invalid word value %d, requires " + - "0 <= number <= 4294967295") % word) + raise Exception( + ( + "Invalid word value %d, requires " + + "0 <= number <= 4294967295" + ) + % word + ) if not len(words): raise Exception("Invalid Words") self.words = words @@ -262,26 +277,34 @@ class FdtPropertyWords(FdtProperty): def 
init_raw(cls, name, raw_value): """Init from raw""" if len(raw_value) % 4 == 0: - words = [unpack(">I", raw_value[i:i+4])[0] - for i in range(0, len(raw_value), 4)] + words = [ + unpack(">I", raw_value[i : i + 4])[0] + for i in range(0, len(raw_value), 4) + ] return cls(name, words) else: raise Exception("Invalid raw Words") def dts_represent(self, depth=0): """Get dts string representation""" - return INDENT*depth + self.name + ' = <' + \ - ' '.join(["0x%08x" % word for word in self.words]) + ">;" + return ( + INDENT * depth + + self.name + + " = <" + + " ".join(["0x%08x" % word for word in self.words]) + + ">;" + ) def dtb_represent(self, string_store, pos=0, version=17): """Get blob representation""" # # print "%x:%s" % (pos, self) - strpos = string_store.find(self.name+'\0') + strpos = string_store.find(self.name + "\0") if strpos < 0: strpos = len(string_store) - string_store += self.name+'\0' - blob = pack('>III', FDT_PROP, len(self.words)*4, strpos) + \ - pack('').join([pack('>I', word) for word in self.words]) + string_store += self.name + "\0" + blob = pack(">III", FDT_PROP, len(self.words) * 4, strpos) + pack( + "" + ).join([pack(">I", word) for word in self.words]) pos += len(blob) return (blob, string_store, pos) @@ -294,7 +317,7 @@ class FdtPropertyWords(FdtProperty): def to_raw(self): """Return RAW value representation""" - return ''.join([pack('>I', word) for word in self.words]) + return "".join([pack(">I", word) for word in self.words]) def __str__(self): """String representation""" @@ -330,8 +353,13 @@ class FdtPropertyBytes(FdtProperty): FdtProperty.__init__(self, name) for byte in bytez: if not -128 <= byte <= 127: - raise Exception(("Invalid value for byte %d, " + - "requires -128 <= number <= 127") % byte) + raise Exception( + ( + "Invalid value for byte %d, " + + "requires -128 <= number <= 127" + ) + % byte + ) if not bytez: raise Exception("Invalid Bytes") self.bytes = bytez @@ -339,39 +367,44 @@ class FdtPropertyBytes(FdtProperty): 
@classmethod def init_raw(cls, name, raw_value): """Init from raw""" - return cls(name, unpack('b' * len(raw_value), raw_value)) + return cls(name, unpack("b" * len(raw_value), raw_value)) def dts_represent(self, depth=0): """Get dts string representation""" - return INDENT*depth + self.name + ' = [' + \ - ' '.join(["%02x" % (byte & int('ffffffff',16)) - for byte in self.bytes]) + "];" + return ( + INDENT * depth + + self.name + + " = [" + + " ".join( + ["%02x" % (byte & int("ffffffff", 16)) for byte in self.bytes] + ) + + "];" + ) def dtb_represent(self, string_store, pos=0, version=17): """Get blob representation""" # print "%x:%s" % (pos, self) - strpos = string_store.find(self.name+'\0') + strpos = string_store.find(self.name + "\0") if strpos < 0: strpos = len(string_store) - string_store += self.name+'\0' - blob = pack('>III', FDT_PROP, len(self.bytes), strpos) - blob += pack('').join([pack('>b', byte) for byte in self.bytes]) + string_store += self.name + "\0" + blob = pack(">III", FDT_PROP, len(self.bytes), strpos) + blob += pack("").join([pack(">b", byte) for byte in self.bytes]) if len(blob) % 4: - blob += pack('b', 0) * (4-(len(blob) % 4)) + blob += pack("b", 0) * (4 - (len(blob) % 4)) pos += len(blob) return (blob, string_store, pos) def json_represent(self, depth=0): """Ouput JSON""" result = '%s: ["bytes", "' % json.dumps(self.name) - result += '", "'.join(["%02x" % byte - for byte in self.bytes]) + result += '", "'.join(["%02x" % byte for byte in self.bytes]) result += '"]' return result def to_raw(self): """Return RAW value representation""" - return ''.join([pack('>b', byte) for byte in self.bytes]) + return "".join([pack(">b", byte) for byte in self.bytes]) def __str__(self): """String representation""" @@ -411,17 +444,17 @@ class FdtNop(object): # pylint: disable-msg=R0903 def __str__(self): """String representation""" - return '' + return "" def dts_represent(self, depth=0): # pylint: disable-msg=R0201 """Get dts string representation""" - 
return INDENT*depth+'// [NOP]' + return INDENT * depth + "// [NOP]" def dtb_represent(self, string_store, pos=0, version=17): """Get blob representation""" # print "%x:%s" % (pos, self) pos += 4 - return (pack('>I', FDT_NOP), string_store, pos) + return (pack(">I", FDT_NOP), string_store, pos) class FdtNode(object): @@ -430,8 +463,7 @@ class FdtNode(object): @staticmethod def __validate_dt_name(name): """Checks the name validity""" - return not any([True for char in name - if char not in string.printable]) + return not any([True for char in name if char not in string.printable]) def __init__(self, name): """Init node with name""" @@ -448,9 +480,8 @@ class FdtNode(object): def __check_name_duplicate(self, name): """Checks if name is not in a subnode""" for data in self.subdata: - if not isinstance(data, FdtNop) \ - and data.get_name() == name: - return True + if not isinstance(data, FdtNop) and data.get_name() == name: + return True return False def add_subnode(self, node): @@ -463,8 +494,7 @@ class FdtNode(object): def set_parent_node(self, node): """Set parent node, None and FdtNode accepted""" - if node is not None and \ - not isinstance(node, FdtNode): + if node is not None and not isinstance(node, FdtNode): raise Exception("Invalid object type") self.parent = node @@ -478,12 +508,19 @@ class FdtNode(object): def dts_represent(self, depth=0): """Get dts string representation""" - result = ('\n').join([sub.dts_represent(depth+1) - for sub in self.subdata]) + result = ("\n").join( + [sub.dts_represent(depth + 1) for sub in self.subdata] + ) if len(result) > 0: - result += '\n' - return INDENT*depth + self.name + ' {\n' + \ - result + INDENT*depth + "};" + result += "\n" + return ( + INDENT * depth + + self.name + + " {\n" + + result + + INDENT * depth + + "};" + ) def dtb_represent(self, strings_store, pos=0, version=17): """Get blob representation @@ -492,34 +529,42 @@ class FdtNode(object): """ # print "%x:%s" % (pos, self) strings = strings_store - if 
self.get_name() == '/': - blob = pack('>II', FDT_BEGIN_NODE, 0) + if self.get_name() == "/": + blob = pack(">II", FDT_BEGIN_NODE, 0) else: - blob = pack('>I', FDT_BEGIN_NODE) - blob += self.get_name().encode('ascii') + pack('b', 0) + blob = pack(">I", FDT_BEGIN_NODE) + blob += self.get_name().encode("ascii") + pack("b", 0) if len(blob) % 4: - blob += pack('b', 0) * (4-(len(blob) % 4)) + blob += pack("b", 0) * (4 - (len(blob) % 4)) pos += len(blob) for sub in self.subdata: (data, strings, pos) = sub.dtb_represent(strings, pos, version) blob += data pos += 4 - blob += pack('>I', FDT_END_NODE) + blob += pack(">I", FDT_END_NODE) return (blob, strings, pos) def json_represent(self, depth=0): """Get dts string representation""" - result = (',\n'+ \ - INDENT*(depth+1)).join([sub.json_represent(depth+1) - for sub in self.subdata - if not isinstance(sub, FdtNop)]) + result = (",\n" + INDENT * (depth + 1)).join( + [ + sub.json_represent(depth + 1) + for sub in self.subdata + if not isinstance(sub, FdtNop) + ] + ) if len(result) > 0: - result = INDENT + result + '\n'+INDENT*depth - if self.get_name() == '/': - return "{\n" + INDENT*(depth) + result + "}" + result = INDENT + result + "\n" + INDENT * depth + if self.get_name() == "/": + return "{\n" + INDENT * (depth) + result + "}" else: - return json.dumps(self.name) + ': {\n' + \ - INDENT*(depth) + result + "}" + return ( + json.dumps(self.name) + + ": {\n" + + INDENT * (depth) + + result + + "}" + ) def __getitem__(self, index): """Get subnodes, returns either a Node, a Property or a Nop""" @@ -529,10 +574,12 @@ class FdtNode(object): """Set node at index, replacing previous subnode, must not be a duplicate name """ - if self.subdata[index].get_name() != subnode.get_name() and \ - self.__check_name_duplicate(subnode.get_name()): - raise Exception("%s : %s subnode already exists" % \ - (self, subnode)) + if self.subdata[ + index + ].get_name() != subnode.get_name() and self.__check_name_duplicate( + subnode.get_name() + ): + 
raise Exception("%s : %s subnode already exists" % (self, subnode)) if not isinstance(subnode, (FdtNode, FdtProperty, FdtNop)): raise Exception("Invalid object type") self.subdata[index] = subnode @@ -559,14 +606,27 @@ class FdtNode(object): raise Exception("Invalid object type") if self.name != node.get_name(): return False - curnames = set([subnode.get_name() for subnode in self.subdata - if not isinstance(subnode, FdtNop)]) - cmpnames = set([subnode.get_name() for subnode in node - if not isinstance(subnode, FdtNop)]) + curnames = set( + [ + subnode.get_name() + for subnode in self.subdata + if not isinstance(subnode, FdtNop) + ] + ) + cmpnames = set( + [ + subnode.get_name() + for subnode in node + if not isinstance(subnode, FdtNop) + ] + ) if curnames != cmpnames: return False - for subnode in [subnode for subnode in self.subdata - if not isinstance(subnode, FdtNop)]: + for subnode in [ + subnode + for subnode in self.subdata + if not isinstance(subnode, FdtNop) + ]: index = node.index(subnode.get_name()) if subnode != node[index]: return False @@ -575,8 +635,7 @@ class FdtNode(object): def append(self, subnode): """Append subnode, same as add_subnode""" if self.__check_name_duplicate(subnode.get_name()): - raise Exception("%s : %s subnode already exists" % \ - (self, subnode)) + raise Exception("%s : %s subnode already exists" % (self, subnode)) if not isinstance(subnode, (FdtNode, FdtProperty, FdtNop)): raise Exception("Invalid object type") self.subdata.append(subnode) @@ -588,8 +647,7 @@ class FdtNode(object): def insert(self, index, subnode): """Insert subnode before index, must not be a duplicate name""" if self.__check_name_duplicate(subnode.get_name()): - raise Exception("%s : %s subnode already exists" % \ - (self, subnode)) + raise Exception("%s : %s subnode already exists" % (self, subnode)) if not isinstance(subnode, (FdtNode, FdtProperty, FdtNop)): raise Exception("Invalid object type") self.subdata.insert(index, subnode) @@ -597,8 +655,10 @@ 
class FdtNode(object): def _find(self, name): """Find name in subnodes""" for i in range(0, len(self.subdata)): - if not isinstance(self.subdata[i], FdtNop) and \ - name == self.subdata[i].get_name(): + if ( + not isinstance(self.subdata[i], FdtNop) + and name == self.subdata[i].get_name() + ): return i return None @@ -626,8 +686,9 @@ class FdtNode(object): """ if not isinstance(node, FdtNode): raise Exception("Can only merge with a FdtNode") - for subnode in [obj for obj in node - if isinstance(obj, (FdtNode, FdtProperty))]: + for subnode in [ + obj for obj in node if isinstance(obj, (FdtNode, FdtProperty)) + ]: index = self._find(subnode.get_name()) if index is None: dup = deepcopy(subnode) @@ -651,11 +712,13 @@ class FdtNode(object): while True: for index in range(start, len(node)): if isinstance(node[index], (FdtNode, FdtProperty)): - yield ('/' + '/'.join(curpath+[node[index].get_name()]), - node[index]) + yield ( + "/" + "/".join(curpath + [node[index].get_name()]), + node[index], + ) if isinstance(node[index], FdtNode): if len(node[index]): - hist.append((node, index+1)) + hist.append((node, index + 1)) curpath.append(node[index].get_name()) node = node[index] start = 0 @@ -674,16 +737,18 @@ class Fdt(object): def __init__(self, version=17, last_comp_version=16, boot_cpuid_phys=0): """Init FDT object with version and boot values""" - self.header = {'magic': FDT_MAGIC, - 'totalsize': 0, - 'off_dt_struct': 0, - 'off_dt_strings': 0, - 'off_mem_rsvmap': 0, - 'version': version, - 'last_comp_version': last_comp_version, - 'boot_cpuid_phys': boot_cpuid_phys, - 'size_dt_strings': 0, - 'size_dt_struct': 0} + self.header = { + "magic": FDT_MAGIC, + "totalsize": 0, + "off_dt_struct": 0, + "off_dt_strings": 0, + "off_mem_rsvmap": 0, + "version": version, + "last_comp_version": last_comp_version, + "boot_cpuid_phys": boot_cpuid_phys, + "size_dt_strings": 0, + "size_dt_struct": 0, + } self.rootnode = None self.prenops = None self.postnops = None @@ -707,90 +772,105 @@ 
class Fdt(object): def to_dts(self): """Export to DTS representation in string format""" result = "/dts-v1/;\n" - result += "// version:\t\t%d\n" % self.header['version'] - result += "// last_comp_version:\t%d\n" % \ - self.header['last_comp_version'] - if self.header['version'] >= 2: - result += "// boot_cpuid_phys:\t0x%x\n" % \ - self.header['boot_cpuid_phys'] - result += '\n' + result += "// version:\t\t%d\n" % self.header["version"] + result += ( + "// last_comp_version:\t%d\n" % self.header["last_comp_version"] + ) + if self.header["version"] >= 2: + result += ( + "// boot_cpuid_phys:\t0x%x\n" % self.header["boot_cpuid_phys"] + ) + result += "\n" if self.reserve_entries is not None: for entry in self.reserve_entries: result += "/memreserve/ " - if entry['address']: - result += "%#x " % entry['address'] + if entry["address"]: + result += "%#x " % entry["address"] else: result += "0 " - if entry['size']: - result += "%#x" % entry['size'] + if entry["size"]: + result += "%#x" % entry["size"] else: result += "0" result += ";\n" if self.prenops: - result += '\n'.join([nop.dts_represent() for nop in self.prenops]) - result += '\n' + result += "\n".join([nop.dts_represent() for nop in self.prenops]) + result += "\n" if self.rootnode is not None: result += self.rootnode.dts_represent() if self.postnops: - result += '\n' - result += '\n'.join([nop.dts_represent() for nop in self.postnops]) + result += "\n" + result += "\n".join([nop.dts_represent() for nop in self.postnops]) return result def to_dtb(self): """Export to Blob format""" if self.rootnode is None: return None - blob_reserve_entries = pack('') + blob_reserve_entries = pack("") if self.reserve_entries is not None: for entry in self.reserve_entries: - blob_reserve_entries += pack('>QQ', - entry['address'], - entry['size']) - blob_reserve_entries += pack('>QQ', 0, 0) + blob_reserve_entries += pack( + ">QQ", entry["address"], entry["size"] + ) + blob_reserve_entries += pack(">QQ", 0, 0) header_size = 7 * 4 - if 
self.header['version'] >= 2: + if self.header["version"] >= 2: header_size += 4 - if self.header['version'] >= 3: + if self.header["version"] >= 3: header_size += 4 - if self.header['version'] >= 17: + if self.header["version"] >= 17: header_size += 4 - header_adjust = pack('') + header_adjust = pack("") if header_size % 8 != 0: - header_adjust = pack('b', 0) * (8 - (header_size % 8)) + header_adjust = pack("b", 0) * (8 - (header_size % 8)) header_size += len(header_adjust) dt_start = header_size + len(blob_reserve_entries) # print "dt_start %d" % dt_start - (blob_dt, blob_strings, dt_pos) = \ - self.rootnode.dtb_represent('', dt_start, self.header['version']) + (blob_dt, blob_strings, dt_pos) = self.rootnode.dtb_represent( + "", dt_start, self.header["version"] + ) if self.prenops is not None: - blob_dt = pack('').join([nop.dtb_represent('')[0] - for nop in self.prenops])\ - + blob_dt + blob_dt = ( + pack("").join( + [nop.dtb_represent("")[0] for nop in self.prenops] + ) + + blob_dt + ) if self.postnops is not None: - blob_dt += pack('').join([nop.dtb_represent('')[0] - for nop in self.postnops]) - blob_dt += pack('>I', FDT_END) - self.header['size_dt_strings'] = len(blob_strings) - self.header['size_dt_struct'] = len(blob_dt) - self.header['off_mem_rsvmap'] = header_size - self.header['off_dt_struct'] = dt_start - self.header['off_dt_strings'] = dt_start + len(blob_dt) - self.header['totalsize'] = dt_start + len(blob_dt) + len(blob_strings) - blob_header = pack('>IIIIIII', self.header['magic'], - self.header['totalsize'], - self.header['off_dt_struct'], - self.header['off_dt_strings'], - self.header['off_mem_rsvmap'], - self.header['version'], - self.header['last_comp_version']) - if self.header['version'] >= 2: - blob_header += pack('>I', self.header['boot_cpuid_phys']) - if self.header['version'] >= 3: - blob_header += pack('>I', self.header['size_dt_strings']) - if self.header['version'] >= 17: - blob_header += pack('>I', self.header['size_dt_struct']) - 
return blob_header + header_adjust + blob_reserve_entries + \ - blob_dt + blob_strings.encode('ascii') + blob_dt += pack("").join( + [nop.dtb_represent("")[0] for nop in self.postnops] + ) + blob_dt += pack(">I", FDT_END) + self.header["size_dt_strings"] = len(blob_strings) + self.header["size_dt_struct"] = len(blob_dt) + self.header["off_mem_rsvmap"] = header_size + self.header["off_dt_struct"] = dt_start + self.header["off_dt_strings"] = dt_start + len(blob_dt) + self.header["totalsize"] = dt_start + len(blob_dt) + len(blob_strings) + blob_header = pack( + ">IIIIIII", + self.header["magic"], + self.header["totalsize"], + self.header["off_dt_struct"], + self.header["off_dt_strings"], + self.header["off_mem_rsvmap"], + self.header["version"], + self.header["last_comp_version"], + ) + if self.header["version"] >= 2: + blob_header += pack(">I", self.header["boot_cpuid_phys"]) + if self.header["version"] >= 3: + blob_header += pack(">I", self.header["size_dt_strings"]) + if self.header["version"] >= 17: + blob_header += pack(">I", self.header["size_dt_struct"]) + return ( + blob_header + + header_adjust + + blob_reserve_entries + + blob_dt + + blob_strings.encode("ascii") + ) def to_json(self): """Ouput JSON""" @@ -803,14 +883,14 @@ class Fdt(object): a FdtProperty or None""" if self.rootnode is None: return None - if not path.startswith('/'): + if not path.startswith("/"): return None - if len(path) > 1 and path.endswith('/'): + if len(path) > 1 and path.endswith("/"): path = path[:-1] - if path == '/': + if path == "/": return self.rootnode curnode = self.rootnode - for subpath in path[1:].split('/'): + for subpath in path[1:].split("/"): found = None if not isinstance(curnode, FdtNode): return None @@ -823,6 +903,7 @@ class Fdt(object): curnode = found return curnode + def _add_json_to_fdtnode(node, subjson): """Populate FdtNode with JSON dict items""" for (key, value) in subjson.items(): @@ -841,8 +922,7 @@ def _add_json_to_fdtnode(node, subjson): bytez = 
[int(byte, 16) for byte in value[1:]] node.append(FdtPropertyBytes(key, bytez)) elif value[0] == "strings": - node.append(FdtPropertyStrings(key, \ - [s for s in value[1:]])) + node.append(FdtPropertyStrings(key, [s for s in value[1:]])) else: raise Exception("Invalid list for %s" % key) elif value is None: @@ -850,6 +930,7 @@ def _add_json_to_fdtnode(node, subjson): else: raise Exception("Invalid value for %s" % key) + def FdtJsonParse(buf): """Import FDT from JSON representation, see JSONDeviceTree.md for structure and encoding @@ -857,7 +938,7 @@ def FdtJsonParse(buf): """ tree = json.loads(buf) - root = FdtNode('/') + root = FdtNode("/") _add_json_to_fdtnode(root, tree) @@ -865,6 +946,7 @@ def FdtJsonParse(buf): fdt.add_rootnode(root) return fdt + def FdtFsParse(path): """Parse device tree filesystem and return a Fdt instance Should be /proc/device-tree on a device, or the fusemount.py @@ -872,7 +954,7 @@ def FdtFsParse(path): """ root = FdtNode("/") - if path.endswith('/'): + if path.endswith("/"): path = path[:-1] nodes = {path: root} @@ -882,7 +964,7 @@ def FdtFsParse(path): raise Exception("os.walk error") cur = nodes[subpath] for f in files: - with open(subpath+'/'+f, 'rb') as content_file: + with open(subpath + "/" + f, "rb") as content_file: content = content_file.read() prop = FdtProperty.new_raw_property(f, content) cur.add_subnode(prop) @@ -890,30 +972,39 @@ def FdtFsParse(path): subnode = FdtNode(subdir) cur.add_subnode(subnode) subnode.set_parent_node(cur) - nodes[subpath+'/'+subdir] = subnode + nodes[subpath + "/" + subdir] = subnode fdt = Fdt() fdt.add_rootnode(root) return fdt + class FdtBlobParse(object): # pylint: disable-msg=R0903 """Parse from file input""" __fdt_header_format = ">IIIIIII" - __fdt_header_names = ('magic', 'totalsize', 'off_dt_struct', - 'off_dt_strings', 'off_mem_rsvmap', 'version', - 'last_comp_version') + __fdt_header_names = ( + "magic", + "totalsize", + "off_dt_struct", + "off_dt_strings", + "off_mem_rsvmap", + "version", 
+ "last_comp_version", + ) __fdt_reserve_entry_format = ">QQ" - __fdt_reserve_entry_names = ('address', 'size') + __fdt_reserve_entry_names = ("address", "size") __fdt_dt_cell_format = ">I" __fdt_dt_prop_format = ">II" - __fdt_dt_tag_name = {FDT_BEGIN_NODE: 'node_begin', - FDT_END_NODE: 'node_end', - FDT_PROP: 'prop', - FDT_NOP: 'nop', - FDT_END: 'end'} + __fdt_dt_tag_name = { + FDT_BEGIN_NODE: "node_begin", + FDT_END_NODE: "node_end", + FDT_PROP: "prop", + FDT_NOP: "nop", + FDT_END: "end", + } def __extract_fdt_header(self): """Extract DTB header""" @@ -921,54 +1012,55 @@ class FdtBlobParse(object): # pylint: disable-msg=R0903 header_entry = Struct(">I") data = self.infile.read(header.size) result = dict(zip(self.__fdt_header_names, header.unpack_from(data))) - if result['version'] >= 2: + if result["version"] >= 2: data = self.infile.read(header_entry.size) - result['boot_cpuid_phys'] = header_entry.unpack_from(data)[0] - if result['version'] >= 3: + result["boot_cpuid_phys"] = header_entry.unpack_from(data)[0] + if result["version"] >= 3: data = self.infile.read(header_entry.size) - result['size_dt_strings'] = header_entry.unpack_from(data)[0] - if result['version'] >= 17: + result["size_dt_strings"] = header_entry.unpack_from(data)[0] + if result["version"] >= 17: data = self.infile.read(header_entry.size) - result['size_dt_struct'] = header_entry.unpack_from(data)[0] + result["size_dt_struct"] = header_entry.unpack_from(data)[0] return result def __extract_fdt_reserve_entries(self): """Extract reserved memory entries""" header = Struct(self.__fdt_reserve_entry_format) entries = [] - self.infile.seek(self.fdt_header['off_mem_rsvmap']) + self.infile.seek(self.fdt_header["off_mem_rsvmap"]) while True: data = self.infile.read(header.size) - result = dict(zip(self.__fdt_reserve_entry_names, - header.unpack_from(data))) - if result['address'] == 0 and result['size'] == 0: + result = dict( + zip(self.__fdt_reserve_entry_names, header.unpack_from(data)) + ) + if 
result["address"] == 0 and result["size"] == 0: return entries entries.append(result) def __extract_fdt_nodename(self): """Extract node name""" - data = '' + data = "" pos = self.infile.tell() while True: byte = self.infile.read(1) if ord(byte) == 0: break - data += byte.decode('ascii') + data += byte.decode("ascii") align_pos = pos + len(data) + 1 - align_pos = (((align_pos) + ((4) - 1)) & ~((4) - 1)) + align_pos = ((align_pos) + ((4) - 1)) & ~((4) - 1) self.infile.seek(align_pos) return data def __extract_fdt_string(self, prop_string_pos): """Extract string from string pool""" - data = '' + data = "" pos = self.infile.tell() - self.infile.seek(self.fdt_header['off_dt_strings']+prop_string_pos) + self.infile.seek(self.fdt_header["off_dt_strings"] + prop_string_pos) while True: byte = self.infile.read(1) if ord(byte) == 0: break - data += byte.decode('ascii') + data += byte.decode("ascii") self.infile.seek(pos) return data @@ -977,17 +1069,17 @@ class FdtBlobParse(object): # pylint: disable-msg=R0903 prop = Struct(self.__fdt_dt_prop_format) pos = self.infile.tell() data = self.infile.read(prop.size) - (prop_size, prop_string_pos,) = prop.unpack_from(data) + (prop_size, prop_string_pos) = prop.unpack_from(data) prop_start = pos + prop.size - if self.fdt_header['version'] < 16 and prop_size >= 8: - prop_start = (((prop_start) + ((8) - 1)) & ~((8) - 1)) + if self.fdt_header["version"] < 16 and prop_size >= 8: + prop_start = ((prop_start) + ((8) - 1)) & ~((8) - 1) self.infile.seek(prop_start) value = self.infile.read(prop_size) align_pos = self.infile.tell() - align_pos = (((align_pos) + ((4) - 1)) & ~((4) - 1)) + align_pos = ((align_pos) + ((4) - 1)) & ~((4) - 1) self.infile.seek(align_pos) return (self.__extract_fdt_string(prop_string_pos), value) @@ -996,24 +1088,24 @@ class FdtBlobParse(object): # pylint: disable-msg=R0903 """Extract tags""" cell = Struct(self.__fdt_dt_cell_format) tags = [] - self.infile.seek(self.fdt_header['off_dt_struct']) + 
self.infile.seek(self.fdt_header["off_dt_struct"]) while True: data = self.infile.read(cell.size) if len(data) < cell.size: break tag, = cell.unpack_from(data) # print "*** %s" % self.__fdt_dt_tag_name.get(tag, '') - if self.__fdt_dt_tag_name.get(tag, '') in 'node_begin': + if self.__fdt_dt_tag_name.get(tag, "") in "node_begin": name = self.__extract_fdt_nodename() if len(name) == 0: - name = '/' + name = "/" tags.append((tag, name)) - elif self.__fdt_dt_tag_name.get(tag, '') in ('node_end', 'nop'): - tags.append((tag, '')) - elif self.__fdt_dt_tag_name.get(tag, '') in 'end': - tags.append((tag, '')) + elif self.__fdt_dt_tag_name.get(tag, "") in ("node_end", "nop"): + tags.append((tag, "")) + elif self.__fdt_dt_tag_name.get(tag, "") in "end": + tags.append((tag, "")) break - elif self.__fdt_dt_tag_name.get(tag, '') in 'prop': + elif self.__fdt_dt_tag_name.get(tag, "") in "prop": propdata = self.__extract_fdt_prop() tags.append((tag, propdata)) else: @@ -1024,13 +1116,15 @@ class FdtBlobParse(object): # pylint: disable-msg=R0903 """Init with file input""" self.infile = infile self.fdt_header = self.__extract_fdt_header() - if self.fdt_header['magic'] != FDT_MAGIC: - raise Exception('Invalid Magic') - if self.fdt_header['version'] > FDT_MAX_VERSION: - raise Exception('Invalid Version %d' % self.fdt_header['version']) - if self.fdt_header['last_comp_version'] > FDT_MAX_VERSION-1: - raise Exception('Invalid last compatible Version %d' % - self.fdt_header['last_comp_version']) + if self.fdt_header["magic"] != FDT_MAGIC: + raise Exception("Invalid Magic") + if self.fdt_header["version"] > FDT_MAX_VERSION: + raise Exception("Invalid Version %d" % self.fdt_header["version"]) + if self.fdt_header["last_comp_version"] > FDT_MAX_VERSION - 1: + raise Exception( + "Invalid last compatible Version %d" + % self.fdt_header["last_comp_version"] + ) self.fdt_reserve_entries = self.__extract_fdt_reserve_entries() self.fdt_dt_tags = self.__extract_fdt_dt() @@ -1044,7 +1138,7 @@ class 
FdtBlobParse(object): # pylint: disable-msg=R0903 rootnode = None curnode = None for tag in self.fdt_dt_tags: - if self.__fdt_dt_tag_name.get(tag[0], '') in 'node_begin': + if self.__fdt_dt_tag_name.get(tag[0], "") in "node_begin": newnode = FdtNode(tag[1]) if rootnode is None: rootnode = newnode @@ -1052,20 +1146,20 @@ class FdtBlobParse(object): # pylint: disable-msg=R0903 curnode.add_subnode(newnode) newnode.set_parent_node(curnode) curnode = newnode - elif self.__fdt_dt_tag_name.get(tag[0], '') in 'node_end': + elif self.__fdt_dt_tag_name.get(tag[0], "") in "node_end": if curnode is not None: curnode = curnode.get_parent_node() - elif self.__fdt_dt_tag_name.get(tag[0], '') in 'nop': + elif self.__fdt_dt_tag_name.get(tag[0], "") in "nop": if curnode is not None: curnode.add_subnode(FdtNop()) elif rootnode is not None: postnops.append(FdtNop()) else: prenops.append(FdtNop()) - elif self.__fdt_dt_tag_name.get(tag[0], '') in 'prop': + elif self.__fdt_dt_tag_name.get(tag[0], "") in "prop": if curnode is not None: curnode.add_raw_attribute(tag[1][0], tag[1][1]) - elif self.__fdt_dt_tag_name.get(tag[0], '') in 'end': + elif self.__fdt_dt_tag_name.get(tag[0], "") in "end": continue return (prenops, rootnode, postnops) @@ -1073,13 +1167,15 @@ class FdtBlobParse(object): # pylint: disable-msg=R0903 """Create a fdt object Returns a Fdt object """ - if self.fdt_header['version'] >= 2: - boot_cpuid_phys = self.fdt_header['boot_cpuid_phys'] + if self.fdt_header["version"] >= 2: + boot_cpuid_phys = self.fdt_header["boot_cpuid_phys"] else: boot_cpuid_phys = 0 - fdt = Fdt(version=self.fdt_header['version'], - last_comp_version=self.fdt_header['last_comp_version'], - boot_cpuid_phys=boot_cpuid_phys) + fdt = Fdt( + version=self.fdt_header["version"], + last_comp_version=self.fdt_header["last_comp_version"], + boot_cpuid_phys=boot_cpuid_phys, + ) (prenops, rootnode, postnops) = self.__to_nodes() fdt.add_rootnode(rootnode, prenops=prenops, postnops=postnops) 
fdt.add_reserve_entries(self.fdt_reserve_entries) diff --git a/src/python/m5/ext/pystats/__init__.py b/src/python/m5/ext/pystats/__init__.py index 668dddda93..32cee43296 100644 --- a/src/python/m5/ext/pystats/__init__.py +++ b/src/python/m5/ext/pystats/__init__.py @@ -24,7 +24,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from .jsonserializable import JsonSerializable +from .abstract_stat import AbstractStat +from .serializable_stat import SerializableStat from .group import Group from .simstat import SimStat from .statistic import Statistic @@ -33,11 +34,12 @@ from .timeconversion import TimeConversion from .jsonloader import JsonLoader __all__ = [ - "Group", - "SimStat", - "Statistic", - "TimeConversion", - "StorageType", - "JsonSerializable", - "JsonLoader", - ] + "AbstractStat", + "Group", + "SimStat", + "Statistic", + "TimeConversion", + "StorageType", + "SerializableStat", + "JsonLoader", +] diff --git a/src/python/m5/ext/pystats/abstract_stat.py b/src/python/m5/ext/pystats/abstract_stat.py new file mode 100644 index 0000000000..f2a75fca1e --- /dev/null +++ b/src/python/m5/ext/pystats/abstract_stat.py @@ -0,0 +1,97 @@ +# Copyright (c) 2022 The Regents of The University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from .serializable_stat import SerializableStat + +import re +from typing import ( + Callable, + List, + Optional, + Pattern, + Union, +) + + +class AbstractStat(SerializableStat): + """ + An abstract class which all PyStats inherit from. + + All PyStats are JsonSerializable. 
+ """ + + def children( + self, + predicate: Optional[Callable[[str], bool]] = None, + recursive: bool = False, + ) -> List["AbstractStat"]: + """Iterate through all of the children, optionally with a predicate + + ``` + >>> system.children(lambda _name: 'cpu' in name) + [cpu0, cpu1, cpu2] + ``` + + :param: predicate(str) -> bool: Optional. Each child's name is passed + to this function. If it returns true, then the child is + yielded. Otherwise, the child is skipped. + If not provided then all children are returned. + """ + + to_return = [] + for attr in self.__dict__: + obj = getattr(self, attr) + if isinstance(obj, AbstractStat): + if (predicate and predicate(attr)) or not predicate: + to_return.append(obj) + if recursive: + to_return = to_return + obj.children( + predicate=predicate, recursive=True + ) + + return to_return + + def find(self, regex: Union[str, Pattern]) -> List["AbstractStat"]: + """Find all stats that match the name, recursively through all the + SimStats. + + + ``` + >>> system.find('cpu[0-9]') + [cpu0, cpu1, cpu2] + ``` + Note: The above will not match `cpu_other`. + + :param: regex: The regular expression used to search. Can be a + precompiled regex or a string in regex format + """ + if isinstance(regex, str): + pattern = re.compile(regex) + else: + pattern = regex + return self.children( + lambda _name: re.match(pattern, _name), recursive=True + ) diff --git a/src/python/m5/ext/pystats/group.py b/src/python/m5/ext/pystats/group.py index 366061fd0e..0b71663565 100644 --- a/src/python/m5/ext/pystats/group.py +++ b/src/python/m5/ext/pystats/group.py @@ -24,15 +24,20 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import re -from typing import Callable, Dict, Iterator, List, Mapping, Optional, Pattern,\ - Union +from typing import ( + Dict, + List, + Mapping, + Optional, + Union, +) -from .jsonserializable import JsonSerializable +from .abstract_stat import AbstractStat from .statistic import Scalar, Statistic from .timeconversion import TimeConversion -class Group(JsonSerializable): + +class Group(AbstractStat): """ Used to create the heirarchical stats structure. A Group object contains a map of labeled Groups, Statistics, Lists of Groups, or List of Statistics. @@ -41,10 +46,14 @@ class Group(JsonSerializable): type: Optional[str] time_conversion: Optional[TimeConversion] - def __init__(self, type: Optional[str] = None, - time_conversion: Optional[TimeConversion] = None, - **kwargs: Dict[str, Union["Group",Statistic,List["Group"], - List["Statistic"]]]): + def __init__( + self, + type: Optional[str] = None, + time_conversion: Optional[TimeConversion] = None, + **kwargs: Dict[ + str, Union["Group", Statistic, List["Group"], List["Statistic"]] + ], + ): if type is None: self.type = "Group" else: @@ -52,77 +61,9 @@ class Group(JsonSerializable): self.time_conversion = time_conversion - for key,value in kwargs.items(): + for key, value in kwargs.items(): setattr(self, key, value) - def children(self, predicate: Optional[Callable[[str], bool]] = None - ) -> Iterator[Union["Group", Statistic]]: - """ Iterate through all of the children, optionally with a predicate - - ``` - >>> system.children(lambda _name: 'cpu' in name) - [cpu0, cpu1, cpu2] - ``` - - :param: predicate(str) -> bool: Optional. Each child's name is passed - to this function. If it returns true, then the child is - yielded. Otherwise, the child is skipped. - If not provided then all children are returned. - """ - for attr in self.__dict__: - # Check the provided predicate. 
If not a match, skip this child - if predicate and not predicate(attr): continue - obj = getattr(self, attr) - if isinstance(obj, Group) or isinstance(obj, Statistic): - yield obj - - def find(self, name: str) -> Iterator[Union["Group", Statistic]]: - """ Find all stats that match the name - - This function searches all of the "children" in this group. It yields - the set of attributes (children) that have the `name` as a substring. - The order of the objects returned by the generator is arbitrary. - - ``` - >>> system.find('cpu') - [cpu0, cpu1, cpu2, cpu3, other_cpu, ...] - ``` - - This is useful for performing aggregates over substats. For instance: - - ``` - >>> total_instructions = sum([cpu.exec_context.thread_0.numInsts.value - for cpu in simstat.system.find('cpu')]) - 100000 - ``` - - :param: name: The name to search for - """ - yield from self.children(lambda _name: _name in name) - - def find_re(self, regex: Union[str, Pattern] - ) -> Iterator[Union["Group", Statistic]]: - """ Find all stats that match the name - - This function searches all of the "children" in this group. It yields - the set of attributes (children) that have the `name` mathing the - regex provided. The order of the objects returned by the generator is - arbitrary. - - ``` - >>> system.find_re('cpu[0-9]') - [cpu0, cpu1, cpu2] - ``` - Note: The above will not match `cpu_other`. - - :param: regex: The regular expression used to search. Can be a - precompiled regex or a string in regex format - """ - if isinstance(regex, str): - pattern = re.compile(regex) - else: - pattern = regex - yield from self.children(lambda _name: bool(pattern.search(_name))) class Vector(Group): """ @@ -132,5 +73,9 @@ class Vector(Group): accordance to decisions made in relation to https://gem5.atlassian.net/browse/GEM5-867. 
""" - def __init__(self, scalar_map: Mapping[str,Scalar]): + + def __init__(self, scalar_map: Mapping[str, Scalar]): super().__init__(type="Vector", time_conversion=None, **scalar_map) + + def _repr_name(self) -> str: + return "Vector" diff --git a/src/python/m5/ext/pystats/jsonloader.py b/src/python/m5/ext/pystats/jsonloader.py index ffe87f748a..7461f8d7a8 100644 --- a/src/python/m5/ext/pystats/jsonloader.py +++ b/src/python/m5/ext/pystats/jsonloader.py @@ -31,6 +31,7 @@ from .group import Group, Vector import json from typing import IO, Union + class JsonLoader(json.JSONDecoder): """ Subclass of JSONDecoder that overrides 'object_hook'. Converts JSON object @@ -49,26 +50,26 @@ class JsonLoader(json.JSONDecoder): def __init__(self): super().__init__(self, object_hook=self.__json_to_simstat) - def __json_to_simstat(self, d: dict) -> Union[SimStat,Statistic,Group]: - if 'type' in d: - if d['type'] == 'Scalar': - d.pop('type', None) + def __json_to_simstat(self, d: dict) -> Union[SimStat, Statistic, Group]: + if "type" in d: + if d["type"] == "Scalar": + d.pop("type", None) return Scalar(**d) - elif d['type'] == 'Distribution': - d.pop('type', None) + elif d["type"] == "Distribution": + d.pop("type", None) return Distribution(**d) - elif d['type'] == 'Accumulator': - d.pop('type', None) + elif d["type"] == "Accumulator": + d.pop("type", None) return Accumulator(**d) - elif d['type'] == 'Group': + elif d["type"] == "Group": return Group(**d) - elif d['type'] == 'Vector': - d.pop('type', None) - d.pop('time_conversion', None) + elif d["type"] == "Vector": + d.pop("type", None) + d.pop("time_conversion", None) return Vector(d) else: @@ -78,6 +79,7 @@ class JsonLoader(json.JSONDecoder): else: return SimStat(**d) + def load(json_file: IO) -> SimStat: """ Wrapper function that provides a cleaner interface for using the @@ -95,4 +97,3 @@ def load(json_file: IO) -> SimStat: simstat_object = json.load(json_file, cls=JsonLoader) return simstat_object - diff --git 
a/src/python/m5/ext/pystats/jsonserializable.py b/src/python/m5/ext/pystats/serializable_stat.py similarity index 91% rename from src/python/m5/ext/pystats/jsonserializable.py rename to src/python/m5/ext/pystats/serializable_stat.py index 69b15f08fa..c4de181e70 100644 --- a/src/python/m5/ext/pystats/jsonserializable.py +++ b/src/python/m5/ext/pystats/serializable_stat.py @@ -30,10 +30,11 @@ from typing import Dict, List, Union, Any, IO from .storagetype import StorageType -class JsonSerializable: + +class SerializableStat: """ - Classes which inherit from JsonSerializable can be translated into JSON - using Python's json package. + Classes which inherit from SerializableStat can be serialized as JSON + output. Usage ----- @@ -61,8 +62,9 @@ class JsonSerializable: model_dct[key] = new_value return model_dct - def __process_json_value(self, - value: Any) -> Union[str,int,float,Dict,List,None]: + def __process_json_value( + self, value: Any + ) -> Union[str, int, float, Dict, List, None]: """ Translate values into a value which can be handled by the Python stdlib JSON package. @@ -78,7 +80,7 @@ class JsonSerializable: A value which can be handled by the Python stdlib JSON package. """ - if isinstance(value, JsonSerializable): + if isinstance(value, SerializableStat): return value.to_json() elif isinstance(value, (str, int, float)): return value @@ -91,7 +93,6 @@ class JsonSerializable: return None - def dumps(self, **kwargs) -> str: """ This function mirrors the Python stdlib JSON module method @@ -126,8 +127,8 @@ class JsonSerializable: """ # Setting the default indentation to something readable. - if 'indent' not in kwargs: - kwargs['indent'] = 4 + if "indent" not in kwargs: + kwargs["indent"] = 4 return json.dumps(obj=self.to_json(), **kwargs) @@ -161,7 +162,7 @@ class JsonSerializable: """ # Setting the default indentation to something readable. 
- if 'indent' not in kwargs: - kwargs['indent'] = 4 + if "indent" not in kwargs: + kwargs["indent"] = 4 - json.dump(obj=self.to_json(), fp=fp, **kwargs) \ No newline at end of file + json.dump(obj=self.to_json(), fp=fp, **kwargs) diff --git a/src/python/m5/ext/pystats/simstat.py b/src/python/m5/ext/pystats/simstat.py index 6f4565edff..c7c28f419a 100644 --- a/src/python/m5/ext/pystats/simstat.py +++ b/src/python/m5/ext/pystats/simstat.py @@ -27,12 +27,13 @@ from datetime import datetime from typing import Dict, List, Optional, Union -from .jsonserializable import JsonSerializable +from .abstract_stat import AbstractStat from .group import Group from .statistic import Statistic from .timeconversion import TimeConversion -class SimStat(JsonSerializable): + +class SimStat(AbstractStat): """ Contains all the statistics for a given simulation. """ @@ -42,15 +43,18 @@ class SimStat(JsonSerializable): simulated_begin_time: Optional[Union[int, float]] simulated_end_time: Optional[Union[int, float]] - def __init__(self, creation_time: Optional[datetime] = None, - time_conversion: Optional[TimeConversion] = None, - simulated_begin_time: Optional[Union[int, float]] = None, - simulated_end_time: Optional[Union[int, float]] = None, - **kwargs: Dict[str, Union[Group,Statistic,List[Group]]]): + def __init__( + self, + creation_time: Optional[datetime] = None, + time_conversion: Optional[TimeConversion] = None, + simulated_begin_time: Optional[Union[int, float]] = None, + simulated_end_time: Optional[Union[int, float]] = None, + **kwargs: Dict[str, Union[Group, Statistic, List[Group]]] + ): self.creation_time = creation_time self.time_conversion = time_conversion self.simulated_begin_time = simulated_begin_time self.simulated_end_time = simulated_end_time - for key,value in kwargs.items(): - setattr(self, key, value) \ No newline at end of file + for key, value in kwargs.items(): + setattr(self, key, value) diff --git a/src/python/m5/ext/pystats/statistic.py 
b/src/python/m5/ext/pystats/statistic.py index d078a164e2..4111bde23e 100644 --- a/src/python/m5/ext/pystats/statistic.py +++ b/src/python/m5/ext/pystats/statistic.py @@ -27,10 +27,11 @@ from abc import ABC from typing import Any, Iterable, Optional, Union, List -from .jsonserializable import JsonSerializable +from .abstract_stat import AbstractStat from .storagetype import StorageType -class Statistic(ABC, JsonSerializable): + +class Statistic(ABC, AbstractStat): """ The abstract base class for all Python statistics. """ @@ -41,16 +42,24 @@ class Statistic(ABC, JsonSerializable): description: Optional[str] datatype: Optional[StorageType] - def __init__(self, value: Any, type: Optional[str] = None, - unit: Optional[str] = None, - description: Optional[str] = None, - datatype: Optional[StorageType] = None): + def __init__( + self, + value: Any, + type: Optional[str] = None, + unit: Optional[str] = None, + description: Optional[str] = None, + datatype: Optional[StorageType] = None, + ): self.value = value self.type = type self.unit = unit self.description = description self.datatype = datatype + def __repr__(self): + return str(self.value) + + class Scalar(Statistic): """ A scalar Python statistic type. @@ -58,26 +67,44 @@ class Scalar(Statistic): value: Union[float, int] - def __init__(self, value: Any, - unit: Optional[str] = None, - description: Optional[str] = None, - datatype: Optional[StorageType] = None): - super().__init__(value=value, type="Scalar", unit=unit, - description=description, datatype=datatype) + def __init__( + self, + value: Any, + unit: Optional[str] = None, + description: Optional[str] = None, + datatype: Optional[StorageType] = None, + ): + super().__init__( + value=value, + type="Scalar", + unit=unit, + description=description, + datatype=datatype, + ) + class BaseScalarVector(Statistic): """ An abstract base class for classes containing a vector of Scalar values. 
""" - value: List[Union[int,float]] - def __init__(self, value: Iterable[Union[int,float]], - type: Optional[str] = None, - unit: Optional[str] = None, - description: Optional[str] = None, - datatype: Optional[StorageType] = None): - super().__init__(value=list(value), type=type, unit=unit, - description=description, datatype=datatype) + value: List[Union[int, float]] + + def __init__( + self, + value: Iterable[Union[int, float]], + type: Optional[str] = None, + unit: Optional[str] = None, + description: Optional[str] = None, + datatype: Optional[StorageType] = None, + ): + super().__init__( + value=list(value), + type=type, + unit=unit, + description=description, + datatype=datatype, + ) def mean(self) -> float: """ @@ -88,10 +115,11 @@ class BaseScalarVector(Statistic): float The mean value across all bins. """ - assert(self.value != None) - assert(isinstance(self.value, List)) + assert self.value != None + assert isinstance(self.value, List) from statistics import mean as statistics_mean + return statistics_mean(self.value) def count(self) -> float: @@ -103,7 +131,7 @@ class BaseScalarVector(Statistic): float The sum of all bin values. 
""" - assert(self.value != None) + assert self.value != None return sum(self.value) @@ -127,21 +155,29 @@ class Distribution(BaseScalarVector): overflow: Optional[int] logs: Optional[float] - def __init__(self, value: Iterable[int], - min: Union[float, int], - max: Union[float, int], - num_bins: int, - bin_size: Union[float, int], - sum: Optional[int] = None, - sum_squared: Optional[int] = None, - underflow: Optional[int] = None, - overflow: Optional[int] = None, - logs: Optional[float] = None, - unit: Optional[str] = None, - description: Optional[str] = None, - datatype: Optional[StorageType] = None): - super().__init__(value=value, type="Distribution", unit=unit, - description=description, datatype=datatype) + def __init__( + self, + value: Iterable[int], + min: Union[float, int], + max: Union[float, int], + num_bins: int, + bin_size: Union[float, int], + sum: Optional[int] = None, + sum_squared: Optional[int] = None, + underflow: Optional[int] = None, + overflow: Optional[int] = None, + logs: Optional[float] = None, + unit: Optional[str] = None, + description: Optional[str] = None, + datatype: Optional[StorageType] = None, + ): + super().__init__( + value=value, + type="Distribution", + unit=unit, + description=description, + datatype=datatype, + ) self.min = min self.max = max @@ -154,8 +190,9 @@ class Distribution(BaseScalarVector): self.sum_squared = sum_squared # These check some basic conditions of a distribution. 
- assert(self.bin_size >= 0) - assert(self.num_bins >= 1) + assert self.bin_size >= 0 + assert self.num_bins >= 1 + class Accumulator(BaseScalarVector): """ @@ -167,16 +204,24 @@ class Accumulator(BaseScalarVector): max: Union[int, float] sum_squared: Optional[int] - def __init__(self, value: Iterable[Union[int,float]], - count: int, - min: Union[int, float], - max: Union[int, float], - sum_squared: Optional[int] = None, - unit: Optional[str] = None, - description: Optional[str] = None, - datatype: Optional[StorageType] = None): - super().__init__(value=value, type="Accumulator", unit=unit, - description=description, datatype=datatype) + def __init__( + self, + value: Iterable[Union[int, float]], + count: int, + min: Union[int, float], + max: Union[int, float], + sum_squared: Optional[int] = None, + unit: Optional[str] = None, + description: Optional[str] = None, + datatype: Optional[StorageType] = None, + ): + super().__init__( + value=value, + type="Accumulator", + unit=unit, + description=description, + datatype=datatype, + ) self._count = count self.min = min diff --git a/src/python/m5/ext/pystats/storagetype.py b/src/python/m5/ext/pystats/storagetype.py index 562cc8315e..77970ef0c9 100644 --- a/src/python/m5/ext/pystats/storagetype.py +++ b/src/python/m5/ext/pystats/storagetype.py @@ -27,6 +27,7 @@ from enum import Enum from typing import Dict + class StorageType(Enum): """ An enum used to declare what C++ data type was used to store a value. @@ -34,9 +35,10 @@ class StorageType(Enum): E.g. 
's64' indicates a 64 bit signed integer """ + u32: str = "u32" u64: str = "u64" s32: str = "s32" s64: str = "s64" f32: str = "f32" - f64: str = "f64" \ No newline at end of file + f64: str = "f64" diff --git a/src/python/m5/ext/pystats/timeconversion.py b/src/python/m5/ext/pystats/timeconversion.py index 92d0749ef9..e9589ea5eb 100644 --- a/src/python/m5/ext/pystats/timeconversion.py +++ b/src/python/m5/ext/pystats/timeconversion.py @@ -26,14 +26,16 @@ from typing import Optional + class TimeConversion: """ A class for specifying a scale factor necessary to translate a simulation time measurement (e.g. ticks) into seconds. """ + scale_factor: float description: Optional[str] def __init__(self, scale_factor: float, description: Optional[str] = None): self.scale_factor = scale_factor - self.description = description \ No newline at end of file + self.description = description diff --git a/src/python/m5/internal/params.py b/src/python/m5/internal/params.py index 2fc79c0c82..8762a69e61 100644 --- a/src/python/m5/internal/params.py +++ b/src/python/m5/internal/params.py @@ -40,5 +40,5 @@ import inspect import _m5 for name, module in inspect.getmembers(_m5): - if name.startswith('param_') or name.startswith('enum_'): + if name.startswith("param_") or name.startswith("enum_"): exec("from _m5.%s import *" % name) diff --git a/src/python/m5/main.py b/src/python/m5/main.py index b216840df7..458e143a53 100644 --- a/src/python/m5/main.py +++ b/src/python/m5/main.py @@ -42,14 +42,17 @@ import os import socket import sys -__all__ = [ 'options', 'arguments', 'main' ] +__all__ = ["options", "arguments", "main"] -usage="%prog [gem5 options] script.py [script options]" -brief_copyright=\ +usage = "%prog [gem5 options] script.py [script options]" +brief_copyright = ( "gem5 is copyrighted software; use the --copyright option for details." +) + def _stats_help(option, opt, value, parser): import m5 + print("A stat file can either be specified as a URI or a plain") print("path. 
When specified as a path, gem5 uses the default text ") print("format.") @@ -67,95 +70,237 @@ def parse_options(): option = options.add_option group = options.set_group - listener_modes = ( "on", "off", "auto" ) + listener_modes = ("on", "off", "auto") # Help options - option('-B', "--build-info", action="store_true", default=False, - help="Show build information") - option('-C', "--copyright", action="store_true", default=False, - help="Show full copyright information") - option('-R', "--readme", action="store_true", default=False, - help="Show the readme") + option( + "-B", + "--build-info", + action="store_true", + default=False, + help="Show build information", + ) + option( + "-C", + "--copyright", + action="store_true", + default=False, + help="Show full copyright information", + ) + option( + "-R", + "--readme", + action="store_true", + default=False, + help="Show the readme", + ) # Options for configuring the base simulator - option('-d', "--outdir", metavar="DIR", default="m5out", - help="Set the output directory to DIR [Default: %default]") - option('-r', "--redirect-stdout", action="store_true", default=False, - help="Redirect stdout (& stderr, without -e) to file") - option('-e', "--redirect-stderr", action="store_true", default=False, - help="Redirect stderr to file") - option("--silent-redirect", action="store_true", default=False, - help="Suppress printing a message when redirecting stdout or stderr") - option("--stdout-file", metavar="FILE", default="simout", - help="Filename for -r redirection [Default: %default]") - option("--stderr-file", metavar="FILE", default="simerr", - help="Filename for -e redirection [Default: %default]") - option("--listener-mode", metavar="{on,off,auto}", - choices=listener_modes, default="auto", - help="Port (e.g., gdb) listener mode (auto: Enable if running " \ - "interactively) [Default: %default]") - option("--allow-remote-connections", action="store_true", default=False, + option( + "-d", + "--outdir", + 
metavar="DIR", + default="m5out", + help="Set the output directory to DIR [Default: %default]", + ) + option( + "-r", + "--redirect-stdout", + action="store_true", + default=False, + help="Redirect stdout (& stderr, without -e) to file", + ) + option( + "-e", + "--redirect-stderr", + action="store_true", + default=False, + help="Redirect stderr to file", + ) + option( + "--silent-redirect", + action="store_true", + default=False, + help="Suppress printing a message when redirecting stdout or stderr", + ) + option( + "--stdout-file", + metavar="FILE", + default="simout", + help="Filename for -r redirection [Default: %default]", + ) + option( + "--stderr-file", + metavar="FILE", + default="simerr", + help="Filename for -e redirection [Default: %default]", + ) + option( + "--listener-mode", + metavar="{on,off,auto}", + choices=listener_modes, + default="auto", + help="Port (e.g., gdb) listener mode (auto: Enable if running " + "interactively) [Default: %default]", + ) + option( + "--allow-remote-connections", + action="store_true", + default=False, help="Port listeners will accept connections from anywhere (0.0.0.0). 
" - "Default is only localhost.") - option('-i', "--interactive", action="store_true", default=False, - help="Invoke the interactive interpreter after running the script") - option("--pdb", action="store_true", default=False, - help="Invoke the python debugger before running the script") - option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':', - help="Prepend PATH to the system path when invoking the script") - option('-q', "--quiet", action="count", default=0, - help="Reduce verbosity") - option('-v', "--verbose", action="count", default=0, - help="Increase verbosity") + "Default is only localhost.", + ) + option( + "-i", + "--interactive", + action="store_true", + default=False, + help="Invoke the interactive interpreter after running the script", + ) + option( + "--pdb", + action="store_true", + default=False, + help="Invoke the python debugger before running the script", + ) + option( + "-p", + "--path", + metavar="PATH[:PATH]", + action="append", + split=":", + help="Prepend PATH to the system path when invoking the script", + ) + option("-q", "--quiet", action="count", default=0, help="Reduce verbosity") + option( + "-v", "--verbose", action="count", default=0, help="Increase verbosity" + ) + + # To make gem5 mimic python better. After `-c` we should consume all other + # arguments and add those to argv. 
+ def collect_args(option, opt_str, value, parser): + extra_args = parser.rargs[:] + del parser.rargs[:] + setattr(parser.values, option.dest, (value, extra_args)) + + option( + "-c", + type=str, + help="program passed in as string (terminates option list)", + default="", + metavar="cmd", + action="callback", + callback=collect_args, + ) # Statistics options group("Statistics Options") - option("--stats-file", metavar="FILE", default="stats.txt", - help="Sets the output file for statistics [Default: %default]") - option("--stats-help", - action="callback", callback=_stats_help, - help="Display documentation for available stat visitors") + option( + "--stats-file", + metavar="FILE", + default="stats.txt", + help="Sets the output file for statistics [Default: %default]", + ) + option( + "--stats-help", + action="callback", + callback=_stats_help, + help="Display documentation for available stat visitors", + ) # Configuration Options group("Configuration Options") - option("--dump-config", metavar="FILE", default="config.ini", - help="Dump configuration output file [Default: %default]") - option("--json-config", metavar="FILE", default="config.json", - help="Create JSON output of the configuration [Default: %default]") - option("--dot-config", metavar="FILE", default="config.dot", - help="Create DOT & pdf outputs of the configuration [Default: %default]") - option("--dot-dvfs-config", metavar="FILE", default=None, - help="Create DOT & pdf outputs of the DVFS configuration" + \ - " [Default: %default]") + option( + "--dump-config", + metavar="FILE", + default="config.ini", + help="Dump configuration output file [Default: %default]", + ) + option( + "--json-config", + metavar="FILE", + default="config.json", + help="Create JSON output of the configuration [Default: %default]", + ) + option( + "--dot-config", + metavar="FILE", + default="config.dot", + help="Create DOT & pdf outputs of the configuration [Default: %default]", + ) + option( + "--dot-dvfs-config", + 
metavar="FILE", + default=None, + help="Create DOT & pdf outputs of the DVFS configuration" + + " [Default: %default]", + ) # Debugging options group("Debugging Options") - option("--debug-break", metavar="TICK[,TICK]", action='append', split=',', - help="Create breakpoint(s) at TICK(s) " \ - "(kills process if no debugger attached)") - option("--debug-help", action='store_true', - help="Print help on debug flags") - option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',', - help="Sets the flags for debug output (-FLAG disables a flag)") - option("--debug-start", metavar="TICK", type='int', - help="Start debug output at TICK") - option("--debug-end", metavar="TICK", type='int', - help="End debug output at TICK") - option("--debug-file", metavar="FILE", default="cout", + option( + "--debug-break", + metavar="TICK[,TICK]", + action="append", + split=",", + help="Create breakpoint(s) at TICK(s) " + "(kills process if no debugger attached)", + ) + option( + "--debug-help", action="store_true", help="Print help on debug flags" + ) + option( + "--debug-flags", + metavar="FLAG[,FLAG]", + action="append", + split=",", + help="Sets the flags for debug output (-FLAG disables a flag)", + ) + option( + "--debug-start", + metavar="TICK", + type="int", + help="Start debug output at TICK", + ) + option( + "--debug-end", + metavar="TICK", + type="int", + help="End debug output at TICK", + ) + option( + "--debug-file", + metavar="FILE", + default="cout", help="Sets the output file for debug. 
Append '.gz' to the name for it" - " to be compressed automatically [Default: %default]") - option("--debug-ignore", metavar="EXPR", action='append', split=':', - help="Ignore EXPR sim objects") - option("--remote-gdb-port", type='int', default=7000, - help="Remote gdb base port (set to 0 to disable listening)") + " to be compressed automatically [Default: %default]", + ) + option( + "--debug-ignore", + metavar="EXPR", + action="append", + split=":", + help="Ignore EXPR sim objects", + ) + option( + "--remote-gdb-port", + type="int", + default=7000, + help="Remote gdb base port (set to 0 to disable listening)", + ) # Help options group("Help Options") - option("--list-sim-objects", action='store_true', default=False, - help="List all built-in SimObjects, their params and default values") + option( + "--list-sim-objects", + action="store_true", + default=False, + help="List all built-in SimObjects, their params and default values", + ) arguments = options.parse_args() - return options,arguments + return options, arguments + def interact(scope): banner = "gem5 Interactive Console" @@ -172,8 +317,9 @@ def interact(scope): cfg = Config() cfg.PromptManager.in_template = prompt_in1 cfg.PromptManager.out_template = prompt_out - ipshell = InteractiveShellEmbed(config=cfg, user_ns=scope, - banner1=banner) + ipshell = InteractiveShellEmbed( + config=cfg, user_ns=scope, banner1=banner + ) except ImportError: pass @@ -182,6 +328,8 @@ def interact(scope): else: # Use the Python shell in the standard library if IPython # isn't available. + import readline # if this is imported, then the up arrow works + code.InteractiveConsole(scope).interact(banner) @@ -193,6 +341,7 @@ def _check_tracing(): fatal("Tracing is not enabled. Compile with TRACING_ON") + def main(): import m5 import _m5.core @@ -235,29 +384,29 @@ def main(): # Now redirect stdout/stderr as desired if options.redirect_stdout: - redir_fd = os.open(stdout_file, os. 
O_WRONLY | os.O_CREAT | os.O_TRUNC) + redir_fd = os.open(stdout_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.dup2(redir_fd, sys.stdout.fileno()) if not options.redirect_stderr: os.dup2(redir_fd, sys.stderr.fileno()) if options.redirect_stderr: - redir_fd = os.open(stderr_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC) + redir_fd = os.open(stderr_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.dup2(redir_fd, sys.stderr.fileno()) done = False if options.build_info: done = True - print('Build information:') + print("Build information:") print() - print('gem5 version %s' % defines.gem5Version) - print('compiled %s' % defines.compileDate) - print('build options:') + print("gem5 version %s" % defines.gem5Version) + print("compiled %s" % defines.compileDate) + print("build options:") keys = list(defines.buildEnv.keys()) keys.sort() for key in keys: val = defines.buildEnv[key] - print(' %s = %s' % (key, val)) + print(" %s = %s" % (key, val)) print() if options.copyright: @@ -267,7 +416,7 @@ def main(): if options.readme: done = True - print('Readme:') + print("Readme:") print() print(info.README) print() @@ -279,6 +428,7 @@ def main(): if options.list_sim_objects: from . 
import SimObject + done = True print("SimObjects:") objects = list(SimObject.allClasses.keys()) @@ -291,13 +441,19 @@ def main(): params.sort() for pname in params: param = obj._params[pname] - default = getattr(param, 'default', '') + default = getattr(param, "default", "") print(terminal_formatter.format_output(pname, indent=8)) if default: - print(terminal_formatter.format_output( - str(default), label="default: ", indent=21)) - print(terminal_formatter.format_output( - param.desc, label="desc: ", indent=21)) + print( + terminal_formatter.format_output( + str(default), label="default: ", indent=21 + ) + ) + print( + terminal_formatter.format_output( + param.desc, label="desc: ", indent=21 + ) + ) print() print() @@ -317,18 +473,22 @@ def main(): print("gem5 version %s" % _m5.core.gem5Version) print("gem5 compiled %s" % _m5.core.compileDate) - print("gem5 started %s" % - datetime.datetime.now().strftime("%b %e %Y %X")) - print("gem5 executing on %s, pid %d" % - (socket.gethostname(), os.getpid())) + print( + "gem5 started %s" % datetime.datetime.now().strftime("%b %e %Y %X") + ) + print( + "gem5 executing on %s, pid %d" + % (socket.gethostname(), os.getpid()) + ) # in Python 3 pipes.quote() is moved to shlex.quote() import pipes + print("command line:", " ".join(map(pipes.quote, sys.argv))) print() # check to make sure we can find the listed script - if not arguments or not os.path.isfile(arguments[0]): + if not options.c and (not arguments or not os.path.isfile(arguments[0])): if arguments and not os.path.isfile(arguments[0]): print("Script %s not found" % arguments[0]) @@ -359,8 +519,6 @@ def main(): if not options.allow_remote_connections: m5.listenersLoopbackOnly() - # set debugging options - debug.setRemoteGDBPort(options.remote_gdb_port) for when in options.debug_break: debug.schedBreak(int(when)) @@ -371,7 +529,7 @@ def main(): off_flags = [] for flag in options.debug_flags: off = False - if flag.startswith('-'): + if flag.startswith("-"): flag = flag[1:] 
off = True @@ -403,13 +561,18 @@ def main(): trace.ignore(ignore) sys.argv = arguments - sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path - filename = sys.argv[0] - filedata = open(filename, 'r').read() - filecode = compile(filedata, filename, 'exec') - scope = { '__file__' : filename, - '__name__' : '__m5_main__' } + if options.c: + filedata = options.c[0] + filecode = compile(filedata, "", "exec") + sys.argv = ["-c"] + options.c[1] + scope = {"__name__": "__m5_main__"} + else: + sys.path = [os.path.dirname(sys.argv[0])] + sys.path + filename = sys.argv[0] + filedata = open(filename, "r").read() + filecode = compile(filedata, filename, "exec") + scope = {"__file__": filename, "__name__": "__m5_main__"} # if pdb was requested, execfile the thing under pdb, otherwise, # just do the execfile normally @@ -421,7 +584,7 @@ def main(): try: pdb.run(filecode, scope) except SystemExit: - print("The program exited via sys.exit(). Exit status: ", end=' ') + print("The program exited via sys.exit(). Exit status: ", end=" ") print(sys.exc_info()[1]) except: traceback.print_exc() @@ -429,7 +592,7 @@ def main(): t = sys.exc_info()[2] while t.tb_next is not None: t = t.tb_next - pdb.interaction(t.tb_frame,t) + pdb.interaction(t.tb_frame, t) else: exec(filecode, scope) diff --git a/src/python/m5/objects/SimObject.py b/src/python/m5/objects/SimObject.py index efec5eba4f..6cf44d07c2 100644 --- a/src/python/m5/objects/SimObject.py +++ b/src/python/m5/objects/SimObject.py @@ -24,6 +24,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from m5.SimObject import * + # The ByteOrder enum is defined in params. Expose it here so we can declare it # to SCons, since there's no normal SimObject file to make it a part of. 
from m5.params import ByteOrder diff --git a/src/python/m5/objects/__init__.py b/src/python/m5/objects/__init__.py index 4bec74dfaa..b6672331f4 100644 --- a/src/python/m5/objects/__init__.py +++ b/src/python/m5/objects/__init__.py @@ -25,5 +25,5 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. for module in __spec__.loader_state: - if module.startswith('m5.objects.'): + if module.startswith("m5.objects."): exec("from %s import *" % module) diff --git a/src/python/m5/options.py b/src/python/m5/options.py index 56c1a29a11..08638c65a7 100644 --- a/src/python/m5/options.py +++ b/src/python/m5/options.py @@ -29,11 +29,15 @@ import sys from optparse import * -class nodefault(object): pass + +class nodefault(object): + pass + class splitter(object): def __init__(self, split): self.split = split + def __call__(self, option, opt_str, value, parser): values = value.split(self.split) dest = getattr(parser.values, option.dest) @@ -42,9 +46,10 @@ class splitter(object): else: dest.extend(values) + class OptionParser(dict): def __init__(self, *args, **kwargs): - kwargs.setdefault('formatter', optparse.TitledHelpFormatter()) + kwargs.setdefault("formatter", optparse.TitledHelpFormatter()) self._optparse = optparse.OptionParser(*args, **kwargs) self._optparse.disable_interspersed_args() @@ -57,24 +62,24 @@ class OptionParser(dict): return self._optparse.set_defaults(*args, **kwargs) def set_group(self, *args, **kwargs): - '''set the current option group''' + """set the current option group""" if not args and not kwargs: self._group = self._optparse else: self._group = self._optparse.add_option_group(*args, **kwargs) def add_option(self, *args, **kwargs): - '''add an option to the current option group, or global none set''' + """add an option to the current option group, or global none set""" # if action=split, but allows the option arguments # themselves to be lists separated by the split variable''' - if kwargs.get('action', None) == 'append' and 'split' 
in kwargs: - split = kwargs.pop('split') - kwargs['default'] = [] - kwargs['type'] = 'string' - kwargs['action'] = 'callback' - kwargs['callback'] = splitter(split) + if kwargs.get("action", None) == "append" and "split" in kwargs: + split = kwargs.pop("split") + kwargs["default"] = [] + kwargs["type"] = "string" + kwargs["action"] = "callback" + kwargs["callback"] = splitter(split) option = self._group.add_option(*args, **kwargs) dest = option.dest @@ -84,12 +89,12 @@ class OptionParser(dict): return option def bool_option(self, name, default, help): - '''add a boolean option called --name and --no-name. - Display help depending on which is the default''' + """add a boolean option called --name and --no-name. + Display help depending on which is the default""" - tname = '--%s' % name - fname = '--no-%s' % name - dest = name.replace('-', '_') + tname = "--%s" % name + fname = "--no-%s" % name + dest = name.replace("-", "_") if default: thelp = optparse.SUPPRESS_HELP fhelp = help @@ -97,15 +102,17 @@ class OptionParser(dict): thelp = help fhelp = optparse.SUPPRESS_HELP - topt = self.add_option(tname, action="store_true", default=default, - help=thelp) - fopt = self.add_option(fname, action="store_false", dest=dest, - help=fhelp) + topt = self.add_option( + tname, action="store_true", default=default, help=thelp + ) + fopt = self.add_option( + fname, action="store_false", dest=dest, help=fhelp + ) - return topt,fopt + return topt, fopt def __getattr__(self, attr): - if attr.startswith('_'): + if attr.startswith("_"): return super().__getattribute__(attr) if attr in self: @@ -114,10 +121,10 @@ class OptionParser(dict): return super().__getattribute__(attr) def __setattr__(self, attr, value): - if attr.startswith('_'): + if attr.startswith("_"): super().__setattr__(attr, value) elif attr in self._allopts: - defaults = { attr : value } + defaults = {attr: value} self.set_defaults(**defaults) if attr in self: self[attr] = value @@ -125,9 +132,9 @@ class 
OptionParser(dict): super().__setattr__(attr, value) def parse_args(self): - opts,args = self._optparse.parse_args() + opts, args = self._optparse.parse_args() - for key,val in opts.__dict__.items(): + for key, val in opts.__dict__.items(): if val is not None or key not in self: self[key] = val @@ -137,4 +144,3 @@ class OptionParser(dict): self._optparse.print_help() if exitcode is not None: sys.exit(exitcode) - diff --git a/src/python/m5/params.py b/src/python/m5/params.py index 57a3d3e186..8e96f4668e 100644 --- a/src/python/m5/params.py +++ b/src/python/m5/params.py @@ -65,26 +65,36 @@ from . import proxy from . import ticks from .util import * + def isSimObject(*args, **kwargs): from . import SimObject + return SimObject.isSimObject(*args, **kwargs) + def isSimObjectSequence(*args, **kwargs): from . import SimObject + return SimObject.isSimObjectSequence(*args, **kwargs) + def isSimObjectClass(*args, **kwargs): from . import SimObject + return SimObject.isSimObjectClass(*args, **kwargs) + allParams = {} + class MetaParamValue(type): def __new__(mcls, name, bases, dct): cls = super().__new__(mcls, name, bases, dct) if name in allParams: - warn("%s already exists in allParams. This may be caused by the " \ - "Python 2.7 compatibility layer." % (name, )) + warn( + "%s already exists in allParams. This may be caused by the " + "Python 2.7 compatibility layer." 
% (name,) + ) allParams[name] = cls return cls @@ -125,8 +135,8 @@ class ParamValue(object, metaclass=MetaParamValue): # src into lvalue dest (of the param's C++ type) @classmethod def cxx_ini_parse(cls, code, src, dest, ret): - code('// Unhandled param type: %s' % cls.__name__) - code('%s false;' % ret) + code("// Unhandled param type: %s" % cls.__name__) + code("%s false;" % ret) # allows us to blithely call unproxy() on things without checking # if they're really proxies or not @@ -137,6 +147,7 @@ class ParamValue(object, metaclass=MetaParamValue): def pretty_print(self, value): return str(value) + # Regular parameter description. class ParamDesc(object): def __init__(self, ptype_str, ptype, *args, **kwargs): @@ -152,34 +163,36 @@ class ParamDesc(object): self.default = args[0] self.desc = args[1] else: - raise TypeError('too many arguments') + raise TypeError("too many arguments") - if 'desc' in kwargs: - assert(not hasattr(self, 'desc')) - self.desc = kwargs['desc'] - del kwargs['desc'] + if "desc" in kwargs: + assert not hasattr(self, "desc") + self.desc = kwargs["desc"] + del kwargs["desc"] - if 'default' in kwargs: - assert(not hasattr(self, 'default')) - self.default = kwargs['default'] - del kwargs['default'] + if "default" in kwargs: + assert not hasattr(self, "default") + self.default = kwargs["default"] + del kwargs["default"] if kwargs: - raise TypeError('extra unknown kwargs %s' % kwargs) + raise TypeError("extra unknown kwargs %s" % kwargs) - if not hasattr(self, 'desc'): - raise TypeError('desc attribute missing') + if not hasattr(self, "desc"): + raise TypeError("desc attribute missing") def __getattr__(self, attr): - if attr == 'ptype': + if attr == "ptype": from . 
import SimObject + ptype = SimObject.allClasses[self.ptype_str] assert isSimObjectClass(ptype) self.ptype = ptype return ptype - raise AttributeError("'%s' object has no attribute '%s'" % \ - (type(self).__name__, attr)) + raise AttributeError( + "'%s' object has no attribute '%s'" % (type(self).__name__, attr) + ) def example_str(self): if hasattr(self.ptype, "ex_str"): @@ -198,7 +211,7 @@ class ParamDesc(object): if isinstance(value, proxy.BaseProxy): value.set_param_desc(self) return value - if 'ptype' not in self.__dict__ and isNullPointer(value): + if "ptype" not in self.__dict__ and isNullPointer(value): # deferred evaluation of SimObject; continue to defer if # we're just assigning a null pointer return value @@ -210,38 +223,41 @@ class ParamDesc(object): def pretty_print(self, value): if isinstance(value, proxy.BaseProxy): - return str(value) + return str(value) if isNullPointer(value): - return NULL + return NULL return self.ptype(value).pretty_print(value) def cxx_predecls(self, code): - code('#include ') + code("#include ") self.ptype.cxx_predecls(code) def pybind_predecls(self, code): self.ptype.pybind_predecls(code) def cxx_decl(self, code): - code('${{self.ptype.cxx_type}} ${{self.name}};') + code("${{self.ptype.cxx_type}} ${{self.name}};") + # Vector-valued parameter description. Just like ParamDesc, except # that the value is a vector (list) of the specified type instead of a # single value. 
+ class VectorParamValue(list, metaclass=MetaParamValue): def __setattr__(self, attr, value): - raise AttributeError("Not allowed to set %s on '%s'" % \ - (attr, type(self).__name__)) + raise AttributeError( + "Not allowed to set %s on '%s'" % (attr, type(self).__name__) + ) def config_value(self): return [v.config_value() for v in self] def ini_str(self): - return ' '.join([v.ini_str() for v in self]) + return " ".join([v.ini_str() for v in self]) def getValue(self): - return [ v.getValue() for v in self ] + return [v.getValue() for v in self] def unproxy(self, base): if len(self) == 1 and isinstance(self[0], proxy.BaseProxy): @@ -251,6 +267,7 @@ class VectorParamValue(list, metaclass=MetaParamValue): else: return [v.unproxy(base) for v in self] + class SimObjectVector(VectorParamValue): # support clone operation def __call__(self, **kwargs): @@ -264,8 +281,8 @@ class SimObjectVector(VectorParamValue): if len(self) == 1: self[0].set_parent(parent, name) else: - width = int(math.ceil(math.log(len(self))/math.log(10))) - for i,v in enumerate(self): + width = int(math.ceil(math.log(len(self)) / math.log(10))) + for i, v in enumerate(self): v.set_parent(parent, "%s%0*d" % (name, width, i)) def has_parent(self): @@ -273,7 +290,7 @@ class SimObjectVector(VectorParamValue): # return 'cpu0 cpu1' etc. 
for print_ini() def get_name(self): - return ' '.join([v._name for v in self]) + return " ".join([v._name for v in self]) # By iterating through the constituent members of the vector here # we can nicely handle iterating over all a SimObject's children @@ -298,17 +315,17 @@ class SimObjectVector(VectorParamValue): def __setitem__(self, key, value): val = self[key] if value.has_parent(): - warn("SimObject %s already has a parent" % value.get_name() +\ - " that is being overwritten by a SimObjectVector") + warn( + "SimObject %s already has a parent" % value.get_name() + + " that is being overwritten by a SimObjectVector" + ) value.set_parent(val.get_parent(), val._name) super().__setitem__(key, value) # Enumerate the params of each member of the SimObject vector. Creates # strings that will allow indexing into the vector by the python code and # allow it to be specified on the command line. - def enumerateParams(self, flags_dict = {}, - cmd_line_str = "", - access_str = ""): + def enumerateParams(self, flags_dict={}, cmd_line_str="", access_str=""): if hasattr(self, "_paramEnumed"): print("Cycle detected enumerating params at %s?!" % (cmd_line_str)) else: @@ -316,27 +333,32 @@ class SimObjectVector(VectorParamValue): for vals in self: # Each entry in the SimObjectVector should be an # instance of a SimObject - flags_dict = vals.enumerateParams(flags_dict, - cmd_line_str + "%d." % x, - access_str + "[%d]." % x) + flags_dict = vals.enumerateParams( + flags_dict, + cmd_line_str + "%d." % x, + access_str + "[%d]." % x, + ) x = x + 1 return flags_dict + class VectorParamDesc(ParamDesc): # Convert assigned value to appropriate type. If the RHS is not a # list or tuple, it generates a single-element list. 
def convert(self, value): if isinstance(value, (list, tuple)): # list: coerce each element into new list - tmp_list = [ ParamDesc.convert(self, v) for v in value ] + tmp_list = [ParamDesc.convert(self, v) for v in value] elif isinstance(value, str): # If input is a csv string - tmp_list = [ ParamDesc.convert(self, v) \ - for v in value.strip('[').strip(']').split(',') ] + tmp_list = [ + ParamDesc.convert(self, v) + for v in value.strip("[").strip("]").split(",") + ] else: # singleton: coerce to a single-element list - tmp_list = [ ParamDesc.convert(self, value) ] + tmp_list = [ParamDesc.convert(self, value)] if isSimObjectSequence(tmp_list): return SimObjectVector(tmp_list) @@ -354,11 +376,13 @@ class VectorParamDesc(ParamDesc): # Produce a human readable representation of the value of this vector param. def pretty_print(self, value): if isinstance(value, (list, tuple)): - tmp_list = [ ParamDesc.pretty_print(self, v) for v in value ] + tmp_list = [ParamDesc.pretty_print(self, v) for v in value] elif isinstance(value, str): - tmp_list = [ ParamDesc.pretty_print(self, v) for v in value.split(',') ] + tmp_list = [ + ParamDesc.pretty_print(self, v) for v in value.split(",") + ] else: - tmp_list = [ ParamDesc.pretty_print(self, value) ] + tmp_list = [ParamDesc.pretty_print(self, value)] return tmp_list @@ -366,36 +390,39 @@ class VectorParamDesc(ParamDesc): def __call__(self, value): if isinstance(value, (list, tuple)): # list: coerce each element into new list - tmp_list = [ ParamDesc.convert(self, v) for v in value ] + tmp_list = [ParamDesc.convert(self, v) for v in value] elif isinstance(value, str): # If input is a csv string - tmp_list = [ ParamDesc.convert(self, v) \ - for v in value.strip('[').strip(']').split(',') ] + tmp_list = [ + ParamDesc.convert(self, v) + for v in value.strip("[").strip("]").split(",") + ] else: # singleton: coerce to a single-element list - tmp_list = [ ParamDesc.convert(self, value) ] + tmp_list = [ParamDesc.convert(self, value)] return 
VectorParamValue(tmp_list) def cxx_predecls(self, code): - code('#include ') + code("#include ") self.ptype.cxx_predecls(code) def pybind_predecls(self, code): - code('#include ') + code("#include ") self.ptype.pybind_predecls(code) def cxx_decl(self, code): - code('std::vector< ${{self.ptype.cxx_type}} > ${{self.name}};') + code("std::vector< ${{self.ptype.cxx_type}} > ${{self.name}};") + class ParamFactory(object): - def __init__(self, param_desc_class, ptype_str = None): + def __init__(self, param_desc_class, ptype_str=None): self.param_desc_class = param_desc_class self.ptype_str = ptype_str def __getattr__(self, attr): if self.ptype_str: - attr = self.ptype_str + '.' + attr + attr = self.ptype_str + "." + attr return ParamFactory(self.param_desc_class, attr) # E.g., Param.Int(5, "number of widgets") @@ -409,6 +436,7 @@ class ParamFactory(object): pass return self.param_desc_class(self.ptype_str, ptype, *args, **kwargs) + Param = ParamFactory(ParamDesc) VectorParam = ParamFactory(VectorParamDesc) @@ -427,13 +455,13 @@ VectorParam = ParamFactory(VectorParamDesc) # String-valued parameter. Just mixin the ParamValue class with the # built-in str class. -class String(ParamValue,str): - cxx_type = 'std::string' +class String(ParamValue, str): + cxx_type = "std::string" cmd_line_settable = True @classmethod def cxx_predecls(self, code): - code('#include ') + code("#include ") def __call__(self, value): self = value @@ -441,12 +469,13 @@ class String(ParamValue,str): @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('%s = %s;' % (dest, src)) - code('%s true;' % ret) + code("%s = %s;" % (dest, src)) + code("%s true;" % ret) def getValue(self): return self + # superclass for "numeric" parameter values, to emulate math # operations in a type-safe way. e.g., a Latency times an int returns # a new Latency object. 
@@ -488,7 +517,6 @@ class NumericParamValue(ParamValue): newobj._check() return newobj - def __add__(self, other): newobj = self.__class__(self) newobj.value += NumericParamValue.unwrap(other) @@ -543,7 +571,8 @@ class NumericParamValue(ParamValue): # the dest type. @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('%s to_number(%s, %s);' % (ret, src, dest)) + code("%s to_number(%s, %s);" % (ret, src, dest)) + # Metaclass for bounds-checked integer parameters. See CheckedInt. class CheckedIntType(MetaParamValue): @@ -553,21 +582,24 @@ class CheckedIntType(MetaParamValue): # CheckedInt is an abstract base class, so we actually don't # want to do any processing on it... the rest of this code is # just for classes that derive from CheckedInt. - if name == 'CheckedInt': + if name == "CheckedInt": return - if not (hasattr(cls, 'min') and hasattr(cls, 'max')): - if not (hasattr(cls, 'size') and hasattr(cls, 'unsigned')): - panic("CheckedInt subclass %s must define either\n" \ - " 'min' and 'max' or 'size' and 'unsigned'\n", - name); + if not (hasattr(cls, "min") and hasattr(cls, "max")): + if not (hasattr(cls, "size") and hasattr(cls, "unsigned")): + panic( + "CheckedInt subclass %s must define either\n" + " 'min' and 'max' or 'size' and 'unsigned'\n", + name, + ) if cls.unsigned: cls.min = 0 - cls.max = 2 ** cls.size - 1 + cls.max = 2**cls.size - 1 else: cls.min = -(2 ** (cls.size - 1)) cls.max = (2 ** (cls.size - 1)) - 1 + # Abstract superclass for bounds-checked integer parameters. This # class is subclassed to generate parameter classes with specific # bounds. 
Initialization of the min and max bounds is done in the @@ -577,8 +609,10 @@ class CheckedInt(NumericParamValue, metaclass=CheckedIntType): def _check(self): if not self.min <= self.value <= self.max: - raise TypeError('Integer param out of bounds %d < %d < %d' % \ - (self.min, self.value, self.max)) + raise TypeError( + "Integer param out of bounds %d < %d < %d" + % (self.min, self.value, self.max) + ) def __init__(self, value): if isinstance(value, str): @@ -586,8 +620,10 @@ class CheckedInt(NumericParamValue, metaclass=CheckedIntType): elif isinstance(value, (int, float, NumericParamValue)): self.value = int(value) else: - raise TypeError("Can't convert object of type %s to CheckedInt" \ - % type(value).__name__) + raise TypeError( + "Can't convert object of type %s to CheckedInt" + % type(value).__name__ + ) self._check() def __call__(self, value): @@ -605,32 +641,105 @@ class CheckedInt(NumericParamValue, metaclass=CheckedIntType): def getValue(self): return int(self.value) -class Int(CheckedInt): cxx_type = 'int'; size = 32; unsigned = False -class Unsigned(CheckedInt): cxx_type = 'unsigned'; size = 32; unsigned = True -class Int8(CheckedInt): cxx_type = 'int8_t'; size = 8; unsigned = False -class UInt8(CheckedInt): cxx_type = 'uint8_t'; size = 8; unsigned = True -class Int16(CheckedInt): cxx_type = 'int16_t'; size = 16; unsigned = False -class UInt16(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True -class Int32(CheckedInt): cxx_type = 'int32_t'; size = 32; unsigned = False -class UInt32(CheckedInt): cxx_type = 'uint32_t'; size = 32; unsigned = True -class Int64(CheckedInt): cxx_type = 'int64_t'; size = 64; unsigned = False -class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True +class Int(CheckedInt): + cxx_type = "int" + size = 32 + unsigned = False -class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True -class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True -class TcpPort(CheckedInt): 
cxx_type = 'uint16_t'; size = 16; unsigned = True -class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True -class Percent(CheckedInt): cxx_type = 'int'; min = 0; max = 100 +class Unsigned(CheckedInt): + cxx_type = "unsigned" + size = 32 + unsigned = True + + +class Int8(CheckedInt): + cxx_type = "int8_t" + size = 8 + unsigned = False + + +class UInt8(CheckedInt): + cxx_type = "uint8_t" + size = 8 + unsigned = True + + +class Int16(CheckedInt): + cxx_type = "int16_t" + size = 16 + unsigned = False + + +class UInt16(CheckedInt): + cxx_type = "uint16_t" + size = 16 + unsigned = True + + +class Int32(CheckedInt): + cxx_type = "int32_t" + size = 32 + unsigned = False + + +class UInt32(CheckedInt): + cxx_type = "uint32_t" + size = 32 + unsigned = True + + +class Int64(CheckedInt): + cxx_type = "int64_t" + size = 64 + unsigned = False + + +class UInt64(CheckedInt): + cxx_type = "uint64_t" + size = 64 + unsigned = True + + +class Counter(CheckedInt): + cxx_type = "Counter" + size = 64 + unsigned = True + + +class Tick(CheckedInt): + cxx_type = "Tick" + size = 64 + unsigned = True + + +class TcpPort(CheckedInt): + cxx_type = "uint16_t" + size = 16 + unsigned = True + + +class UdpPort(CheckedInt): + cxx_type = "uint16_t" + size = 16 + unsigned = True + + +class Percent(CheckedInt): + cxx_type = "int" + min = 0 + max = 100 + class Cycles(CheckedInt): - cxx_type = 'Cycles' + cxx_type = "Cycles" size = 64 unsigned = True def getValue(self): from _m5.core import Cycles + return Cycles(self.value) @classmethod @@ -641,22 +750,25 @@ class Cycles(CheckedInt): @classmethod def cxx_ini_parse(cls, code, src, dest, ret): - code('uint64_t _temp;') - code('bool _ret = to_number(%s, _temp);' % src) - code('if (_ret)') - code(' %s = Cycles(_temp);' % dest) - code('%s _ret;' % ret) + code("uint64_t _temp;") + code("bool _ret = to_number(%s, _temp);" % src) + code("if (_ret)") + code(" %s = Cycles(_temp);" % dest) + code("%s _ret;" % ret) + class Float(ParamValue, 
float): - cxx_type = 'double' + cxx_type = "double" cmd_line_settable = True def __init__(self, value): if isinstance(value, (int, float, NumericParamValue, Float, str)): self.value = float(value) else: - raise TypeError("Can't convert object of type %s to Float" \ - % type(value).__name__) + raise TypeError( + "Can't convert object of type %s to Float" + % type(value).__name__ + ) def __call__(self, value): self.__init__(value) @@ -670,17 +782,19 @@ class Float(ParamValue, float): @classmethod def cxx_ini_predecls(cls, code): - code('#include ') + code("#include ") @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest)) + code("%s (std::istringstream(%s) >> %s).eof();" % (ret, src, dest)) + class MemorySize(CheckedInt): - cxx_type = 'uint64_t' - ex_str = '512MiB' + cxx_type = "uint64_t" + ex_str = "512MiB" size = 64 unsigned = True + def __init__(self, value): if isinstance(value, MemorySize): self.value = value.value @@ -688,11 +802,13 @@ class MemorySize(CheckedInt): self.value = convert.toMemorySize(value) self._check() + class MemorySize32(CheckedInt): - cxx_type = 'uint32_t' - ex_str = '512MiB' + cxx_type = "uint32_t" + ex_str = "512MiB" size = 32 unsigned = True + def __init__(self, value): if isinstance(value, MemorySize): self.value = value.value @@ -700,10 +816,12 @@ class MemorySize32(CheckedInt): self.value = convert.toMemorySize(value) self._check() + class Addr(CheckedInt): - cxx_type = 'Addr' + cxx_type = "Addr" size = 64 unsigned = True + def __init__(self, value): if isinstance(value, Addr): self.value = value.value @@ -721,11 +839,13 @@ class Addr(CheckedInt): self.value = int(str(value), base=0) self._check() + def __add__(self, other): if isinstance(other, Addr): return self.value + other.value else: return self.value + other + def pretty_print(self, value): try: val = convert.toMemorySize(value) @@ -733,8 +853,9 @@ class Addr(CheckedInt): val = int(value) return "0x%x" % 
int(val) + class AddrRange(ParamValue): - cxx_type = 'AddrRange' + cxx_type = "AddrRange" def __init__(self, *args, **kwargs): # Disable interleaving and hashing by default @@ -746,30 +867,30 @@ class AddrRange(ParamValue): # An address range needs to have an upper limit, specified # either explicitly with an end, or as an offset using the # size keyword. - if 'end' in kwargs: - self.end = Addr(kwargs.pop('end')) - elif 'size' in kwargs: - self.end = self.start + Addr(kwargs.pop('size')) + if "end" in kwargs: + self.end = Addr(kwargs.pop("end")) + elif "size" in kwargs: + self.end = self.start + Addr(kwargs.pop("size")) else: raise TypeError("Either end or size must be specified") # Now on to the optional bit - if 'intlvMatch' in kwargs: - self.intlvMatch = int(kwargs.pop('intlvMatch')) + if "intlvMatch" in kwargs: + self.intlvMatch = int(kwargs.pop("intlvMatch")) - if 'masks' in kwargs: - self.masks = [ int(x) for x in list(kwargs.pop('masks')) ] + if "masks" in kwargs: + self.masks = [int(x) for x in list(kwargs.pop("masks"))] self.intlvBits = len(self.masks) else: - if 'intlvBits' in kwargs: - self.intlvBits = int(kwargs.pop('intlvBits')) + if "intlvBits" in kwargs: + self.intlvBits = int(kwargs.pop("intlvBits")) self.masks = [0] * self.intlvBits - if 'intlvHighBit' not in kwargs: + if "intlvHighBit" not in kwargs: raise TypeError("No interleave bits specified") - intlv_high_bit = int(kwargs.pop('intlvHighBit')) + intlv_high_bit = int(kwargs.pop("intlvHighBit")) xor_high_bit = 0 - if 'xorHighBit' in kwargs: - xor_high_bit = int(kwargs.pop('xorHighBit')) + if "xorHighBit" in kwargs: + xor_high_bit = int(kwargs.pop("xorHighBit")) for i in range(0, self.intlvBits): bit1 = intlv_high_bit - i mask = 1 << bit1 @@ -779,7 +900,7 @@ class AddrRange(ParamValue): self.masks[self.intlvBits - i - 1] = mask if len(args) == 0: - self.start = Addr(kwargs.pop('start')) + self.start = Addr(kwargs.pop("start")) handle_kwargs(self, kwargs) elif len(args) == 1: @@ -804,10 +925,14 @@ 
class AddrRange(ParamValue): def __str__(self): if len(self.masks) == 0: - return '%s:%s' % (self.start, self.end) + return "%s:%s" % (self.start, self.end) else: - return '%s:%s:%s:%s' % (self.start, self.end, self.intlvMatch, - ':'.join(str(m) for m in self.masks)) + return "%s:%s:%s:%s" % ( + self.start, + self.end, + self.intlvMatch, + ":".join(str(m) for m in self.masks), + ) def size(self): # Divide the size by the size of the interleaving slice @@ -825,56 +950,58 @@ class AddrRange(ParamValue): @classmethod def cxx_ini_predecls(cls, code): - code('#include ') - code('#include ') + code("#include ") + code("#include ") code('#include "base/types.hh"') @classmethod def cxx_ini_parse(cls, code, src, dest, ret): - code('bool _ret = true;') - code('uint64_t _start, _end, _intlvMatch = 0;') - code('std::vector _masks;') - code('char _sep;') - code('std::istringstream _stream(${src});') - code('_stream >> _start;') - code('_stream.get(_sep);') - code('_ret = _sep == \':\';') - code('_stream >> _end;') - code('if (!_stream.fail() && !_stream.eof()) {') - code(' _stream.get(_sep);') - code(' _ret = ret && _sep == \':\';') - code(' _stream >> _intlvMatch;') - code(' while (!_stream.fail() && !_stream.eof()) {') - code(' _stream.get(_sep);') - code(' _ret = ret && _sep == \':\';') - code(' Addr mask;') - code(' _stream >> mask;') - code(' _masks.push_back(mask);') - code(' }') - code('}') - code('_ret = _ret && !_stream.fail() && _stream.eof();') - code('if (_ret)') - code(' ${dest} = AddrRange(_start, _end, _masks, _intlvMatch);') - code('${ret} _ret;') + code("bool _ret = true;") + code("uint64_t _start, _end, _intlvMatch = 0;") + code("std::vector _masks;") + code("char _sep;") + code("std::istringstream _stream(${src});") + code("_stream >> _start;") + code("_stream.get(_sep);") + code("_ret = _sep == ':';") + code("_stream >> _end;") + code("if (!_stream.fail() && !_stream.eof()) {") + code(" _stream.get(_sep);") + code(" _ret = ret && _sep == ':';") + code(" 
_stream >> _intlvMatch;") + code(" while (!_stream.fail() && !_stream.eof()) {") + code(" _stream.get(_sep);") + code(" _ret = ret && _sep == ':';") + code(" Addr mask;") + code(" _stream >> mask;") + code(" _masks.push_back(mask);") + code(" }") + code("}") + code("_ret = _ret && !_stream.fail() && _stream.eof();") + code("if (_ret)") + code(" ${dest} = AddrRange(_start, _end, _masks, _intlvMatch);") + code("${ret} _ret;") def getValue(self): # Go from the Python class to the wrapped C++ class from _m5.range import AddrRange - return AddrRange(int(self.start), int(self.end), - self.masks, int(self.intlvMatch)) + return AddrRange( + int(self.start), int(self.end), self.masks, int(self.intlvMatch) + ) def exclude(self, ranges): - pybind_exclude = list([ r.getValue() for r in ranges ]) + pybind_exclude = list([r.getValue() for r in ranges]) pybind_include = self.getValue().exclude(pybind_exclude) - return list([ AddrRange(r.start(), r.end()) for r in pybind_include ]) + return list([AddrRange(r.start(), r.end()) for r in pybind_include]) + # Boolean parameter type. Python doesn't let you subclass bool, since # it doesn't want to let you create multiple instances of True and # False. Thus this is a little more complicated than String. 
class Bool(ParamValue): - cxx_type = 'bool' + cxx_type = "bool" cmd_line_settable = True def __init__(self, value): @@ -903,8 +1030,8 @@ class Bool(ParamValue): def ini_str(self): if self.value: - return 'true' - return 'false' + return "true" + return "false" def config_value(self): return self.value @@ -917,21 +1044,25 @@ class Bool(ParamValue): @classmethod def cxx_ini_parse(cls, code, src, dest, ret): - code('%s to_bool(%s, %s);' % (ret, src, dest)) + code("%s to_bool(%s, %s);" % (ret, src, dest)) -def IncEthernetAddr(addr, val = 1): - bytes = [ int(x, 16) for x in addr.split(':') ] + +def IncEthernetAddr(addr, val=1): + bytes = [int(x, 16) for x in addr.split(":")] bytes[5] += val for i in (5, 4, 3, 2, 1): - val,rem = divmod(bytes[i], 256) + val, rem = divmod(bytes[i], 256) bytes[i] = rem if val == 0: break bytes[i - 1] += val - assert(bytes[0] <= 255) - return ':'.join(map(lambda x: '%02x' % x, bytes)) + assert bytes[0] <= 255 + return ":".join(map(lambda x: "%02x" % x, bytes)) + _NextEthernetAddr = "00:90:00:00:00:01" + + def NextEthernetAddr(): global _NextEthernetAddr @@ -939,8 +1070,9 @@ def NextEthernetAddr(): _NextEthernetAddr = IncEthernetAddr(_NextEthernetAddr, 1) return value + class EthernetAddr(ParamValue): - cxx_type = 'networking::EthAddr' + cxx_type = "networking::EthAddr" ex_str = "00:90:00:00:00:01" cmd_line_settable = True @@ -956,13 +1088,13 @@ class EthernetAddr(ParamValue): if not isinstance(value, str): raise TypeError("expected an ethernet address and didn't get one") - bytes = value.split(':') + bytes = value.split(":") if len(bytes) != 6: - raise TypeError('invalid ethernet address %s' % value) + raise TypeError("invalid ethernet address %s" % value) for byte in bytes: - if not 0 <= int(byte, base=16) <= 0xff: - raise TypeError('invalid ethernet address %s' % value) + if not 0 <= int(byte, base=16) <= 0xFF: + raise TypeError("invalid ethernet address %s" % value) self.value = value @@ -977,6 +1109,7 @@ class EthernetAddr(ParamValue): 
def getValue(self): from _m5.net import EthAddr + return EthAddr(self.value) def __str__(self): @@ -987,13 +1120,14 @@ class EthernetAddr(ParamValue): @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('%s = networking::EthAddr(%s);' % (dest, src)) - code('%s true;' % ret) + code("%s = networking::EthAddr(%s);" % (dest, src)) + code("%s true;" % ret) + # When initializing an IpAddress, pass in an existing IpAddress, a string of # the form "a.b.c.d", or an integer representing an IP. class IpAddress(ParamValue): - cxx_type = 'networking::IpAddress' + cxx_type = "networking::IpAddress" ex_str = "127.0.0.1" cmd_line_settable = True @@ -1016,8 +1150,8 @@ class IpAddress(ParamValue): return value def __str__(self): - tup = [(self.ip >> i) & 0xff for i in (24, 16, 8, 0)] - return '%d.%d.%d.%d' % tuple(tup) + tup = [(self.ip >> i) & 0xFF for i in (24, 16, 8, 0)] + return "%d.%d.%d.%d" % tuple(tup) def __eq__(self, other): if isinstance(other, IpAddress): @@ -1039,13 +1173,15 @@ class IpAddress(ParamValue): def getValue(self): from _m5.net import IpAddress + return IpAddress(self.ip) + # When initializing an IpNetmask, pass in an existing IpNetmask, a string of # the form "a.b.c.d/n" or "a.b.c.d/e.f.g.h", or an ip and netmask as # positional or keyword arguments. 
class IpNetmask(IpAddress): - cxx_type = 'networking::IpNetmask' + cxx_type = "networking::IpNetmask" ex_str = "127.0.0.0/24" cmd_line_settable = True @@ -1054,7 +1190,7 @@ class IpNetmask(IpAddress): code('#include "base/inet.hh"') def __init__(self, *args, **kwargs): - def handle_kwarg(self, kwargs, key, elseVal = None): + def handle_kwarg(self, kwargs, key, elseVal=None): if key in kwargs: setattr(self, key, kwargs.pop(key)) elif elseVal: @@ -1063,15 +1199,15 @@ class IpNetmask(IpAddress): raise TypeError("No value set for %s" % key) if len(args) == 0: - handle_kwarg(self, kwargs, 'ip') - handle_kwarg(self, kwargs, 'netmask') + handle_kwarg(self, kwargs, "ip") + handle_kwarg(self, kwargs, "netmask") elif len(args) == 1: if kwargs: - if not 'ip' in kwargs and not 'netmask' in kwargs: + if not "ip" in kwargs and not "netmask" in kwargs: raise TypeError("Invalid arguments") - handle_kwarg(self, kwargs, 'ip', args[0]) - handle_kwarg(self, kwargs, 'netmask', args[0]) + handle_kwarg(self, kwargs, "ip", args[0]) + handle_kwarg(self, kwargs, "netmask", args[0]) elif isinstance(args[0], IpNetmask): self.ip = args[0].ip self.netmask = args[0].netmask @@ -1114,12 +1250,14 @@ class IpNetmask(IpAddress): def getValue(self): from _m5.net import IpNetmask + return IpNetmask(self.ip, self.netmask) + # When initializing an IpWithPort, pass in an existing IpWithPort, a string of # the form "a.b.c.d:p", or an ip and port as positional or keyword arguments. 
class IpWithPort(IpAddress): - cxx_type = 'networking::IpWithPort' + cxx_type = "networking::IpWithPort" ex_str = "127.0.0.1:80" cmd_line_settable = True @@ -1128,7 +1266,7 @@ class IpWithPort(IpAddress): code('#include "base/inet.hh"') def __init__(self, *args, **kwargs): - def handle_kwarg(self, kwargs, key, elseVal = None): + def handle_kwarg(self, kwargs, key, elseVal=None): if key in kwargs: setattr(self, key, kwargs.pop(key)) elif elseVal: @@ -1137,15 +1275,15 @@ class IpWithPort(IpAddress): raise TypeError("No value set for %s" % key) if len(args) == 0: - handle_kwarg(self, kwargs, 'ip') - handle_kwarg(self, kwargs, 'port') + handle_kwarg(self, kwargs, "ip") + handle_kwarg(self, kwargs, "port") elif len(args) == 1: if kwargs: - if not 'ip' in kwargs and not 'port' in kwargs: + if not "ip" in kwargs and not "port" in kwargs: raise TypeError("Invalid arguments") - handle_kwarg(self, kwargs, 'ip', args[0]) - handle_kwarg(self, kwargs, 'port', args[0]) + handle_kwarg(self, kwargs, "ip", args[0]) + handle_kwarg(self, kwargs, "port", args[0]) elif isinstance(args[0], IpWithPort): self.ip = args[0].ip self.port = args[0].port @@ -1183,24 +1321,28 @@ class IpWithPort(IpAddress): def verify(self): self.verifyIp() - if self.port < 0 or self.port > 0xffff: + if self.port < 0 or self.port > 0xFFFF: raise TypeError("invalid port %d" % self.port) def getValue(self): from _m5.net import IpWithPort + return IpWithPort(self.ip, self.port) -time_formats = [ "%a %b %d %H:%M:%S %Z %Y", - "%a %b %d %H:%M:%S %Y", - "%Y/%m/%d %H:%M:%S", - "%Y/%m/%d %H:%M", - "%Y/%m/%d", - "%m/%d/%Y %H:%M:%S", - "%m/%d/%Y %H:%M", - "%m/%d/%Y", - "%m/%d/%y %H:%M:%S", - "%m/%d/%y %H:%M", - "%m/%d/%y"] + +time_formats = [ + "%a %b %d %H:%M:%S %Z %Y", + "%a %b %d %H:%M:%S %Y", + "%Y/%m/%d %H:%M:%S", + "%Y/%m/%d %H:%M", + "%Y/%m/%d", + "%m/%d/%Y %H:%M:%S", + "%m/%d/%Y %H:%M", + "%m/%d/%Y", + "%m/%d/%y %H:%M:%S", + "%m/%d/%y %H:%M", + "%m/%d/%y", +] def parse_time(value): @@ -1217,7 +1359,7 @@ def 
parse_time(value): return value.timetuple() if isinstance(value, str): - if value in ('Now', 'Today'): + if value in ("Now", "Today"): return time.gmtime(time.time()) for format in time_formats: @@ -1228,12 +1370,13 @@ def parse_time(value): raise ValueError("Could not parse '%s' as a time" % value) + class Time(ParamValue): - cxx_type = 'tm' + cxx_type = "tm" @classmethod def cxx_predecls(cls, code): - code('#include ') + code("#include ") def __init__(self, value): self.value = parse_time(value) @@ -1260,13 +1403,14 @@ class Time(ParamValue): @classmethod def cxx_ini_predecls(cls, code): - code('#include ') + code("#include ") @classmethod def cxx_ini_parse(cls, code, src, dest, ret): - code('char *_parse_ret = strptime((${src}).c_str(),') + code("char *_parse_ret = strptime((${src}).c_str(),") code(' "%a %b %d %H:%M:%S %Y", &(${dest}));') - code('${ret} _parse_ret && *_parse_ret == \'\\0\';'); + code("${ret} _parse_ret && *_parse_ret == '\\0';") + # Enumerated types are a little more complex. 
The user specifies the # type as Enum(foo) where foo is either a list or dictionary of @@ -1292,39 +1436,45 @@ class MetaEnum(MetaParamValue): return cls def __init__(cls, name, bases, init_dict): - if 'map' in init_dict: + if "map" in init_dict: if not isinstance(cls.map, dict): - raise TypeError("Enum-derived class attribute 'map' " \ - "must be of type dict") + raise TypeError( + "Enum-derived class attribute 'map' " + "must be of type dict" + ) # build list of value strings from map cls.vals = list(cls.map.keys()) cls.vals.sort() - elif 'vals' in init_dict: + elif "vals" in init_dict: if not isinstance(cls.vals, list): - raise TypeError("Enum-derived class attribute 'vals' " \ - "must be of type list") + raise TypeError( + "Enum-derived class attribute 'vals' " + "must be of type list" + ) # build string->value map from vals sequence cls.map = {} - for idx,val in enumerate(cls.vals): + for idx, val in enumerate(cls.vals): cls.map[val] = idx else: - raise TypeError("Enum-derived class must define "\ - "attribute 'map' or 'vals'") + raise TypeError( + "Enum-derived class must define " "attribute 'map' or 'vals'" + ) if cls.is_class: - cls.cxx_type = '%s' % name + cls.cxx_type = "%s" % name else: - cls.cxx_type = 'enums::%s' % name + cls.cxx_type = "enums::%s" % name super().__init__(name, bases, init_dict) + # Base class for enum types. 
class Enum(ParamValue, metaclass=MetaEnum): vals = [] cmd_line_settable = True # The name of the wrapping namespace or struct - wrapper_name = 'enums' + wrapper_name = "enums" # If true, the enum is wrapped in a struct rather than a namespace wrapper_is_struct = False @@ -1336,8 +1486,10 @@ class Enum(ParamValue, metaclass=MetaEnum): def __init__(self, value): if value not in self.map: - raise TypeError("Enum param got bad value '%s' (not in %s)" \ - % (value, self.vals)) + raise TypeError( + "Enum param got bad value '%s' (not in %s)" + % (value, self.vals) + ) self.value = value def __call__(self, value): @@ -1350,27 +1502,31 @@ class Enum(ParamValue, metaclass=MetaEnum): @classmethod def cxx_ini_parse(cls, code, src, dest, ret): - code('if (false) {') + code("if (false) {") for elem_name in cls.map.keys(): code('} else if (%s == "%s") {' % (src, elem_name)) code.indent() name = cls.__name__ if cls.enum_name is None else cls.enum_name - code('%s = %s::%s;' % (dest, name if cls.is_class else 'enums', - elem_name)) - code('%s true;' % ret) + code( + "%s = %s::%s;" + % (dest, name if cls.is_class else "enums", elem_name) + ) + code("%s true;" % ret) code.dedent() - code('} else {') - code(' %s false;' % ret) - code('}') + code("} else {") + code(" %s false;" % ret) + code("}") def getValue(self): import m5.internal.params + e = getattr(m5.internal.params, "enum_%s" % self.__class__.__name__) return e(self.map[self.value]) def __str__(self): return self.value + # This param will generate a scoped c++ enum and its python bindings. class ScopedEnum(Enum): vals = [] @@ -1388,19 +1544,19 @@ class ScopedEnum(Enum): # If not None, use this as the enum name rather than this class name enum_name = None + class ByteOrder(ScopedEnum): """Enum representing component's byte order (endianness)""" - vals = [ - 'big', - 'little', - ] + vals = ["big", "little"] + # how big does a rounding error need to be before we warn about it? 
frequency_tolerance = 0.001 # 0.1% + class TickParamValue(NumericParamValue): - cxx_type = 'Tick' + cxx_type = "Tick" ex_str = "1MHz" cmd_line_settable = True @@ -1417,13 +1573,14 @@ class TickParamValue(NumericParamValue): @classmethod def cxx_ini_predecls(cls, code): - code('#include ') + code("#include ") # Ticks are expressed in seconds in JSON files and in plain # Ticks in .ini files. Switch based on a config flag @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('${ret} to_number(${src}, ${dest});') + code("${ret} to_number(${src}, ${dest});") + class Latency(TickParamValue): ex_str = "100ns" @@ -1435,7 +1592,7 @@ class Latency(TickParamValue): elif isinstance(value, Frequency): self.ticks = value.ticks self.value = 1.0 / value.value - elif value.endswith('t'): + elif value.endswith("t"): self.ticks = True self.value = int(value[:-1]) else: @@ -1447,9 +1604,9 @@ class Latency(TickParamValue): return value def __getattr__(self, attr): - if attr in ('latency', 'period'): + if attr in ("latency", "period"): return self - if attr == 'frequency': + if attr == "frequency": return Frequency(self) raise AttributeError("Latency object has no attribute '%s'" % attr) @@ -1465,7 +1622,8 @@ class Latency(TickParamValue): # convert latency to ticks def ini_str(self): - return '%d' % self.getValue() + return "%d" % self.getValue() + class Frequency(TickParamValue): ex_str = "1GHz" @@ -1489,9 +1647,9 @@ class Frequency(TickParamValue): return value def __getattr__(self, attr): - if attr == 'frequency': + if attr == "frequency": return self - if attr in ('latency', 'period'): + if attr in ("latency", "period"): return Latency(self) raise AttributeError("Frequency object has no attribute '%s'" % attr) @@ -1507,7 +1665,8 @@ class Frequency(TickParamValue): return self.getValue() def ini_str(self): - return '%d' % self.getValue() + return "%d" % self.getValue() + # A generic Frequency and/or Latency value. 
Value is stored as a # latency, just like Latency and Frequency. @@ -1519,7 +1678,7 @@ class Clock(TickParamValue): elif isinstance(value, Frequency): self.ticks = value.ticks self.value = 1.0 / value.value - elif value.endswith('t'): + elif value.endswith("t"): self.ticks = True self.value = int(value[:-1]) else: @@ -1534,9 +1693,9 @@ class Clock(TickParamValue): return "%s" % Latency(self) def __getattr__(self, attr): - if attr == 'frequency': + if attr == "frequency": return Frequency(self) - if attr in ('latency', 'period'): + if attr in ("latency", "period"): return Latency(self) raise AttributeError("Frequency object has no attribute '%s'" % attr) @@ -1549,6 +1708,7 @@ class Clock(TickParamValue): def ini_str(self): return self.period.ini_str() + class Voltage(Float): ex_str = "1V" @@ -1560,6 +1720,7 @@ class Voltage(Float): value = convert.toVoltage(value) super().__init__(value) + class Current(Float): ex_str = "1mA" @@ -1571,6 +1732,7 @@ class Current(Float): value = convert.toCurrent(value) super().__init__(value) + class Energy(Float): ex_str = "1pJ" @@ -1582,8 +1744,9 @@ class Energy(Float): value = convert.toEnergy(value) super().__init__(value) + class Temperature(ParamValue): - cxx_type = 'Temperature' + cxx_type = "Temperature" cmd_line_settable = True ex_str = "1C" @@ -1599,6 +1762,7 @@ class Temperature(ParamValue): def getValue(self): from _m5.core import Temperature + return Temperature.from_kelvin(self.value) def config_value(self): @@ -1616,14 +1780,15 @@ class Temperature(ParamValue): @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('double _temp;') - code('bool _ret = to_number(%s, _temp);' % src) - code('if (_ret)') - code(' %s = Temperature(_temp);' % dest) - code('%s _ret;' % ret) + code("double _temp;") + code("bool _ret = to_number(%s, _temp);" % src) + code("if (_ret)") + code(" %s = Temperature(_temp);" % dest) + code("%s _ret;" % ret) -class NetworkBandwidth(float,ParamValue): - cxx_type = 'float' + +class 
NetworkBandwidth(float, ParamValue): + cxx_type = "float" ex_str = "1Gbps" cmd_line_settable = True @@ -1648,21 +1813,22 @@ class NetworkBandwidth(float,ParamValue): return float(value) def ini_str(self): - return '%f' % self.getValue() + return "%f" % self.getValue() def config_value(self): - return '%f' % self.getValue() + return "%f" % self.getValue() @classmethod def cxx_ini_predecls(cls, code): - code('#include ') + code("#include ") @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest)) + code("%s (std::istringstream(%s) >> %s).eof();" % (ret, src, dest)) -class MemoryBandwidth(float,ParamValue): - cxx_type = 'float' + +class MemoryBandwidth(float, ParamValue): + cxx_type = "float" ex_str = "1GiB/s" cmd_line_settable = True @@ -1686,18 +1852,19 @@ class MemoryBandwidth(float,ParamValue): return float(value) def ini_str(self): - return '%f' % self.getValue() + return "%f" % self.getValue() def config_value(self): - return '%f' % self.getValue() + return "%f" % self.getValue() @classmethod def cxx_ini_predecls(cls, code): - code('#include ') + code("#include ") @classmethod def cxx_ini_parse(self, code, src, dest, ret): - code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest)) + code("%s (std::istringstream(%s) >> %s).eof();" % (ret, src, dest)) + # # "Constants"... handy aliases for various values. @@ -1708,16 +1875,16 @@ class MemoryBandwidth(float,ParamValue): # SimObject is required. # only one copy of a particular node class NullSimObject(object, metaclass=Singleton): - _name = 'Null' + _name = "Null" def __call__(cls): return cls - def _instantiate(self, parent = None, path = ''): + def _instantiate(self, parent=None, path=""): pass def ini_str(self): - return 'Null' + return "Null" def unproxy(self, base): return self @@ -1747,12 +1914,15 @@ class NullSimObject(object, metaclass=Singleton): def getValue(self): return None + # The only instance you'll ever need... 
NULL = NullSimObject() + def isNullPointer(value): return isinstance(value, NullSimObject) + # Some memory range specifications use this as a default upper bound. MaxAddr = Addr.max MaxTick = Tick.max @@ -1771,17 +1941,17 @@ AllMemory = AddrRange(0, MaxAddr) # particular SimObject. class PortRef(object): def __init__(self, simobj, name, role, is_source): - assert(isSimObject(simobj) or isSimObjectClass(simobj)) + assert isSimObject(simobj) or isSimObjectClass(simobj) self.simobj = simobj self.name = name self.role = role self.is_source = is_source - self.peer = None # not associated with another port yet - self.ccConnected = False # C++ port connection done? + self.peer = None # not associated with another port yet + self.ccConnected = False # C++ port connection done? self.index = -1 # always -1 for non-vector ports def __str__(self): - return '%s.%s' % (self.simobj, self.name) + return "%s.%s" % (self.simobj, self.name) def __len__(self): # Return the number of connected ports, i.e. 0 is we have no @@ -1794,15 +1964,20 @@ class PortRef(object): # for config.json def get_config_as_dict(self): - return {'role' : self.role, 'peer' : str(self.peer), - 'is_source' : str(self.is_source)} + return { + "role": self.role, + "peer": str(self.peer), + "is_source": str(self.is_source), + } def __getattr__(self, attr): - if attr == 'peerObj': + if attr == "peerObj": # shorthand for proxies return self.peer.simobj - raise AttributeError("'%s' object has no attribute '%s'" % \ - (self.__class__.__name__, attr)) + raise AttributeError( + "'%s' object has no attribute '%s'" + % (self.__class__.__name__, attr) + ) # Full connection is symmetric (both ways). 
Called via # SimObject.__setattr__ as a result of a port assignment, e.g., @@ -1813,20 +1988,32 @@ class PortRef(object): # reference to plain VectorPort is implicit append other = other._get_next() if self.peer and not proxy.isproxy(self.peer): - fatal("Port %s is already connected to %s, cannot connect %s\n", - self, self.peer, other); + fatal( + "Port %s is already connected to %s, cannot connect %s\n", + self, + self.peer, + other, + ) self.peer = other if proxy.isproxy(other): other.set_param_desc(PortParamDesc()) return elif not isinstance(other, PortRef): - raise TypeError("assigning non-port reference '%s' to port '%s'" \ - % (other, self)) + raise TypeError( + "assigning non-port reference '%s' to port '%s'" + % (other, self) + ) if not Port.is_compat(self, other): - fatal("Ports %s and %s with roles '%s' and '%s' " - "are not compatible", self, other, self.role, other.role) + fatal( + "Ports %s and %s with roles '%s' and '%s' " + "are not compatible", + self, + other, + self.role, + other.role, + ) if other.peer is not self: other.connect(self) @@ -1841,8 +2028,9 @@ class PortRef(object): if not isinstance(new_1, PortRef) or not isinstance(new_2, PortRef): raise TypeError( - "Splicing non-port references '%s','%s' to port '%s'" % \ - (new_1, new_2, self)) + "Splicing non-port references '%s','%s' to port '%s'" + % (new_1, new_2, self) + ) old_peer = self.peer @@ -1857,10 +2045,18 @@ class PortRef(object): self.peer = new_1 new_1.peer = self else: - fatal("Ports %s(%s) and %s(%s) can't be compatibly spliced with " - "%s(%s) and %s(%s)", self, self.role, - old_peer, old_peer.role, new_1, new_1.role, - new_2, new_2.role) + fatal( + "Ports %s(%s) and %s(%s) can't be compatibly spliced with " + "%s(%s) and %s(%s)", + self, + self.role, + old_peer, + old_peer.role, + new_1, + new_1.role, + new_2, + new_2.role, + ) def clone(self, simobj, memo): if self in memo: @@ -1868,31 +2064,33 @@ class PortRef(object): newRef = copy.copy(self) memo[self] = newRef 
newRef.simobj = simobj - assert(isSimObject(newRef.simobj)) + assert isSimObject(newRef.simobj) if self.peer and not proxy.isproxy(self.peer): peerObj = self.peer.simobj(_memo=memo) newRef.peer = self.peer.clone(peerObj, memo) - assert(not isinstance(newRef.peer, VectorPortRef)) + assert not isinstance(newRef.peer, VectorPortRef) return newRef def unproxy(self, simobj): - assert(simobj is self.simobj) + assert simobj is self.simobj if proxy.isproxy(self.peer): try: realPeer = self.peer.unproxy(self.simobj) except: - print("Error in unproxying port '%s' of %s" % - (self.name, self.simobj.path())) + print( + "Error in unproxying port '%s' of %s" + % (self.name, self.simobj.path()) + ) raise self.connect(realPeer) # Call C++ to create corresponding port connection between C++ objects def ccConnect(self): - if self.ccConnected: # already done this + if self.ccConnected: # already done this return peer = self.peer - if not self.peer: # nothing to connect to + if not self.peer: # nothing to connect to return port = self.simobj.getPort(self.name, self.index) @@ -1901,6 +2099,7 @@ class PortRef(object): self.ccConnected = True + # A reference to an individual element of a VectorPort... much like a # PortRef, but has an index. class VectorPortElementRef(PortRef): @@ -1909,13 +2108,14 @@ class VectorPortElementRef(PortRef): self.index = index def __str__(self): - return '%s.%s[%d]' % (self.simobj, self.name, self.index) + return "%s.%s[%d]" % (self.simobj, self.name, self.index) + # A reference to a complete vector-valued port (not just a single element). # Can be indexed to retrieve individual VectorPortElementRef instances. 
class VectorPortRef(object): def __init__(self, simobj, name, role, is_source): - assert(isSimObject(simobj) or isSimObjectClass(simobj)) + assert isSimObject(simobj) or isSimObjectClass(simobj) self.simobj = simobj self.name = name self.role = role @@ -1923,7 +2123,7 @@ class VectorPortRef(object): self.elements = [] def __str__(self): - return '%s.%s[:]' % (self.simobj, self.name) + return "%s.%s[:]" % (self.simobj, self.name) def __len__(self): # Return the number of connected peers, corresponding the the @@ -1932,22 +2132,27 @@ class VectorPortRef(object): # for config.ini, print peer's name (not ours) def ini_str(self): - return ' '.join([el.ini_str() for el in self.elements]) + return " ".join([el.ini_str() for el in self.elements]) # for config.json def get_config_as_dict(self): - return {'role' : self.role, - 'peer' : [el.ini_str() for el in self.elements], - 'is_source' : str(self.is_source)} + return { + "role": self.role, + "peer": [el.ini_str() for el in self.elements], + "is_source": str(self.is_source), + } def __getitem__(self, key): if not isinstance(key, int): raise TypeError("VectorPort index must be integer") if key >= len(self.elements): # need to extend list - ext = [VectorPortElementRef( - self.simobj, self.name, self.role, self.is_source, i) - for i in range(len(self.elements), key+1)] + ext = [ + VectorPortElementRef( + self.simobj, self.name, self.role, self.is_source, i + ) + for i in range(len(self.elements), key + 1) + ] self.elements.extend(ext) return self.elements[key] @@ -1976,7 +2181,7 @@ class VectorPortRef(object): newRef = copy.copy(self) memo[self] = newRef newRef.simobj = simobj - assert(isSimObject(newRef.simobj)) + assert isSimObject(newRef.simobj) newRef.elements = [el.clone(simobj, memo) for el in self.elements] return newRef @@ -1986,13 +2191,14 @@ class VectorPortRef(object): def ccConnect(self): [el.ccConnect() for el in self.elements] + # Port description object. 
Like a ParamDesc object, this represents a # logical port in the SimObject class, not a particular port on a # SimObject instance. The latter are represented by PortRef objects. class Port(object): # Port("role", "description") - _compat_dict = { } + _compat_dict = {} @classmethod def compat(cls, role, peer): @@ -2033,19 +2239,23 @@ class Port(object): # will eventually hold the number of connected ports (and thus the # number of elements for a VectorPort). def cxx_decl(self, code): - code('unsigned int port_${{self.name}}_connection_count;') + code("unsigned int port_${{self.name}}_connection_count;") + + +Port.compat("GEM5 REQUESTOR", "GEM5 RESPONDER") -Port.compat('GEM5 REQUESTOR', 'GEM5 RESPONDER') class RequestPort(Port): # RequestPort("description") def __init__(self, desc): - super().__init__('GEM5 REQUESTOR', desc, is_source=True) + super().__init__("GEM5 REQUESTOR", desc, is_source=True) + class ResponsePort(Port): # ResponsePort("description") def __init__(self, desc): - super().__init__('GEM5 RESPONDER', desc) + super().__init__("GEM5 RESPONDER", desc) + # VectorPort description object. Like Port, but represents a vector # of connections (e.g., as on a XBar). @@ -2053,15 +2263,18 @@ class VectorPort(Port): def makeRef(self, simobj): return VectorPortRef(simobj, self.name, self.role, self.is_source) + class VectorRequestPort(VectorPort): # VectorRequestPort("description") def __init__(self, desc): - super().__init__('GEM5 REQUESTOR', desc, is_source=True) + super().__init__("GEM5 REQUESTOR", desc, is_source=True) + class VectorResponsePort(VectorPort): # VectorResponsePort("description") def __init__(self, desc): - super().__init__('GEM5 RESPONDER', desc) + super().__init__("GEM5 RESPONDER", desc) + # Old names, maintained for compatibility. MasterPort = RequestPort @@ -2073,9 +2286,10 @@ VectorSlavePort = VectorResponsePort # proxy objects (via set_param_desc()) so that proxy error messages # make sense. 
class PortParamDesc(object, metaclass=Singleton): - ptype_str = 'Port' + ptype_str = "Port" ptype = Port + class DeprecatedParam(object): """A special type for deprecated parameter variable names. @@ -2094,7 +2308,7 @@ class DeprecatedParam(object): ``` """ - def __init__(self, new_param, message=''): + def __init__(self, new_param, message=""): """new_param: the new parameter variable that users should be using instead of this parameter variable. message: an optional message to print when warning the user @@ -2106,27 +2320,27 @@ class DeprecatedParam(object): # will be called when the SimObject type (class) is initialized so # these variables should be filled in before the instance of the # SimObject with this parameter is constructed - self._oldName = '' - self._newName = '' + self._oldName = "" + self._newName = "" @property def oldName(self): - assert(self._oldName != '') # should already be set + assert self._oldName != "" # should already be set return self._oldName @oldName.setter def oldName(self, name): - assert(self._oldName == '') # Cannot "re-set" this value + assert self._oldName == "" # Cannot "re-set" this value self._oldName = name @property def newName(self): - assert(self._newName != '') # should already be set + assert self._newName != "" # should already be set return self._newName @newName.setter def newName(self, name): - assert(self._newName == '') # Cannot "re-set" this value + assert self._newName == "" # Cannot "re-set" this value self._newName = name def printWarning(self, instance_name, simobj_name): @@ -2137,35 +2351,79 @@ class DeprecatedParam(object): """ if not self.message: self.message = "See {} for more information".format(simobj_name) - warn('{}.{} is deprecated. {}'.format( - instance_name, self._oldName, self.message)) + warn( + "{}.{} is deprecated. 
{}".format( + instance_name, self._oldName, self.message + ) + ) + baseEnums = allEnums.copy() baseParams = allParams.copy() + def clear(): global allEnums, allParams allEnums = baseEnums.copy() allParams = baseParams.copy() -__all__ = ['Param', 'VectorParam', - 'Enum', 'ScopedEnum', 'Bool', 'String', 'Float', - 'Int', 'Unsigned', 'Int8', 'UInt8', 'Int16', 'UInt16', - 'Int32', 'UInt32', 'Int64', 'UInt64', - 'Counter', 'Addr', 'Tick', 'Percent', - 'TcpPort', 'UdpPort', 'EthernetAddr', - 'IpAddress', 'IpNetmask', 'IpWithPort', - 'MemorySize', 'MemorySize32', - 'Latency', 'Frequency', 'Clock', 'Voltage', 'Current', 'Energy', - 'Temperature', - 'NetworkBandwidth', 'MemoryBandwidth', - 'AddrRange', - 'MaxAddr', 'MaxTick', 'AllMemory', - 'Time', - 'NextEthernetAddr', 'NULL', - 'Port', 'RequestPort', 'ResponsePort', 'MasterPort', 'SlavePort', - 'VectorPort', 'VectorRequestPort', 'VectorResponsePort', - 'VectorMasterPort', 'VectorSlavePort', - 'DeprecatedParam', - ] + +__all__ = [ + "Param", + "VectorParam", + "Enum", + "ScopedEnum", + "Bool", + "String", + "Float", + "Int", + "Unsigned", + "Int8", + "UInt8", + "Int16", + "UInt16", + "Int32", + "UInt32", + "Int64", + "UInt64", + "Counter", + "Addr", + "Tick", + "Percent", + "TcpPort", + "UdpPort", + "EthernetAddr", + "IpAddress", + "IpNetmask", + "IpWithPort", + "MemorySize", + "MemorySize32", + "Latency", + "Frequency", + "Clock", + "Voltage", + "Current", + "Energy", + "Temperature", + "NetworkBandwidth", + "MemoryBandwidth", + "AddrRange", + "MaxAddr", + "MaxTick", + "AllMemory", + "Time", + "NextEthernetAddr", + "NULL", + "Port", + "RequestPort", + "ResponsePort", + "MasterPort", + "SlavePort", + "VectorPort", + "VectorRequestPort", + "VectorResponsePort", + "VectorMasterPort", + "VectorSlavePort", + "DeprecatedParam", +] diff --git a/src/python/m5/proxy.py b/src/python/m5/proxy.py index b4e9bbe47c..16aa0c4505 100644 --- a/src/python/m5/proxy.py +++ b/src/python/m5/proxy.py @@ -44,6 +44,7 @@ import copy + class 
BaseProxy(object): def __init__(self, search_self, search_up): self._search_self = search_self @@ -52,49 +53,52 @@ class BaseProxy(object): def __str__(self): if self._search_self and not self._search_up: - s = 'Self' + s = "Self" elif not self._search_self and self._search_up: - s = 'Parent' + s = "Parent" else: - s = 'ConfusedProxy' - return s + '.' + self.path() + s = "ConfusedProxy" + return s + "." + self.path() def __setattr__(self, attr, value): - if not attr.startswith('_'): + if not attr.startswith("_"): raise AttributeError( - "cannot set attribute '%s' on proxy object" % attr) + "cannot set attribute '%s' on proxy object" % attr + ) super().__setattr__(attr, value) def _gen_op(operation): def op(self, operand): - if not (isinstance(operand, (int, float)) or \ - isproxy(operand)): + if not (isinstance(operand, (int, float)) or isproxy(operand)): raise TypeError( - "Proxy operand must be a constant or a proxy to a param") + "Proxy operand must be a constant or a proxy to a param" + ) self._ops.append((operation, operand)) return self + return op # Support for multiplying proxies by either constants or other proxies - __mul__ = _gen_op(lambda operand_a, operand_b : operand_a * operand_b) + __mul__ = _gen_op(lambda operand_a, operand_b: operand_a * operand_b) __rmul__ = __mul__ # Support for dividing proxies by either constants or other proxies - __truediv__ = _gen_op(lambda operand_a, operand_b : - operand_a / operand_b) - __floordiv__ = _gen_op(lambda operand_a, operand_b : - operand_a // operand_b) + __truediv__ = _gen_op(lambda operand_a, operand_b: operand_a / operand_b) + __floordiv__ = _gen_op(lambda operand_a, operand_b: operand_a // operand_b) # Support for dividing constants by proxies - __rtruediv__ = _gen_op(lambda operand_a, operand_b : - operand_b / operand_a.getValue()) - __rfloordiv__ = _gen_op(lambda operand_a, operand_b : - operand_b // operand_a.getValue()) + __rtruediv__ = _gen_op( + lambda operand_a, operand_b: operand_b / 
operand_a.getValue() + ) + __rfloordiv__ = _gen_op( + lambda operand_a, operand_b: operand_b // operand_a.getValue() + ) # After all the operators and operands have been defined, this function # should be called to perform the actual operation def _opcheck(self, result, base): from . import params + for operation, operand in self._ops: # Get the operand's value if isproxy(operand): @@ -132,8 +136,9 @@ class BaseProxy(object): if not done: raise AttributeError( - "Can't resolve proxy '%s' of type '%s' from '%s'" % \ - (self.path(), self._pdesc.ptype_str, base.path())) + "Can't resolve proxy '%s' of type '%s' from '%s'" + % (self.path(), self._pdesc.ptype_str, base.path()) + ) if isinstance(result, BaseProxy): if result == self: @@ -153,6 +158,7 @@ class BaseProxy(object): # if index is 0 and item is not subscriptable, just # use item itself (so cpu[0] works on uniprocessors) return obj + getindex = staticmethod(getindex) # This method should be called once the proxy is assigned to a @@ -161,6 +167,7 @@ class BaseProxy(object): def set_param_desc(self, pdesc): self._pdesc = pdesc + class AttrProxy(BaseProxy): def __init__(self, search_self, search_up, attr): super().__init__(search_self, search_up) @@ -169,11 +176,12 @@ class AttrProxy(BaseProxy): def __getattr__(self, attr): # python uses __bases__ internally for inheritance - if attr.startswith('_'): + if attr.startswith("_"): return super().__getattr__(self, attr) - if hasattr(self, '_pdesc'): - raise AttributeError("Attribute reference on bound proxy " - f"({self}.{attr})") + if hasattr(self, "_pdesc"): + raise AttributeError( + "Attribute reference on bound proxy " f"({self}.{attr})" + ) # Return a copy of self rather than modifying self in place # since self could be an indirect reference via a variable or # parameter @@ -185,7 +193,7 @@ class AttrProxy(BaseProxy): def __getitem__(self, key): if not isinstance(key, int): raise TypeError("Proxy object requires integer index") - if hasattr(self, '_pdesc'): + if 
hasattr(self, "_pdesc"): raise AttributeError("Index operation on bound proxy") new_self = copy.deepcopy(self) new_self._modifiers.append(key) @@ -195,8 +203,8 @@ class AttrProxy(BaseProxy): try: val = getattr(obj, self._attr) visited = False - if hasattr(val, '_visited'): - visited = getattr(val, '_visited') + if hasattr(val, "_visited"): + visited = getattr(val, "_visited") if visited: return None, False @@ -217,7 +225,7 @@ class AttrProxy(BaseProxy): elif isinstance(m, int): val = val[m] else: - assert("Item must be string or integer") + assert "Item must be string or integer" while isproxy(val): val = val.unproxy(obj) return val, True @@ -226,19 +234,21 @@ class AttrProxy(BaseProxy): p = self._attr for m in self._modifiers: if isinstance(m, str): - p += '.%s' % m + p += ".%s" % m elif isinstance(m, int): - p += '[%d]' % m + p += "[%d]" % m else: - assert("Item must be string or integer") + assert "Item must be string or integer" return p + class AnyProxy(BaseProxy): def find(self, obj): return obj.find_any(self._pdesc.ptype) def path(self): - return 'any' + return "any" + # The AllProxy traverses the entire sub-tree (not only the children) # and adds all objects of a specific type @@ -247,10 +257,12 @@ class AllProxy(BaseProxy): return obj.find_all(self._pdesc.ptype) def path(self): - return 'all' + return "all" + def isproxy(obj): from . 
import params + if isinstance(obj, (BaseProxy, params.EthernetAddr)): return True elif isinstance(obj, (list, tuple)): @@ -259,24 +271,26 @@ def isproxy(obj): return True return False + class ProxyFactory(object): def __init__(self, search_self, search_up): self.search_self = search_self self.search_up = search_up def __getattr__(self, attr): - if attr == 'any': + if attr == "any": return AnyProxy(self.search_self, self.search_up) - elif attr == 'all': + elif attr == "all": if self.search_up: - assert("Parant.all is not supported") + assert "Parant.all is not supported" return AllProxy(self.search_self, self.search_up) else: return AttrProxy(self.search_self, self.search_up, attr) + # global objects for handling proxies -Parent = ProxyFactory(search_self = False, search_up = True) -Self = ProxyFactory(search_self = True, search_up = False) +Parent = ProxyFactory(search_self=False, search_up=True) +Self = ProxyFactory(search_self=True, search_up=False) # limit exports on 'from proxy import *' -__all__ = ['Parent', 'Self'] +__all__ = ["Parent", "Self"] diff --git a/src/python/m5/simulate.py b/src/python/m5/simulate.py index 0e222cfbdf..18fb1d6cd4 100644 --- a/src/python/m5/simulate.py +++ b/src/python/m5/simulate.py @@ -54,7 +54,7 @@ from . import params from m5.util.dot_writer import do_dot, do_dvfs_dot from m5.util.dot_writer_ruby import do_ruby_dot -from .util import fatal +from .util import fatal, warn from .util import attrdict # define a MaxTick parameter, unsigned 64 bit @@ -62,7 +62,7 @@ MaxTick = 2**64 - 1 _drain_manager = _m5.drain.DrainManager.instance() -_instantiated = False # Has m5.instantiate() been called? +_instantiated = False # Has m5.instantiate() been called? # The final call to instantiate the SimObject graph and initialize the # system. 
@@ -85,13 +85,15 @@ def instantiate(ckpt_dir=None): # Make sure SimObject-valued params are in the configuration # hierarchy so we catch them with future descendants() walks - for obj in root.descendants(): obj.adoptOrphanParams() + for obj in root.descendants(): + obj.adoptOrphanParams() # Unproxy in sorted order for determinism - for obj in root.descendants(): obj.unproxyParams() + for obj in root.descendants(): + obj.unproxyParams() if options.dump_config: - ini_file = open(os.path.join(options.outdir, options.dump_config), 'w') + ini_file = open(os.path.join(options.outdir, options.dump_config), "w") # Print ini sections in sorted order for easier diffing for obj in sorted(root.descendants(), key=lambda o: o.path()): obj.print_ini(ini_file) @@ -100,8 +102,10 @@ def instantiate(ckpt_dir=None): if options.json_config: try: import json + json_file = open( - os.path.join(options.outdir, options.json_config), 'w') + os.path.join(options.outdir, options.json_config), "w" + ) d = root.get_config_as_dict() json.dump(d, json_file, indent=4) json_file.close() @@ -116,21 +120,26 @@ def instantiate(ckpt_dir=None): stats.initSimStats() # Create the C++ sim objects and connect ports - for obj in root.descendants(): obj.createCCObject() - for obj in root.descendants(): obj.connectPorts() + for obj in root.descendants(): + obj.createCCObject() + for obj in root.descendants(): + obj.connectPorts() # Do a second pass to finish initializing the sim objects - for obj in root.descendants(): obj.init() + for obj in root.descendants(): + obj.init() # Do a third pass to initialize statistics stats._bindStatHierarchy(root) root.regStats() # Do a fourth pass to initialize probe points - for obj in root.descendants(): obj.regProbePoints() + for obj in root.descendants(): + obj.regProbePoints() # Do a fifth pass to connect probe listeners - for obj in root.descendants(): obj.regProbeListeners() + for obj in root.descendants(): + obj.regProbeListeners() # We want to generate the DVFS 
diagram for the system. This can only be # done once all of the CPP objects have been created and initialised so @@ -145,15 +154,20 @@ def instantiate(ckpt_dir=None): if ckpt_dir: _drain_manager.preCheckpointRestore() ckpt = _m5.core.getCheckpoint(ckpt_dir) - for obj in root.descendants(): obj.loadState(ckpt) + for obj in root.descendants(): + obj.loadState(ckpt) else: - for obj in root.descendants(): obj.initState() + for obj in root.descendants(): + obj.initState() # Check to see if any of the stat events are in the past after resuming from # a checkpoint, If so, this call will shift them to be at a valid time. updateStatEvents() + need_startup = True + + def simulate(*args, **kwargs): global need_startup global _instantiated @@ -163,7 +177,8 @@ def simulate(*args, **kwargs): if need_startup: root = objects.Root.getInstance() - for obj in root.descendants(): obj.startup() + for obj in root.descendants(): + obj.startup() need_startup = False # Python exit handlers happen in reverse order. @@ -189,6 +204,64 @@ def simulate(*args, **kwargs): return sim_out + +def setMaxTick(tick: int) -> None: + """Sets the maximum tick the simulation may run to. When when using the + stdlib simulator module, reaching this max tick triggers a + `ExitEvent.MAX_TICK` exit event. + + :param tick: the maximum tick (absolute, not relative to the current tick). + """ + if tick <= curTick(): + warn("Max tick scheduled for the past. This will not be triggered.") + _m5.event.setMaxTick(tick=tick) + + +def getMaxTick() -> int: + """Returns the current maximum tick.""" + return _m5.event.getMaxTick() + + +def getTicksUntilMax() -> int: + """Returns the current number of ticks until the maximum tick.""" + return getMaxTick() - curTick() + + +def scheduleTickExitFromCurrent( + ticks: int, exit_string: str = "Tick exit reached" +) -> None: + """Schedules a tick exit event from the current tick. I.e., if ticks == 100 + then an exit event will be scheduled at tick `curTick() + 100`. 
+ + The default `exit_string` value is used by the stdlib Simulator module to + declare this exit event as `ExitEvent.SCHEDULED_TICK`. + + :param ticks: The simulation ticks, from `curTick()` to schedule the exit + event. + :param exit_string: The exit string to return when the exit event is + triggered. + """ + scheduleTickExitAbsolute(tick=ticks + curTick(), exit_string=exit_string) + + +def scheduleTickExitAbsolute( + tick: int, exit_string: str = "Tick exit reached" +) -> None: + """Schedules a tick exit event using absolute ticks. I.e., if tick == 100 + then an exit event will be scheduled at tick 100. + + The default `exit_string` value is used by the stdlib Simulator module to + declare this exit event as `ExitEvent.SCHEDULED_TICK`. + + :param tick: The absolute simulation tick to schedule the exit event. + :param exit_string: The exit string to return when the exit event is + triggered. + """ + if tick <= curTick(): + warn("Tick exit scheduled for the past. This will not be triggered.") + _m5.event.exitSimLoop(exit_string, 0, tick, 0, False) + + def drain(): """Drain the simulator in preparation of a checkpoint or memory mode switch. 
@@ -212,7 +285,7 @@ def drain(): # WARNING: if a valid exit event occurs while draining, it # will not get returned to the user script exit_event = _m5.event.simulate() - while exit_event.getCause() != 'Finished drain': + while exit_event.getCause() != "Finished drain": exit_event = simulate() return False @@ -224,14 +297,17 @@ def drain(): assert _drain_manager.isDrained(), "Drain state inconsistent" + def memWriteback(root): for obj in root.descendants(): obj.memWriteback() + def memInvalidate(root): for obj in root.descendants(): obj.memInvalidate() + def checkpoint(dir): root = objects.Root.getInstance() if not isinstance(root, objects.Root): @@ -242,15 +318,19 @@ def checkpoint(dir): print("Writing checkpoint") _m5.core.serializeAll(dir) + def _changeMemoryMode(system, mode): if not isinstance(system, (objects.Root, objects.System)): - raise TypeError("Parameter of type '%s'. Must be type %s or %s." % \ - (type(system), objects.Root, objects.System)) + raise TypeError( + "Parameter of type '%s'. Must be type %s or %s." + % (type(system), objects.Root, objects.System) + ) if system.getMemoryMode() != mode: system.setMemoryMode(mode) else: print("System already in target mode. Memory mode unchanged.") + def switchCpus(system, cpuList, verbose=True): """Switch CPUs in a system. @@ -283,21 +363,25 @@ def switchCpus(system, cpuList, verbose=True): raise TypeError("%s is not of type BaseCPU" % new_cpu) if new_cpu in old_cpu_set: raise RuntimeError( - "New CPU (%s) is in the list of old CPUs." % (old_cpu,)) + "New CPU (%s) is in the list of old CPUs." % (old_cpu,) + ) if not new_cpu.switchedOut(): raise RuntimeError("New CPU (%s) is already active." % (new_cpu,)) if not new_cpu.support_take_over(): raise RuntimeError( - "New CPU (%s) does not support CPU handover." % (old_cpu,)) + "New CPU (%s) does not support CPU handover." % (old_cpu,) + ) if new_cpu.memory_mode() != memory_mode_name: raise RuntimeError( - "%s and %s require different memory modes." 
% (new_cpu, - new_cpus[0])) + "%s and %s require different memory modes." + % (new_cpu, new_cpus[0]) + ) if old_cpu.switchedOut(): raise RuntimeError("Old CPU (%s) is inactive." % (new_cpu,)) if not old_cpu.support_take_over(): raise RuntimeError( - "Old CPU (%s) does not support CPU handover." % (old_cpu,)) + "Old CPU (%s) does not support CPU handover." % (old_cpu,) + ) MemoryMode = params.allEnums["MemoryMode"] try: @@ -326,11 +410,15 @@ def switchCpus(system, cpuList, verbose=True): for old_cpu, new_cpu in cpuList: new_cpu.takeOverFrom(old_cpu) + def notifyFork(root): for obj in root.descendants(): obj.notifyFork() + fork_count = 0 + + def fork(simout="%(parent)s.f%(fork_seq)i"): """Fork the simulator. @@ -353,6 +441,7 @@ def fork(simout="%(parent)s.f%(fork_seq)i"): pid of the child process or 0 if running in the child. """ from m5 import options + global fork_count if not _m5.core.listenersDisabled(): @@ -375,16 +464,17 @@ def fork(simout="%(parent)s.f%(fork_seq)i"): # Setup a new output directory parent = options.outdir options.outdir = simout % { - "parent" : parent, - "fork_seq" : fork_count, - "pid" : os.getpid(), - } + "parent": parent, + "fork_seq": fork_count, + "pid": os.getpid(), + } _m5.core.setOutputDir(options.outdir) else: fork_count += 1 return pid + from _m5.core import disableAllListeners, listenersDisabled from _m5.core import listenersLoopbackOnly from _m5.core import curTick diff --git a/src/python/m5/stats/__init__.py b/src/python/m5/stats/__init__.py index 7f91487e77..6bc50cfad9 100644 --- a/src/python/m5/stats/__init__.py +++ b/src/python/m5/stats/__init__.py @@ -53,12 +53,13 @@ outputList = [] # Dictionary of stat visitor factories populated by the _url_factory # visitor. -factories = { } +factories = {} # List of all factories. Contains tuples of (factory, schemes, # enabled). 
all_factories = [] + def _url_factory(schemes, enable=True): """Wrap a plain Python function with URL parsing helpers @@ -101,19 +102,23 @@ def _url_factory(schemes, enable=True): # values into proper Python types. def parse_value(key, values): if len(values) == 0 or (len(values) == 1 and not values[0]): - fatal("%s: '%s' doesn't have a value." % ( - url.geturl(), key)) + fatal( + "%s: '%s' doesn't have a value." % (url.geturl(), key) + ) elif len(values) > 1: - fatal("%s: '%s' has multiple values." % ( - url.geturl(), key)) + fatal( + "%s: '%s' has multiple values." % (url.geturl(), key) + ) else: try: return key, literal_eval(values[0]) except ValueError: - fatal("%s: %s isn't a valid Python literal" \ - % (url.geturl(), values[0])) + fatal( + "%s: %s isn't a valid Python literal" + % (url.geturl(), values[0]) + ) - kwargs = dict([ parse_value(k, v) for k, v in qs.items() ]) + kwargs = dict([parse_value(k, v) for k, v in qs.items()]) try: return func("%s%s" % (url.netloc, url.path), **kwargs) @@ -128,7 +133,8 @@ def _url_factory(schemes, enable=True): return decorator -@_url_factory([ None, "", "text", "file", ]) + +@_url_factory([None, "", "text", "file"]) def _textFactory(fn, desc=True, spaces=True): """Output stats in text format. @@ -147,7 +153,8 @@ def _textFactory(fn, desc=True, spaces=True): return _m5.stats.initText(fn, desc, spaces) -@_url_factory([ "h5", ], enable=hasattr(_m5.stats, "initHDF5")) + +@_url_factory(["h5"], enable=hasattr(_m5.stats, "initHDF5")) def _hdf5Factory(fn, chunking=10, desc=True, formulas=True): """Output stats in HDF5 format. @@ -183,6 +190,7 @@ def _hdf5Factory(fn, chunking=10, desc=True, formulas=True): return _m5.stats.initHDF5(fn, chunking, desc, formulas) + @_url_factory(["json"]) def _jsonFactory(fn): """Output stats in JSON format. 
@@ -194,6 +202,7 @@ def _jsonFactory(fn): return JsonOutputVistor(fn) + def addStatVisitor(url): """Add a stat visitor specified using a URL string @@ -225,6 +234,7 @@ def addStatVisitor(url): outputList.append(factory(parsed)) + def printStatVisitorTypes(): """List available stat visitors and their documentation""" @@ -235,17 +245,19 @@ def printStatVisitorTypes(): print("| %s" % line) print() - enabled_visitors = [ x for x in all_factories if x[2] ] + enabled_visitors = [x for x in all_factories if x[2]] for factory, schemes, _ in enabled_visitors: print("%s:" % ", ".join(filter(lambda x: x is not None, schemes))) # Try to extract the factory doc string print_doc(inspect.getdoc(factory)) + def initSimStats(): _m5.stats.initSimStats() _m5.stats.registerPythonStatsHandlers() + def _visit_groups(visitor, root=None): if root is None: root = Root.getInstance() @@ -253,12 +265,15 @@ def _visit_groups(visitor, root=None): visitor(group) _visit_groups(visitor, root=group) + def _visit_stats(visitor, root=None): def for_each_stat(g): for stat in g.getStats(): visitor(g, stat) + _visit_groups(for_each_stat, root=root) + def _bindStatHierarchy(root): def _bind_obj(name, obj): if isNullPointer(obj): @@ -278,33 +293,39 @@ def _bindStatHierarchy(root): if isinstance(obj.getCCObject(), _m5.stats.Group): parent = root while parent: - if hasattr(parent, 'addStatGroup'): + if hasattr(parent, "addStatGroup"): parent.addStatGroup(name, obj.getCCObject()) break - parent = parent.get_parent(); + parent = parent.get_parent() _bindStatHierarchy(obj) for name, obj in root._children.items(): _bind_obj(name, obj) + names = [] stats_dict = {} stats_list = [] + + def enable(): - '''Enable the statistics package. Before the statistics package is + """Enable the statistics package. 
Before the statistics package is enabled, all statistics must be created and initialized and once - the package is enabled, no more statistics can be created.''' + the package is enabled, no more statistics can be created.""" def check_stat(group, stat): if not stat.check() or not stat.baseCheck(): - fatal("statistic '%s' (%d) was not properly initialized " \ - "by a regStats() function\n", stat.name, stat.id) + fatal( + "statistic '%s' (%d) was not properly initialized " + "by a regStats() function\n", + stat.name, + stat.id, + ) if not (stat.flags & flags.display): stat.name = "__Stat%06d" % stat.id - # Legacy stat global stats_list stats_list = list(_m5.stats.statsList()) @@ -312,21 +333,21 @@ def enable(): for stat in stats_list: check_stat(None, stat) - stats_list.sort(key=lambda s: s.name.split('.')) + stats_list.sort(key=lambda s: s.name.split(".")) for stat in stats_list: stats_dict[stat.name] = stat stat.enable() - # New stats _visit_stats(check_stat) _visit_stats(lambda g, s: s.enable()) - _m5.stats.enable(); + _m5.stats.enable() + def prepare(): - '''Prepare all stats for data access. This must be done before - dumping and serialization.''' + """Prepare all stats for data access. This must be done before + dumping and serialization.""" # Legacy stats for stat in stats_list: @@ -335,6 +356,7 @@ def prepare(): # New stats _visit_stats(lambda g, s: s.prepare()) + def _dump_to_visitor(visitor, roots=None): # New stats def dump_group(group): @@ -361,12 +383,14 @@ def _dump_to_visitor(visitor, roots=None): for stat in stats_list: stat.visit(visitor) + lastDump = 0 # List[SimObject]. global_dump_roots = [] + def dump(roots=None): - '''Dump all statistics data to the registered outputs''' + """Dump all statistics data to the registered outputs""" all_roots = [] if roots is not None: @@ -391,7 +415,7 @@ def dump(roots=None): # Notify new-style stats group that we are about to dump stats. 
sim_root = Root.getInstance() if sim_root: - sim_root.preDumpStats(); + sim_root.preDumpStats() prepare() for output in outputList: @@ -406,8 +430,9 @@ def dump(roots=None): _dump_to_visitor(output, roots=all_roots) output.end() + def reset(): - '''Reset all statistics to the base state''' + """Reset all statistics to the base state""" # call reset stats on all SimObjects root = Root.getInstance() @@ -420,14 +445,17 @@ def reset(): _m5.stats.processResetQueue() -flags = attrdict({ - 'none' : 0x0000, - 'init' : 0x0001, - 'display' : 0x0002, - 'total' : 0x0010, - 'pdf' : 0x0020, - 'cdf' : 0x0040, - 'dist' : 0x0080, - 'nozero' : 0x0100, - 'nonan' : 0x0200, -}) + +flags = attrdict( + { + "none": 0x0000, + "init": 0x0001, + "display": 0x0002, + "total": 0x0010, + "pdf": 0x0020, + "cdf": 0x0040, + "dist": 0x0080, + "nozero": 0x0100, + "nonan": 0x0200, + } +) diff --git a/src/python/m5/stats/gem5stats.py b/src/python/m5/stats/gem5stats.py index 3b4bc7eb6c..07636e3e3f 100644 --- a/src/python/m5/stats/gem5stats.py +++ b/src/python/m5/stats/gem5stats.py @@ -39,11 +39,13 @@ from m5.ext.pystats.simstat import * from m5.ext.pystats.statistic import * from m5.ext.pystats.storagetype import * -class JsonOutputVistor(): + +class JsonOutputVistor: """ This is a helper vistor class used to include a JSON output via the stats API (`src/python/m5/stats/__init__.py`). """ + file: str json_args: Dict @@ -77,10 +79,11 @@ class JsonOutputVistor(): The Root, or List of roots, whose stats are are to be dumped JSON. """ - with open(self.file, 'w') as fp: + with open(self.file, "w") as fp: simstat = get_simstat(root=roots, prepare_stats=False) simstat.dump(fp=fp, **self.json_args) + def get_stats_group(group: _m5.stats.Group) -> Group: """ Translates a gem5 Group object into a Python stats Group object. 
A Python @@ -113,6 +116,7 @@ def get_stats_group(group: _m5.stats.Group) -> Group: return Group(**stats_dict) + def __get_statistic(statistic: _m5.stats.Info) -> Optional[Statistic]: """ Translates a _m5.stats.Info object into a Statistic object, to process @@ -130,7 +134,7 @@ def __get_statistic(statistic: _m5.stats.Info) -> Optional[Statistic]: cannot be translated. """ - assert(isinstance(statistic, _m5.stats.Info)) + assert isinstance(statistic, _m5.stats.Info) statistic.prepare() if isinstance(statistic, _m5.stats.ScalarInfo): @@ -146,6 +150,7 @@ def __get_statistic(statistic: _m5.stats.Info) -> Optional[Statistic]: return None + def __get_scaler(statistic: _m5.stats.ScalarInfo) -> Scalar: value = statistic.value unit = statistic.unit @@ -154,11 +159,9 @@ def __get_scaler(statistic: _m5.stats.ScalarInfo) -> Scalar: datatype = StorageType["f64"] return Scalar( - value=value, - unit=unit, - description=description, - datatype=datatype, - ) + value=value, unit=unit, description=description, datatype=datatype + ) + def __get_distribution(statistic: _m5.stats.DistInfo) -> Distribution: unit = statistic.unit @@ -177,20 +180,21 @@ def __get_distribution(statistic: _m5.stats.DistInfo) -> Distribution: datatype = StorageType["f64"] return Distribution( - value=value, - min=min, - max=max, - num_bins=num_bins, - bin_size=bin_size, - sum = sum_val, - sum_squared = sum_squared, - underflow = underflow, - overflow = overflow, - logs = logs, - unit=unit, - description=description, - datatype=datatype, - ) + value=value, + min=min, + max=max, + num_bins=num_bins, + bin_size=bin_size, + sum=sum_val, + sum_squared=sum_squared, + underflow=underflow, + overflow=overflow, + logs=logs, + unit=unit, + description=description, + datatype=datatype, + ) + def __get_vector(statistic: _m5.stats.VectorInfo) -> Vector: to_add = dict() @@ -212,14 +216,12 @@ def __get_vector(statistic: _m5.stats.VectorInfo) -> Vector: index_string = str(index) to_add[index_string] = Scalar( - value=value, 
- unit=unit, - description=description, - datatype=datatype, - ) + value=value, unit=unit, description=description, datatype=datatype + ) return Vector(scalar_map=to_add) + def _prepare_stats(group: _m5.stats.Group): """ Prepares the statistics for dumping. @@ -234,8 +236,9 @@ def _prepare_stats(group: _m5.stats.Group): _prepare_stats(child) -def get_simstat(root: Union[SimObject, List[SimObject]], - prepare_stats: bool = True) -> SimStat: +def get_simstat( + root: Union[SimObject, List[SimObject]], prepare_stats: bool = True +) -> SimStat: """ This function will return the SimStat object for a simulation given a SimObject (typically a Root SimObject), or list of SimObjects. The returned @@ -262,7 +265,7 @@ def get_simstat(root: Union[SimObject, List[SimObject]], """ stats_map = {} creation_time = datetime.now() - time_converstion = None # TODO https://gem5.atlassian.net/browse/GEM5-846 + time_converstion = None # TODO https://gem5.atlassian.net/browse/GEM5-846 final_tick = Root.getInstance().resolveStat("finalTick").value sim_ticks = Root.getInstance().resolveStat("simTicks").value simulated_begin_time = int(final_tick - sim_ticks) @@ -284,16 +287,16 @@ def get_simstat(root: Union[SimObject, List[SimObject]], _prepare_stats(r) stats_map[r.get_name()] = get_stats_group(r) else: - raise TypeError("Object (" + str(r) + ") passed is not a " - "SimObject. " + __name__ + " only processes " - "SimObjects, or a list of SimObjects.") - - + raise TypeError( + "Object (" + str(r) + ") passed is not a " + "SimObject. " + __name__ + " only processes " + "SimObjects, or a list of SimObjects." 
+ ) return SimStat( - creation_time=creation_time, - time_conversion=time_converstion, - simulated_begin_time=simulated_begin_time, - simulated_end_time=simulated_end_time, - **stats_map, - ) + creation_time=creation_time, + time_conversion=time_converstion, + simulated_begin_time=simulated_begin_time, + simulated_end_time=simulated_end_time, + **stats_map, + ) diff --git a/src/python/m5/ticks.py b/src/python/m5/ticks.py index 1ec012bd59..c1c6a507ee 100644 --- a/src/python/m5/ticks.py +++ b/src/python/m5/ticks.py @@ -32,8 +32,10 @@ from m5.util import warn # fix the global frequency def fixGlobalFrequency(): import _m5.core + _m5.core.fixClockFrequency() + def setGlobalFrequency(ticksPerSecond): from m5.util import convert import _m5.core @@ -46,12 +48,15 @@ def setGlobalFrequency(ticksPerSecond): tps = round(convert.anyToFrequency(ticksPerSecond)) else: raise TypeError( - "wrong type '%s' for ticksPerSecond" % type(ticksPerSecond)) + "wrong type '%s' for ticksPerSecond" % type(ticksPerSecond) + ) _m5.core.setClockFrequency(int(tps)) + # how big does a rounding error need to be before we warn about it? 
frequency_tolerance = 0.001 # 0.1% + def fromSeconds(value): import _m5.core @@ -62,7 +67,8 @@ def fromSeconds(value): # had better be fixed if not _m5.core.clockFrequencyFixed(): raise AttributeError( - "In order to do conversions, the global frequency must be fixed") + "In order to do conversions, the global frequency must be fixed" + ) if value == 0: return 0 @@ -71,12 +77,21 @@ def fromSeconds(value): value *= _m5.core.getClockFrequency() int_value = int( - decimal.Decimal(value).to_integral_value( decimal.ROUND_HALF_UP)) + decimal.Decimal(value).to_integral_value(decimal.ROUND_HALF_UP) + ) err = (value - int_value) / value if err > frequency_tolerance: - warn("rounding error > tolerance\n %f rounded to %d", value, - int_value) + warn( + "rounding error > tolerance\n %f rounded to %d", + value, + int_value, + ) return int_value -__all__ = [ 'setGlobalFrequency', 'fixGlobalFrequency', 'fromSeconds', - 'frequency_tolerance' ] + +__all__ = [ + "setGlobalFrequency", + "fixGlobalFrequency", + "fromSeconds", + "frequency_tolerance", +] diff --git a/src/python/m5/util/__init__.py b/src/python/m5/util/__init__.py index f5b5c798e5..bc4ab4a0f5 100644 --- a/src/python/m5/util/__init__.py +++ b/src/python/m5/util/__init__.py @@ -52,65 +52,77 @@ from .multidict import multidict # ever happen regardless of what the user does (i.e., an acutal m5 # bug). def panic(fmt, *args): - print('panic:', fmt % args, file=sys.stderr) + print("panic:", fmt % args, file=sys.stderr) sys.exit(1) + # fatal() should be called when the simulation cannot continue due to # some condition that is the user's fault (bad configuration, invalid # arguments, etc.) and not a simulator bug. 
def fatal(fmt, *args): - print('fatal:', fmt % args, file=sys.stderr) + print("fatal:", fmt % args, file=sys.stderr) sys.exit(1) + # warn() should be called when the user should be warned about some condition # that may or may not be the user's fault, but that they should be made aware # of as it may affect the simulation or results. def warn(fmt, *args): - print('warn:', fmt % args, file=sys.stderr) + print("warn:", fmt % args, file=sys.stderr) + # inform() should be called when the user should be informed about some # condition that they may be interested in. def inform(fmt, *args): - print('info:', fmt % args, file=sys.stdout) + print("info:", fmt % args, file=sys.stdout) + def callOnce(func): """Decorator that enables to run a given function only once. Subsequent calls are discarded.""" + @wraps(func) def wrapper(*args, **kwargs): if not wrapper.has_run: wrapper.has_run = True return func(*args, **kwargs) + wrapper.has_run = False return wrapper + def deprecated(replacement=None, logger=warn): """This decorator warns the user about a deprecated function.""" + def decorator(func): @callOnce def notifyDeprecation(): try: - func_name = lambda f: f.__module__ + '.' + f.__qualname__ - message = f'Function {func_name(func)} is deprecated.' + func_name = lambda f: f.__module__ + "." + f.__qualname__ + message = f"Function {func_name(func)} is deprecated." if replacement: - message += f' Prefer {func_name(replacement)} instead.' + message += f" Prefer {func_name(replacement)} instead." except AttributeError: - message = f'Function {func} is deprecated.' + message = f"Function {func} is deprecated." if replacement: - message += f' Prefer {replacement} instead.' + message += f" Prefer {replacement} instead." 
logger(message) + notifyDeprecation() return func + return decorator + class Singleton(type): def __call__(cls, *args, **kwargs): - if hasattr(cls, '_instance'): + if hasattr(cls, "_instance"): return cls._instance cls._instance = super().__call__(*args, **kwargs) return cls._instance + def addToPath(path): """Prepend given directory to system module search path. We may not need this anymore if we can structure our config library more like a @@ -125,6 +137,7 @@ def addToPath(path): # so place the new dir right after that. sys.path.insert(1, path) + def repoPath(): """ Return the abspath of the gem5 repository. @@ -132,15 +145,15 @@ def repoPath(): /build//gem5.[opt,debug...] """ - return os.path.dirname( - os.path.dirname( - os.path.dirname(sys.executable))) + return os.path.dirname(os.path.dirname(os.path.dirname(sys.executable))) + # Apply method to object. # applyMethod(obj, 'meth', ) is equivalent to obj.meth() def applyMethod(obj, meth, *args, **kwargs): return getattr(obj, meth)(*args, **kwargs) + # If the first argument is an (non-sequence) object, apply the named # method with the given arguments. 
If the first argument is a # sequence, apply the method to each element of the sequence (a la @@ -151,6 +164,7 @@ def applyOrMap(objOrSeq, meth, *args, **kwargs): else: return [applyMethod(o, meth, *args, **kwargs) for o in objOrSeq] + def crossproduct(items): if len(items) == 1: for i in items[0]: @@ -160,6 +174,7 @@ def crossproduct(items): for j in crossproduct(items[1:]): yield (i,) + j + def flatten(items): while items: item = items.pop(0) @@ -168,25 +183,28 @@ def flatten(items): else: yield item + # force scalars to one-element lists for uniformity def makeList(objOrList): if isinstance(objOrList, list): return objOrList return [objOrList] + def printList(items, indent=4): - line = ' ' * indent - for i,item in enumerate(items): + line = " " * indent + for i, item in enumerate(items): if len(line) + len(item) > 76: print(line) - line = ' ' * indent + line = " " * indent if i < len(items) - 1: - line += '%s, ' % item + line += "%s, " % item else: line += item print(line) + def isInteractive(): """Check if the simulator is run interactively or in a batch environment""" diff --git a/src/python/m5/util/attrdict.py b/src/python/m5/util/attrdict.py index d953e8c666..0ff2103277 100644 --- a/src/python/m5/util/attrdict.py +++ b/src/python/m5/util/attrdict.py @@ -24,17 +24,19 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-__all__ = [ 'attrdict', 'multiattrdict', 'optiondict' ] +__all__ = ["attrdict", "multiattrdict", "optiondict"] + class attrdict(dict): """Wrap dict, so you can use attribute access to get/set elements""" + def __getattr__(self, attr): if attr in self: return self.__getitem__(attr) return super().__getattribute__(attr) def __setattr__(self, attr, value): - if attr in dir(self) or attr.startswith('_'): + if attr in dir(self) or attr.startswith("_"): return super().__setattr__(attr, value) return self.__setitem__(attr, value) @@ -49,40 +51,45 @@ class attrdict(dict): def __setstate__(self, state): self.update(state) + class multiattrdict(attrdict): """Wrap attrdict so that nested attribute accesses automatically create nested dictionaries.""" + def __getattr__(self, attr): try: return super().__getattr__(attr) except AttributeError: - if attr.startswith('_'): + if attr.startswith("_"): raise d = multiattrdict() setattr(self, attr, d) return d + class optiondict(attrdict): """Modify attrdict so that a missing attribute just returns None""" + def __getattr__(self, attr): try: return super().__getattr__(attr) except AttributeError: return None -if __name__ == '__main__': + +if __name__ == "__main__": x = attrdict() x.y = 1 - x['z'] = 2 - print(x['y'], x.y) - print(x['z'], x.z) + x["z"] = 2 + print(x["y"], x.y) + print(x["z"], x.z) print(dir(x)) print(x) print() - del x['y'] + del x["y"] del x.z print(dir(x)) print(x) diff --git a/src/python/m5/util/convert.py b/src/python/m5/util/convert.py index e66eb5c049..ca897ba9c8 100644 --- a/src/python/m5/util/convert.py +++ b/src/python/m5/util/convert.py @@ -38,10 +38,10 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# metric prefixes -atto = 1.0e-18 +atto = 1.0e-18 femto = 1.0e-15 -pico = 1.0e-12 -nano = 1.0e-9 +pico = 1.0e-12 +nano = 1.0e-9 micro = 1.0e-6 milli = 1.0e-3 @@ -50,7 +50,7 @@ mega = 1.0e6 giga = 1.0e9 tera = 1.0e12 peta = 1.0e15 -exa = 1.0e18 +exa = 1.0e18 # power of 2 prefixes kibi = 1024 @@ -61,47 +61,49 @@ pebi = tebi * 1024 exbi = pebi * 1024 metric_prefixes = { - 'Ei': exbi, - 'E': exa, - 'Pi': pebi, - 'P': peta, - 'Ti': tebi, - 'T': tera, - 'Gi': gibi, - 'G': giga, - 'M': mega, - 'Ki': kibi, - 'k': kilo, - 'Mi': mebi, - 'm': milli, - 'u': micro, - 'n': nano, - 'p': pico, - 'f': femto, - 'a': atto, + "Ei": exbi, + "E": exa, + "Pi": pebi, + "P": peta, + "Ti": tebi, + "T": tera, + "Gi": gibi, + "G": giga, + "M": mega, + "Ki": kibi, + "k": kilo, + "Mi": mebi, + "m": milli, + "u": micro, + "n": nano, + "p": pico, + "f": femto, + "a": atto, } binary_prefixes = { - 'Ei': exbi, - 'E' : exbi, - 'Pi': pebi, - 'P' : pebi, - 'Ti': tebi, - 'T' : tebi, - 'Gi': gibi, - 'G' : gibi, - 'Mi': mebi, - 'M' : mebi, - 'Ki': kibi, - 'k' : kibi, + "Ei": exbi, + "E": exbi, + "Pi": pebi, + "P": pebi, + "Ti": tebi, + "T": tebi, + "Gi": gibi, + "G": gibi, + "Mi": mebi, + "M": mebi, + "Ki": kibi, + "k": kibi, } + def assertStr(value): if not isinstance(value, str): raise TypeError("wrong type '%s' should be str" % type(value)) + def _split_suffix(value, suffixes): - '''Split a string based on a suffix from a list of suffixes. + """Split a string based on a suffix from a list of suffixes. :param value: String value to test for a matching suffix. :param suffixes: Container of suffixes to test. @@ -109,16 +111,15 @@ def _split_suffix(value, suffixes): :returns: A tuple of (value, suffix). Suffix is the empty string if there is no match. 
- ''' - matches = [ sfx for sfx in suffixes if value.endswith(sfx) ] + """ + matches = [sfx for sfx in suffixes if value.endswith(sfx)] assert len(matches) <= 1 - return (value[:-len(matches[0])], matches[0]) if matches \ - else (value, '') + return (value[: -len(matches[0])], matches[0]) if matches else (value, "") def toNum(value, target_type, units, prefixes, converter): - '''Convert a string using units and prefixes to (typically) a float or + """Convert a string using units and prefixes to (typically) a float or integer. String values are assumed to either be a naked magnitude without a @@ -133,7 +134,7 @@ def toNum(value, target_type, units, prefixes, converter): :returns: Tuple of (converted value, unit) - ''' + """ assertStr(value) def convert(val): @@ -141,7 +142,8 @@ def toNum(value, target_type, units, prefixes, converter): return converter(val) except ValueError: raise ValueError( - "cannot convert '%s' to %s" % (value, target_type)) + "cannot convert '%s' to %s" % (value, target_type) + ) # Units can be None, the empty string, or a list/tuple. Convert # to a tuple for consistent handling. 
@@ -159,56 +161,67 @@ def toNum(value, target_type, units, prefixes, converter): magnitude, prefix = _split_suffix(magnitude_prefix, prefixes) scale = prefixes[prefix] if prefix else 1 else: - magnitude, prefix, scale = magnitude_prefix, '', 1 + magnitude, prefix, scale = magnitude_prefix, "", 1 return convert(magnitude) * scale, unit -def toFloat(value, target_type='float', units=None, prefixes=[]): + +def toFloat(value, target_type="float", units=None, prefixes=[]): return toNum(value, target_type, units, prefixes, float)[0] -def toMetricFloat(value, target_type='float', units=None): + +def toMetricFloat(value, target_type="float", units=None): return toFloat(value, target_type, units, metric_prefixes) -def toBinaryFloat(value, target_type='float', units=None): + +def toBinaryFloat(value, target_type="float", units=None): return toFloat(value, target_type, units, binary_prefixes) -def toInteger(value, target_type='integer', units=None, prefixes=[]): - return toNum(value, target_type, units, prefixes, - lambda x: int(x, 0))[0] -def toMetricInteger(value, target_type='integer', units=None): +def toInteger(value, target_type="integer", units=None, prefixes=[]): + return toNum(value, target_type, units, prefixes, lambda x: int(x, 0))[0] + + +def toMetricInteger(value, target_type="integer", units=None): return toInteger(value, target_type, units, metric_prefixes) -def toBinaryInteger(value, target_type='integer', units=None): + +def toBinaryInteger(value, target_type="integer", units=None): return toInteger(value, target_type, units, binary_prefixes) + def toBool(value): assertStr(value) value = value.lower() - if value in ('true', 't', 'yes', 'y', '1'): + if value in ("true", "t", "yes", "y", "1"): return True - if value in ('false', 'f', 'no', 'n', '0'): + if value in ("false", "f", "no", "n", "0"): return False raise ValueError("cannot convert '%s' to bool" % value) + def toFrequency(value): - return toMetricFloat(value, 'frequency', 'Hz') + return 
toMetricFloat(value, "frequency", "Hz") + def toLatency(value): - return toMetricFloat(value, 'latency', 's') + return toMetricFloat(value, "latency", "s") + def anyToLatency(value): """Convert a magnitude and unit to a clock period.""" - magnitude, unit = toNum(value, - target_type='latency', - units=('Hz', 's'), - prefixes=metric_prefixes, - converter=float) - if unit == 's': + magnitude, unit = toNum( + value, + target_type="latency", + units=("Hz", "s"), + prefixes=metric_prefixes, + converter=float, + ) + if unit == "s": return magnitude - elif unit == 'Hz': + elif unit == "Hz": try: return 1.0 / magnitude except ZeroDivisionError: @@ -216,17 +229,20 @@ def anyToLatency(value): else: raise ValueError(f"'{value}' needs a valid unit to be unambiguous.") + def anyToFrequency(value): """Convert a magnitude and unit to a clock frequency.""" - magnitude, unit = toNum(value, - target_type='frequency', - units=('Hz', 's'), - prefixes=metric_prefixes, - converter=float) - if unit == 'Hz': + magnitude, unit = toNum( + value, + target_type="frequency", + units=("Hz", "s"), + prefixes=metric_prefixes, + converter=float, + ) + if unit == "Hz": return magnitude - elif unit == 's': + elif unit == "s": try: return 1.0 / magnitude except ZeroDivisionError: @@ -234,40 +250,49 @@ def anyToFrequency(value): else: raise ValueError(f"'{value}' needs a valid unit to be unambiguous.") + def toNetworkBandwidth(value): - return toMetricFloat(value, 'network bandwidth', 'bps') + return toMetricFloat(value, "network bandwidth", "bps") + def toMemoryBandwidth(value): - return toBinaryFloat(value, 'memory bandwidth', 'B/s') + return toBinaryFloat(value, "memory bandwidth", "B/s") + def toMemorySize(value): - return toBinaryInteger(value, 'memory size', 'B') + return toBinaryInteger(value, "memory size", "B") + def toIpAddress(value): if not isinstance(value, str): raise TypeError("wrong type '%s' should be str" % type(value)) - bytes = value.split('.') + bytes = value.split(".") if 
len(bytes) != 4: - raise ValueError('invalid ip address %s' % value) + raise ValueError("invalid ip address %s" % value) for byte in bytes: - if not 0 <= int(byte) <= 0xff: - raise ValueError('invalid ip address %s' % value) + if not 0 <= int(byte) <= 0xFF: + raise ValueError("invalid ip address %s" % value) + + return ( + (int(bytes[0]) << 24) + | (int(bytes[1]) << 16) + | (int(bytes[2]) << 8) + | (int(bytes[3]) << 0) + ) - return (int(bytes[0]) << 24) | (int(bytes[1]) << 16) | \ - (int(bytes[2]) << 8) | (int(bytes[3]) << 0) def toIpNetmask(value): if not isinstance(value, str): raise TypeError("wrong type '%s' should be str" % type(value)) - (ip, netmask) = value.split('/') + (ip, netmask) = value.split("/") ip = toIpAddress(ip) - netmaskParts = netmask.split('.') + netmaskParts = netmask.split(".") if len(netmaskParts) == 1: if not 0 <= int(netmask) <= 32: - raise ValueError('invalid netmask %s' % netmask) + raise ValueError("invalid netmask %s" % netmask) return (ip, int(netmask)) elif len(netmaskParts) == 4: netmaskNum = toIpAddress(netmask) @@ -275,45 +300,52 @@ def toIpNetmask(value): return (ip, 0) testVal = 0 for i in range(32): - testVal |= (1 << (31 - i)) + testVal |= 1 << (31 - i) if testVal == netmaskNum: return (ip, i + 1) - raise ValueError('invalid netmask %s' % netmask) + raise ValueError("invalid netmask %s" % netmask) else: - raise ValueError('invalid netmask %s' % netmask) + raise ValueError("invalid netmask %s" % netmask) + def toIpWithPort(value): if not isinstance(value, str): raise TypeError("wrong type '%s' should be str" % type(value)) - (ip, port) = value.split(':') + (ip, port) = value.split(":") ip = toIpAddress(ip) - if not 0 <= int(port) <= 0xffff: - raise ValueError('invalid port %s' % port) + if not 0 <= int(port) <= 0xFFFF: + raise ValueError("invalid port %s" % port) return (ip, int(port)) + def toVoltage(value): - return toMetricFloat(value, 'voltage', 'V') + return toMetricFloat(value, "voltage", "V") + def toCurrent(value): - 
return toMetricFloat(value, 'current', 'A') + return toMetricFloat(value, "current", "A") + def toEnergy(value): - return toMetricFloat(value, 'energy', 'J') + return toMetricFloat(value, "energy", "J") + def toTemperature(value): """Convert a string value specified to a temperature in Kelvin""" - magnitude, unit = toNum(value, - target_type='temperature', - units=('K', 'C', 'F'), - prefixes=metric_prefixes, - converter=float) - if unit == 'K': + magnitude, unit = toNum( + value, + target_type="temperature", + units=("K", "C", "F"), + prefixes=metric_prefixes, + converter=float, + ) + if unit == "K": kelvin = magnitude - elif unit == 'C': + elif unit == "C": kelvin = magnitude + 273.15 - elif unit == 'F': + elif unit == "F": kelvin = (magnitude + 459.67) / 1.8 else: raise ValueError(f"'{value}' needs a valid temperature unit.") diff --git a/src/python/m5/util/dot_writer.py b/src/python/m5/util/dot_writer.py index 7b10fdcf32..6d49f4ef79 100644 --- a/src/python/m5/util/dot_writer.py +++ b/src/python/m5/util/dot_writer.py @@ -57,11 +57,13 @@ import m5, os, re from m5.SimObject import isRoot, isSimObjectVector from m5.params import PortRef, isNullPointer from m5.util import warn + try: import pydot except: pydot = False + def simnode_children(simNode): for child in simNode._children.values(): if isNullPointer(child): @@ -73,15 +75,16 @@ def simnode_children(simNode): else: yield child + # need to create all nodes (components) before creating edges (memory channels) def dot_create_nodes(simNode, callgraph): if isRoot(simNode): label = "root" else: label = simNode._name - full_path = re.sub('\.', '_', simNode.path()) + full_path = re.sub("\.", "_", simNode.path()) # add class name under the label - label = "\"" + label + " \\n: " + simNode.__class__.__name__ + "\"" + label = '"' + label + " \\n: " + simNode.__class__.__name__ + '"' # each component is a sub-graph (cluster) cluster = dot_create_cluster(simNode, full_path, label) @@ -100,12 +103,13 @@ def 
dot_create_nodes(simNode, callgraph): callgraph.add_subgraph(cluster) + # create all edges according to memory hierarchy def dot_create_edges(simNode, callgraph): for port_name in simNode._ports.keys(): port = simNode._port_refs.get(port_name, None) if port != None: - full_path = re.sub('\.', '_', simNode.path()) + full_path = re.sub("\.", "_", simNode.path()) full_port_name = full_path + "_" + port_name port_node = dot_create_node(simNode, full_port_name, port_name) # create edges @@ -121,24 +125,25 @@ def dot_create_edges(simNode, callgraph): for child in simnode_children(simNode): dot_create_edges(child, callgraph) + def dot_add_edge(simNode, callgraph, full_port_name, port): peer = port.peer - full_peer_path = re.sub('\.', '_', peer.simobj.path()) + full_peer_path = re.sub("\.", "_", peer.simobj.path()) full_peer_port_name = full_peer_path + "_" + peer.name # Each edge is encountered twice, once for each peer. We only want one # edge, so we'll arbitrarily chose which peer "wins" based on their names. 
if full_peer_port_name < full_port_name: dir_type = { - (False, False) : 'both', - (True, False) : 'forward', - (False, True) : 'back', - (True, True) : 'none' - }[ (port.is_source, - peer.is_source) ] + (False, False): "both", + (True, False): "forward", + (False, True): "back", + (True, True): "none", + }[(port.is_source, peer.is_source)] edge = pydot.Edge(full_port_name, full_peer_port_name, dir=dir_type) callgraph.add_edge(edge) + def dot_create_cluster(simNode, full_path, label): # get the parameter values of the node and use them as a tooltip ini_strings = [] @@ -146,36 +151,45 @@ def dot_create_cluster(simNode, full_path, label): value = simNode._values.get(param) if value != None: # parameter name = value in HTML friendly format - ini_strings.append(str(param) + "=" + - simNode._values[param].ini_str()) + ini_strings.append( + str(param) + "=" + simNode._values[param].ini_str() + ) # join all the parameters with an HTML newline + # Pydot limit line length to 16384. + # Account for the quotes added later around the tooltip string tooltip = " \\".join(ini_strings) + max_tooltip_length = 16384 - 2 + if len(tooltip) > max_tooltip_length: + truncated = "... 
(truncated)" + tooltip = tooltip[: max_tooltip_length - len(truncated)] + truncated + + return pydot.Cluster( + full_path, + shape="box", + label=label, + tooltip='"' + tooltip + '"', + style='"rounded, filled"', + color="#000000", + fillcolor=dot_gen_colour(simNode), + fontname="Arial", + fontsize="14", + fontcolor="#000000", + ) - return pydot.Cluster( \ - full_path, \ - shape = "Mrecord", \ - label = label, \ - tooltip = "\"" + tooltip + "\"", \ - style = "\"rounded, filled\"", \ - color = "#000000", \ - fillcolor = dot_gen_colour(simNode), \ - fontname = "Arial", \ - fontsize = "14", \ - fontcolor = "#000000" \ - ) def dot_create_node(simNode, full_path, label): - return pydot.Node( \ - full_path, \ - shape = "Mrecord", \ - label = label, \ - style = "\"rounded, filled\"", \ - color = "#000000", \ - fillcolor = dot_gen_colour(simNode, True), \ - fontname = "Arial", \ - fontsize = "14", \ - fontcolor = "#000000" \ - ) + return pydot.Node( + full_path, + shape="box", + label=label, + style='"rounded, filled"', + color="#000000", + fillcolor=dot_gen_colour(simNode, True), + fontname="Arial", + fontsize="14", + fontcolor="#000000", + ) + # an enumerator for different kinds of node types, at the moment we # discern the majority of node types, with the caches being the @@ -188,17 +202,20 @@ class NodeType: DEV = 4 OTHER = 5 + # based on the sim object, determine the node type def get_node_type(simNode): if isinstance(simNode, m5.objects.System): return NodeType.SYS # NULL ISA has no BaseCPU or PioDevice, so check if these names # exists before using them - elif 'BaseCPU' in dir(m5.objects) and \ - isinstance(simNode, m5.objects.BaseCPU): + elif "BaseCPU" in dir(m5.objects) and isinstance( + simNode, m5.objects.BaseCPU + ): return NodeType.CPU - elif 'PioDevice' in dir(m5.objects) and \ - isinstance(simNode, m5.objects.PioDevice): + elif "PioDevice" in dir(m5.objects) and isinstance( + simNode, m5.objects.PioDevice + ): return NodeType.DEV elif isinstance(simNode, 
m5.objects.BaseXBar): return NodeType.XBAR @@ -207,6 +224,7 @@ def get_node_type(simNode): else: return NodeType.OTHER + # based on the node type, determine the colour as an RGB tuple, the # palette is rather arbitrary at this point (some coherent natural # tones), and someone that feels artistic should probably have a look @@ -225,9 +243,10 @@ def get_type_colour(nodeType): # use a relatively gray shade return (186, 182, 174) + # generate colour for a node, either corresponding to a sim object or a # port -def dot_gen_colour(simNode, isPort = False): +def dot_gen_colour(simNode, isPort=False): # determine the type of the current node, and also its parent, if # the node is not the same type as the parent then we use the base # colour for its type @@ -263,35 +282,38 @@ def dot_gen_colour(simNode, isPort = False): return dot_rgb_to_html(r, g, b) + def dot_rgb_to_html(r, g, b): return "#%.2x%.2x%.2x" % (int(r), int(g), int(b)) + # We need to create all of the clock domains. We abuse the alpha channel to get # the correct domain colouring. 
def dot_add_clk_domain(c_dom, v_dom): - label = "\"" + str(c_dom) + "\ :\ " + str(v_dom) + "\"" - label = re.sub('\.', '_', str(label)) - full_path = re.sub('\.', '_', str(c_dom)) - return pydot.Cluster( \ - full_path, \ - shape = "Mrecord", \ - label = label, \ - style = "\"rounded, filled, dashed\"", \ - color = "#000000", \ - fillcolor = "#AFC8AF8F", \ - fontname = "Arial", \ - fontsize = "14", \ - fontcolor = "#000000" \ - ) + label = '"' + str(c_dom) + "\ :\ " + str(v_dom) + '"' + label = re.sub("\.", "_", str(label)) + full_path = re.sub("\.", "_", str(c_dom)) + return pydot.Cluster( + full_path, + shape="box", + label=label, + style='"rounded, filled, dashed"', + color="#000000", + fillcolor="#AFC8AF8F", + fontname="Arial", + fontsize="14", + fontcolor="#000000", + ) + def dot_create_dvfs_nodes(simNode, callgraph, domain=None): if isRoot(simNode): label = "root" else: label = simNode._name - full_path = re.sub('\.', '_', simNode.path()) + full_path = re.sub("\.", "_", simNode.path()) # add class name under the label - label = "\"" + label + " \\n: " + simNode.__class__.__name__ + "\"" + label = '"' + label + " \\n: " + simNode.__class__.__name__ + '"' # each component is a sub-graph (cluster) cluster = dot_create_cluster(simNode, full_path, label) @@ -310,12 +332,12 @@ def dot_create_dvfs_nodes(simNode, callgraph, domain=None): # recurse to children for child in simnode_children(simNode): try: - c_dom = child.__getattr__('clk_domain') - v_dom = c_dom.__getattr__('voltage_domain') + c_dom = child.__getattr__("clk_domain") + v_dom = c_dom.__getattr__("voltage_domain") except AttributeError: # Just re-use the domain from above c_dom = domain - v_dom = c_dom.__getattr__('voltage_domain') + v_dom = c_dom.__getattr__("voltage_domain") pass if c_dom == domain or c_dom == None: @@ -333,16 +355,19 @@ def dot_create_dvfs_nodes(simNode, callgraph, domain=None): callgraph.add_subgraph(cluster) + def do_dot(root, outdir, dotFilename): if not pydot: - warn("No dot file 
generated. " + - "Please install pydot to generate the dot file and pdf.") + warn( + "No dot file generated. " + + "Please install pydot to generate the dot file and pdf." + ) return # * use ranksep > 1.0 for for vertical separation between nodes # especially useful if you need to annotate edges using e.g. visio # which accepts svg format # * no need for hoizontal separation as nothing moves horizonally - callgraph = pydot.Dot(graph_type='digraph', ranksep='1.3') + callgraph = pydot.Dot(graph_type="digraph", ranksep="1.3") dot_create_nodes(root, callgraph) dot_create_edges(root, callgraph) dot_filename = os.path.join(outdir, dotFilename) @@ -355,16 +380,19 @@ def do_dot(root, outdir, dotFilename): except: warn("failed to generate dot output from %s", dot_filename) + def do_dvfs_dot(root, outdir, dotFilename): if not pydot: - warn("No dot file generated. " + - "Please install pydot to generate the dot file and pdf.") + warn( + "No dot file generated. " + + "Please install pydot to generate the dot file and pdf." + ) return # There is a chance that we are unable to resolve the clock or # voltage domains. If so, we fail silently. 
try: - dvfsgraph = pydot.Dot(graph_type='digraph', ranksep='1.3') + dvfsgraph = pydot.Dot(graph_type="digraph", ranksep="1.3") dot_create_dvfs_nodes(root, dvfsgraph) dot_create_edges(root, dvfsgraph) dot_filename = os.path.join(outdir, dotFilename) diff --git a/src/python/m5/util/dot_writer_ruby.py b/src/python/m5/util/dot_writer_ruby.py index 1794cef759..e23a1064bc 100644 --- a/src/python/m5/util/dot_writer_ruby.py +++ b/src/python/m5/util/dot_writer_ruby.py @@ -38,6 +38,7 @@ import os import m5 from m5.util import warn + try: import pydot except: @@ -47,46 +48,58 @@ except: def _dot_rgb_to_html(r, g, b): return "#%.2x%.2x%.2x" % (r, g, b) + def _dot_create_router_node(full_path, label): - return pydot.Node( \ - full_path, \ - shape = "Mrecord", \ - label = label, \ - style = "\"rounded, filled\"", \ - color = "#000000", \ - fillcolor = _dot_rgb_to_html(204, 230, 252), \ - fontname = "Arial", \ - fontsize = "14", \ - fontcolor = "#000000" \ - ) + return pydot.Node( + full_path, + shape="Mrecord", + label=label, + style='"rounded, filled"', + color="#000000", + fillcolor=_dot_rgb_to_html(204, 230, 252), + fontname="Arial", + fontsize="14", + fontcolor="#000000", + ) + def _dot_create_ctrl_node(full_path, label): - return pydot.Node( \ - full_path, \ - shape = "Mrecord", \ - label = label, \ - style = "\"rounded, filled\"", \ - color = "#000000", \ - fillcolor = _dot_rgb_to_html(229, 188, 208), \ - fontname = "Arial", \ - fontsize = "14", \ - fontcolor = "#000000" \ - ) + return pydot.Node( + full_path, + shape="Mrecord", + label=label, + style='"rounded, filled"', + color="#000000", + fillcolor=_dot_rgb_to_html(229, 188, 208), + fontname="Arial", + fontsize="14", + fontcolor="#000000", + ) + + +def _dot_create_int_edge(src, dst): + return pydot.Edge(src, dst, weight=0.5, color="#042d50", dir="forward") + + +def _dot_create_ext_edge(src, dst): + return pydot.Edge(src, dst, weight=1.0, color="#381526", dir="both") def _dot_create(network, callgraph): for r in 
network.routers: - callgraph.add_node(_dot_create_router_node(r.path(), - 'R %d' % r.router_id)) + callgraph.add_node( + _dot_create_router_node(r.path(), "R %d" % r.router_id) + ) # One link for each direction but draw one edge only connected = dict() for link in network.int_links: - if (link.src_node.path() in connected) and \ - (connected[link.src_node.path()] == link.dst_node.path()): - continue + if (link.src_node.path() in connected) and ( + connected[link.src_node.path()] == link.dst_node.path() + ): + continue callgraph.add_edge( - pydot.Edge(link.src_node.path(), link.dst_node.path()) + _dot_create_int_edge(link.src_node.path(), link.dst_node.path()) ) connected[link.dst_node.path()] = link.src_node.path() @@ -95,45 +108,45 @@ def _dot_create(network, callgraph): rpaths = [link.ext_node.path()[::-1] for link in network.ext_links] preffix = os.path.commonprefix(paths) suffix = os.path.commonprefix(rpaths)[::-1] + def strip_right(text, suffix): if not text.endswith(suffix): return text - return text[:len(text)-len(suffix)] + return text[: len(text) - len(suffix)] + def strip_left(text, prefix): if not text.startswith(prefix): return text - return text[len(prefix):] - + return text[len(prefix) :] for link in network.ext_links: ctrl = link.ext_node label = strip_right(strip_left(ctrl.path(), preffix), suffix) - if hasattr(ctrl, '_node_type'): - label += ' (' + ctrl._node_type + ')' - callgraph.add_node( - _dot_create_ctrl_node(ctrl.path(), label) - ) + if hasattr(ctrl, "_node_type"): + label += " (" + ctrl._node_type + ")" + callgraph.add_node(_dot_create_ctrl_node(ctrl.path(), label)) callgraph.add_edge( - pydot.Edge(link.ext_node.path(), link.int_node.path()) + _dot_create_ext_edge(link.ext_node.path(), link.int_node.path()) ) + def _do_dot(network, outdir, dotFilename): - callgraph = pydot.Dot(graph_type='graph', rankdir='LR') + callgraph = pydot.Dot(graph_type="graph", rankdir="LR") _dot_create(network, callgraph) dot_filename = os.path.join(outdir, 
dotFilename) callgraph.write(dot_filename) try: # dot crashes if the figure is extremely wide. # So avoid terminating simulation unnecessarily - callgraph.write_svg(dot_filename + ".svg", prog='neato') - callgraph.write_pdf(dot_filename + ".pdf", prog='neato') + callgraph.write_svg(dot_filename + ".svg", prog="neato") + callgraph.write_pdf(dot_filename + ".pdf", prog="neato") except: warn("failed to generate dot output from %s", dot_filename) def do_ruby_dot(root, outdir, dotFilename): - RubyNetwork = getattr(m5.objects, 'RubyNetwork', None) + RubyNetwork = getattr(m5.objects, "RubyNetwork", None) if not pydot or not RubyNetwork: return @@ -144,6 +157,7 @@ def do_ruby_dot(root, outdir, dotFilename): for network in filter(is_ruby_network, root.descendants()): # We assume each ruby system has a single network. - rubydotFilename = dotFilename.replace(".dot", - "." + network.get_parent().path() + ".dot") + rubydotFilename = dotFilename.replace( + ".dot", "." + network.get_parent().path() + ".dot" + ) _do_dot(network, outdir, rubydotFilename) diff --git a/src/python/m5/util/fdthelper.py b/src/python/m5/util/fdthelper.py index 8c90b1aa44..136936c512 100644 --- a/src/python/m5/util/fdthelper.py +++ b/src/python/m5/util/fdthelper.py @@ -41,12 +41,16 @@ import os from m5.SimObject import SimObject from m5.util import fatal + class FdtProperty(pyfdt.FdtProperty): """Create a property without values.""" + pass + class FdtPropertyWords(pyfdt.FdtPropertyWords): """Create a property with word (32-bit unsigned) values.""" + def __init__(self, name, words): if type(words) != list: words = [words] @@ -55,15 +59,19 @@ class FdtPropertyWords(pyfdt.FdtPropertyWords): words = [int(w, base=0) if type(w) == str else int(w) for w in words] super().__init__(name, words) + class FdtPropertyStrings(pyfdt.FdtPropertyStrings): """Create a property with string values.""" def __init__(self, name, strings): if type(strings) == str: strings = [strings] - strings = [str(string) for string in 
strings] # Make all values strings + strings = [ + str(string) for string in strings + ] # Make all values strings super().__init__(name, strings) + class FdtPropertyBytes(pyfdt.FdtPropertyBytes): """Create a property with integer (8-bit signed) values.""" @@ -72,10 +80,12 @@ class FdtPropertyBytes(pyfdt.FdtPropertyBytes): values = [values] # Make sure all values are ints (use automatic base detection if the # type is str) - values = [int(v, base=0) - if isinstance(v, str) else int(v) for v in values] + values = [ + int(v, base=0) if isinstance(v, str) else int(v) for v in values + ] super().__init__(name, values) + class FdtState(object): """Class for maintaining state while recursively generating a flattened device tree. The state tracks address, size and CPU address cell sizes, and @@ -88,10 +98,10 @@ class FdtState(object): """Instantiate values of this state. The state can only be initialized once.""" - self.addr_cells = kwargs.pop('addr_cells', 0) - self.size_cells = kwargs.pop('size_cells', 0) - self.cpu_cells = kwargs.pop('cpu_cells', 0) - self.interrupt_cells = kwargs.pop('interrupt_cells', 0) + self.addr_cells = kwargs.pop("addr_cells", 0) + self.size_cells = kwargs.pop("size_cells", 0) + self.cpu_cells = kwargs.pop("cpu_cells", 0) + self.interrupt_cells = kwargs.pop("interrupt_cells", 0) def phandle(self, obj): """Return a unique phandle number for a key. 
The key can be a SimObject @@ -104,7 +114,7 @@ class FdtState(object): try: key = str(obj) except ValueError: - raise ValueError('Phandle keys must be castable to str') + raise ValueError("Phandle keys must be castable to str") if not key in FdtState.phandles: FdtState.phandle_counter += 1 @@ -123,7 +133,9 @@ class FdtState(object): if (value >> (32 * cells)) != 0: fatal("Value %d doesn't fit in %d cells" % (value, cells)) - return [(value >> 32*(x-1)) & 0xFFFFFFFF for x in range(cells, 0, -1)] + return [ + (value >> 32 * (x - 1)) & 0xFFFFFFFF for x in range(cells, 0, -1) + ] def addrCells(self, addr): """Format an integer type according to the address_cells value of this @@ -166,8 +178,10 @@ class FdtState(object): class FdtNop(pyfdt.FdtNop): """Create an empty node.""" + pass + class FdtNode(pyfdt.FdtNode): def __init__(self, name, obj=None): """Create a new node and immediately set the phandle property, if obj @@ -180,7 +194,7 @@ class FdtNode(pyfdt.FdtNode): """Change the behavior of the normal append to override if a node with the same name already exists or merge if the name exists and is a node type. 
Can also take a list of subnodes, that each get appended.""" - if not hasattr(subnodes, '__iter__'): + if not hasattr(subnodes, "__iter__"): subnodes = [subnodes] for subnode in subnodes: @@ -193,8 +207,9 @@ class FdtNode(pyfdt.FdtNode): except ValueError: item = None - if isinstance(item, pyfdt.FdtNode) and \ - isinstance(subnode, pyfdt.FdtNode): + if isinstance(item, pyfdt.FdtNode) and isinstance( + subnode, pyfdt.FdtNode + ): item.merge(subnode) subnode = item @@ -210,7 +225,7 @@ class FdtNode(pyfdt.FdtNode): strings.""" if isinstance(compatible, str): compatible = [compatible] - self.append(FdtPropertyStrings('compatible', compatible)) + self.append(FdtPropertyStrings("compatible", compatible)) def appendPhandle(self, obj): """Append a phandle property to this node with the phandle of the @@ -221,6 +236,7 @@ class FdtNode(pyfdt.FdtNode): phandle = state.phandle(obj) self.append(FdtPropertyWords("phandle", [phandle])) + class Fdt(pyfdt.Fdt): def sortNodes(self, node): """Move all properties to the beginning and subnodes to the end @@ -251,7 +267,7 @@ class Fdt(pyfdt.Fdt): """Convert the device tree to DTB and write to a file.""" filename = os.path.realpath(filename) try: - with open(filename, 'wb') as f: + with open(filename, "wb") as f: f.write(self.to_dtb()) return filename except IOError: @@ -261,7 +277,7 @@ class Fdt(pyfdt.Fdt): """Convert the device tree to DTS and write to a file.""" filename = os.path.realpath(filename) try: - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write(self.to_dts()) return filename except IOError: diff --git a/src/python/m5/util/multidict.py b/src/python/m5/util/multidict.py index 78b2c8b025..f6ca6ba90a 100644 --- a/src/python/m5/util/multidict.py +++ b/src/python/m5/util/multidict.py @@ -24,10 +24,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-__all__ = [ 'multidict' ] +__all__ = ["multidict"] + class multidict(object): - def __init__(self, parent = {}, **kwargs): + def __init__(self, parent={}, **kwargs): self.local = dict(**kwargs) self.parent = parent self.deleted = {} @@ -67,13 +68,13 @@ class multidict(object): return len(self.local) + len(self.parent) def next(self): - for key,value in self.local.items(): - yield key,value + for key, value in self.local.items(): + yield key, value if self.parent: - for key,value in self.parent.next(): + for key, value in self.parent.next(): if key not in self.local and key not in self.deleted: - yield key,value + yield key, value def has_key(self, key): return key in self @@ -83,11 +84,11 @@ class multidict(object): yield item def keys(self): - for key,value in self.next(): + for key, value in self.next(): yield key def values(self): - for key,value in self.next(): + for key, value in self.next(): yield value def get(self, key, default=None): @@ -105,10 +106,10 @@ class multidict(object): return default def _dump(self): - print('multidict dump') + print("multidict dump") node = self while isinstance(node, multidict): - print(' ', node.local) + print(" ", node.local) node = node.parent def _dumpkey(self, key): @@ -120,52 +121,53 @@ class multidict(object): node = node.parent print(key, values) -if __name__ == '__main__': + +if __name__ == "__main__": test1 = multidict() test2 = multidict(test1) test3 = multidict(test2) test4 = multidict(test3) - test1['a'] = 'test1_a' - test1['b'] = 'test1_b' - test1['c'] = 'test1_c' - test1['d'] = 'test1_d' - test1['e'] = 'test1_e' + test1["a"] = "test1_a" + test1["b"] = "test1_b" + test1["c"] = "test1_c" + test1["d"] = "test1_d" + test1["e"] = "test1_e" - test2['a'] = 'test2_a' - del test2['b'] - test2['c'] = 'test2_c' - del test1['a'] + test2["a"] = "test2_a" + del test2["b"] + test2["c"] = "test2_c" + del test1["a"] - test2.setdefault('f', multidict) + test2.setdefault("f", multidict) - print('test1>', list(test1.items())) - 
print('test2>', list(test2.items())) - #print(test1['a']) - print(test1['b']) - print(test1['c']) - print(test1['d']) - print(test1['e']) + print("test1>", list(test1.items())) + print("test2>", list(test2.items())) + # print(test1['a']) + print(test1["b"]) + print(test1["c"]) + print(test1["d"]) + print(test1["e"]) - print(test2['a']) - #print(test2['b']) - print(test2['c']) - print(test2['d']) - print(test2['e']) + print(test2["a"]) + # print(test2['b']) + print(test2["c"]) + print(test2["d"]) + print(test2["e"]) for key in test2.keys(): print(key) - test2.get('g', 'foo') - #test2.get('b') - test2.get('b', 'bar') - test2.setdefault('b', 'blah') + test2.get("g", "foo") + # test2.get('b') + test2.get("b", "bar") + test2.setdefault("b", "blah") print(test1) print(test2) print(repr(test2)) print(len(test2)) - test3['a'] = [ 0, 1, 2, 3 ] + test3["a"] = [0, 1, 2, 3] print(test4) diff --git a/src/python/m5/util/pybind.py b/src/python/m5/util/pybind.py index bb73be9949..52d38e5302 100644 --- a/src/python/m5/util/pybind.py +++ b/src/python/m5/util/pybind.py @@ -35,11 +35,13 @@ from abc import * + class PyBindExport(object, metaclass=ABCMeta): @abstractmethod def export(self, code, cname): pass + class PyBindProperty(PyBindExport): def __init__(self, name, cxx_name=None, writable=True): self.name = name @@ -50,14 +52,21 @@ class PyBindProperty(PyBindExport): export = "def_readwrite" if self.writable else "def_readonly" code('.${export}("${{self.name}}", &${cname}::${{self.cxx_name}})') + class PyBindMethod(PyBindExport): - def __init__(self, name, cxx_name=None, args=None, - return_value_policy=None, static=False): + def __init__( + self, + name, + cxx_name=None, + args=None, + return_value_policy=None, + static=False, + ): self.name = name self.cxx_name = cxx_name if cxx_name else name self.args = args self.return_value_policy = return_value_policy - self.method_def = 'def_static' if static else 'def' + self.method_def = "def_static" if static else "def" def 
_conv_arg(self, value): if isinstance(value, bool): @@ -68,18 +77,23 @@ class PyBindMethod(PyBindExport): raise TypeError("Unsupported PyBind default value type") def export(self, code, cname): - arguments = [ '"${{self.name}}"', '&${cname}::${{self.cxx_name}}' ] + arguments = ['"${{self.name}}"', "&${cname}::${{self.cxx_name}}"] if self.return_value_policy: - arguments.append('pybind11::return_value_policy::' - '${{self.return_value_policy}}') + arguments.append( + "pybind11::return_value_policy::" + "${{self.return_value_policy}}" + ) if self.args: + def get_arg_decl(arg): if isinstance(arg, tuple): name, default = arg return 'py::arg("%s") = %s' % ( - name, self._conv_arg(default)) + name, + self._conv_arg(default), + ) else: return 'py::arg("%s")' % arg - arguments.extend(list([ get_arg_decl(a) for a in self.args ])) - code('.' + self.method_def + '(' + ', '.join(arguments) + ')') + arguments.extend(list([get_arg_decl(a) for a in self.args])) + code("." + self.method_def + "(" + ", ".join(arguments) + ")") diff --git a/src/python/m5/util/terminal.py b/src/python/m5/util/terminal.py index f50e92dbae..f3d53ac460 100644 --- a/src/python/m5/util/terminal.py +++ b/src/python/m5/util/terminal.py @@ -52,39 +52,46 @@ color_names = "Black Red Green Yellow Blue Magenta Cyan".split() # Please feel free to add information about other terminals here. 
# capability_map = { - 'Bold': 'bold', - 'Dim': 'dim', - 'Blink': 'blink', - 'Underline': 'smul', - 'Reverse': 'rev', - 'Standout': 'smso', - 'Normal': 'sgr0' + "Bold": "bold", + "Dim": "dim", + "Blink": "blink", + "Underline": "smul", + "Reverse": "rev", + "Standout": "smso", + "Normal": "sgr0", } capability_names = list(capability_map.keys()) + def null_cap_string(s, *args): - return '' + return "" + try: import curses + curses.setupterm() + def cap_string(s, *args): cap = curses.tigetstr(s) if cap: - return curses.tparm(cap, *args).decode('utf-8') + return curses.tparm(cap, *args).decode("utf-8") else: - return '' + return "" + except: cap_string = null_cap_string + class ColorStrings(object): def __init__(self, cap_string): for i, c in enumerate(color_names): - setattr(self, c, cap_string('setaf', i)) + setattr(self, c, cap_string("setaf", i)) for name, cap in capability_map.items(): setattr(self, name, cap_string(cap)) + termcap = ColorStrings(cap_string) no_termcap = ColorStrings(null_cap_string) @@ -93,7 +100,8 @@ if sys.stdout.isatty(): else: tty_termcap = no_termcap -def get_termcap(use_colors = None): + +def get_termcap(use_colors=None): if use_colors: return termcap elif use_colors is None: @@ -102,19 +110,27 @@ def get_termcap(use_colors = None): else: return no_termcap + def test_termcap(obj): for c_name in color_names: c_str = getattr(obj, c_name) print(c_str + c_name + obj.Normal) for attr_name in capability_names: - if attr_name == 'Normal': + if attr_name == "Normal": continue attr_str = getattr(obj, attr_name) print(attr_str + c_str + attr_name + " " + c_name + obj.Normal) - print(obj.Bold + obj.Underline + - c_name + "Bold Underline " + c + obj.Normal) + print( + obj.Bold + + obj.Underline + + c_name + + "Bold Underline " + + c + + obj.Normal + ) -if __name__ == '__main__': + +if __name__ == "__main__": print("=== termcap enabled ===") test_termcap(termcap) print(termcap.Normal) diff --git a/src/python/m5/util/terminal_formatter.py 
b/src/python/m5/util/terminal_formatter.py index f294d20437..8d533f8bb7 100644 --- a/src/python/m5/util/terminal_formatter.py +++ b/src/python/m5/util/terminal_formatter.py @@ -26,8 +26,8 @@ import textwrap -class TerminalFormatter: +class TerminalFormatter: def __init__(self, max_width=80): # text_width holds the actual width we'll be wrapping to. # This takes into account the current terminal size. @@ -35,9 +35,13 @@ class TerminalFormatter: def __terminal_size(self): import fcntl, termios, struct - h, w, hp, wp = struct.unpack('HHHH', - fcntl.ioctl(0, termios.TIOCGWINSZ, - struct.pack('HHHH', 0, 0, 0, 0))) + + h, w, hp, wp = struct.unpack( + "HHHH", + fcntl.ioctl( + 0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0) + ), + ) return w, h def __get_paragraphs(self, text, flatten=False): @@ -74,15 +78,17 @@ class TerminalFormatter: for line in text.splitlines(): stripped = line.strip() - if not stripped: #I.e. a blank line. + if not stripped: # I.e. a blank line. paragraphs.append( - {False: "\n", True: " "}[flatten].join(cur_paragraph)) + {False: "\n", True: " "}[flatten].join(cur_paragraph) + ) cur_paragraph = [] else: cur_paragraph.append(stripped) paragraphs.append( - {False: "\n", True: " "}[flatten].join(cur_paragraph)) + {False: "\n", True: " "}[flatten].join(cur_paragraph) + ) return paragraphs @@ -115,15 +121,19 @@ class TerminalFormatter: paragraphs = self.__get_paragraphs(text, True) # Wrap and Indent the paragraphs - wrapper = textwrap.TextWrapper(width = - max((self.__text_width - indent),1)) + wrapper = textwrap.TextWrapper( + width=max((self.__text_width - indent), 1) + ) # The first paragraph is special case due to the inclusion of the label - formatted_paragraphs = [' ' * max((indent - len(label)),0) \ - + label + wrapper.wrap(paragraphs[0])[0]] + formatted_paragraphs = [ + " " * max((indent - len(label)), 0) + + label + + wrapper.wrap(paragraphs[0])[0] + ] for paragraph in paragraphs: for line in wrapper.wrap(paragraph[1:])[1:]: - 
formatted_paragraphs.append(' ' * indent + line) - formatted_paragraphs.append('\n') + formatted_paragraphs.append(" " * indent + line) + formatted_paragraphs.append("\n") # Remove the last line break - return '\n'.join(formatted_paragraphs[:-1]) + return "\n".join(formatted_paragraphs[:-1]) diff --git a/src/python/pybind11/debug.cc b/src/python/pybind11/debug.cc index 3fc3d096eb..313ca81e6e 100644 --- a/src/python/pybind11/debug.cc +++ b/src/python/pybind11/debug.cc @@ -64,7 +64,7 @@ output(const char *filename) if (!file_stream) file_stream = simout.create(filename); - Trace::setDebugLogger(new Trace::OstreamLogger(*file_stream->stream())); + trace::setDebugLogger(new trace::OstreamLogger(*file_stream->stream())); } static void @@ -72,7 +72,7 @@ ignore(const char *expr) { ObjectMatch ignore(expr); - Trace::getDebugLogger()->addIgnore(ignore); + trace::getDebugLogger()->addIgnore(ignore); } void @@ -86,7 +86,6 @@ pybind_init_debug(py::module_ &m_native) .def("allFlags", &debug::allFlags, py::return_value_policy::reference) .def("schedBreak", &schedBreak) - .def("setRemoteGDBPort", &setRemoteGDBPort) ; py::class_ c_flag(m_debug, "Flag"); @@ -123,8 +122,8 @@ pybind_init_debug(py::module_ &m_native) m_trace .def("output", &output) .def("ignore", &ignore) - .def("enable", &Trace::enable) - .def("disable", &Trace::disable) + .def("enable", &trace::enable) + .def("disable", &trace::disable) ; } diff --git a/src/python/pybind11/event.cc b/src/python/pybind11/event.cc index 7a02221611..95e6ddb844 100644 --- a/src/python/pybind11/event.cc +++ b/src/python/pybind11/event.cc @@ -107,6 +107,8 @@ pybind_init_event(py::module_ &m_native) m.def("simulate", &simulate, py::arg("ticks") = MaxTick); + m.def("setMaxTick", &set_max_tick, py::arg("tick")); + m.def("getMaxTick", &get_max_tick, py::return_value_policy::copy); m.def("terminateEventQueueThreads", &terminateEventQueueThreads); m.def("exitSimLoop", &exitSimLoop); m.def("getEventQueue", []() { return curEventQueue(); }, diff 
--git a/src/sim/ClockDomain.py b/src/sim/ClockDomain.py index 61b2204ecd..d71252e1bc 100644 --- a/src/sim/ClockDomain.py +++ b/src/sim/ClockDomain.py @@ -39,17 +39,18 @@ from m5.proxy import * # Abstract clock domain class ClockDomain(SimObject): - type = 'ClockDomain' + type = "ClockDomain" cxx_header = "sim/clock_domain.hh" - cxx_class = 'gem5::ClockDomain' + cxx_class = "gem5::ClockDomain" abstract = True + # Source clock domain with an actual clock, and a list of voltage and frequency # op points class SrcClockDomain(ClockDomain): - type = 'SrcClockDomain' + type = "SrcClockDomain" cxx_header = "sim/clock_domain.hh" - cxx_class = 'gem5::SrcClockDomain' + cxx_class = "gem5::SrcClockDomain" # Single clock frequency value, or list of frequencies for DVFS # Frequencies must be ordered in descending order @@ -70,12 +71,13 @@ class SrcClockDomain(ClockDomain): # Defaults to maximum performance init_perf_level = Param.UInt32(0, "Initial performance level") + # Derived clock domain with a parent clock domain and a frequency # divider class DerivedClockDomain(ClockDomain): - type = 'DerivedClockDomain' + type = "DerivedClockDomain" cxx_header = "sim/clock_domain.hh" - cxx_class = 'gem5::DerivedClockDomain' + cxx_class = "gem5::DerivedClockDomain" clk_domain = Param.ClockDomain("Parent clock domain") clk_divider = Param.Unsigned(1, "Frequency divider") diff --git a/src/sim/ClockedObject.py b/src/sim/ClockedObject.py index 27a0e60364..5d1656e520 100644 --- a/src/sim/ClockedObject.py +++ b/src/sim/ClockedObject.py @@ -38,11 +38,12 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class ClockedObject(SimObject): - type = 'ClockedObject' + type = "ClockedObject" abstract = True cxx_header = "sim/clocked_object.hh" - cxx_class = 'gem5::ClockedObject' + cxx_class = "gem5::ClockedObject" # The clock domain this clocked object belongs to, inheriting the # parent's clock domain by default @@ -52,4 +53,3 @@ class ClockedObject(SimObject): 
power_model = VectorParam.PowerModel([], "Power models") power_state = Param.PowerState(PowerState(), "Power state") - diff --git a/src/sim/DVFSHandler.py b/src/sim/DVFSHandler.py index 08e7f117d6..f7064221e1 100644 --- a/src/sim/DVFSHandler.py +++ b/src/sim/DVFSHandler.py @@ -42,17 +42,18 @@ from m5.proxy import * # handle. The specific voltage and frequency points are configured per clock # and voltage domain. class DVFSHandler(SimObject): - type = 'DVFSHandler' + type = "DVFSHandler" cxx_header = "sim/dvfs_handler.hh" - cxx_class = 'gem5::DVFSHandler' + cxx_class = "gem5::DVFSHandler" # List of controllable clock domains which in turn reference the appropriate # voltage domains domains = VectorParam.SrcClockDomain([], "list of domains") # System domain (its clock and voltage) is not controllable - sys_clk_domain = Param.SrcClockDomain(Parent.clk_domain, - "Clk domain in which the handler is instantiated") + sys_clk_domain = Param.SrcClockDomain( + Parent.clk_domain, "Clk domain in which the handler is instantiated" + ) enable = Param.Bool(False, "Enable/Disable the handler") @@ -62,5 +63,6 @@ class DVFSHandler(SimObject): # accomodate this effect with ease, we provide a fixed transition latency # associated with all migrations. Configure this to maximum latency that # the hardware will take to migratate between any two perforamnce levels. 
- transition_latency = Param.Latency('100us', - "fixed latency for perf level migration") + transition_latency = Param.Latency( + "100us", "fixed latency for perf level migration" + ) diff --git a/src/sim/InstTracer.py b/src/sim/InstTracer.py index 83fe077dc5..34c97dd43e 100644 --- a/src/sim/InstTracer.py +++ b/src/sim/InstTracer.py @@ -27,8 +27,9 @@ from m5.SimObject import SimObject from m5.params import * + class InstTracer(SimObject): - type = 'InstTracer' + type = "InstTracer" cxx_header = "sim/insttracer.hh" - cxx_class = 'gem5::Trace::InstTracer' + cxx_class = "gem5::trace::InstTracer" abstract = True diff --git a/src/sim/PowerDomain.py b/src/sim/PowerDomain.py index 549d860f35..2f42343870 100644 --- a/src/sim/PowerDomain.py +++ b/src/sim/PowerDomain.py @@ -44,6 +44,6 @@ from m5.objects.PowerState import PowerState # change power state depeding on what the leader objects (CPUs for # example) do. The power domain is the link between these. class PowerDomain(PowerState): - type = 'PowerDomain' - cxx_header = 'sim/power_domain.hh' - cxx_class = 'gem5::PowerDomain' + type = "PowerDomain" + cxx_header = "sim/power_domain.hh" + cxx_class = "gem5::PowerDomain" diff --git a/src/sim/PowerState.py b/src/sim/PowerState.py index e87a998306..ca285fc68b 100644 --- a/src/sim/PowerState.py +++ b/src/sim/PowerState.py @@ -50,14 +50,14 @@ from m5.proxy import * # Sram_retention: The SRAMs within the logic blocks are pulled into retention # state to reduce leakage energy further. # Off: The logic block is power gated and is not consuming any energy. 
-class PwrState(Enum): vals = [ - 'UNDEFINED', 'ON', 'CLK_GATED', 'SRAM_RETENTION', 'OFF' -] +class PwrState(Enum): + vals = ["UNDEFINED", "ON", "CLK_GATED", "SRAM_RETENTION", "OFF"] + class PowerState(SimObject): - type = 'PowerState' + type = "PowerState" cxx_header = "sim/power_state.hh" - cxx_class = 'gem5::PowerState' + cxx_class = "gem5::PowerState" # Provide initial power state, should ideally get redefined in startup # routine @@ -66,13 +66,15 @@ class PowerState(SimObject): # Possible power states this object can be in sorted from the most # to the least performant possible_states = VectorParam.PwrState( - [], "Power states this object can be in") + [], "Power states this object can be in" + ) - clk_gate_min = Param.Latency('1ns',"Min value of the distribution") - clk_gate_max = Param.Latency('1s',"Max value of the distribution") - clk_gate_bins = Param.Unsigned('20', "# bins in clk gated distribution") + clk_gate_min = Param.Latency("1ns", "Min value of the distribution") + clk_gate_max = Param.Latency("1s", "Max value of the distribution") + clk_gate_bins = Param.Unsigned("20", "# bins in clk gated distribution") # The objects which drive the power state of this object. If the list is # empty, the object determines its power state independently. 
leaders = VectorParam.PowerState( - [], "Objects which drive the power state of this object") + [], "Objects which drive the power state of this object" + ) diff --git a/src/sim/Process.py b/src/sim/Process.py index a20bdffb2c..0b87b09485 100644 --- a/src/sim/Process.py +++ b/src/sim/Process.py @@ -29,47 +29,52 @@ from m5.params import * from m5.proxy import * from os import getcwd + class Process(SimObject): - type = 'Process' + type = "Process" cxx_header = "sim/process.hh" - cxx_class = 'gem5::Process' + cxx_class = "gem5::Process" @cxxMethod def map(self, vaddr, paddr, size, cacheable=False): pass - input = Param.String('cin', "filename for stdin") - output = Param.String('cout', 'filename for stdout') - errout = Param.String('cerr', 'filename for stderr') + input = Param.String("cin", "filename for stdin") + output = Param.String("cout", "filename for stdout") + errout = Param.String("cerr", "filename for stderr") system = Param.System(Parent.any, "system process will run on") - useArchPT = Param.Bool('false', 'maintain an in-memory version of the page\ - table in an architecture-specific format') - kvmInSE = Param.Bool('false', 'initialize the process for KvmCPU in SE') - maxStackSize = Param.MemorySize('64MiB', 'maximum size of the stack') + useArchPT = Param.Bool( + "false", + "maintain an in-memory version of the page\ + table in an architecture-specific format", + ) + kvmInSE = Param.Bool("false", "initialize the process for KvmCPU in SE") + maxStackSize = Param.MemorySize("64MiB", "maximum size of the stack") - uid = Param.Int(100, 'user id') - euid = Param.Int(100, 'effective user id') - gid = Param.Int(100, 'group id') - egid = Param.Int(100, 'effective group id') - pid = Param.Int(100, 'process id') - ppid = Param.Int(0, 'parent process id') - pgid = Param.Int(100, 'process group id') + uid = Param.Int(100, "user id") + euid = Param.Int(100, "effective user id") + gid = Param.Int(100, "group id") + egid = Param.Int(100, "effective group id") + pid = 
Param.Int(100, "process id") + ppid = Param.Int(0, "parent process id") + pgid = Param.Int(100, "process group id") - executable = Param.String('', "executable (overrides cmd[0] if set)") + executable = Param.String("", "executable (overrides cmd[0] if set)") cmd = VectorParam.String("command line (executable plus arguments)") env = VectorParam.String([], "environment settings") cwd = Param.String(getcwd(), "current working directory") - simpoint = Param.UInt64(0, 'simulation point at which to start simulation') - drivers = VectorParam.EmulatedDriver([], 'Available emulated drivers') - release = Param.String('5.1.0', "Linux kernel uname release") + simpoint = Param.UInt64(0, "simulation point at which to start simulation") + drivers = VectorParam.EmulatedDriver([], "Available emulated drivers") + release = Param.String("5.1.0", "Linux kernel uname release") @classmethod def export_methods(cls, code): - code('bool map(Addr vaddr, Addr paddr, int sz, bool cacheable=true);') + code("bool map(Addr vaddr, Addr paddr, int sz, bool cacheable=true);") + class EmulatedDriver(SimObject): - type = 'EmulatedDriver' + type = "EmulatedDriver" cxx_header = "sim/emul_driver.hh" - cxx_class = 'gem5::EmulatedDriver' + cxx_class = "gem5::EmulatedDriver" abstract = True filename = Param.String("device file name (under /dev)") diff --git a/src/sim/RedirectPath.py b/src/sim/RedirectPath.py index fb79c3d565..c6c63e2558 100644 --- a/src/sim/RedirectPath.py +++ b/src/sim/RedirectPath.py @@ -28,15 +28,17 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class RedirectPath(SimObject): - """ Stores paths for filesystem redirection during syscalls. If a path + """Stores paths for filesystem redirection during syscalls. If a path matches 'appPath', then the syscall is redirected to the first 'hostPath' that contains the non-overlapping portion of the path as a valid file. If there are no hits, then the syscall is redirected to the first value. 
""" - type = 'RedirectPath' + + type = "RedirectPath" cxx_header = "sim/redirect_path.hh" - cxx_class = 'gem5::RedirectPath' + cxx_class = "gem5::RedirectPath" app_path = Param.String("/", "filesystem path from an app's perspective") host_paths = VectorParam.String(["/"], "file path on host filesystem") diff --git a/src/sim/Root.py b/src/sim/Root.py index bad3004e18..5002cdcf81 100644 --- a/src/sim/Root.py +++ b/src/sim/Root.py @@ -30,6 +30,7 @@ from m5.SimObject import SimObject from m5.params import * from m5.util import fatal + class Root(SimObject): _the_instance = None @@ -53,11 +54,11 @@ class Root(SimObject): return Root._the_instance def path(self): - return 'root' + return "root" - type = 'Root' + type = "Root" cxx_header = "sim/root.hh" - cxx_class = 'gem5::Root' + cxx_class = "gem5::Root" # By default, root sim object and hence all other sim objects schedule # event on the eventq with index 0. @@ -72,5 +73,6 @@ class Root(SimObject): # Time syncing prevents the simulation from running faster than real time. 
time_sync_enable = Param.Bool(False, "whether time syncing is enabled") time_sync_period = Param.Clock("100ms", "how often to sync with real time") - time_sync_spin_threshold = \ - Param.Clock("100us", "when less than this much time is left, spin") + time_sync_spin_threshold = Param.Clock( + "100us", "when less than this much time is left, spin" + ) diff --git a/src/sim/SConscript b/src/sim/SConscript index 969a3e439c..75b595b8a1 100644 --- a/src/sim/SConscript +++ b/src/sim/SConscript @@ -104,18 +104,17 @@ GTest('proxy_ptr.test', 'proxy_ptr.test.cc') GTest('serialize.test', 'serialize.test.cc', with_tag('gem5 serialize')) GTest('serialize_handlers.test', 'serialize_handlers.test.cc') -if env['CONF']['TARGET_ISA'] != 'null': - SimObject('InstTracer.py', sim_objects=['InstTracer']) - SimObject('Process.py', sim_objects=['Process', 'EmulatedDriver']) - Source('faults.cc') - Source('process.cc') - Source('fd_array.cc') - Source('fd_entry.cc') - Source('mem_state.cc') - Source('pseudo_inst.cc') - Source('syscall_emul.cc') - Source('syscall_desc.cc') - Source('vma.cc') +SimObject('InstTracer.py', sim_objects=['InstTracer']) +SimObject('Process.py', sim_objects=['Process', 'EmulatedDriver']) +Source('faults.cc') +Source('process.cc') +Source('fd_array.cc') +Source('fd_entry.cc') +Source('mem_state.cc') +Source('pseudo_inst.cc') +Source('syscall_emul.cc') +Source('syscall_desc.cc') +Source('vma.cc') DebugFlag('Checkpoint') DebugFlag('Config') diff --git a/src/sim/SConsopts b/src/sim/SConsopts index 07997c75f4..98be89bc30 100644 --- a/src/sim/SConsopts +++ b/src/sim/SConsopts @@ -31,7 +31,7 @@ import gem5_scons with gem5_scons.Configure(main) as conf: if conf.CheckLibWithHeader([None, 'execinfo'], 'execinfo.h', 'C', - 'char temp; backtrace_symbols_fd((void *)&temp, 0, 0);'): + 'backtrace_symbols_fd((void *)1, 0, 0);'): conf.env['BACKTRACE_IMPL'] = 'glibc' else: conf.env['BACKTRACE_IMPL'] = 'none' diff --git a/src/sim/SubSystem.py b/src/sim/SubSystem.py index 
8220ed696e..fa0063ba1f 100644 --- a/src/sim/SubSystem.py +++ b/src/sim/SubSystem.py @@ -45,9 +45,9 @@ from m5.params import * # any naming conflicts. # class SubSystem(SimObject): - type = 'SubSystem' + type = "SubSystem" cxx_header = "sim/sub_system.hh" - cxx_class = 'gem5::SubSystem' + cxx_class = "gem5::SubSystem" abstract = False # Thermal domain associated to this object, inheriting the parent's diff --git a/src/sim/System.py b/src/sim/System.py index b5bd5df363..a5722e80fd 100644 --- a/src/sim/System.py +++ b/src/sim/System.py @@ -45,13 +45,15 @@ from m5.objects.DVFSHandler import * from m5.objects.SimpleMemory import * from m5.objects.Workload import StubWorkload -class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing', - 'atomic_noncaching'] + +class MemoryMode(Enum): + vals = ["invalid", "atomic", "timing", "atomic_noncaching"] + class System(SimObject): - type = 'System' + type = "System" cxx_header = "sim/system.hh" - cxx_class = 'gem5::System' + cxx_class = "gem5::System" system_port = RequestPort("System port") @@ -60,65 +62,87 @@ class System(SimObject): PyBindMethod("setMemoryMode"), ] - memories = VectorParam.AbstractMemory(Self.all, - "All memories in the system") - mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in") + memories = VectorParam.AbstractMemory( + Self.all, "All memories in the system" + ) + mem_mode = Param.MemoryMode("atomic", "The mode the memory system is in") thermal_model = Param.ThermalModel(NULL, "Thermal model") - thermal_components = VectorParam.SimObject([], - "A collection of all thermal components in the system.") + thermal_components = VectorParam.SimObject( + [], "A collection of all thermal components in the system." + ) # When reserving memory on the host, we have the option of # reserving swap space or not (by passing MAP_NORESERVE to # mmap). By enabling this flag, we accommodate cases where a large # (but sparse) memory is simulated. 
- mmap_using_noreserve = Param.Bool(False, "mmap the backing store " \ - "without reserving swap") + mmap_using_noreserve = Param.Bool( + False, "mmap the backing store " "without reserving swap" + ) # The memory ranges are to be populated when creating the system # such that these can be passed from the I/O subsystem through an # I/O bridge or cache - mem_ranges = VectorParam.AddrRange([], "Ranges that constitute main memory") + mem_ranges = VectorParam.AddrRange( + [], "Ranges that constitute main memory" + ) # The ranges backed by a shadowed ROM - shadow_rom_ranges = VectorParam.AddrRange([], "Ranges backed by a " \ - "shadowed ROM") + shadow_rom_ranges = VectorParam.AddrRange( + [], "Ranges backed by a " "shadowed ROM" + ) - shared_backstore = Param.String("", "backstore's shmem segment filename, " + shared_backstore = Param.String( + "", + "backstore's shmem segment filename, " "use to directly address the backstore from another host-OS process. " - "Leave this empty to unset the MAP_SHARED flag.") - auto_unlink_shared_backstore = Param.Bool(False, "Automatically remove the " + "Leave this empty to unset the MAP_SHARED flag.", + ) + auto_unlink_shared_backstore = Param.Bool( + False, + "Automatically remove the " "shmem segment file upon destruction. 
This is used only if " - "shared_backstore is non-empty.") + "shared_backstore is non-empty.", + ) cache_line_size = Param.Unsigned(64, "Cache line size in bytes") redirect_paths = VectorParam.RedirectPath([], "Path redirections") - exit_on_work_items = Param.Bool(False, "Exit from the simulation loop when " - "encountering work item annotations.") + exit_on_work_items = Param.Bool( + False, + "Exit from the simulation loop when " + "encountering work item annotations.", + ) work_item_id = Param.Int(-1, "specific work item id") num_work_ids = Param.Int(16, "Number of distinct work item types") - work_begin_cpu_id_exit = Param.Int(-1, - "work started on specific id, now exit simulation") - work_begin_ckpt_count = Param.Counter(0, - "create checkpoint when work items begin count value is reached") - work_begin_exit_count = Param.Counter(0, - "exit simulation when work items begin count value is reached") - work_end_ckpt_count = Param.Counter(0, - "create checkpoint when work items end count value is reached") - work_end_exit_count = Param.Counter(0, - "exit simulation when work items end count value is reached") - work_cpus_ckpt_count = Param.Counter(0, - "create checkpoint when active cpu count value is reached") + work_begin_cpu_id_exit = Param.Int( + -1, "work started on specific id, now exit simulation" + ) + work_begin_ckpt_count = Param.Counter( + 0, "create checkpoint when work items begin count value is reached" + ) + work_begin_exit_count = Param.Counter( + 0, "exit simulation when work items begin count value is reached" + ) + work_end_ckpt_count = Param.Counter( + 0, "create checkpoint when work items end count value is reached" + ) + work_end_exit_count = Param.Counter( + 0, "exit simulation when work items end count value is reached" + ) + work_cpus_ckpt_count = Param.Counter( + 0, "create checkpoint when active cpu count value is reached" + ) workload = Param.Workload(StubWorkload(), "Workload to run on this system") init_param = Param.UInt64(0, 
"numerical value to pass into simulator") readfile = Param.String("", "file to read startup script from") symbolfile = Param.String("", "file to get the symbols from") - multi_thread = Param.Bool(False, - "Supports multi-threaded CPUs? Impacts Thread/Context IDs") + multi_thread = Param.Bool( + False, "Supports multi-threaded CPUs? Impacts Thread/Context IDs" + ) # Dynamic voltage and frequency handler for the system, disabled by default # Provide list of domains that need to be controlled by the handler @@ -126,5 +150,8 @@ class System(SimObject): # SE mode doesn't use the ISA System subclasses, and so we need to set an # ISA specific value in this class directly. - m5ops_base = Param.Addr(0, "Base of the 64KiB PA range used for " - "memory-mapped m5ops. Set to 0 to disable.") + m5ops_base = Param.Addr( + 0, + "Base of the 64KiB PA range used for " + "memory-mapped m5ops. Set to 0 to disable.", + ) diff --git a/src/sim/TickedObject.py b/src/sim/TickedObject.py index f6488df11e..42233a0b4b 100644 --- a/src/sim/TickedObject.py +++ b/src/sim/TickedObject.py @@ -35,8 +35,9 @@ from m5.objects.ClockedObject import ClockedObject + class TickedObject(ClockedObject): - type = 'TickedObject' + type = "TickedObject" abstract = True cxx_header = "sim/ticked_object.hh" - cxx_class = 'gem5::TickedObject' + cxx_class = "gem5::TickedObject" diff --git a/src/sim/VoltageDomain.py b/src/sim/VoltageDomain.py index 4909e01b8f..79116edfd3 100644 --- a/src/sim/VoltageDomain.py +++ b/src/sim/VoltageDomain.py @@ -36,10 +36,11 @@ from m5.SimObject import SimObject from m5.params import * + class VoltageDomain(SimObject): - type = 'VoltageDomain' + type = "VoltageDomain" cxx_header = "sim/voltage_domain.hh" - cxx_class = 'gem5::VoltageDomain' + cxx_class = "gem5::VoltageDomain" # Single or list of voltages for the voltage domain. If only a single # voltage is specified, it is used for all different frequencies. 
@@ -47,4 +48,4 @@ class VoltageDomain(SimObject): # domain (src/sim/ClockDomain.py) must match. Voltages must be specified in # descending order. We use a default voltage of 1V to avoid forcing users to # set it even if they are not interested in using the functionality - voltage = VectorParam.Voltage('1V', "Operational voltage(s)") + voltage = VectorParam.Voltage("1V", "Operational voltage(s)") diff --git a/src/sim/Workload.py b/src/sim/Workload.py index 52bda94ff2..f5139e1dd8 100644 --- a/src/sim/Workload.py +++ b/src/sim/Workload.py @@ -24,58 +24,80 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from m5.params import * -from m5.SimObject import SimObject +from m5.SimObject import SimObject, cxxMethod from m5.objects.SimpleMemory import * + class Workload(SimObject): - type = 'Workload' + type = "Workload" cxx_header = "sim/workload.hh" - cxx_class = 'gem5::Workload' + cxx_class = "gem5::Workload" abstract = True - wait_for_remote_gdb = Param.Bool(False, - "Wait for a remote GDB connection"); + wait_for_remote_gdb = Param.Bool(False, "Wait for a remote GDB connection") + remote_gdb_port = Param.Int( + 7000, "Default port number used for remote GDB connection" + ) + + @cxxMethod + def sendToGdb(self, message): + """send a message to the GDB client + Args: + message (str): message to send + """ + pass + class StubWorkload(Workload): - type = 'StubWorkload' + type = "StubWorkload" cxx_header = "sim/workload.hh" - cxx_class = 'gem5::StubWorkload' + cxx_class = "gem5::StubWorkload" + + entry = Param.Addr(0, "Dummy entry point for this workload.") + byte_order = Param.ByteOrder( + "little", "Dummy byte order for this workload." 
+ ) - entry = Param.Addr(0, 'Dummy entry point for this workload.') - byte_order = Param.ByteOrder('little', - 'Dummy byte order for this workload.') class KernelWorkload(Workload): - type = 'KernelWorkload' + type = "KernelWorkload" cxx_header = "sim/kernel_workload.hh" - cxx_class = 'gem5::KernelWorkload' + cxx_class = "gem5::KernelWorkload" object_file = Param.String("", "File that contains the kernel code") extras = VectorParam.String([], "Additional object files to load") - extras_addrs = VectorParam.Addr([], - "Load addresses for additional object files") + extras_addrs = VectorParam.Addr( + [], "Load addresses for additional object files" + ) - addr_check = Param.Bool(True, - "whether to bounds check kernel addresses (disable for baremetal)") - load_addr_mask = Param.UInt64(0xffffffffffffffff, - "Mask to apply to kernel addresses. If zero, " - "auto-calculated to be the most restrictive.") + addr_check = Param.Bool( + True, + "whether to bounds check kernel addresses (disable for baremetal)", + ) + load_addr_mask = Param.UInt64( + 0xFFFFFFFFFFFFFFFF, + "Mask to apply to kernel addresses. 
If zero, " + "auto-calculated to be the most restrictive.", + ) load_addr_offset = Param.UInt64(0, "Address to offset the kernel with") command_line = Param.String("a", "boot flags to pass to the kernel") + class SEWorkloadMeta(type(Workload)): all_se_workload_classes = [] + def __new__(mcls, name, bases, dct): cls = super().__new__(mcls, name, bases, dct) SEWorkloadMeta.all_se_workload_classes.append(cls) return cls + class SEWorkload(Workload, metaclass=SEWorkloadMeta): - type = 'SEWorkload' + type = "SEWorkload" cxx_header = "sim/se_workload.hh" - cxx_class = 'gem5::SEWorkload' + cxx_class = "gem5::SEWorkload" abstract = True @classmethod @@ -84,18 +106,23 @@ class SEWorkload(Workload, metaclass=SEWorkloadMeta): @classmethod def find_compatible(cls, path): - '''List the SE workloads compatible with the binary at path''' + """List the SE workloads compatible with the binary at path""" from _m5 import object_file + obj = object_file.create(path) - options = list(filter(lambda wld: wld._is_compatible_with(obj), - SEWorkloadMeta.all_se_workload_classes)) + options = list( + filter( + lambda wld: wld._is_compatible_with(obj), + SEWorkloadMeta.all_se_workload_classes, + ) + ) return options @classmethod def init_compatible(cls, path, *args, **kwargs): - '''Construct the only SE workload compatible with the binary at path''' + """Construct the only SE workload compatible with the binary at path""" options = SEWorkload.find_compatible(path) diff --git a/src/sim/debug.cc b/src/sim/debug.cc index 85bfb97ab3..55b28f7d6e 100644 --- a/src/sim/debug.cc +++ b/src/sim/debug.cc @@ -114,20 +114,4 @@ eventqDump() } } -int remote_gdb_base_port = 7000; - -int -getRemoteGDBPort() -{ - return remote_gdb_base_port; -} - -// Set remote GDB base port. 0 means disable remote GDB. -// Callable from python. 
-void -setRemoteGDBPort(int port) -{ - remote_gdb_base_port = port; -} - } // namespace gem5 diff --git a/src/sim/debug.hh b/src/sim/debug.hh index ed593040c8..e1c537dc86 100644 --- a/src/sim/debug.hh +++ b/src/sim/debug.hh @@ -60,10 +60,6 @@ void takeCheckpoint(Tick when); */ void eventqDump(); -int getRemoteGDBPort(); -// Remote gdb base port. 0 disables remote gdb. -void setRemoteGDBPort(int port); - } // namespace gem5 #endif // __SIM_DEBUG_HH__ diff --git a/src/sim/drain.hh b/src/sim/drain.hh index eb6712a5ae..55c22eb77d 100644 --- a/src/sim/drain.hh +++ b/src/sim/drain.hh @@ -118,11 +118,11 @@ class DrainManager /** * Run state fixups before a checkpoint restore operation. * - * This is called before restoring the checkpoint and to make + * This is called before restoring the checkpoint and to make * sure that everything has been set to drained. * - * When restoring from a checkpoint, this function should be called - * first before calling the resume() function. And also before + * When restoring from a checkpoint, this function should be called + * first before calling the resume() function. And also before * calling loadstate() on any object. 
* * The drain state of an object isn't stored in a checkpoint since diff --git a/src/sim/insttracer.hh b/src/sim/insttracer.hh index 6389d0f37a..9c9bca7692 100644 --- a/src/sim/insttracer.hh +++ b/src/sim/insttracer.hh @@ -44,9 +44,8 @@ #include #include "arch/generic/pcstate.hh" -#include "arch/vecregs.hh" #include "base/types.hh" -#include "config/the_isa.hh" +#include "cpu/inst_res.hh" #include "cpu/inst_seq.hh" #include "cpu/static_inst.hh" #include "sim/sim_object.hh" @@ -56,7 +55,7 @@ namespace gem5 class ThreadContext; -namespace Trace { +namespace trace { class InstRecord { @@ -97,13 +96,14 @@ class InstRecord * @TODO fix this and record all destintations that an instruction writes * @see data_status */ - union + union Data { - uint64_t as_int; - double as_double; - TheISA::VecRegContainer* as_vec; - TheISA::VecPredRegContainer* as_pred; - } data = {0}; + ~Data() {} + Data() {} + uint64_t asInt = 0; + double asDouble; + InstResult asReg; + } data; /** @defgroup fetch_seq * This records the serial number that the instruction was fetched in. @@ -128,9 +128,8 @@ class InstRecord DataInt32 = 4, DataInt64 = 8, DataDouble = 3, - DataVec = 5, - DataVecPred = 6 - } data_status = DataInvalid; + DataReg = 5 + } dataStatus = DataInvalid; /** @ingroup memory * Are the memory fields in the record valid? 
@@ -166,13 +165,8 @@ class InstRecord virtual ~InstRecord() { - if (data_status == DataVec) { - assert(data.as_vec); - delete data.as_vec; - } else if (data_status == DataVecPred) { - assert(data.as_pred); - delete data.as_pred; - } + if (dataStatus == DataReg) + data.asReg.~InstResult(); } void setWhen(Tick new_when) { when = new_when; } @@ -189,8 +183,8 @@ class InstRecord void setData(std::array d) { - data.as_int = d[0]; - data_status = (DataStatus)sizeof(T); + data.asInt = d[0]; + dataStatus = (DataStatus)sizeof(T); static_assert(sizeof(T) == DataInt8 || sizeof(T) == DataInt16 || sizeof(T) == DataInt32 || sizeof(T) == DataInt64, "Type T has an unrecognized size."); @@ -199,26 +193,26 @@ class InstRecord void setData(uint64_t d) { - data.as_int = d; - data_status = DataInt64; + data.asInt = d; + dataStatus = DataInt64; } void setData(uint32_t d) { - data.as_int = d; - data_status = DataInt32; + data.asInt = d; + dataStatus = DataInt32; } void setData(uint16_t d) { - data.as_int = d; - data_status = DataInt16; + data.asInt = d; + dataStatus = DataInt16; } void setData(uint8_t d) { - data.as_int = d; - data_status = DataInt8; + data.asInt = d; + dataStatus = DataInt8; } void setData(int64_t d) { setData((uint64_t)d); } @@ -229,22 +223,22 @@ class InstRecord void setData(double d) { - data.as_double = d; - data_status = DataDouble; + data.asDouble = d; + dataStatus = DataDouble; } void - setData(TheISA::VecRegContainer& d) + setData(const RegClass ®_class, RegVal val) { - data.as_vec = new TheISA::VecRegContainer(d); - data_status = DataVec; + new(&data.asReg) InstResult(reg_class, val); + dataStatus = DataReg; } void - setData(TheISA::VecPredRegContainer& d) + setData(const RegClass ®_class, const void *val) { - data.as_pred = new TheISA::VecPredRegContainer(d); - data_status = DataVecPred; + new(&data.asReg) InstResult(reg_class, val); + dataStatus = DataReg; } void @@ -279,9 +273,9 @@ class InstRecord unsigned getFlags() const { return flags; } bool 
getMemValid() const { return mem_valid; } - uint64_t getIntData() const { return data.as_int; } - double getFloatData() const { return data.as_double; } - int getDataStatus() const { return data_status; } + uint64_t getIntData() const { return data.asInt; } + double getFloatData() const { return data.asDouble; } + int getDataStatus() const { return dataStatus; } InstSeqNum getFetchSeq() const { return fetch_seq; } bool getFetchSeqValid() const { return fetch_seq_valid; } @@ -305,7 +299,7 @@ class InstTracer : public SimObject const StaticInstPtr macroStaticInst=nullptr) = 0; }; -} // namespace Trace +} // namespace trace } // namespace gem5 #endif // __INSTRECORD_HH__ diff --git a/src/sim/power/MathExprPowerModel.py b/src/sim/power/MathExprPowerModel.py index 7f9f35e658..755b3953ee 100644 --- a/src/sim/power/MathExprPowerModel.py +++ b/src/sim/power/MathExprPowerModel.py @@ -39,9 +39,9 @@ from m5.objects.PowerModelState import PowerModelState # Represents a power model for a simobj class MathExprPowerModel(PowerModelState): - type = 'MathExprPowerModel' + type = "MathExprPowerModel" cxx_header = "sim/power/mathexpr_powermodel.hh" - cxx_class = 'gem5::MathExprPowerModel' + cxx_class = "gem5::MathExprPowerModel" # Equations for dynamic and static power in Watts # Equations may use gem5 stats ie. 
"1.1*ipc + 2.3*l2_cache.overall_misses" diff --git a/src/sim/power/PowerModel.py b/src/sim/power/PowerModel.py index 2eaafb833a..8dba29795d 100644 --- a/src/sim/power/PowerModel.py +++ b/src/sim/power/PowerModel.py @@ -38,15 +38,17 @@ from m5.params import * from m5.proxy import Parent # Enum for a type of power model -class PMType(Enum) : vals = ['All', 'Static', 'Dynamic'] +class PMType(Enum): + vals = ["All", "Static", "Dynamic"] + # Represents a power model for a simobj # The model itself is also a SimObject so we can make use some # nice features available such as Parent.any class PowerModel(SimObject): - type = 'PowerModel' + type = "PowerModel" cxx_header = "sim/power/power_model.hh" - cxx_class = 'gem5::PowerModel' + cxx_class = "gem5::PowerModel" cxx_exports = [ PyBindMethod("getDynamicPower"), diff --git a/src/sim/power/PowerModelState.py b/src/sim/power/PowerModelState.py index 75d517b7dd..081cd652d2 100644 --- a/src/sim/power/PowerModelState.py +++ b/src/sim/power/PowerModelState.py @@ -38,15 +38,12 @@ from m5.params import * # Represents a power model for a simobj class PowerModelState(SimObject): - type = 'PowerModelState' + type = "PowerModelState" cxx_header = "sim/power/power_model.hh" - cxx_class = 'gem5::PowerModelState' + cxx_class = "gem5::PowerModelState" abstract = True cxx_exports = [ PyBindMethod("getDynamicPower"), PyBindMethod("getStaticPower"), ] - - - diff --git a/src/sim/power/ThermalDomain.py b/src/sim/power/ThermalDomain.py index 35ab31e8af..ddb8d4455b 100644 --- a/src/sim/power/ThermalDomain.py +++ b/src/sim/power/ThermalDomain.py @@ -38,13 +38,11 @@ from m5.params import * # Represents a group of simobj which produce heat class ThermalDomain(SimObject): - type = 'ThermalDomain' + type = "ThermalDomain" cxx_header = "sim/power/thermal_domain.hh" - cxx_class = 'gem5::ThermalDomain' + cxx_class = "gem5::ThermalDomain" - cxx_exports = [ - PyBindMethod("setNode"), - ] + cxx_exports = [PyBindMethod("setNode")] # Static temperature which 
may change over time initial_temperature = Param.Temperature("25.0C", "Initial temperature") diff --git a/src/sim/power/ThermalModel.py b/src/sim/power/ThermalModel.py index 6a01ba2d76..a3d4a804cc 100644 --- a/src/sim/power/ThermalModel.py +++ b/src/sim/power/ThermalModel.py @@ -42,43 +42,44 @@ from m5.objects import ThermalDomain # Represents a thermal node class ThermalNode(SimObject): - type = 'ThermalNode' + type = "ThermalNode" cxx_header = "sim/power/thermal_node.hh" - cxx_class = 'gem5::ThermalNode' + cxx_class = "gem5::ThermalNode" + # Represents a thermal resistor class ThermalResistor(SimObject): - type = 'ThermalResistor' + type = "ThermalResistor" cxx_header = "sim/power/thermal_model.hh" - cxx_class = 'gem5::ThermalResistor' + cxx_class = "gem5::ThermalResistor" - cxx_exports = [ - PyBindMethod("setNodes"), - ] + cxx_exports = [PyBindMethod("setNodes")] + + resistance = Param.Float( + 1.0, "Thermal resistance, expressed in Kelvin per Watt" + ) - resistance = Param.Float(1.0, "Thermal resistance, expressed in Kelvin per Watt") # Represents a thermal capacitor class ThermalCapacitor(SimObject): - type = 'ThermalCapacitor' + type = "ThermalCapacitor" cxx_header = "sim/power/thermal_model.hh" - cxx_class = 'gem5::ThermalCapacitor' + cxx_class = "gem5::ThermalCapacitor" - cxx_exports = [ - PyBindMethod("setNodes"), - ] + cxx_exports = [PyBindMethod("setNodes")] + + capacitance = Param.Float( + 1.0, "Thermal capacitance, expressed in Joules per Kelvin" + ) - capacitance = Param.Float(1.0, "Thermal capacitance, expressed in Joules per Kelvin") # Represents a fixed temperature node (ie. 
air) class ThermalReference(SimObject, object): - type = 'ThermalReference' + type = "ThermalReference" cxx_header = "sim/power/thermal_model.hh" - cxx_class = 'gem5::ThermalReference' + cxx_class = "gem5::ThermalReference" - cxx_exports = [ - PyBindMethod("setNode"), - ] + cxx_exports = [PyBindMethod("setNode")] # Static temperature which may change over time temperature = Param.Temperature("25.0C", "Operational temperature") @@ -86,9 +87,9 @@ class ThermalReference(SimObject, object): # Represents a thermal capacitor class ThermalModel(ClockedObject): - type = 'ThermalModel' + type = "ThermalModel" cxx_header = "sim/power/thermal_model.hh" - cxx_class = 'gem5::ThermalModel' + cxx_class = "gem5::ThermalModel" cxx_exports = [ PyBindMethod("addCapacitor"), @@ -99,14 +100,21 @@ class ThermalModel(ClockedObject): PyBindMethod("doStep"), ] - step = Param.Float(0.01, "Simulation step (in seconds) for thermal simulation") + step = Param.Float( + 0.01, "Simulation step (in seconds) for thermal simulation" + ) def populate(self): - if not hasattr(self,"_capacitors"): self._capacitors = [] - if not hasattr(self,"_resistors"): self._resistors = [] - if not hasattr(self,"_domains"): self._domains = [] - if not hasattr(self,"_references"): self._references = [] - if not hasattr(self,"_nodes"): self._nodes = [] + if not hasattr(self, "_capacitors"): + self._capacitors = [] + if not hasattr(self, "_resistors"): + self._resistors = [] + if not hasattr(self, "_domains"): + self._domains = [] + if not hasattr(self, "_references"): + self._references = [] + if not hasattr(self, "_nodes"): + self._nodes = [] def init(self): self.populate() @@ -120,11 +128,15 @@ class ThermalModel(ClockedObject): self.getCCObject().addDomain(dom.getCCObject()) for cap, node1, node2 in self._capacitors: - cap.getCCObject().setNodes(node1.getCCObject(), node2.getCCObject()) + cap.getCCObject().setNodes( + node1.getCCObject(), node2.getCCObject() + ) self.getCCObject().addCapacitor(cap.getCCObject()) for 
res, node1, node2 in self._resistors: - res.getCCObject().setNodes(node1.getCCObject(), node2.getCCObject()) + res.getCCObject().setNodes( + node1.getCCObject(), node2.getCCObject() + ) self.getCCObject().addResistor(res.getCCObject()) for node in self._nodes: @@ -132,25 +144,25 @@ class ThermalModel(ClockedObject): def addCapacitor(self, cap, node1, node2): self.populate() - self._capacitors.append( (cap, node1, node2) ) + self._capacitors.append((cap, node1, node2)) self._parent.thermal_components.append(cap) - self.addNodes(node1,node2) + self.addNodes(node1, node2) def addResistor(self, res, node1, node2): self.populate() - self._resistors.append( (res, node1, node2) ) + self._resistors.append((res, node1, node2)) self._parent.thermal_components.append(res) - self.addNodes(node1,node2) + self.addNodes(node1, node2) def addReference(self, ref, node): self.populate() - self._references.append( (ref, node) ) + self._references.append((ref, node)) self._parent.thermal_components.append(ref) self.addNodes(node) def addDomain(self, dom, node): self.populate() - self._domains.append( (dom, node) ) + self._domains.append((dom, node)) self.addNodes(node) def addNodes(self, *nodes): diff --git a/src/sim/probe/Probe.py b/src/sim/probe/Probe.py index 00de928993..006149a0b9 100644 --- a/src/sim/probe/Probe.py +++ b/src/sim/probe/Probe.py @@ -39,9 +39,10 @@ from m5.SimObject import SimObject from m5.params import * from m5.proxy import * + class ProbeListenerObject(SimObject): - type = 'ProbeListenerObject' - cxx_header = 'sim/probe/probe.hh' - cxx_class = 'gem5::ProbeListenerObject' + type = "ProbeListenerObject" + cxx_header = "sim/probe/probe.hh" + cxx_class = "gem5::ProbeListenerObject" manager = Param.SimObject(Parent.any, "ProbeManager") diff --git a/src/sim/process.cc b/src/sim/process.cc index 97130bd9d3..a348b450b0 100644 --- a/src/sim/process.cc +++ b/src/sim/process.cc @@ -520,7 +520,7 @@ Process::absolutePath(const std::string &filename, bool host_filesystem) } 
// Add a trailing '/' if the current working directory did not have one. - normalize(path_base); + path_base = normalize(path_base); // Append the filename onto the current working path. auto absolute_path = path_base + filename; diff --git a/src/sim/pseudo_inst.cc b/src/sim/pseudo_inst.cc index b3c1f4edb5..28b5619a16 100644 --- a/src/sim/pseudo_inst.cc +++ b/src/sim/pseudo_inst.cc @@ -262,8 +262,11 @@ addsymbol(ThreadContext *tc, Addr addr, Addr symbolAddr) addr, symbolAddr); std::string symbol; - (FullSystem ? TranslatingPortProxy(tc) : SETranslatingPortProxy(tc)). - readString(symbol, symbolAddr); + TranslatingPortProxy fs_proxy(tc); + SETranslatingPortProxy se_proxy(tc); + PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy; + + virt_proxy.readString(symbol, symbolAddr); DPRINTF(Loader, "Loaded symbol: %s @ %#llx\n", symbol, addr); @@ -393,8 +396,11 @@ readfile(ThreadContext *tc, Addr vaddr, uint64_t len, uint64_t offset) } close(fd); - (FullSystem ? TranslatingPortProxy(tc) : SETranslatingPortProxy(tc)). - writeBlob(vaddr, buf, result); + TranslatingPortProxy fs_proxy(tc); + SETranslatingPortProxy se_proxy(tc); + PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy; + + virt_proxy.writeBlob(vaddr, buf, result); delete [] buf; return result; } @@ -408,8 +414,11 @@ writefile(ThreadContext *tc, Addr vaddr, uint64_t len, uint64_t offset, // copy out target filename std::string filename; - (FullSystem ? TranslatingPortProxy(tc) : SETranslatingPortProxy(tc)). - readString(filename, filename_addr); + TranslatingPortProxy fs_proxy(tc); + SETranslatingPortProxy se_proxy(tc); + PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy; + + virt_proxy.readString(filename, filename_addr); OutputStream *out; if (offset == 0) { @@ -434,8 +443,8 @@ writefile(ThreadContext *tc, Addr vaddr, uint64_t len, uint64_t offset, // copy out data and write to file char *buf = new char[len]; - (FullSystem ? TranslatingPortProxy(tc) : SETranslatingPortProxy(tc)). 
- readBlob(vaddr, buf, len); + + virt_proxy.readBlob(vaddr, buf, len); os->write(buf, len); if (os->fail() || os->bad()) panic("Error while doing writefile!\n"); diff --git a/src/sim/serialize_handlers.test.cc b/src/sim/serialize_handlers.test.cc index a844b7ab17..10b09bf608 100644 --- a/src/sim/serialize_handlers.test.cc +++ b/src/sim/serialize_handlers.test.cc @@ -224,9 +224,15 @@ TEST(SerializeTest, ParseParamChar) EXPECT_FALSE(parser.parse("false", value)); // 8-bit values - EXPECT_FALSE(parser.parse("255", value)); - EXPECT_TRUE(parser.parse("-128", value)); - EXPECT_EQ(char(-128), value); + if constexpr (std::is_signed_v) { + EXPECT_FALSE(parser.parse("255", value)); + EXPECT_TRUE(parser.parse("-128", value)); + EXPECT_EQ(char(-128), value); + } else { + EXPECT_FALSE(parser.parse("256", value)); + EXPECT_TRUE(parser.parse("255", value)); + EXPECT_EQ(char(255), value); + } // 16-bit values EXPECT_FALSE(parser.parse("1000", value)); diff --git a/src/sim/simulate.cc b/src/sim/simulate.cc index c5d07942ef..95b2c05618 100644 --- a/src/sim/simulate.cc +++ b/src/sim/simulate.cc @@ -180,16 +180,14 @@ struct DescheduleDeleter }; /** Simulate for num_cycles additional cycles. If num_cycles is -1 - * (the default), do not limit simulation; some other event must - * terminate the loop. Exported to Python. + * (the default), we simulate to MAX_TICKS unless the max ticks has been set + * via the 'set_max_tick' function prior. This function is exported to Python. * @return The SimLoopExitEvent that caused the loop to exit. */ GlobalSimLoopExitEvent * simulate(Tick num_cycles) { std::unique_ptr quantum_event; - const Tick exit_tick = num_cycles < MaxTick - curTick() ? - curTick() + num_cycles : MaxTick; inform("Entering event queue @ %d. 
Starting simulation...\n", curTick()); @@ -197,11 +195,22 @@ simulate(Tick num_cycles) simulatorThreads.reset(new SimulatorThreads(numMainEventQueues)); if (!simulate_limit_event) { - simulate_limit_event = new GlobalSimLoopExitEvent( - mainEventQueue[0]->getCurTick(), - "simulate() limit reached", 0); + // If the simulate_limit_event is not set, we set it to MaxTick. + set_max_tick(MaxTick); + } + + if (num_cycles != -1) { + // If the user has specified an exit event after X cycles, do so here. + // Note: This will override any prior set max_tick behaviour (such as + // that above when it is set to MAxTick). + const Tick max_tick = num_cycles < MaxTick - curTick() ? + curTick() + num_cycles : MaxTick; + + // This is kept to `set_max_tick` instead of `schedule_tick_exit` to + // preserve backwards functionality. It may be better to deprecate this + // behaviour at some point in favor of `schedule_tick_exit`. + set_max_tick(max_tick); } - simulate_limit_event->reschedule(exit_tick); if (numMainEventQueues > 1) { fatal_if(simQuantum == 0, @@ -231,6 +240,29 @@ simulate(Tick num_cycles) return global_exit_event; } +void set_max_tick(Tick tick) +{ + if (!simulate_limit_event) { + simulate_limit_event = new GlobalSimLoopExitEvent( + mainEventQueue[0]->getCurTick(), + "simulate() limit reached", 0); + } + simulate_limit_event->reschedule(tick); +} + + +Tick get_max_tick() +{ + if (!simulate_limit_event) { + /* If the GlobalSimLoopExitEvent has not been setup, the maximum tick + * is `MaxTick` as declared in "src/base/types.hh". 
+ */ + return MaxTick; + } + + return simulate_limit_event->when(); +} + void terminateEventQueueThreads() { diff --git a/src/sim/simulate.hh b/src/sim/simulate.hh index 5ef499541f..eacf67cec2 100644 --- a/src/sim/simulate.hh +++ b/src/sim/simulate.hh @@ -45,7 +45,27 @@ namespace gem5 class GlobalSimLoopExitEvent; -GlobalSimLoopExitEvent *simulate(Tick num_cycles = MaxTick); +GlobalSimLoopExitEvent *simulate(Tick num_cycles = -1); + +/** + * @brief Set the maximum tick. + * + * This function will schedule, or reschedule, the maximum tick for the + * simulation. + * + * This will setup the GlobalSimLoopExitEvent if it does not already exist. + * + * @param tick The maximum tick. + */ +void set_max_tick(Tick tick); + +/** + * @brief Get the maximum simulation tick. + * + * + * @returns The maximum simulation tick. + */ +Tick get_max_tick(); /** * Terminate helper threads when running in parallel mode. diff --git a/src/sim/syscall_abi.hh b/src/sim/syscall_abi.hh index d50286fb79..b91dc8ea95 100644 --- a/src/sim/syscall_abi.hh +++ b/src/sim/syscall_abi.hh @@ -67,10 +67,10 @@ struct GenericSyscallABI32 : public GenericSyscallABI // Read two registers and merge them into one value. 
static uint64_t - mergeRegs(ThreadContext *tc, RegIndex low_idx, RegIndex high_idx) + mergeRegs(ThreadContext *tc, const RegId &low_id, const RegId &high_id) { - RegVal low = tc->readIntReg(low_idx); - RegVal high = tc->readIntReg(high_idx); + RegVal low = tc->getReg(low_id); + RegVal high = tc->getReg(high_id); return insertBits(low, 63, 32, high); } }; @@ -91,7 +91,7 @@ struct Argument= ABI::ArgumentRegs.size(), "Ran out of syscall argument registers."); - return tc->readIntReg(ABI::ArgumentRegs[state++]); + return tc->getReg(ABI::ArgumentRegs[state++]); } }; @@ -107,7 +107,7 @@ struct Argument= ABI::ArgumentRegs.size(), "Ran out of syscall argument registers."); - return bits(tc->readIntReg(ABI::ArgumentRegs[state++]), 31, 0); + return bits(tc->getReg(ABI::ArgumentRegs[state++]), 31, 0); } }; diff --git a/src/sim/syscall_emul.hh b/src/sim/syscall_emul.hh index 59a97d938d..b4550dd86b 100644 --- a/src/sim/syscall_emul.hh +++ b/src/sim/syscall_emul.hh @@ -93,7 +93,6 @@ #include "base/random.hh" #include "base/trace.hh" #include "base/types.hh" -#include "config/the_isa.hh" #include "cpu/base.hh" #include "cpu/thread_context.hh" #include "kern/linux/linux.hh" @@ -1602,9 +1601,12 @@ statfsFunc(SyscallDesc *desc, ThreadContext *tc, template SyscallReturn -cloneFunc(SyscallDesc *desc, ThreadContext *tc, RegVal flags, RegVal newStack, +doClone(SyscallDesc *desc, ThreadContext *tc, RegVal flags, RegVal newStack, VPtr<> ptidPtr, VPtr<> ctidPtr, VPtr<> tlsPtr) { + DPRINTF(SyscallVerbose, "Doing clone. 
pid: %#llx, ctid: %#llx, tls: %#llx" + " flags: %#llx, stack: %#llx\n", + ptidPtr.addr(), ctidPtr.addr(), tlsPtr.addr(), flags, newStack); auto p = tc->getProcessPtr(); if (((flags & OS::TGT_CLONE_SIGHAND)&& !(flags & OS::TGT_CLONE_VM)) || @@ -1714,6 +1716,30 @@ cloneFunc(SyscallDesc *desc, ThreadContext *tc, RegVal flags, RegVal newStack, return cp->pid(); } +template +SyscallReturn +clone3Func(SyscallDesc *desc, ThreadContext *tc, + VPtr cl_args, RegVal size) +{ + VPtr ptidPtr((Addr)cl_args->parent_tid, tc); + VPtr ctidPtr((Addr)cl_args->child_tid, tc); + VPtr tlsPtr((Addr)cl_args->tls, tc); + // Clone3 gives the stack as the *lowest* address, but clone/__clone2 + // expects the stack parameter to be the actual stack pointer + uint64_t new_stack = cl_args->stack + cl_args->stack_size; + uint64_t flags = cl_args->flags; + + return doClone(desc, tc, flags, new_stack, ptidPtr, ctidPtr, tlsPtr); +} + +template +SyscallReturn +cloneFunc(SyscallDesc *desc, ThreadContext *tc, RegVal flags, RegVal newStack, + VPtr<> ptidPtr, VPtr<> ctidPtr, VPtr<> tlsPtr) +{ + return doClone(desc, tc, flags, newStack, ptidPtr, ctidPtr, tlsPtr); +} + template SyscallReturn cloneBackwardsFunc(SyscallDesc *desc, ThreadContext *tc, RegVal flags, diff --git a/src/sim/system.cc b/src/sim/system.cc index b7fba8a356..5f67c4d2b8 100644 --- a/src/sim/system.cc +++ b/src/sim/system.cc @@ -49,10 +49,7 @@ #include "base/loader/symtab.hh" #include "base/str.hh" #include "base/trace.hh" -#include "config/the_isa.hh" -#if !IS_NULL_ISA #include "cpu/base.hh" -#endif #include "cpu/thread_context.hh" #include "debug/Loader.hh" #include "debug/Quiesce.hh" @@ -73,10 +70,8 @@ std::vector System::systemList; void System::Threads::Thread::resume() { -# if !IS_NULL_ISA DPRINTFS(Quiesce, context->getCpuPtr(), "activating\n"); context->activate(); -# endif } std::string @@ -114,13 +109,11 @@ System::Threads::replace(ThreadContext *tc, ContextID id) { auto &t = thread(id); panic_if(!t.context, "Can't replace a 
context which doesn't exist."); -# if !IS_NULL_ISA if (t.resumeEvent->scheduled()) { Tick when = t.resumeEvent->when(); t.context->getCpuPtr()->deschedule(t.resumeEvent); tc->getCpuPtr()->schedule(t.resumeEvent, when); } -# endif t.context = tc; } @@ -152,17 +145,14 @@ void System::Threads::quiesce(ContextID id) { auto &t = thread(id); -# if !IS_NULL_ISA [[maybe_unused]] BaseCPU *cpu = t.context->getCpuPtr(); DPRINTFS(Quiesce, cpu, "quiesce()\n"); -# endif t.quiesce(); } void System::Threads::quiesceTick(ContextID id, Tick when) { -# if !IS_NULL_ISA auto &t = thread(id); BaseCPU *cpu = t.context->getCpuPtr(); @@ -170,7 +160,6 @@ System::Threads::quiesceTick(ContextID id, Tick when) t.quiesce(); cpu->reschedule(t.resumeEvent, when, true); -# endif } int System::numSystemsRunning = 0; @@ -361,9 +350,7 @@ System::unserialize(CheckpointIn &cp) !when || !t.resumeEvent) { continue; } -# if !IS_NULL_ISA t.context->getCpuPtr()->schedule(t.resumeEvent, when); -# endif } // also unserialize the memories in the system diff --git a/src/sim/system.hh b/src/sim/system.hh index bb855e0513..7738d561c3 100644 --- a/src/sim/system.hh +++ b/src/sim/system.hh @@ -51,7 +51,6 @@ #include "base/loader/memory_image.hh" #include "base/loader/symtab.hh" #include "base/statistics.hh" -#include "config/the_isa.hh" #include "cpu/pc_event.hh" #include "enums/MemoryMode.hh" #include "mem/mem_requestor.hh" diff --git a/src/sim/workload.cc b/src/sim/workload.cc index af10cb851d..ca51bbdb73 100644 --- a/src/sim/workload.cc +++ b/src/sim/workload.cc @@ -28,7 +28,6 @@ #include "sim/workload.hh" #include "base/remote_gdb.hh" -#include "config/the_isa.hh" #include "cpu/thread_context.hh" #include "sim/debug.hh" @@ -44,10 +43,8 @@ Workload::registerThreadContext(ThreadContext *tc) panic_if(!success, "Failed to add thread context %d.", tc->contextId()); -# if !IS_NULL_ISA if (gdb) gdb->addThreadContext(tc); -# endif } void @@ -66,10 +63,8 @@ Workload::replaceThreadContext(ThreadContext *tc) 
panic_if(!success, "Failed to insert replacement thread context %d.", id); -# if !IS_NULL_ISA if (gdb) gdb->replaceThreadContext(tc); -# endif return; } @@ -79,21 +74,26 @@ Workload::replaceThreadContext(ThreadContext *tc) bool Workload::trapToGdb(int signal, ContextID ctx_id) { -# if !IS_NULL_ISA if (gdb && gdb->isAttached()) { gdb->trap(ctx_id, signal); return true; } -# endif return false; }; +bool +Workload::sendToGdb(std::string msg){ + if (gdb) + return gdb->sendMessage(msg); + else + return false; + } + void Workload::startup() { SimObject::startup(); -# if !IS_NULL_ISA // Now that we're about to start simulation, wait for GDB connections if // requested. if (gdb && waitForRemoteGDB) { @@ -101,7 +101,6 @@ Workload::startup() gdb->port()); gdb->connect(); } -# endif } } // namespace gem5 diff --git a/src/sim/workload.hh b/src/sim/workload.hh index 9b3ef04fb7..f9bb8dba3c 100644 --- a/src/sim/workload.hh +++ b/src/sim/workload.hh @@ -92,6 +92,7 @@ class Workload : public SimObject // Once trapping into GDB is no longer a special case routed through the // system object, this helper can be removed. bool trapToGdb(int signal, ContextID ctx_id); + bool sendToGdb(std::string msg); virtual void registerThreadContext(ThreadContext *tc); virtual void replaceThreadContext(ThreadContext *tc); diff --git a/src/sst/OutgoingRequestBridge.py b/src/sst/OutgoingRequestBridge.py index e688d5e074..06bd5a313e 100644 --- a/src/sst/OutgoingRequestBridge.py +++ b/src/sst/OutgoingRequestBridge.py @@ -27,13 +27,13 @@ from m5.params import * from m5.SimObject import SimObject + class OutgoingRequestBridge(SimObject): - type = 'OutgoingRequestBridge' + type = "OutgoingRequestBridge" cxx_header = "sst/outgoing_request_bridge.hh" - cxx_class = 'gem5::OutgoingRequestBridge' + cxx_class = "gem5::OutgoingRequestBridge" port = ResponsePort("Response Port") physical_address_ranges = VectorParam.AddrRange( - [AddrRange(0x80000000, MaxAddr)], - "Physical address ranges." 
+ [AddrRange(0x80000000, MaxAddr)], "Physical address ranges." ) diff --git a/src/sst/sst_responder_interface.cc b/src/sst/sst_responder_interface.cc index faee1082bf..283c1885c7 100644 --- a/src/sst/sst_responder_interface.cc +++ b/src/sst/sst_responder_interface.cc @@ -33,4 +33,4 @@ SSTResponderInterface::SSTResponderInterface() { } -}; // namespace gem5 \ No newline at end of file +}; // namespace gem5 diff --git a/src/sst/sst_responder_interface.hh b/src/sst/sst_responder_interface.hh index 9010907b84..0e827abfdc 100644 --- a/src/sst/sst_responder_interface.hh +++ b/src/sst/sst_responder_interface.hh @@ -66,4 +66,4 @@ class SSTResponderInterface } // namespace gem5 -#endif // __SST_RESPONDER_INTERFACE_HH__ \ No newline at end of file +#endif // __SST_RESPONDER_INTERFACE_HH__ diff --git a/src/systemc/Tlm.py b/src/systemc/Tlm.py index 4ca6f72ba9..5b43a3b557 100644 --- a/src/systemc/Tlm.py +++ b/src/systemc/Tlm.py @@ -25,11 +25,14 @@ from m5.params import Port, VectorPort + def TLM_TARGET_ROLE(width): - return 'TLM TARGET %d' % width + return "TLM TARGET %d" % width + def TLM_INITIATOR_ROLE(width): - return 'TLM INITIATOR %d' % width + return "TLM INITIATOR %d" % width + class TlmTargetSocket(Port): def __init__(self, width, desc): @@ -39,6 +42,7 @@ class TlmTargetSocket(Port): super().__init__(my_role, desc) + class VectorTlmTargetSocket(VectorPort): def __init__(self, width, desc): my_role = TLM_TARGET_ROLE(width) @@ -47,6 +51,7 @@ class VectorTlmTargetSocket(VectorPort): super().__init__(my_role, desc) + class TlmInitiatorSocket(Port): def __init__(self, width, desc): my_role = TLM_INITIATOR_ROLE(width) @@ -55,6 +60,7 @@ class TlmInitiatorSocket(Port): super().__init__(my_role, desc, is_source=True) + class VectorTlmInitiatorSocket(VectorPort): def __init__(self, width, desc): my_role = TLM_INITIATOR_ROLE(width) diff --git a/src/systemc/core/SystemC.py b/src/systemc/core/SystemC.py index 592b950b67..f1f87a0583 100644 --- a/src/systemc/core/SystemC.py +++ 
b/src/systemc/core/SystemC.py @@ -29,18 +29,19 @@ from m5.SimObject import SimObject, cxxMethod # simulation. It receives gem5 SimObject lifecycle callbacks (init, regStats, # etc.) and manages the lifecycle of the systemc simulation accordingly. class SystemC_Kernel(SimObject): - type = 'SystemC_Kernel' - cxx_class = 'sc_gem5::Kernel' - cxx_header = 'systemc/core/kernel.hh' + type = "SystemC_Kernel" + cxx_class = "sc_gem5::Kernel" + cxx_header = "systemc/core/kernel.hh" + # This class represents systemc sc_object instances in python config files. It # inherits from SimObject in python, but the c++ version, sc_core::sc_object, # doesn't inherit from gem5's c++ SimObject class. class SystemC_ScObject(SimObject): - type = 'SystemC_ScObject' + type = "SystemC_ScObject" abstract = True - cxx_class = 'sc_core::sc_object' - cxx_header = 'systemc/ext/core/sc_object.hh' + cxx_class = "sc_core::sc_object" + cxx_header = "systemc/ext/core/sc_object.hh" # Clear cxx_base to stop the c++ binding code from assuming # sc_core::sc_object inherits from SimObject, even though SystemC_ScObject @@ -50,15 +51,19 @@ class SystemC_ScObject(SimObject): # Hide the cxx_exports from SimObject since we don't inherit from # SimObject on the c++ side and so don't have those methods to call down # into. 
- locals().update({ - method.name: (lambda *a, **k: None) for method in SimObject.cxx_exports - }) + locals().update( + { + method.name: (lambda *a, **k: None) + for method in SimObject.cxx_exports + } + ) + class SystemC_ScModule(SystemC_ScObject): - type = 'SystemC_ScModule' + type = "SystemC_ScModule" abstract = True - cxx_class = 'sc_core::sc_module' - cxx_header = 'systemc/ext/core/sc_module.hh' + cxx_class = "sc_core::sc_module" + cxx_header = "systemc/ext/core/sc_module.hh" @cxxMethod(return_value_policy="reference", cxx_name="gem5_getPort") def getPort(self, if_name, iex): diff --git a/src/systemc/core/sc_spawn.cc b/src/systemc/core/sc_spawn.cc index 31b4f3c647..f582289c9a 100644 --- a/src/systemc/core/sc_spawn.cc +++ b/src/systemc/core/sc_spawn.cc @@ -53,7 +53,7 @@ spawnWork(ProcessFuncWrapper *func, const char *name, if (opts->_dontInitialize) dontInitialize = true; if (opts->_stackSize != -1) - warn("Ignoring request to set stack size.\n"); + warn_once("Ignoring request to set stack size.\n"); } if (!name || name[0] == '\0') { diff --git a/src/systemc/dt/int/sc_nbcommon.inc b/src/systemc/dt/int/sc_nbcommon.inc index 13317a6fa5..6fbbf986a6 100644 --- a/src/systemc/dt/int/sc_nbcommon.inc +++ b/src/systemc/dt/int/sc_nbcommon.inc @@ -2575,7 +2575,7 @@ void CLASS_TYPE::dump(::std::ostream &os) const { // Save the current setting, and set the base to decimal. 
- ::std::ios::fmtflags old_flags = + ::std::ios::fmtflags old_flags = os.setf(::std::ios::dec, ::std::ios::basefield); os << "width = " << length() << ::std::endl; diff --git a/src/systemc/dt/int/sc_nbfriends.inc b/src/systemc/dt/int/sc_nbfriends.inc index 9161692f0f..b3a1de0d57 100644 --- a/src/systemc/dt/int/sc_nbfriends.inc +++ b/src/systemc/dt/int/sc_nbfriends.inc @@ -53,7 +53,7 @@ // ---------------------------------------------------------------------------- // Naming conventions: // For sc_signed or sc_unsigned number u: -// us : u's sign, unb : u's number of bits, +// us : u's sign, unb : u's number of bits, // und : u's number of digits, ud : u's digits array. // ---------------------------------------------------------------------------- @@ -63,7 +63,7 @@ // Handles cases 3 and 4 and returns the result. CLASS_TYPE -ADD_HELPER(small_type us, int unb, int und, const sc_digit *ud, +ADD_HELPER(small_type us, int unb, int und, const sc_digit *ud, small_type vs, int vnb, int vnd, const sc_digit *vd) { und = vec_skip_leading_zeros(und, ud); @@ -78,7 +78,7 @@ ADD_HELPER(small_type us, int unb, int und, const sc_digit *ud, #else sc_digit *d = new sc_digit[nd]; #endif - + d[nd - 1] = d[nd - 2] = 0; // case 3 @@ -98,14 +98,14 @@ ADD_HELPER(small_type us, int unb, int und, const sc_digit *ud, } else { // case 4 int cmp_res = vec_cmp(und, ud, vnd, vd); - + if (cmp_res == 0) { // u == v #ifndef SC_MAX_NBITS delete[] d; #endif return CLASS_TYPE(); } - + if (cmp_res > 0) { // u > v if ((und == 1) && (vnd == 1)) d[0] = (*ud) - (*vd); @@ -128,8 +128,8 @@ ADD_HELPER(small_type us, int unb, int und, const sc_digit *ud, // ---------------------------------------------------------------------------- // Handles the case 4 and returns the result. 
-CLASS_TYPE -MUL_HELPER(small_type s, int unb, int und, const sc_digit *ud, +CLASS_TYPE +MUL_HELPER(small_type s, int unb, int und, const sc_digit *ud, int vnb, int vnd, const sc_digit *vd) { und = vec_skip_leading_zeros(und, ud); @@ -137,7 +137,7 @@ MUL_HELPER(small_type s, int unb, int und, const sc_digit *ud, int nb = unb + vnb; int nd = und + vnd; - + #ifdef SC_MAX_NBITS test_bound(nb); sc_digit d[MAX_NDIGITS]; @@ -146,7 +146,7 @@ MUL_HELPER(small_type s, int unb, int und, const sc_digit *ud, #endif vec_zero(nd, d); - + sc_digit ud0 = (*ud); sc_digit vd0 = (*vd); @@ -154,7 +154,7 @@ MUL_HELPER(small_type s, int unb, int und, const sc_digit *ud, vec_copy(und, d, ud); } else if ((und == 1) && (ud0 == 1)) { vec_copy(vnd, d, vd); - } else if ((und == 1) && (vnd == 1) && + } else if ((und == 1) && (vnd == 1) && (ud0 < HALF_DIGIT_RADIX) && (vd0 < HALF_DIGIT_RADIX)) { d[0] = ud0 * vd0; } else if ((und == 1) && (ud0 < HALF_DIGIT_RADIX)) { @@ -175,8 +175,8 @@ MUL_HELPER(small_type s, int unb, int und, const sc_digit *ud, // ---------------------------------------------------------------------------- // Handles the cases 3-4 and returns the result. -CLASS_TYPE -DIV_HELPER(small_type s, int unb, int und, const sc_digit *ud, +CLASS_TYPE +DIV_HELPER(small_type s, int unb, int und, const sc_digit *ud, int vnb, int vnd, const sc_digit *vd) { und = vec_skip_leading_zeros(und, ud); @@ -185,7 +185,7 @@ DIV_HELPER(small_type s, int unb, int und, const sc_digit *ud, int cmp_res = vec_cmp(und, ud, vnd, vd); // u < v => u / v = 0 - case 4 - if (cmp_res < 0) + if (cmp_res < 0) return CLASS_TYPE(); // One extra digit for d is allocated to simplify vec_div_*(). @@ -223,8 +223,8 @@ DIV_HELPER(small_type s, int unb, int und, const sc_digit *ud, // ---------------------------------------------------------------------------- // Handles the cases 3-4 and returns the result. 
-CLASS_TYPE -MOD_HELPER(small_type us, int unb, int und, const sc_digit *ud, +CLASS_TYPE +MOD_HELPER(small_type us, int unb, int und, const sc_digit *ud, int vnb, int vnd, const sc_digit *vd) { und = vec_skip_leading_zeros(und, ud); @@ -232,7 +232,7 @@ MOD_HELPER(small_type us, int unb, int und, const sc_digit *ud, int cmp_res = vec_cmp(und, ud, vnd, vd); // u = v => u % v = 0 - case 3 - if (cmp_res == 0) + if (cmp_res == 0) return CLASS_TYPE(); sc_digit vd0 = (*vd); @@ -280,8 +280,8 @@ MOD_HELPER(small_type us, int unb, int und, const sc_digit *ud, // ---------------------------------------------------------------------------- // Handles the cases 2-5 and returns the result. -CLASS_TYPE -AND_HELPER(small_type us, int unb, int und, const sc_digit *ud, +CLASS_TYPE +AND_HELPER(small_type us, int unb, int und, const sc_digit *ud, small_type vs, int vnb, int vnd, const sc_digit *vd) { int nb = sc_max(unb, vnb); @@ -327,7 +327,7 @@ AND_HELPER(small_type us, int unb, int und, const sc_digit *ud, if (xs > 0) { // case 2 while (y < yend) (*d++) = (*x++) & (*y++); - while (x++ < xend) + while (x++ < xend) (*d++) = 0; } else { // case 3 sc_digit xcarry = 1; @@ -372,7 +372,7 @@ AND_HELPER(small_type us, int unb, int und, const sc_digit *ud, } } s = convert_signed_2C_to_SM(nb, nd, dbegin); - return CLASS_TYPE(s, nb, nd, dbegin); + return CLASS_TYPE(s, nb, nd, dbegin); } @@ -381,8 +381,8 @@ AND_HELPER(small_type us, int unb, int und, const sc_digit *ud, // ---------------------------------------------------------------------------- // Handles the cases 3-5 and returns the result. 
-CLASS_TYPE -OR_HELPER(small_type us, int unb, int und, const sc_digit *ud, +CLASS_TYPE +OR_HELPER(small_type us, int unb, int und, const sc_digit *ud, small_type vs, int vnb, int vnd, const sc_digit *vd) { int nb = sc_max(unb, vnb); @@ -483,8 +483,8 @@ OR_HELPER(small_type us, int unb, int und, const sc_digit *ud, // ---------------------------------------------------------------------------- // Handles the cases 3-5 and returns the result. -CLASS_TYPE -XOR_HELPER(small_type us, int unb, int und, const sc_digit *ud, +CLASS_TYPE +XOR_HELPER(small_type us, int unb, int und, const sc_digit *ud, small_type vs, int vnb, int vnd, const sc_digit *vd) { int nb = sc_max(unb, vnb); diff --git a/src/systemc/dt/int/sc_signed_bitref.inc b/src/systemc/dt/int/sc_signed_bitref.inc index c20301c680..0b24c5ed0a 100644 --- a/src/systemc/dt/int/sc_signed_bitref.inc +++ b/src/systemc/dt/int/sc_signed_bitref.inc @@ -121,8 +121,8 @@ sc_signed_bitref::operator ^= (bool b) // #### OPTIMIZE void -sc_signed_bitref::concat_set(int64 src, int low_i) -{ +sc_signed_bitref::concat_set(int64 src, int low_i) +{ bool value = 1 & ((low_i < 64) ? (src >> low_i) : (src >> 63)); m_obj_p->set(low_i, value); } @@ -134,7 +134,7 @@ sc_signed_bitref::concat_set(const sc_signed &src, int low_i) m_obj_p->set(low_i, src.test(low_i)); else m_obj_p->set(low_i, src < 0); -} +} void sc_signed_bitref::concat_set(const sc_unsigned &src, int low_i) diff --git a/src/systemc/dt/int/sc_unsigned_bitref.inc b/src/systemc/dt/int/sc_unsigned_bitref.inc index 09eccd03cc..38f1007178 100644 --- a/src/systemc/dt/int/sc_unsigned_bitref.inc +++ b/src/systemc/dt/int/sc_unsigned_bitref.inc @@ -19,10 +19,10 @@ /***************************************************************************** - sc_unsigned_bitref.h -- Proxy class that is declared in sc_unsigned.h. + sc_unsigned_bitref.h -- Proxy class that is declared in sc_unsigned.h. Original Author: Ali Dasdan, Synopsys, Inc. 
- + *****************************************************************************/ /***************************************************************************** @@ -138,7 +138,7 @@ sc_unsigned_bitref::concat_set(const sc_signed &src, int low_i) void sc_unsigned_bitref::concat_set(const sc_unsigned &src, int low_i) -{ +{ if (low_i < src.nbits) m_obj_p->set(low_i, src.test(low_i)); else diff --git a/src/systemc/ext/tlm_utils/multi_passthrough_target_socket.h b/src/systemc/ext/tlm_utils/multi_passthrough_target_socket.h index 4dddac6145..2b8a577f5d 100644 --- a/src/systemc/ext/tlm_utils/multi_passthrough_target_socket.h +++ b/src/systemc/ext/tlm_utils/multi_passthrough_target_socket.h @@ -240,9 +240,10 @@ class multi_passthrough_target_socket : multi_binds = get_hierarch_bind()->get_multi_binds(); // Complete binding only if there has been a real bind. - bool unbound = (binders.size() == 1 && m_export_callback_created); + bool locally_unbound = + (binders.size() == 1 && m_export_callback_created); // No call to get_base_interface has consumed the export - ignore. - if (unbound) + if (locally_unbound && !m_hierarch_bind) return; // Iterate over all binders. 
diff --git a/src/systemc/python/systemc.py b/src/systemc/python/systemc.py index 11cf2db336..da189ffb29 100644 --- a/src/systemc/python/systemc.py +++ b/src/systemc/python/systemc.py @@ -29,13 +29,16 @@ from _m5.systemc import sc_main from _m5.systemc import sc_time from _m5.systemc import sc_main_result_code, sc_main_result_str + class ScMainResult(object): def __init__(self, code, message): self.code = code self.message = message + def sc_main_result(): - '''Retrieve and return the results of running sc_main''' + """Retrieve and return the results of running sc_main""" return ScMainResult(sc_main_result_code(), sc_main_result_str()) -__all__ = [ 'sc_main', 'sc_time', 'sc_main_result' ] + +__all__ = ["sc_main", "sc_time", "sc_main_result"] diff --git a/src/systemc/python/tlm.py b/src/systemc/python/tlm.py index cb88ddddeb..7b342811f6 100644 --- a/src/systemc/python/tlm.py +++ b/src/systemc/python/tlm.py @@ -27,7 +27,9 @@ import _m5.systemc from _m5.systemc import tlm_global_quantum + def tlm_global_quantum_instance(): return tlm_global_quantum.instance() -__all__ = [ 'tlm_global_quantum_instance' ] + +__all__ = ["tlm_global_quantum_instance"] diff --git a/src/systemc/tests/config.py b/src/systemc/tests/config.py index b199080018..1c2e021830 100755 --- a/src/systemc/tests/config.py +++ b/src/systemc/tests/config.py @@ -36,7 +36,7 @@ from m5.objects import SystemC_Kernel, Root kernel = SystemC_Kernel() root = Root(full_system=True, systemc_kernel=kernel) -m5.systemc.sc_main('gem5_systemc_test'); +m5.systemc.sc_main("gem5_systemc_test") m5.instantiate(None) diff --git a/src/systemc/tests/tlm/endian_conv/testall.py b/src/systemc/tests/tlm/endian_conv/testall.py index f3a9d0b9a5..b9e10ad94d 100644 --- a/src/systemc/tests/tlm/endian_conv/testall.py +++ b/src/systemc/tests/tlm/endian_conv/testall.py @@ -72,319 +72,425 @@ import string class transaction: - """ contains read_not_write, address, length, byte_enable, + """ contains read_not_write, address, length, 
byte_enable, bus_width, data_width, data_pointer, stream_width """ - def __init__(self, **a): self.__dict__ = a - def __str__(self): - if self.read_not_write: a = "R: " - else: a = "W: " - a += "addr = %d, len = %d, bus = %d, word = %d, data = %d" % \ - (self.address, self.length, self.bus_width, self.data_width, \ - self.data_pointer) - if self.byte_enable: a += ", be = " + self.byte_enable - else: a += ", be = x" - a += ", sw = %d" % (self.stream_width) - return a + + def __init__(self, **a): + self.__dict__ = a + + def __str__(self): + if self.read_not_write: + a = "R: " + else: + a = "W: " + a += "addr = %d, len = %d, bus = %d, word = %d, data = %d" % ( + self.address, + self.length, + self.bus_width, + self.data_width, + self.data_pointer, + ) + if self.byte_enable: + a += ", be = " + self.byte_enable + else: + a += ", be = x" + a += ", sw = %d" % (self.stream_width) + return a def txn_generator(nr): - pr_read = 0.5 - pr_byte_enable = 0.5 - pr_enabled = 0.5 - bus_widths = [1, 2, 4, 8, 16] - data_widths = [1, 2, 4, 8, 16] + [1, 2, 4, 8] + [1, 2, 4] + [1, 2] - lengths = list(range(1,33)) + list(range(1,17)) + list(range(1,9)) + list(range(1,5)) + list(range(1,3)) - pr_short_be = 0.2 - pr_stream = 0.1 - nr_generated = 0 - while nr_generated < nr: - # create a random transaction - bus_width = random.choice(bus_widths) - while True: - data_width = random.choice(data_widths) - if data_width <= bus_width: break - if random.random() < 0.25: break - length = random.choice(lengths) - addr_base = random.choice(list(range(0,1024,bus_width))) - addr_offset = random.choice(list(range(bus_width))+[0]*int(bus_width//2)) - txn = transaction( - bus_width = bus_width, - data_width = data_width, - read_not_write = random.random() < pr_read, - length = length * data_width, - address = addr_base + addr_offset, - byte_enable = False, - stream_width = length * data_width, - data_pointer = random.randint(0,1023) + pr_read = 0.5 + pr_byte_enable = 0.5 + pr_enabled = 0.5 + bus_widths = 
[1, 2, 4, 8, 16] + data_widths = [1, 2, 4, 8, 16] + [1, 2, 4, 8] + [1, 2, 4] + [1, 2] + lengths = ( + list(range(1, 33)) + + list(range(1, 17)) + + list(range(1, 9)) + + list(range(1, 5)) + + list(range(1, 3)) ) - if random.random() < pr_byte_enable: - belen = length - if random.random() < pr_short_be: - belen = min(random.choice(lengths), length) - bep = ["0" * data_width, "1" * data_width] - txn.byte_enable = "".join([random.choice(bep) for x in range(belen)]) - if random.random() < pr_stream and length > 1: - strlen = length - while True: - strlen -= 1 - if strlen == 1 or \ - (random.random() < 0.5 and (length/strlen)*strlen == length): - break - txn.stream_width = strlen * data_width - nr_generated += 1 - yield txn + pr_short_be = 0.2 + pr_stream = 0.1 + nr_generated = 0 + while nr_generated < nr: + # create a random transaction + bus_width = random.choice(bus_widths) + while True: + data_width = random.choice(data_widths) + if data_width <= bus_width: + break + if random.random() < 0.25: + break + length = random.choice(lengths) + addr_base = random.choice(list(range(0, 1024, bus_width))) + addr_offset = random.choice( + list(range(bus_width)) + [0] * int(bus_width // 2) + ) + txn = transaction( + bus_width=bus_width, + data_width=data_width, + read_not_write=random.random() < pr_read, + length=length * data_width, + address=addr_base + addr_offset, + byte_enable=False, + stream_width=length * data_width, + data_pointer=random.randint(0, 1023), + ) + if random.random() < pr_byte_enable: + belen = length + if random.random() < pr_short_be: + belen = min(random.choice(lengths), length) + bep = ["0" * data_width, "1" * data_width] + txn.byte_enable = "".join( + [random.choice(bep) for x in range(belen)] + ) + if random.random() < pr_stream and length > 1: + strlen = length + while True: + strlen -= 1 + if strlen == 1 or ( + random.random() < 0.5 + and (length / strlen) * strlen == length + ): + break + txn.stream_width = strlen * data_width + nr_generated += 1 + 
yield txn + # test code for transaction generator if False: - for t in txn_generator(20): - print(t) - raise Exception + for t in txn_generator(20): + print(t) + raise Exception # end test code class memory_state_cl: - buffer_size = 2048 - repeats = 10 * buffer_size // 36 - population = (string.ascii_lowercase + string.digits) * int(repeats) - def __init__(self): - self.initiator = "".join( - random.sample(memory_state_cl.population, memory_state_cl.buffer_size)) - self.target = "".join( - random.sample(memory_state_cl.population, memory_state_cl.buffer_size)) - def copy(self): - r = memory_state_cl() - r.initiator = self.initiator - r.target = self.target - return r - def __eq__(self, golden): - return self.initiator==golden.initiator and self.target==golden.target - def __ne__(self, golden): - return self.initiator!=golden.initiator or self.target!=golden.target - def __str__(self): - return "initiator = " + self.initiator + "\n" + "target = " + self.target + buffer_size = 2048 + repeats = 10 * buffer_size // 36 + population = (string.ascii_lowercase + string.digits) * int(repeats) + + def __init__(self): + self.initiator = "".join( + random.sample( + memory_state_cl.population, memory_state_cl.buffer_size + ) + ) + self.target = "".join( + random.sample( + memory_state_cl.population, memory_state_cl.buffer_size + ) + ) + + def copy(self): + r = memory_state_cl() + r.initiator = self.initiator + r.target = self.target + return r + + def __eq__(self, golden): + return ( + self.initiator == golden.initiator and self.target == golden.target + ) + + def __ne__(self, golden): + return ( + self.initiator != golden.initiator or self.target != golden.target + ) + + def __str__(self): + return ( + "initiator = " + self.initiator + "\n" + "target = " + self.target + ) # all fragmentation generators def __FRAG__null(txn): - yield txn + yield txn + def __FRAG__word(txn): - curr_address = txn.address - reset_address = curr_address + txn.stream_width - if txn.byte_enable: - 
full_byte_enable = txn.byte_enable * (1+txn.length/len(txn.byte_enable)) - be_pos = 0 - d_pos = txn.data_pointer - end = txn.length + d_pos - while d_pos < end: - new_txn = transaction( - bus_width = txn.bus_width, - data_width = txn.data_width, - read_not_write = txn.read_not_write, - length = txn.data_width, - address = curr_address, - byte_enable = False, - stream_width = txn.data_width, - data_pointer = d_pos - ) - curr_address += txn.data_width - if curr_address == reset_address: curr_address = txn.address - d_pos += txn.data_width - if txn.byte_enable: - new_txn.byte_enable = full_byte_enable[be_pos:be_pos+txn.data_width] - be_pos += txn.data_width - yield new_txn - -def __FRAG__stream(txn): - if txn.byte_enable: - full_byte_enable = txn.byte_enable * (1+txn.length/len(txn.byte_enable)) - be_pos = 0 - bytes_done = 0 - while bytes_done < txn.length: - new_txn = transaction( - bus_width = txn.bus_width, - data_width = txn.data_width, - read_not_write = txn.read_not_write, - length = txn.stream_width, - address = txn.address, - byte_enable = False, - stream_width = txn.stream_width, - data_pointer = bytes_done + txn.data_pointer - ) - if txn.byte_enable: - new_txn.byte_enable = full_byte_enable[be_pos:be_pos+txn.stream_width] - be_pos += txn.stream_width - yield new_txn - bytes_done += txn.stream_width - -def __FRAG__random(stream_txn): - for txn in __FRAG__stream(stream_txn): - # txn has full byte enables and no stream feature guaranteed - pr_nofrag = 0.5 - end_address = txn.address + txn.length curr_address = txn.address + reset_address = curr_address + txn.stream_width + if txn.byte_enable: + full_byte_enable = txn.byte_enable * ( + 1 + txn.length / len(txn.byte_enable) + ) be_pos = 0 d_pos = txn.data_pointer - while curr_address < end_address: - new_txn = transaction( - bus_width = txn.bus_width, - data_width = txn.data_width, - read_not_write = txn.read_not_write, - length = txn.data_width, - address = curr_address, - byte_enable = txn.byte_enable, - 
stream_width = txn.data_width, - data_pointer = d_pos - ) - curr_address += txn.data_width - d_pos += txn.data_width - if txn.byte_enable: - new_txn.byte_enable = txn.byte_enable[be_pos:be_pos+txn.data_width] - be_pos += txn.data_width - while random.random() < pr_nofrag and curr_address < end_address: - new_txn.length += txn.data_width - new_txn.stream_width += txn.data_width + end = txn.length + d_pos + while d_pos < end: + new_txn = transaction( + bus_width=txn.bus_width, + data_width=txn.data_width, + read_not_write=txn.read_not_write, + length=txn.data_width, + address=curr_address, + byte_enable=False, + stream_width=txn.data_width, + data_pointer=d_pos, + ) curr_address += txn.data_width + if curr_address == reset_address: + curr_address = txn.address d_pos += txn.data_width if txn.byte_enable: - new_txn.byte_enable += txn.byte_enable[be_pos:be_pos+txn.data_width] - be_pos += txn.data_width - yield new_txn + new_txn.byte_enable = full_byte_enable[ + be_pos : be_pos + txn.data_width + ] + be_pos += txn.data_width + yield new_txn + + +def __FRAG__stream(txn): + if txn.byte_enable: + full_byte_enable = txn.byte_enable * ( + 1 + txn.length / len(txn.byte_enable) + ) + be_pos = 0 + bytes_done = 0 + while bytes_done < txn.length: + new_txn = transaction( + bus_width=txn.bus_width, + data_width=txn.data_width, + read_not_write=txn.read_not_write, + length=txn.stream_width, + address=txn.address, + byte_enable=False, + stream_width=txn.stream_width, + data_pointer=bytes_done + txn.data_pointer, + ) + if txn.byte_enable: + new_txn.byte_enable = full_byte_enable[ + be_pos : be_pos + txn.stream_width + ] + be_pos += txn.stream_width + yield new_txn + bytes_done += txn.stream_width + + +def __FRAG__random(stream_txn): + for txn in __FRAG__stream(stream_txn): + # txn has full byte enables and no stream feature guaranteed + pr_nofrag = 0.5 + end_address = txn.address + txn.length + curr_address = txn.address + be_pos = 0 + d_pos = txn.data_pointer + while curr_address < 
end_address: + new_txn = transaction( + bus_width=txn.bus_width, + data_width=txn.data_width, + read_not_write=txn.read_not_write, + length=txn.data_width, + address=curr_address, + byte_enable=txn.byte_enable, + stream_width=txn.data_width, + data_pointer=d_pos, + ) + curr_address += txn.data_width + d_pos += txn.data_width + if txn.byte_enable: + new_txn.byte_enable = txn.byte_enable[ + be_pos : be_pos + txn.data_width + ] + be_pos += txn.data_width + while random.random() < pr_nofrag and curr_address < end_address: + new_txn.length += txn.data_width + new_txn.stream_width += txn.data_width + curr_address += txn.data_width + d_pos += txn.data_width + if txn.byte_enable: + new_txn.byte_enable += txn.byte_enable[ + be_pos : be_pos + txn.data_width + ] + be_pos += txn.data_width + yield new_txn + def __FRAG__randinterleave(stream_txn): - for txn in __FRAG__stream(stream_txn): - # txn has full byte enables and no stream feature guaranteed - pr_frag = 0.5 - txns = [ transaction( - bus_width = txn.bus_width, - data_width = txn.data_width, - read_not_write = txn.read_not_write, - length = txn.length, - address = txn.address, - byte_enable = "", - stream_width = txn.length, - data_pointer = txn.data_pointer - ), transaction( - bus_width = txn.bus_width, - data_width = txn.data_width, - read_not_write = txn.read_not_write, - length = txn.length, - address = txn.address, - byte_enable = "", - stream_width = txn.length, - data_pointer = txn.data_pointer - ) ] - curr = 0 - be_pos = 0 - on = "1" * txn.data_width - off = "0" * txn.data_width - while be_pos < txn.length: - if txn.byte_enable: bew = txn.byte_enable[be_pos:be_pos+txn.data_width] - else: bew = on - txns[curr].byte_enable += bew - txns[1-curr].byte_enable += off - be_pos += txn.data_width - if random.random() < pr_frag: curr = 1-curr - yield txns[0] - yield txns[1] + for txn in __FRAG__stream(stream_txn): + # txn has full byte enables and no stream feature guaranteed + pr_frag = 0.5 + txns = [ + transaction( + 
bus_width=txn.bus_width, + data_width=txn.data_width, + read_not_write=txn.read_not_write, + length=txn.length, + address=txn.address, + byte_enable="", + stream_width=txn.length, + data_pointer=txn.data_pointer, + ), + transaction( + bus_width=txn.bus_width, + data_width=txn.data_width, + read_not_write=txn.read_not_write, + length=txn.length, + address=txn.address, + byte_enable="", + stream_width=txn.length, + data_pointer=txn.data_pointer, + ), + ] + curr = 0 + be_pos = 0 + on = "1" * txn.data_width + off = "0" * txn.data_width + while be_pos < txn.length: + if txn.byte_enable: + bew = txn.byte_enable[be_pos : be_pos + txn.data_width] + else: + bew = on + txns[curr].byte_enable += bew + txns[1 - curr].byte_enable += off + be_pos += txn.data_width + if random.random() < pr_frag: + curr = 1 - curr + yield txns[0] + yield txns[1] -fragmenters = [globals()[n] for n in globals().keys() if n[:8]=="__FRAG__"] + +fragmenters = [globals()[n] for n in globals().keys() if n[:8] == "__FRAG__"] # test code for fragmenters if False: - for t in txn_generator(1): - print(t) - print() - for u in fragmenters[4](t): - print(u) - raise Exception + for t in txn_generator(1): + print(t) + print() + for u in fragmenters[4](t): + print(u) + raise Exception # end test code # conversion functions are determined by an index (shared with C++) and # a function that tests if they can be applied to a transaction def __CHCK__generic(txn): - __CHCK__generic.nr = 0 - return True + __CHCK__generic.nr = 0 + return True + def __CHCK__word(txn): - __CHCK__word.nr = 1 - if txn.data_width > txn.bus_width: return False - if txn.stream_width < txn.length: return False - if txn.byte_enable and len(txn.byte_enable) < txn.length: return False - return True + __CHCK__word.nr = 1 + if txn.data_width > txn.bus_width: + return False + if txn.stream_width < txn.length: + return False + if txn.byte_enable and len(txn.byte_enable) < txn.length: + return False + return True + def __CHCK__aligned(txn): - 
__CHCK__aligned.nr = 2 - if txn.data_width > txn.bus_width: return False - if txn.stream_width < txn.length: return False - if txn.byte_enable and len(txn.byte_enable) < txn.length: return False - base_addr = txn.address / txn.bus_width - if base_addr * txn.bus_width != txn.address: return False - nr_bus_words = txn.length / txn.bus_width - if nr_bus_words * txn.bus_width != txn.length: return False - return True + __CHCK__aligned.nr = 2 + if txn.data_width > txn.bus_width: + return False + if txn.stream_width < txn.length: + return False + if txn.byte_enable and len(txn.byte_enable) < txn.length: + return False + base_addr = txn.address / txn.bus_width + if base_addr * txn.bus_width != txn.address: + return False + nr_bus_words = txn.length / txn.bus_width + if nr_bus_words * txn.bus_width != txn.length: + return False + return True + def __CHCK__single(txn): - __CHCK__single.nr = 3 - if txn.length != txn.data_width: return False - base_addr = txn.address / txn.bus_width - end_base_addr = (txn.address + txn.length - 1) / txn.bus_width - if base_addr != end_base_addr: return False - return True + __CHCK__single.nr = 3 + if txn.length != txn.data_width: + return False + base_addr = txn.address / txn.bus_width + end_base_addr = (txn.address + txn.length - 1) / txn.bus_width + if base_addr != end_base_addr: + return False + return True + def __CHCK__local_single(txn): - __CHCK__local_single.nr = 4 - if txn.length != txn.data_width: return False - return True - -all_converters = [globals()[n] for n in globals().keys() if n[:8]=="__CHCK__"] -for x in all_converters: x.usage = 0 + __CHCK__local_single.nr = 4 + if txn.length != txn.data_width: + return False + return True -class TesterFailure(Exception): pass -class SystemCFailure(Exception): pass -class ConverterDifference(Exception): pass -class FragmenterDifference(Exception): pass +all_converters = [ + globals()[n] for n in globals().keys() if n[:8] == "__CHCK__" +] +for x in all_converters: + x.usage = 0 + + +class 
TesterFailure(Exception): + pass + + +class SystemCFailure(Exception): + pass + + +class ConverterDifference(Exception): + pass + + +class FragmenterDifference(Exception): + pass + from subprocess import Popen, PIPE # test a single fragment in multiple ways def test_a_fragment(f, ms): - # f is the (fragment of a) transaction - # ms is the memory state to use at start of test + # f is the (fragment of a) transaction + # ms is the memory state to use at start of test - # run the same fragment through all applicable conversion functions - # and check they all do the same thing - # use the same sub-process for all of them + # run the same fragment through all applicable conversion functions + # and check they all do the same thing + # use the same sub-process for all of them - # build complete stdin - convs = [c for c in all_converters if c(f)] - if len(convs) == 0: raise TesterFailure(f.str()) - txtin = "\n".join( - [("%s\n%s\nconverter = %d\n" % (f, ms, c.nr)) for c in convs]) + # build complete stdin + convs = [c for c in all_converters if c(f)] + if len(convs) == 0: + raise TesterFailure(f.str()) + txtin = "\n".join( + [("%s\n%s\nconverter = %d\n" % (f, ms, c.nr)) for c in convs] + ) - # run and get stdout - txtout = "no output" - try: - sp = Popen("../build-unix/test_endian_conv.exe", stdin=PIPE, stdout=PIPE) - txtout = sp.communicate(txtin)[0] - tmp = txtout.splitlines() - initiators = [l.split()[-1] for l in tmp if l[:14] == " initiator = "] - targets = [l.split()[-1] for l in tmp if l[:11] == " target = "] - except: - raise SystemCFailure("\n" + txtin + txtout) - if sp.returncode != 0: raise SystemCFailure("\n" + txtin + txtout) - if len(initiators) != len(convs): raise SystemCFailure("\n" + txtin + txtout) - if len(targets) != len(convs): raise SystemCFailure("\n" + txtin + txtout) - for c in convs: c.usage += 1 + # run and get stdout + txtout = "no output" + try: + sp = Popen( + "../build-unix/test_endian_conv.exe", stdin=PIPE, stdout=PIPE + ) + txtout = 
sp.communicate(txtin)[0] + tmp = txtout.splitlines() + initiators = [l.split()[-1] for l in tmp if l[:14] == " initiator = "] + targets = [l.split()[-1] for l in tmp if l[:11] == " target = "] + except: + raise SystemCFailure("\n" + txtin + txtout) + if sp.returncode != 0: + raise SystemCFailure("\n" + txtin + txtout) + if len(initiators) != len(convs): + raise SystemCFailure("\n" + txtin + txtout) + if len(targets) != len(convs): + raise SystemCFailure("\n" + txtin + txtout) + for c in convs: + c.usage += 1 - ms_out = memory_state_cl() - ms_out.initiator = initiators[0] - ms_out.target = targets[0] - for i in range(1,len(convs)): - if initiators[i]!=ms_out.initiator or targets[i]!=ms_out.target: - raise ConverterDifference(""" + ms_out = memory_state_cl() + ms_out.initiator = initiators[0] + ms_out.target = targets[0] + for i in range(1, len(convs)): + if initiators[i] != ms_out.initiator or targets[i] != ms_out.target: + raise ConverterDifference( + """ %s start memory: %s @@ -392,9 +498,11 @@ converter = %d golden memory: %s actual memory: -%s""" % (f, ms, i, golden_ms, ms_out)) +%s""" + % (f, ms, i, golden_ms, ms_out) + ) - return ms_out + return ms_out # main loop @@ -405,36 +513,39 @@ print("Testing Endianness Conversion Functions") print("March 2008") print("OSCI TLM-2") -try: nr_txns_to_test = int(argv[1]) +try: + nr_txns_to_test = int(argv[1]) except: - print("No command line input for number of tests, using default") - nr_txns_to_test = 1000 + print("No command line input for number of tests, using default") + nr_txns_to_test = 1000 print("Number to test:", nr_txns_to_test) # generate and test a number of transactions for txn in txn_generator(nr_txns_to_test): - # each transaction has a random initial memory state - initial_memory = memory_state_cl() + # each transaction has a random initial memory state + initial_memory = memory_state_cl() - # iterate over all defined fragmentation functions - first_time = True - for fragmenter in fragmenters: + # 
iterate over all defined fragmentation functions + first_time = True + for fragmenter in fragmenters: - # all versions of the transaction start in the same place - memory_state = initial_memory.copy() + # all versions of the transaction start in the same place + memory_state = initial_memory.copy() - # now iterate over the fragments of the transaction, accumulating - # the memory state - for partial_txn in fragmenter(txn): - memory_state = test_a_fragment(partial_txn, memory_state) + # now iterate over the fragments of the transaction, accumulating + # the memory state + for partial_txn in fragmenter(txn): + memory_state = test_a_fragment(partial_txn, memory_state) - if first_time: - golden_memory_state = memory_state.copy() - first_time = False - else: - if memory_state != golden_memory_state: raise FragmenterDifference(""" + if first_time: + golden_memory_state = memory_state.copy() + first_time = False + else: + if memory_state != golden_memory_state: + raise FragmenterDifference( + """ fragmenter: %s transaction: %s @@ -443,14 +554,20 @@ start memory: golden memory: %s actual memory: -%s""" % (fragmenter, txn, initial_memory, golden_memory_state, memory_state)) +%s""" + % ( + fragmenter, + txn, + initial_memory, + golden_memory_state, + memory_state, + ) + ) - print("."), + print("."), print() print("Conversion functions usage frequency:") for c in all_converters: - print(c.nr, c.__name__, c.usage) - - + print(c.nr, c.__name__, c.usage) diff --git a/src/systemc/tests/verify.py b/src/systemc/tests/verify.py index 818855aa31..5191062e50 100755 --- a/src/systemc/tests/verify.py +++ b/src/systemc/tests/verify.py @@ -40,25 +40,23 @@ import sys script_path = os.path.abspath(inspect.getfile(inspect.currentframe())) script_dir = os.path.dirname(script_path) -config_path = os.path.join(script_dir, 'config.py') +config_path = os.path.join(script_dir, "config.py") # Parent directories if checked out as part of gem5. 
systemc_dir = os.path.dirname(script_dir) src_dir = os.path.dirname(systemc_dir) checkout_dir = os.path.dirname(src_dir) -systemc_rel_path = 'systemc' -tests_rel_path = os.path.join(systemc_rel_path, 'tests') -json_rel_path = os.path.join(tests_rel_path, 'tests.json') - +systemc_rel_path = "systemc" +tests_rel_path = os.path.join(systemc_rel_path, "tests") +json_rel_path = os.path.join(tests_rel_path, "tests.json") def scons(*args): - args = ['scons', '--with-systemc-tests'] + list(args) + args = ["scons", "--with-systemc-tests"] + list(args) subprocess.check_call(args) - -class Test(): +class Test: def __init__(self, target, suffix, build_dir, props): self.target = target self.suffix = suffix @@ -79,34 +77,35 @@ class Test(): return os.path.join(script_dir, self.path) def expected_returncode_file(self): - return os.path.join(self.src_dir(), 'expected_returncode') + return os.path.join(self.src_dir(), "expected_returncode") def golden_dir(self): - return os.path.join(self.src_dir(), 'golden') + return os.path.join(self.src_dir(), "golden") def bin(self): - return '.'.join([self.name, self.suffix]) + return ".".join([self.name, self.suffix]) def full_path(self): return os.path.join(self.dir(), self.bin()) def m5out_dir(self): - return os.path.join(self.dir(), 'm5out.' + self.suffix) + return os.path.join(self.dir(), "m5out." 
+ self.suffix) def returncode_file(self): - return os.path.join(self.m5out_dir(), 'returncode') - + return os.path.join(self.m5out_dir(), "returncode") test_phase_classes = {} + class TestPhaseMeta(type): def __init__(cls, name, bases, d): - if not d.pop('abstract', False): - test_phase_classes[d['name']] = cls + if not d.pop("abstract", False): + test_phase_classes[d["name"]] = cls super().__init__(name, bases, d) + class TestPhaseBase(metaclass=TestPhaseMeta): abstract = True @@ -117,53 +116,70 @@ class TestPhaseBase(metaclass=TestPhaseMeta): def __lt__(self, other): return self.number < other.number + class CompilePhase(TestPhaseBase): - name = 'compile' + name = "compile" number = 1 def run(self, tests): targets = list([test.full_path() for test in tests]) parser = argparse.ArgumentParser() - parser.add_argument('-j', type=int, default=0) + parser.add_argument("-j", type=int, default=0) args, leftovers = parser.parse_known_args(self.args) if args.j == 0: - self.args = ('-j', str(self.main_args.j)) + self.args + self.args = ("-j", str(self.main_args.j)) + self.args - scons_args = [ '--directory', self.main_args.scons_dir, - 'USE_SYSTEMC=1' ] + list(self.args) + targets + scons_args = ( + ["--directory", self.main_args.scons_dir, "USE_SYSTEMC=1"] + + list(self.args) + + targets + ) scons(*scons_args) + class RunPhase(TestPhaseBase): - name = 'execute' + name = "execute" number = 2 def run(self, tests): parser = argparse.ArgumentParser() - parser.add_argument('--timeout', type=int, metavar='SECONDS', - help='Time limit for each run in seconds, ' - '0 to disable.', - default=60) - parser.add_argument('-j', type=int, default=0, - help='How many tests to run in parallel.') + parser.add_argument( + "--timeout", + type=int, + metavar="SECONDS", + help="Time limit for each run in seconds, " "0 to disable.", + default=60, + ) + parser.add_argument( + "-j", + type=int, + default=0, + help="How many tests to run in parallel.", + ) args = parser.parse_args(self.args) 
timeout_cmd = [ - 'timeout', - '--kill-after', str(args.timeout * 2), - str(args.timeout) + "timeout", + "--kill-after", + str(args.timeout * 2), + str(args.timeout), ] + def run_test(test): cmd = [] if args.timeout: cmd.extend(timeout_cmd) - cmd.extend([ - os.path.abspath(test.full_path()), - '-rd', os.path.abspath(test.m5out_dir()), - '--listener-mode=off', - '--quiet', - os.path.abspath(config_path), - ]) + cmd.extend( + [ + os.path.abspath(test.full_path()), + "-rd", + os.path.abspath(test.m5out_dir()), + "--listener-mode=off", + "--quiet", + os.path.abspath(config_path), + ] + ) # Ensure the output directory exists. if not os.path.exists(test.m5out_dir()): os.makedirs(test.m5out_dir()) @@ -173,8 +189,8 @@ class RunPhase(TestPhaseBase): returncode = error.returncode else: returncode = 0 - with open(test.returncode_file(), 'w') as rc: - rc.write('%d\n' % returncode) + with open(test.returncode_file(), "w") as rc: + rc.write("%d\n" % returncode) j = self.main_args.j if args.j == 0 else args.j @@ -187,7 +203,8 @@ class RunPhase(TestPhaseBase): tp.close() tp.join() -class Checker(): + +class Checker: def __init__(self, ref, test, tag): self.ref = ref self.test = test @@ -197,18 +214,24 @@ class Checker(): with open(self.test) as test_f, open(self.ref) as ref_f: return test_f.read() == ref_f.read() + def tagged_filt(tag, num): - return (r'\n{}: \({}{}\) .*\n(In file: .*\n)?' - r'(In process: [\w.]* @ .*\n)?').format(tag, tag[0], num) + return ( + r"\n{}: \({}{}\) .*\n(In file: .*\n)?" r"(In process: [\w.]* @ .*\n)?" 
+ ).format(tag, tag[0], num) + def error_filt(num): - return tagged_filt('Error', num) + return tagged_filt("Error", num) + def warning_filt(num): - return tagged_filt('Warning', num) + return tagged_filt("Warning", num) + def info_filt(num): - return tagged_filt('Info', num) + return tagged_filt("Info", num) + class DiffingChecker(Checker): def __init__(self, ref, test, tag, out_dir): @@ -219,20 +242,22 @@ class DiffingChecker(Checker): return False def do_diff(self, ref_lines, test_lines, ref_file, test_file): - return difflib.unified_diff(ref_lines, test_lines, - fromfile=ref_file, tofile=test_file) + return difflib.unified_diff( + ref_lines, test_lines, fromfile=ref_file, tofile=test_file + ) def diffing_check(self, ref_lines, test_lines): test_file = os.path.basename(self.test) ref_file = os.path.basename(self.ref) - diff_file = '.'.join([ref_file, 'diff']) + diff_file = ".".join([ref_file, "diff"]) diff_path = os.path.join(self.out_dir, diff_file) if test_lines != ref_lines: - flag = 'wb' if self.is_bytes_mode() else 'w' + flag = "wb" if self.is_bytes_mode() else "w" with open(diff_file, flag) as diff_f: - for line in self.do_diff(ref_lines, test_lines, - ref_file, test_file): + for line in self.do_diff( + ref_lines, test_lines, ref_file, test_file + ): diff_f.write(line) return False else: @@ -240,11 +265,12 @@ class DiffingChecker(Checker): os.unlink(diff_path) return True + class LogChecker(DiffingChecker): def merge_filts(*filts): - filts = map(lambda f: '(' + f + ')', filts) - filts = '|'.join(filts) - return re.compile(filts.encode('utf-8'), flags=re.MULTILINE) + filts = map(lambda f: "(" + f + ")", filts) + filts = "|".join(filts) + return re.compile(filts.encode("utf-8"), flags=re.MULTILINE) def is_bytes_mode(self): return True @@ -252,22 +278,24 @@ class LogChecker(DiffingChecker): def do_diff(self, ref_lines, test_lines, ref_file, test_file): return difflib.diff_bytes( difflib.unified_diff, - ref_lines, test_lines, - 
fromfile=ref_file.encode('utf-8'), - tofile=test_file.encode('utf-8')) + ref_lines, + test_lines, + fromfile=ref_file.encode("utf-8"), + tofile=test_file.encode("utf-8"), + ) # The reporting mechanism will print the actual filename when running in # gem5, and the "golden" output will say "". We want # to strip out both versions to make comparing the output sensible. - in_file_filt = r'^In file: (()|([a-zA-Z0-9.:_/]*))$' + in_file_filt = r"^In file: (()|([a-zA-Z0-9.:_/]*))$" ref_filt = merge_filts( - r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n', - r'^SystemC Simulation\n', - r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' + - r'You can turn off(.*\n){7}', - r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' + - r' sc_clock\(const char(.*\n){3}', + r"^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n", + r"^SystemC Simulation\n", + r"^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: " + + r"You can turn off(.*\n){7}", + r"^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n" + + r" sc_clock\(const char(.*\n){3}", warning_filt(540), warning_filt(571), info_filt(804), @@ -275,29 +303,31 @@ class LogChecker(DiffingChecker): in_file_filt, ) test_filt = merge_filts( - r'^/.*:\d+: ', - r'^Global frequency set at \d* ticks per second\n', - r'.*info: Entering event queue @ \d*\. Starting simulation\.\.\.\n', - r'.*warn: Ignoring request to set stack size\.\n', - r'^.*warn: No dot file generated. Please install pydot ' + - r'to generate the dot file and pdf.\n', + r"^/.*:\d+: ", + r"^Global frequency set at \d* ticks per second\n", + r".*info: Entering event queue @ \d*\. Starting simulation\.\.\.\n", + r".*warn: Ignoring request to set stack size\.\n", + r"^.*warn: No dot file generated. 
Please install pydot " + + r"to generate the dot file and pdf.\n", info_filt(804), in_file_filt, ) def apply_filters(self, data, filts): - re.sub(filt, b'', data) + re.sub(filt, b"", data) def check(self): - with open(self.test, 'rb') as test_f, open(self.ref, 'rb') as ref_f: - test = re.sub(self.test_filt, b'', test_f.read()) - ref = re.sub(self.ref_filt, b'', ref_f.read()) - return self.diffing_check(ref.splitlines(True), - test.splitlines(True)) + with open(self.test, "rb") as test_f, open(self.ref, "rb") as ref_f: + test = re.sub(self.test_filt, b"", test_f.read()) + ref = re.sub(self.ref_filt, b"", ref_f.read()) + return self.diffing_check( + ref.splitlines(True), test.splitlines(True) + ) + class VcdChecker(DiffingChecker): def check(self): - with open (self.test) as test_f, open(self.ref) as ref_f: + with open(self.test) as test_f, open(self.ref) as ref_f: ref = ref_f.read().splitlines(True) test = test_f.read().splitlines(True) # Strip off the first seven lines of the test output which are @@ -306,19 +336,21 @@ class VcdChecker(DiffingChecker): return self.diffing_check(ref, test) -class GoldenDir(): + +class GoldenDir: def __init__(self, path, platform): self.path = path self.platform = platform contents = os.listdir(path) - suffix = '.' + platform + suffix = "." 
+ platform suffixed = filter(lambda c: c.endswith(suffix), contents) - bases = map(lambda t: t[:-len(platform)], suffixed) + bases = map(lambda t: t[: -len(platform)], suffixed) common = filter(lambda t: not t.startswith(tuple(bases)), contents) self.entries = {} - class Entry(): + + class Entry: def __init__(self, e_path): self.used = False self.path = os.path.join(path, e_path) @@ -331,13 +363,14 @@ class GoldenDir(): def entry(self, name): def match(n): - return (n == name) or n.startswith(name + '.') - matches = { n: e for n, e in self.entries.items() if match(n) } + return (n == name) or n.startswith(name + ".") + + matches = {n: e for n, e in self.entries.items() if match(n)} for match in matches.values(): match.use() - platform_name = '.'.join([ name, self.platform ]) + platform_name = ".".join([name, self.platform]) if platform_name in matches: return matches[platform_name].path if name in matches: @@ -360,8 +393,9 @@ class GoldenDir(): i += 1 return sources + class VerifyPhase(TestPhaseBase): - name = 'verify' + name = "verify" number = 3 def reset_status(self): @@ -371,58 +405,70 @@ class VerifyPhase(TestPhaseBase): def passed(self, test): self._passed.append(test) - def failed(self, test, cause, note=''): - test.set_prop('note', note) + def failed(self, test, cause, note=""): + test.set_prop("note", note) self._failed.setdefault(cause, []).append(test) def print_status(self): total_passed = len(self._passed) total_failed = sum(map(len, self._failed.values())) print() - print('Passed: {passed:4} - Failed: {failed:4}'.format( - passed=total_passed, failed=total_failed)) + print( + "Passed: {passed:4} - Failed: {failed:4}".format( + passed=total_passed, failed=total_failed + ) + ) def write_result_file(self, path): results = { - 'passed': list(map(lambda t: t.props, self._passed)), - 'failed': { + "passed": list(map(lambda t: t.props, self._passed)), + "failed": { cause: list(map(lambda t: t.props, tests)) for cause, tests in self._failed.items() - } + }, } 
- with open(path, 'w') as rf: + with open(path, "w") as rf: json.dump(results, rf) def print_results(self): print() - print('Passed:') - for path in sorted(list([ t.path for t in self._passed ])): - print(' ', path) + print("Passed:") + for path in sorted(list([t.path for t in self._passed])): + print(" ", path) print() - print('Failed:') + print("Failed:") causes = [] for cause, tests in sorted(self._failed.items()): - block = ' ' + cause.capitalize() + ':\n' + block = " " + cause.capitalize() + ":\n" for test in sorted(tests, key=lambda t: t.path): - block += ' ' + test.path + block += " " + test.path if test.note: - block += ' - ' + test.note - block += '\n' + block += " - " + test.note + block += "\n" causes.append(block) - print('\n'.join(causes)) + print("\n".join(causes)) def run(self, tests): parser = argparse.ArgumentParser() result_opts = parser.add_mutually_exclusive_group() - result_opts.add_argument('--result-file', action='store_true', - help='Create a results.json file in the current directory.') - result_opts.add_argument('--result-file-at', metavar='PATH', - help='Create a results json file at the given path.') - parser.add_argument('--no-print-results', action='store_true', - help='Don\'t print a list of tests that passed or failed') + result_opts.add_argument( + "--result-file", + action="store_true", + help="Create a results.json file in the current directory.", + ) + result_opts.add_argument( + "--result-file-at", + metavar="PATH", + help="Create a results json file at the given path.", + ) + parser.add_argument( + "--no-print-results", + action="store_true", + help="Don't print a list of tests that passed or failed", + ) args = parser.parse_args(self.args) self.reset_status() @@ -434,7 +480,7 @@ class VerifyPhase(TestPhaseBase): if os.path.exists(test.full_path()): self.passed(test) else: - self.failed(test, 'compile failed') + self.failed(test, "compile failed") for test in runnable: with open(test.returncode_file()) as rc: @@ -446,53 +492,54 
@@ class VerifyPhase(TestPhaseBase): expected_returncode = int(erc.read()) if returncode == 124: - self.failed(test, 'time out') + self.failed(test, "time out") continue elif returncode != expected_returncode: if expected_returncode == 0: - self.failed(test, 'abort') + self.failed(test, "abort") else: - self.failed(test, 'missed abort') + self.failed(test, "missed abort") continue out_dir = test.m5out_dir() - Diff = collections.namedtuple( - 'Diff', 'ref, test, tag, ref_filter') + Diff = collections.namedtuple("Diff", "ref, test, tag, ref_filter") diffs = [] - gd = GoldenDir(test.golden_dir(), 'linux64') + gd = GoldenDir(test.golden_dir(), "linux64") missing = [] - log_file = '.'.join([test.name, 'log']) + log_file = ".".join([test.name, "log"]) log_path = gd.entry(log_file) - simout_path = os.path.join(out_dir, 'simout') + simout_path = os.path.join(out_dir, "simout") if not os.path.exists(simout_path): - missing.append('log output') + missing.append("log output") elif log_path: - diffs.append(LogChecker(log_path, simout_path, - log_file, out_dir)) + diffs.append( + LogChecker(log_path, simout_path, log_file, out_dir) + ) for name in gd.unused(): test_path = os.path.join(out_dir, name) ref_path = gd.entry(name) if not os.path.exists(test_path): missing.append(name) - elif name.endswith('.vcd'): - diffs.append(VcdChecker(ref_path, test_path, - name, out_dir)) + elif name.endswith(".vcd"): + diffs.append( + VcdChecker(ref_path, test_path, name, out_dir) + ) else: diffs.append(Checker(ref_path, test_path, name)) if missing: - self.failed(test, 'missing output', ' '.join(missing)) + self.failed(test, "missing output", " ".join(missing)) continue failed_diffs = list(filter(lambda d: not d.check(), diffs)) if failed_diffs: tags = map(lambda d: d.tag, failed_diffs) - self.failed(test, 'failed diffs', ' '.join(tags)) + self.failed(test, "failed diffs", " ".join(tags)) continue self.passed(test) @@ -504,7 +551,7 @@ class VerifyPhase(TestPhaseBase): result_path = None if 
args.result_file: - result_path = os.path.join(os.getcwd(), 'results.json') + result_path = os.path.join(os.getcwd(), "results.json") elif args.result_file_at: result_path = args.result_file_at @@ -512,97 +559,127 @@ class VerifyPhase(TestPhaseBase): self.write_result_file(result_path) -parser = argparse.ArgumentParser(description='SystemC test utility') +parser = argparse.ArgumentParser(description="SystemC test utility") -parser.add_argument('build_dir', metavar='BUILD_DIR', - help='The build directory (ie. build/ARM).') +parser.add_argument( + "build_dir", + metavar="BUILD_DIR", + help="The build directory (ie. build/ARM).", +) -parser.add_argument('--update-json', action='store_true', - help='Update the json manifest of tests.') +parser.add_argument( + "--update-json", + action="store_true", + help="Update the json manifest of tests.", +) -parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'], - default='opt', - help='Flavor of binary to test.') +parser.add_argument( + "--flavor", + choices=["debug", "opt", "fast"], + default="opt", + help="Flavor of binary to test.", +) -parser.add_argument('--list', action='store_true', - help='List the available tests') +parser.add_argument( + "--list", action="store_true", help="List the available tests" +) -parser.add_argument('-j', type=int, default=1, - help='Default level of parallelism, can be overriden ' - 'for individual stages') +parser.add_argument( + "-j", + type=int, + default=1, + help="Default level of parallelism, can be overriden " + "for individual stages", +) -parser.add_argument('-C', '--scons-dir', metavar='SCONS_DIR', - default=checkout_dir, - help='Directory to run scons from') +parser.add_argument( + "-C", + "--scons-dir", + metavar="SCONS_DIR", + default=checkout_dir, + help="Directory to run scons from", +) filter_opts = parser.add_mutually_exclusive_group() -filter_opts.add_argument('--filter', default='True', - help='Python expression which filters tests based ' - 'on their properties') 
-filter_opts.add_argument('--filter-file', default=None, - type=argparse.FileType('r'), - help='Same as --filter, but read from a file') +filter_opts.add_argument( + "--filter", + default="True", + help="Python expression which filters tests based " "on their properties", +) +filter_opts.add_argument( + "--filter-file", + default=None, + type=argparse.FileType("r"), + help="Same as --filter, but read from a file", +) + def collect_phases(args): - phase_groups = [list(g) for k, g in - itertools.groupby(args, lambda x: x != '--phase') if k] + phase_groups = [ + list(g) + for k, g in itertools.groupby(args, lambda x: x != "--phase") + if k + ] main_args = parser.parse_args(phase_groups[0][1:]) phases = [] names = [] for group in phase_groups[1:]: name = group[0] if name in names: - raise RuntimeException('Phase %s specified more than once' % name) + raise RuntimeException("Phase %s specified more than once" % name) phase = test_phase_classes[name] phases.append(phase(main_args, *group[1:])) phases.sort() return main_args, phases + main_args, phases = collect_phases(sys.argv) if len(phases) == 0: phases = [ CompilePhase(main_args), RunPhase(main_args), - VerifyPhase(main_args) + VerifyPhase(main_args), ] - json_path = os.path.join(main_args.build_dir, json_rel_path) if main_args.update_json: - scons('--directory', main_args.scons_dir, os.path.join(json_path)) + scons("--directory", main_args.scons_dir, os.path.join(json_path)) with open(json_path) as f: test_data = json.load(f) if main_args.filter_file: f = main_args.filter_file - filt = compile(f.read(), f.name, 'eval') + filt = compile(f.read(), f.name, "eval") else: - filt = compile(main_args.filter, '', 'eval') + filt = compile(main_args.filter, "", "eval") filtered_tests = { - target: props for (target, props) in - test_data.items() if eval(filt, dict(props)) + target: props + for (target, props) in test_data.items() + if eval(filt, dict(props)) } if len(filtered_tests) == 0: - print('All tests were filtered 
out.') + print("All tests were filtered out.") exit() if main_args.list: for target, props in sorted(filtered_tests.items()): - print('%s.%s' % (target, main_args.flavor)) + print("%s.%s" % (target, main_args.flavor)) for key, val in props.items(): - print(' %s: %s' % (key, val)) - print('Total tests: %d' % len(filtered_tests)) + print(" %s: %s" % (key, val)) + print("Total tests: %d" % len(filtered_tests)) else: - tests_to_run = list([ - Test(target, main_args.flavor, main_args.build_dir, props) for - target, props in sorted(filtered_tests.items()) - ]) + tests_to_run = list( + [ + Test(target, main_args.flavor, main_args.build_dir, props) + for target, props in sorted(filtered_tests.items()) + ] + ) for phase in phases: phase.run(tests_to_run) diff --git a/src/systemc/tlm_bridge/SConscript b/src/systemc/tlm_bridge/SConscript index 87616bbfbc..2126ef8f08 100644 --- a/src/systemc/tlm_bridge/SConscript +++ b/src/systemc/tlm_bridge/SConscript @@ -37,3 +37,5 @@ Source('gem5_to_tlm.cc') Source('sc_ext.cc') Source('sc_mm.cc') Source('tlm_to_gem5.cc') + +DebugFlag('TlmBridge') diff --git a/src/systemc/tlm_bridge/TlmBridge.py b/src/systemc/tlm_bridge/TlmBridge.py index 9238535e17..546b4c0790 100644 --- a/src/systemc/tlm_bridge/TlmBridge.py +++ b/src/systemc/tlm_bridge/TlmBridge.py @@ -29,106 +29,117 @@ from m5.proxy import * from m5.objects.Tlm import TlmTargetSocket, TlmInitiatorSocket + class Gem5ToTlmBridgeBase(SystemC_ScModule): - type = 'Gem5ToTlmBridgeBase' + type = "Gem5ToTlmBridgeBase" abstract = True - cxx_class = 'sc_gem5::Gem5ToTlmBridgeBase' - cxx_header = 'systemc/tlm_bridge/gem5_to_tlm.hh' + cxx_class = "sc_gem5::Gem5ToTlmBridgeBase" + cxx_header = "systemc/tlm_bridge/gem5_to_tlm.hh" system = Param.System(Parent.any, "system") - gem5 = ResponsePort('gem5 response port') - addr_ranges = VectorParam.AddrRange([], - 'Addresses served by this port\'s TLM side') + gem5 = ResponsePort("gem5 response port") + addr_ranges = VectorParam.AddrRange( + [], "Addresses 
served by this port's TLM side" + ) + class TlmToGem5BridgeBase(SystemC_ScModule): - type = 'TlmToGem5BridgeBase' + type = "TlmToGem5BridgeBase" abstract = True - cxx_class = 'sc_gem5::TlmToGem5BridgeBase' - cxx_header = 'systemc/tlm_bridge/tlm_to_gem5.hh' + cxx_class = "sc_gem5::TlmToGem5BridgeBase" + cxx_header = "systemc/tlm_bridge/tlm_to_gem5.hh" system = Param.System(Parent.any, "system") - gem5 = RequestPort('gem5 request port') + gem5 = RequestPort("gem5 request port") class Gem5ToTlmBridge32(Gem5ToTlmBridgeBase): - type = 'Gem5ToTlmBridge32' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::Gem5ToTlmBridge<32>' - cxx_header = 'systemc/tlm_bridge/gem5_to_tlm.hh' + type = "Gem5ToTlmBridge32" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::Gem5ToTlmBridge<32>" + cxx_header = "systemc/tlm_bridge/gem5_to_tlm.hh" + + tlm = TlmInitiatorSocket(32, "TLM initiator socket") - tlm = TlmInitiatorSocket(32, 'TLM initiator socket') class Gem5ToTlmBridge64(Gem5ToTlmBridgeBase): - type = 'Gem5ToTlmBridge64' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::Gem5ToTlmBridge<64>' - cxx_header = 'systemc/tlm_bridge/gem5_to_tlm.hh' + type = "Gem5ToTlmBridge64" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::Gem5ToTlmBridge<64>" + cxx_header = "systemc/tlm_bridge/gem5_to_tlm.hh" + + tlm = TlmInitiatorSocket(64, "TLM initiator socket") - tlm = TlmInitiatorSocket(64, 'TLM initiator socket') class Gem5ToTlmBridge128(Gem5ToTlmBridgeBase): - type = 'Gem5ToTlmBridge128' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::Gem5ToTlmBridge<128>' - cxx_header = 'systemc/tlm_bridge/gem5_to_tlm.hh' + type = "Gem5ToTlmBridge128" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::Gem5ToTlmBridge<128>" + cxx_header = "systemc/tlm_bridge/gem5_to_tlm.hh" + + tlm = TlmInitiatorSocket(128, "TLM initiator socket") - tlm = TlmInitiatorSocket(128, 'TLM 
initiator socket') class Gem5ToTlmBridge256(Gem5ToTlmBridgeBase): - type = 'Gem5ToTlmBridge256' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::Gem5ToTlmBridge<256>' - cxx_header = 'systemc/tlm_bridge/gem5_to_tlm.hh' + type = "Gem5ToTlmBridge256" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::Gem5ToTlmBridge<256>" + cxx_header = "systemc/tlm_bridge/gem5_to_tlm.hh" + + tlm = TlmInitiatorSocket(256, "TLM initiator socket") - tlm = TlmInitiatorSocket(256, 'TLM initiator socket') class Gem5ToTlmBridge512(Gem5ToTlmBridgeBase): - type = 'Gem5ToTlmBridge512' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::Gem5ToTlmBridge<512>' - cxx_header = 'systemc/tlm_bridge/gem5_to_tlm.hh' + type = "Gem5ToTlmBridge512" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::Gem5ToTlmBridge<512>" + cxx_header = "systemc/tlm_bridge/gem5_to_tlm.hh" - tlm = TlmInitiatorSocket(512, 'TLM initiator socket') + tlm = TlmInitiatorSocket(512, "TLM initiator socket") class TlmToGem5Bridge32(TlmToGem5BridgeBase): - type = 'TlmToGem5Bridge32' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::TlmToGem5Bridge<32>' - cxx_header = 'systemc/tlm_bridge/tlm_to_gem5.hh' + type = "TlmToGem5Bridge32" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::TlmToGem5Bridge<32>" + cxx_header = "systemc/tlm_bridge/tlm_to_gem5.hh" + + tlm = TlmTargetSocket(32, "TLM target socket") - tlm = TlmTargetSocket(32, 'TLM target socket') class TlmToGem5Bridge64(TlmToGem5BridgeBase): - type = 'TlmToGem5Bridge64' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::TlmToGem5Bridge<64>' - cxx_header = 'systemc/tlm_bridge/tlm_to_gem5.hh' + type = "TlmToGem5Bridge64" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::TlmToGem5Bridge<64>" + cxx_header = "systemc/tlm_bridge/tlm_to_gem5.hh" + + tlm = TlmTargetSocket(64, "TLM target socket") - 
tlm = TlmTargetSocket(64, 'TLM target socket') class TlmToGem5Bridge128(TlmToGem5BridgeBase): - type = 'TlmToGem5Bridge128' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::TlmToGem5Bridge<128>' - cxx_header = 'systemc/tlm_bridge/tlm_to_gem5.hh' + type = "TlmToGem5Bridge128" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::TlmToGem5Bridge<128>" + cxx_header = "systemc/tlm_bridge/tlm_to_gem5.hh" + + tlm = TlmTargetSocket(128, "TLM target socket") - tlm = TlmTargetSocket(128, 'TLM target socket') class TlmToGem5Bridge256(TlmToGem5BridgeBase): - type = 'TlmToGem5Bridge256' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::TlmToGem5Bridge<256>' - cxx_header = 'systemc/tlm_bridge/tlm_to_gem5.hh' + type = "TlmToGem5Bridge256" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::TlmToGem5Bridge<256>" + cxx_header = "systemc/tlm_bridge/tlm_to_gem5.hh" + + tlm = TlmTargetSocket(256, "TLM target socket") - tlm = TlmTargetSocket(256, 'TLM target socket') class TlmToGem5Bridge512(TlmToGem5BridgeBase): - type = 'TlmToGem5Bridge512' - cxx_template_params = [ 'unsigned int BITWIDTH' ] - cxx_class = 'sc_gem5::TlmToGem5Bridge<512>' - cxx_header = 'systemc/tlm_bridge/tlm_to_gem5.hh' + type = "TlmToGem5Bridge512" + cxx_template_params = ["unsigned int BITWIDTH"] + cxx_class = "sc_gem5::TlmToGem5Bridge<512>" + cxx_header = "systemc/tlm_bridge/tlm_to_gem5.hh" - tlm = TlmTargetSocket(512, 'TLM target socket') + tlm = TlmTargetSocket(512, "TLM target socket") diff --git a/src/systemc/tlm_bridge/gem5_to_tlm.cc b/src/systemc/tlm_bridge/gem5_to_tlm.cc index 37da822499..10f7d1a9c7 100644 --- a/src/systemc/tlm_bridge/gem5_to_tlm.cc +++ b/src/systemc/tlm_bridge/gem5_to_tlm.cc @@ -112,7 +112,8 @@ std::vector extraPacketToPayloadSteps; * gem5 packet to tlm payload. This can be useful when there exists a SystemC * extension that requires information in gem5 packet. 
For example, if a user * defined a SystemC extension the carries stream_id, the user may add a step - * here to read stream_id out and set the extension properly. + * here to read stream_id out and set the extension properly. Steps should be + * idempotent. */ void addPacketToPayloadConversionStep(PacketToPayloadConversionStep step) @@ -121,13 +122,33 @@ addPacketToPayloadConversionStep(PacketToPayloadConversionStep step) } /** - * Convert a gem5 packet to a TLM payload by copying all the relevant - * information to new tlm payload. + * Convert a gem5 packet to TLM payload by copying all the relevant information + * to new payload. If the transaction is initiated by TLM model, we would use + * the original payload. + * The return value is the payload pointer. */ tlm::tlm_generic_payload * packet2payload(PacketPtr packet) { - tlm::tlm_generic_payload *trans = mm.allocate(); + tlm::tlm_generic_payload *trans = nullptr; + auto *tlmSenderState = + packet->findNextSenderState(); + + // If there is a SenderState, we can pipe through the original transaction. + // Otherwise, we generate a new transaction based on the packet. + if (tlmSenderState != nullptr) { + // Sync the address which could have changed. + trans = &tlmSenderState->trans; + trans->set_address(packet->getAddr()); + trans->acquire(); + // Apply all conversion steps necessary in this specific setup. 
+ for (auto &step : extraPacketToPayloadSteps) { + step(packet, *trans); + } + return trans; + } + + trans = mm.allocate(); trans->acquire(); trans->set_address(packet->getAddr()); @@ -173,6 +194,24 @@ packet2payload(PacketPtr packet) return trans; } +void +setPacketResponse(PacketPtr pkt, tlm::tlm_generic_payload &trans) +{ + pkt->makeResponse(); + + auto resp = trans.get_response_status(); + switch (resp) { + case tlm::TLM_OK_RESPONSE: + break; + case tlm::TLM_COMMAND_ERROR_RESPONSE: + pkt->setBadCommand(); + break; + default: + pkt->setBadAddress(); + break; + } +} + template void Gem5ToTlmBridge::pec( @@ -204,7 +243,7 @@ Gem5ToTlmBridge::pec( // we make a response packet before sending it back to the initiator // side gem5 module. if (packet->needsResponse()) { - packet->makeResponse(); + setPacketResponse(packet, trans); } if (packet->isResponse()) { need_retry = !bridgeResponsePort.sendTimingResp(packet); @@ -275,7 +314,7 @@ Gem5ToTlmBridge::recvAtomic(PacketPtr packet) } if (packet->needsResponse()) - packet->makeResponse(); + setPacketResponse(packet, *trans); trans->release(); @@ -307,6 +346,7 @@ Gem5ToTlmBridge::recvAtomicBackdoor( backdoor = getBackdoor(*trans); } + // Always set success response in Backdoor case. 
if (packet->needsResponse()) packet->makeResponse(); diff --git a/src/systemc/tlm_bridge/sc_ext.cc b/src/systemc/tlm_bridge/sc_ext.cc index ba4078ae00..4d12fb3d9f 100644 --- a/src/systemc/tlm_bridge/sc_ext.cc +++ b/src/systemc/tlm_bridge/sc_ext.cc @@ -34,23 +34,78 @@ #include "systemc/tlm_bridge/sc_ext.hh" #include "systemc/ext/utils/sc_report_handler.hh" +#include "systemc/tlm_bridge/gem5_to_tlm.hh" +#include "systemc/tlm_bridge/tlm_to_gem5.hh" using namespace gem5; namespace Gem5SystemC { - -Gem5Extension::Gem5Extension(PacketPtr _packet) +namespace +{ + +struct ControlConversionRegister +{ + ControlConversionRegister() + { + sc_gem5::addPayloadToPacketConversionStep( + [] (PacketPtr pkt, tlm::tlm_generic_payload &trans) + { + ControlExtension *control_ex = nullptr; + trans.get_extension(control_ex); + if (!control_ex) { + return; + } + + if (control_ex->isPrivileged()) { + pkt->req->setFlags(Request::PRIVILEGED); + } else { + pkt->req->clearFlags(Request::PRIVILEGED); + } + + if (control_ex->isSecure()) { + pkt->req->setFlags(Request::SECURE); + } else { + pkt->req->clearFlags(Request::SECURE); + } + + if (control_ex->isInstruction()) { + pkt->req->setFlags(Request::INST_FETCH); + } else { + pkt->req->clearFlags(Request::INST_FETCH); + } + + pkt->qosValue(control_ex->getQos()); + }); + sc_gem5::addPacketToPayloadConversionStep( + [] (PacketPtr pkt, tlm::tlm_generic_payload &trans) + { + ControlExtension *control_ex = nullptr; + trans.get_extension(control_ex); + if (!control_ex) { + return; + } + + control_ex->setPrivileged(pkt->req->isPriv()); + control_ex->setSecure(pkt->req->isSecure()); + control_ex->setInstruction(pkt->req->isInstFetch()); + control_ex->setQos(pkt->qosValue()); + }); + } +}; + +} // namespace + +Gem5Extension::Gem5Extension(PacketPtr p) : packet(p) { - packet = _packet; } Gem5Extension & Gem5Extension::getExtension(const tlm::tlm_generic_payload *payload) { - Gem5Extension *result = NULL; + Gem5Extension *result = nullptr; 
payload->get_extension(result); - sc_assert(result != NULL); + sc_assert(result != nullptr); return *result; } @@ -75,13 +130,13 @@ Gem5Extension::clone() const void Gem5Extension::copy_from(const tlm::tlm_extension_base &ext) { - const Gem5Extension &cpyFrom = static_cast(ext); - packet = cpyFrom.packet; + const Gem5Extension &from = static_cast(ext); + packet = from.packet; } AtomicExtension::AtomicExtension( - std::shared_ptr amo_op, bool need_return) - : _op(amo_op), _needReturn(need_return) + std::shared_ptr o, bool r) + : op(o), returnRequired(r) { } @@ -114,15 +169,98 @@ AtomicExtension::getExtension(const tlm::tlm_generic_payload *payload) } bool -AtomicExtension::needReturn() const +AtomicExtension::isReturnRequired() const { - return _needReturn; + return returnRequired; } gem5::AtomicOpFunctor* AtomicExtension::getAtomicOpFunctor() const { - return _op.get(); + return op.get(); +} + +ControlExtension::ControlExtension() + : privileged(false), secure(false), instruction(false), qos(0) +{ + [[maybe_unused]] static ControlConversionRegister *conversion_register = + new ControlConversionRegister(); +} + +tlm::tlm_extension_base * +ControlExtension::clone() const +{ + return new ControlExtension(*this); +} + +void +ControlExtension::copy_from(const tlm::tlm_extension_base &ext) +{ + const ControlExtension &from = static_cast(ext); + *this = from; +} + +ControlExtension & +ControlExtension::getExtension(const tlm::tlm_generic_payload &payload) +{ + return ControlExtension::getExtension(&payload); +} + +ControlExtension & +ControlExtension::getExtension(const tlm::tlm_generic_payload *payload) +{ + ControlExtension *result = nullptr; + payload->get_extension(result); + sc_assert(result); + return *result; +} + +bool +ControlExtension::isPrivileged() const +{ + return privileged; +} + +void +ControlExtension::setPrivileged(bool p) +{ + privileged = p; +} + +bool +ControlExtension::isSecure() const +{ + return secure; +} + +void +ControlExtension::setSecure(bool 
s) +{ + secure = s; +} + +bool +ControlExtension::isInstruction() const +{ + return instruction; +} + +void +ControlExtension::setInstruction(bool i) +{ + instruction = i; +} + +uint8_t +ControlExtension::getQos() const +{ + return qos; +} + +void +ControlExtension::setQos(uint8_t q) +{ + qos = q; } } // namespace Gem5SystemC diff --git a/src/systemc/tlm_bridge/sc_ext.hh b/src/systemc/tlm_bridge/sc_ext.hh index 25e6a1cc04..bb676761ce 100644 --- a/src/systemc/tlm_bridge/sc_ext.hh +++ b/src/systemc/tlm_bridge/sc_ext.hh @@ -34,6 +34,7 @@ #ifndef __SYSTEMC_TLM_BRIDGE_SC_EXT_HH__ #define __SYSTEMC_TLM_BRIDGE_SC_EXT_HH__ +#include #include #include "base/amo.hh" @@ -43,13 +44,19 @@ namespace Gem5SystemC { +struct TlmSenderState : public gem5::Packet::SenderState +{ + tlm::tlm_generic_payload &trans; + TlmSenderState(tlm::tlm_generic_payload &trans) : trans(trans) {} +}; + class Gem5Extension: public tlm::tlm_extension { public: - Gem5Extension(gem5::PacketPtr _packet); + Gem5Extension(gem5::PacketPtr p); - virtual tlm_extension_base *clone() const; - virtual void copy_from(const tlm_extension_base &ext); + tlm_extension_base *clone() const override; + void copy_from(const tlm_extension_base &ext) override; static Gem5Extension &getExtension( const tlm::tlm_generic_payload *payload); @@ -65,22 +72,57 @@ class AtomicExtension: public tlm::tlm_extension { public: AtomicExtension( - std::shared_ptr amo_op, bool need_return); + std::shared_ptr o, bool r); - virtual tlm_extension_base *clone() const; - virtual void copy_from(const tlm_extension_base &ext); + tlm_extension_base *clone() const override; + void copy_from(const tlm_extension_base &ext) override; static AtomicExtension &getExtension( const tlm::tlm_generic_payload *payload); static AtomicExtension &getExtension( const tlm::tlm_generic_payload &payload); - bool needReturn() const; + bool isReturnRequired() const; gem5::AtomicOpFunctor* getAtomicOpFunctor() const; private: - std::shared_ptr _op; - bool _needReturn; + 
std::shared_ptr op; + bool returnRequired; +}; + +class ControlExtension : public tlm::tlm_extension +{ + public: + ControlExtension(); + + tlm_extension_base *clone() const override; + void copy_from(const tlm_extension_base &ext) override; + + static ControlExtension &getExtension( + const tlm::tlm_generic_payload *payload); + static ControlExtension &getExtension( + const tlm::tlm_generic_payload &payload); + + /* Secure and privileged access */ + bool isPrivileged() const; + void setPrivileged(bool p); + bool isSecure() const; + void setSecure(bool s); + bool isInstruction() const; + void setInstruction(bool i); + + /* Quality of Service (AXI4) */ + uint8_t getQos() const; + void setQos(uint8_t q); + + private: + /* Secure and privileged access */ + bool privileged; + bool secure; + bool instruction; + + /* Quality of Service (AXI4) */ + uint8_t qos; }; } // namespace Gem5SystemC diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.cc b/src/systemc/tlm_bridge/tlm_to_gem5.cc index 2b9ced8783..703e118dee 100644 --- a/src/systemc/tlm_bridge/tlm_to_gem5.cc +++ b/src/systemc/tlm_bridge/tlm_to_gem5.cc @@ -59,11 +59,13 @@ #include -#include "params/TlmToGem5Bridge32.hh" -#include "params/TlmToGem5Bridge64.hh" +#include "base/trace.hh" +#include "debug/TlmBridge.hh" #include "params/TlmToGem5Bridge128.hh" #include "params/TlmToGem5Bridge256.hh" +#include "params/TlmToGem5Bridge32.hh" #include "params/TlmToGem5Bridge512.hh" +#include "params/TlmToGem5Bridge64.hh" #include "sim/core.hh" #include "sim/system.hh" #include "systemc/ext/core/sc_module_name.hh" @@ -87,7 +89,8 @@ std::vector extraPayloadToPacketSteps; * tlm payload to gem5 packet. This can be useful when there exists a SystemC * extension that carries extra information. For example, SystemC user might * define an extension to store stream_id, the user may then add an extra step - * to set the generated request's stream_id accordingly. + * to set the generated request's stream_id accordingly. 
Steps should be + * idempotent. */ void addPayloadToPacketConversionStep(PayloadToPacketConversionStep step) @@ -95,9 +98,33 @@ addPayloadToPacketConversionStep(PayloadToPacketConversionStep step) extraPayloadToPacketSteps.push_back(std::move(step)); } -PacketPtr +/** + * Convert a TLM payload to gem5 packet by copying all the relevant information + * to new packet. If the transaction is initiated by gem5 model, we would use + * the original packet. + * The first return value is the packet pointer. + * The second return value is if the packet is newly created. + */ +std::pair payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans) { + Gem5SystemC::Gem5Extension *extension = nullptr; + trans.get_extension(extension); + + // If there is an extension, this transaction was initiated by the gem5 + // world and we can pipe through the original packet. Otherwise, we + // generate a new packet based on the transaction. + if (extension != nullptr) { + auto pkt = extension->getPacket(); + // Sync the address which could have changed. + pkt->setAddr(trans.get_address()); + // Apply all conversion steps necessary in this specific setup. + for (auto &step : extraPayloadToPacketSteps) { + step(pkt, trans); + } + return std::make_pair(pkt, false); + } + MemCmd cmd; RequestPtr req; @@ -105,7 +132,7 @@ payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans) trans.get_extension(atomic_ex); if (atomic_ex) { cmd = MemCmd::SwapReq; - Request::Flags flags = (atomic_ex->needReturn() ? + Request::Flags flags = (atomic_ex->isReturnRequired() ? 
Request::ATOMIC_RETURN_OP : Request::ATOMIC_NO_RETURN_OP); AtomicOpFunctorPtr amo_op = AtomicOpFunctorPtr( @@ -124,7 +151,7 @@ payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans) cmd = MemCmd::WriteReq; break; case tlm::TLM_IGNORE_COMMAND: - return nullptr; + return std::make_pair(nullptr, false); default: SC_REPORT_FATAL("TlmToGem5Bridge", "received transaction with unsupported " @@ -148,7 +175,19 @@ payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans) step(pkt, trans); } - return pkt; + return std::make_pair(pkt, true); +} + +void +setPayloadResponse(tlm::tlm_generic_payload &trans, PacketPtr pkt) +{ + if (!pkt->isError()) { + trans.set_response_status(tlm::TLM_OK_RESPONSE); + } else if (pkt->isRead() || pkt->isWrite()) { + trans.set_response_status(tlm::TLM_COMMAND_ERROR_RESPONSE); + } else { + trans.set_response_status(tlm::TLM_ADDRESS_ERROR_RESPONSE); + } } template @@ -168,9 +207,15 @@ void TlmToGem5Bridge::sendBeginResp(tlm::tlm_generic_payload &trans, sc_core::sc_time &delay) { - tlm::tlm_phase phase = tlm::BEGIN_RESP; + Gem5SystemC::Gem5Extension *extension = nullptr; + trans.get_extension(extension); + panic_if(extension == nullptr, + "Missing gem5 extension when sending BEGIN_RESP"); + auto pkt = extension->getPacket(); - trans.set_response_status(tlm::TLM_OK_RESPONSE); + setPayloadResponse(trans, pkt); + + tlm::tlm_phase phase = tlm::BEGIN_RESP; auto status = socket->nb_transport_bw(trans, phase, delay); @@ -196,22 +241,9 @@ TlmToGem5Bridge::handleBeginReq(tlm::tlm_generic_payload &trans) trans.acquire(); - PacketPtr pkt = nullptr; - - Gem5SystemC::Gem5Extension *extension = nullptr; - trans.get_extension(extension); - - // If there is an extension, this transaction was initiated by the gem5 - // world and we can pipe through the original packet. Otherwise, we - // generate a new packet based on the transaction. 
- if (extension != nullptr) { - pkt = extension->getPacket(); - } else { - pkt = payload2packet(_id, trans); - } - - auto tlmSenderState = new TlmSenderState(trans); - pkt->pushSenderState(tlmSenderState); + auto res = payload2packet(_id, trans); + auto pkt = res.first; + pkt->pushSenderState(new Gem5SystemC::TlmSenderState(trans)); // If the packet doesn't need a response, we should send BEGIN_RESP by // ourselves. @@ -238,8 +270,6 @@ TlmToGem5Bridge::handleEndResp(tlm::tlm_generic_payload &trans) responseInProgress = false; - checkTransaction(trans); - if (needToSendRetry) { bmp.sendRetryResp(); needToSendRetry = false; @@ -253,24 +283,13 @@ TlmToGem5Bridge::destroyPacket(PacketPtr pkt) delete pkt; } -template -void -TlmToGem5Bridge::checkTransaction(tlm::tlm_generic_payload &trans) -{ - if (trans.is_response_error()) { - std::stringstream ss; - ss << "Transaction returned with error, response status = " - << trans.get_response_string(); - SC_REPORT_ERROR("TLM-2", ss.str().c_str()); - } -} - template void TlmToGem5Bridge::invalidateDmi(const gem5::MemBackdoor &backdoor) { socket->invalidate_direct_mem_ptr( backdoor.range().start(), backdoor.range().end()); + requestedBackdoors.erase(const_cast(&backdoor)); } template @@ -321,18 +340,8 @@ void TlmToGem5Bridge::b_transport(tlm::tlm_generic_payload &trans, sc_core::sc_time &t) { - Gem5SystemC::Gem5Extension *extension = nullptr; - trans.get_extension(extension); - - PacketPtr pkt = nullptr; - - // If there is an extension, this transaction was initiated by the gem5 - // world and we can pipe through the original packet. 
- if (extension != nullptr) { - pkt = extension->getPacket(); - } else { - pkt = payload2packet(_id, trans); - } + auto [pkt, pkt_created] = payload2packet(_id, trans); + pkt->pushSenderState(new Gem5SystemC::TlmSenderState(trans)); MemBackdoorPtr backdoor = nullptr; Tick ticks = bmp.sendAtomicBackdoor(pkt, backdoor); @@ -350,29 +359,38 @@ TlmToGem5Bridge::b_transport(tlm::tlm_generic_payload &trans, // update time t += delay; - if (extension == nullptr) - destroyPacket(pkt); + gem5::Packet::SenderState *senderState = pkt->popSenderState(); + sc_assert( + nullptr != dynamic_cast(senderState)); - trans.set_response_status(tlm::TLM_OK_RESPONSE); + // clean up + delete senderState; + + setPayloadResponse(trans, pkt); + + if (pkt_created) + destroyPacket(pkt); } template unsigned int TlmToGem5Bridge::transport_dbg(tlm::tlm_generic_payload &trans) { - Gem5SystemC::Gem5Extension *extension = nullptr; - trans.get_extension(extension); + auto [pkt, pkt_created] = payload2packet(_id, trans); + if (pkt != nullptr) { + pkt->pushSenderState(new Gem5SystemC::TlmSenderState(trans)); - // If there is an extension, this transaction was initiated by the gem5 - // world and we can pipe through the original packet. 
- if (extension != nullptr) { - bmp.sendFunctional(extension->getPacket()); - } else { - auto pkt = payload2packet(_id, trans); - if (pkt) { - bmp.sendFunctional(pkt); + bmp.sendFunctional(pkt); + + gem5::Packet::SenderState *senderState = pkt->popSenderState(); + sc_assert( + nullptr != dynamic_cast(senderState)); + + // clean up + delete senderState; + + if (pkt_created) destroyPacket(pkt); - } } return trans.get_data_length(); @@ -383,19 +401,10 @@ bool TlmToGem5Bridge::get_direct_mem_ptr(tlm::tlm_generic_payload &trans, tlm::tlm_dmi &dmi_data) { - Gem5SystemC::Gem5Extension *extension = nullptr; - trans.get_extension(extension); - - PacketPtr pkt = nullptr; - - // If there is an extension, this transaction was initiated by the gem5 - // world and we can pipe through the original packet. - if (extension != nullptr) { - pkt = extension->getPacket(); - } else { - pkt = payload2packet(_id, trans); + auto [pkt, pkt_created] = payload2packet(_id, trans); + pkt->pushSenderState(new Gem5SystemC::TlmSenderState(trans)); + if (pkt_created) pkt->req->setFlags(Request::NO_ACCESS); - } MemBackdoorPtr backdoor = nullptr; bmp.sendAtomicBackdoor(pkt, backdoor); @@ -413,18 +422,29 @@ TlmToGem5Bridge::get_direct_mem_ptr(tlm::tlm_generic_payload &trans, access = (access_t)(access | tlm::tlm_dmi::DMI_ACCESS_WRITE); dmi_data.set_granted_access(access); - backdoor->addInvalidationCallback( - [this](const MemBackdoor &backdoor) - { - invalidateDmi(backdoor); - } - ); + // We only need to register the callback at the first time. 
+ if (requestedBackdoors.find(backdoor) == requestedBackdoors.end()) { + backdoor->addInvalidationCallback( + [this](const MemBackdoor &backdoor) + { + invalidateDmi(backdoor); + } + ); + requestedBackdoors.emplace(backdoor); + } } - if (extension == nullptr) - destroyPacket(pkt); + gem5::Packet::SenderState *senderState = pkt->popSenderState(); + sc_assert( + nullptr != dynamic_cast(senderState)); - trans.set_response_status(tlm::TLM_OK_RESPONSE); + // clean up + delete senderState; + + setPayloadResponse(trans, pkt); + + if (pkt_created) + destroyPacket(pkt); return backdoor != nullptr; } @@ -453,7 +473,8 @@ TlmToGem5Bridge::recvTimingResp(PacketPtr pkt) pkt->payloadDelay = 0; pkt->headerDelay = 0; - auto tlmSenderState = dynamic_cast(pkt->popSenderState()); + auto *tlmSenderState = + dynamic_cast(pkt->popSenderState()); sc_assert(tlmSenderState != nullptr); auto &trans = tlmSenderState->trans; @@ -506,8 +527,8 @@ template void TlmToGem5Bridge::recvRangeChange() { - SC_REPORT_WARNING("TlmToGem5Bridge", - "received address range change but ignored it"); + DPRINTF(TlmBridge, + "received address range change but ignored it"); } template @@ -549,11 +570,11 @@ TlmToGem5Bridge::before_end_of_elaboration() * NOTE: The mode may change during execution. 
*/ if (system->isTimingMode()) { - SC_REPORT_INFO("TlmToGem5Bridge", "register non-blocking interface"); + DPRINTF(TlmBridge, "register non-blocking interface"); socket.register_nb_transport_fw( this, &TlmToGem5Bridge::nb_transport_fw); } else if (system->isAtomicMode()) { - SC_REPORT_INFO("TlmToGem5Bridge", "register blocking interface"); + DPRINTF(TlmBridge, "register blocking interface"); socket.register_b_transport( this, &TlmToGem5Bridge::b_transport); socket.register_get_direct_mem_ptr( diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.hh b/src/systemc/tlm_bridge/tlm_to_gem5.hh index b0fe62af08..ca5f681c9c 100644 --- a/src/systemc/tlm_bridge/tlm_to_gem5.hh +++ b/src/systemc/tlm_bridge/tlm_to_gem5.hh @@ -59,6 +59,8 @@ #define __SYSTEMC_TLM_BRIDGE_TLM_TO_GEM5_HH__ #include +#include +#include #include "mem/port.hh" #include "params/TlmToGem5BridgeBase.hh" @@ -78,7 +80,7 @@ using PayloadToPacketConversionStep = void addPayloadToPacketConversionStep(PayloadToPacketConversionStep step); -gem5::PacketPtr payload2packet(gem5::RequestorID _id, +std::pair payload2packet(gem5::RequestorID _id, tlm::tlm_generic_payload &trans); class TlmToGem5BridgeBase : public sc_core::sc_module @@ -91,12 +93,6 @@ template class TlmToGem5Bridge : public TlmToGem5BridgeBase { private: - struct TlmSenderState : public gem5::Packet::SenderState - { - tlm::tlm_generic_payload &trans; - TlmSenderState(tlm::tlm_generic_payload &trans) : trans(trans) {} - }; - class BridgeRequestPort : public gem5::RequestPort { protected: @@ -127,6 +123,8 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase bool responseInProgress; + std::unordered_set requestedBackdoors; + BridgeRequestPort bmp; tlm_utils::simple_target_socket< TlmToGem5Bridge, BITWIDTH> socket; @@ -143,8 +141,6 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase void destroyPacket(gem5::PacketPtr pkt); - void checkTransaction(tlm::tlm_generic_payload &trans); - void invalidateDmi(const gem5::MemBackdoor &backdoor); protected: diff --git 
a/tests/compiler-tests.sh b/tests/compiler-tests.sh index 354444c50c..57938283b5 100755 --- a/tests/compiler-tests.sh +++ b/tests/compiler-tests.sh @@ -13,11 +13,14 @@ build_dir="${gem5_root}/build" docker_mem_limit="18g" # All Docker images in the gem5 testing GCR which we want to compile with. -images=("gcc-version-11" +images=("gcc-version-12" + "gcc-version-11" "gcc-version-10" "gcc-version-9" "gcc-version-8" "gcc-version-7" + "clang-version-14" + "clang-version-13" "clang-version-12" "clang-version-11" "clang-version-10" @@ -25,20 +28,22 @@ images=("gcc-version-11" "clang-version-8" "clang-version-7" "clang-version-6.0" - # The following checks our support for Ubuntu 18.04 and 20.04, for both our - # "minimum dependencies" and "all dependencies" docker images. + # The following checks our support for Ubuntu 18.04, 20.04, and 22.04. "ubuntu-18.04_all-dependencies" "ubuntu-20.04_all-dependencies" - "ubuntu-20.04_min-dependencies" + "ubuntu-22.04_all-dependencies" + # Here we test the minimum dependency scenario. + "ubuntu-22.04_min-dependencies" ) # A subset of the above list: these images will build against every target, # ignoring builds_per_compiler. -comprehensive=("gcc-version-11" - "clang-version-12") +comprehensive=("gcc-version-12" + "clang-version-14") # All build targets in build_opt/ which we want to build using each image. 
-builds=("ARM" +builds=("ALL" + "ARM" "ARM_MESI_Three_Level" "ARM_MESI_Three_Level_HTM" "ARM_MOESI_hammer" @@ -103,7 +108,7 @@ for compiler in ${images[@]}; do # targets for this test build_indices=(${build_permutation[@]:0:$builds_count}) - repo_name="${base_url}/${compiler}:v22-0" + repo_name="${base_url}/${compiler}:v22-1" # Grab compiler image docker pull $repo_name >/dev/null @@ -129,7 +134,7 @@ for compiler in ${images[@]}; do { docker run --rm -v "${gem5_root}":"/gem5" -u $UID:$GID \ -w /gem5 --memory="${docker_mem_limit}" $repo_name \ - /usr/bin/env python3 /usr/bin/scons \ + /usr/bin/env python3 /usr/bin/scons --ignore-style \ "${build_out}" "${build_args}" }>"${build_stdout}" 2>"${build_stderr}" result=$? diff --git a/tests/configs/dram-lowp.py b/tests/configs/dram-lowp.py index f967c7ff03..a2a0ce37b7 100644 --- a/tests/configs/dram-lowp.py +++ b/tests/configs/dram-lowp.py @@ -43,16 +43,18 @@ root = None import m5 + def run_test(root): - # Called from tests/run.py + # Called from tests/run.py - import sys - argv = [ - sys.argv[0], - # Add a specific page policy and specify the number of ranks - '-p%s' % page_policy, - '-r 2', - ] + import sys - # Execute the script we are wrapping - run_config('configs/dram/low_power_sweep.py', argv=argv) + argv = [ + sys.argv[0], + # Add a specific page policy and specify the number of ranks + "-p%s" % page_policy, + "-r 2", + ] + + # Execute the script we are wrapping + run_config("configs/dram/low_power_sweep.py", argv=argv) diff --git a/tests/configs/gpu-randomtest-ruby.py b/tests/configs/gpu-randomtest-ruby.py index ff4071ccf3..ceede7f500 100644 --- a/tests/configs/gpu-randomtest-ruby.py +++ b/tests/configs/gpu-randomtest-ruby.py @@ -34,7 +34,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -m5.util.addToPath('../configs/') +m5.util.addToPath("../configs/") from ruby import Ruby from common import Options @@ -43,16 +43,31 @@ parser = argparse.ArgumentParser() 
Options.addCommonOptions(parser) # add the gpu specific options expected by the the gpu and gpu_RfO -parser.add_argument("-u", "--num-compute-units", type=int, default=8, - help="number of compute units in the GPU") -parser.add_argument("--num-cp", type=int, default=0, - help="Number of GPU Command Processors (CP)") -parser.add_argument("--simds-per-cu", type=int, default=4, help="SIMD units" \ - "per CU") -parser.add_argument("--wf-size", type=int, default=64, - help="Wavefront size(in workitems)") -parser.add_argument("--wfs-per-simd", type=int, default=10, help="Number of " \ - "WF slots per SIMD") +parser.add_argument( + "-u", + "--num-compute-units", + type=int, + default=8, + help="number of compute units in the GPU", +) +parser.add_argument( + "--num-cp", + type=int, + default=0, + help="Number of GPU Command Processors (CP)", +) +parser.add_argument( + "--simds-per-cu", type=int, default=4, help="SIMD units" "per CU" +) +parser.add_argument( + "--wf-size", type=int, default=64, help="Wavefront size(in workitems)" +) +parser.add_argument( + "--wfs-per-simd", + type=int, + default=10, + help="Number of " "WF slots per SIMD", +) # Add the ruby specific and protocol specific options Ruby.define_options(parser) @@ -63,49 +78,55 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. # -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 -args.num_compute_units=8 -args.num_sqc=2 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 +args.num_compute_units = 8 +args.num_sqc = 2 # Check to for the GPU_RfO protocol. Other GPU protocols are non-SC and will # not work with the Ruby random tester. 
-assert(buildEnv['PROTOCOL'] == 'GPU_RfO') +assert buildEnv["PROTOCOL"] == "GPU_RfO" # # create the tester and system, including ruby # -tester = RubyTester(check_flush = False, checks_to_complete = 100, - wakeup_frequency = 10, num_cpus = args.num_cpus) +tester = RubyTester( + check_flush=False, + checks_to_complete=100, + wakeup_frequency=10, + num_cpus=args.num_cpus, +) # We set the testers as cpu for ruby to find the correct clock domains # for the L1 Objects. -system = System(cpu = tester) +system = System(cpu=tester) # Dummy voltage domain for all our clock domains -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) -system.mem_ranges = AddrRange('256MB') +system.mem_ranges = AddrRange("256MB") # the ruby tester reuses num_cpus to specify the # number of cpu ports connected to the tester object, which # is stored in system.cpu. 
because there is only ever one # tester object, num_cpus is not necessarily equal to the # size of system.cpu -cpu_list = [ system.cpu ] * args.num_cpus +cpu_list = [system.cpu] * args.num_cpus Ruby.create_system(args, False, system, cpus=cpu_list) # Create a separate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) tester.num_cpus = len(system.ruby._cpu_ports) @@ -139,5 +160,5 @@ for ruby_port in system.ruby._cpu_ports: # run simulation # ----------------------- -root = Root(full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/gpu-ruby.py b/tests/configs/gpu-ruby.py index 391b98d62a..e45c446373 100644 --- a/tests/configs/gpu-ruby.py +++ b/tests/configs/gpu-ruby.py @@ -34,12 +34,13 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys, math, glob -m5.util.addToPath('../configs/') +m5.util.addToPath("../configs/") from ruby import Ruby from common import Options from common import GPUTLBOptions, GPUTLBConfig + def run_test(root): """gpu test requires a specialized run_test implementation to set up the mmio space.""" @@ -52,104 +53,168 @@ def run_test(root): # simulate until program terminates exit_event = m5.simulate(maxtick) - print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) + print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) + parser = argparse.ArgumentParser() Options.addCommonOptions(parser) Options.addSEOptions(parser) parser.add_argument( - "-k", "--kernel-files", - help="file(s) containing GPU kernel code (colon separated)") + "-k", + "--kernel-files", + help="file(s) containing GPU kernel code (colon separated)", +) parser.add_argument( - "-u", "--num-compute-units", type=int, default=2, 
- help="number of GPU compute units"), + "-u", + "--num-compute-units", + type=int, + default=2, + help="number of GPU compute units", +), parser.add_argument( - "--num-cp", type=int, default=0, - help="Number of GPU Command Processors (CP)") + "--num-cp", + type=int, + default=0, + help="Number of GPU Command Processors (CP)", +) parser.add_argument( - "--simds-per-cu", type=int, default=4, help="SIMD units" \ - "per CU") + "--simds-per-cu", type=int, default=4, help="SIMD units" "per CU" +) parser.add_argument( - "--cu-per-sqc", type=int, default=4, help="number of CUs" \ - "sharing an SQC (icache, and thus icache TLB)") + "--cu-per-sqc", + type=int, + default=4, + help="number of CUs" "sharing an SQC (icache, and thus icache TLB)", +) parser.add_argument( - "--wf-size", type=int, default=64, - help="Wavefront size(in workitems)") + "--wf-size", type=int, default=64, help="Wavefront size(in workitems)" +) parser.add_argument( - "--wfs-per-simd", type=int, default=8, help="Number of " \ - "WF slots per SIMD") + "--wfs-per-simd", + type=int, + default=8, + help="Number of " "WF slots per SIMD", +) parser.add_argument( - "--sp-bypass-path-length", type=int, default=4, + "--sp-bypass-path-length", + type=int, + default=4, help="Number of stages of bypass path in vector ALU for Single " - "Precision ops") + "Precision ops", +) parser.add_argument( - "--dp-bypass-path-length", type=int, default=4, + "--dp-bypass-path-length", + type=int, + default=4, help="Number of stages of bypass path in vector ALU for Double " - "Precision ops") + "Precision ops", +) parser.add_argument( - "--issue-period", type=int, default=4, - help="Number of cycles per vector instruction issue period") + "--issue-period", + type=int, + default=4, + help="Number of cycles per vector instruction issue period", +) parser.add_argument( - "--glbmem-wr-bus-width", type=int, default=32, - help="VGPR to Coalescer (Global Memory) data bus width in bytes") + "--glbmem-wr-bus-width", + type=int, + 
default=32, + help="VGPR to Coalescer (Global Memory) data bus width in bytes", +) parser.add_argument( - "--glbmem-rd-bus-width", type=int, default=32, - help="Coalescer to VGPR (Global Memory) data bus width in bytes") + "--glbmem-rd-bus-width", + type=int, + default=32, + help="Coalescer to VGPR (Global Memory) data bus width in bytes", +) parser.add_argument( - "--shr-mem-pipes-per-cu", type=int, default=1, \ - help="Number of Shared Memory pipelines per CU") + "--shr-mem-pipes-per-cu", + type=int, + default=1, + help="Number of Shared Memory pipelines per CU", +) parser.add_argument( - "--glb-mem-pipes-per-cu", type=int, default=1, \ - help="Number of Global Memory pipelines per CU") + "--glb-mem-pipes-per-cu", + type=int, + default=1, + help="Number of Global Memory pipelines per CU", +) parser.add_argument( - "--vreg-file-size", type=int, default=2048, - help="number of physical vector registers per SIMD") + "--vreg-file-size", + type=int, + default=2048, + help="number of physical vector registers per SIMD", +) parser.add_argument( - "--bw-scalor", type=int, default=0, - help="bandwidth scalor for scalability analysis") + "--bw-scalor", + type=int, + default=0, + help="bandwidth scalor for scalability analysis", +) +parser.add_argument("--CPUClock", type=str, default="2GHz", help="CPU clock") +parser.add_argument("--GPUClock", type=str, default="1GHz", help="GPU clock") parser.add_argument( - "--CPUClock", type=str, default="2GHz", - help="CPU clock") + "--cpu-voltage", + action="store", + type=str, + default="1.0V", + help="""CPU voltage domain""", +) parser.add_argument( - "--GPUClock", type=str, default="1GHz", - help="GPU clock") + "--gpu-voltage", + action="store", + type=str, + default="1.0V", + help="""CPU voltage domain""", +) parser.add_argument( - "--cpu-voltage", action="store", type=str, - default='1.0V', - help = """CPU voltage domain""") + "--CUExecPolicy", + type=str, + default="OLDEST-FIRST", + help="WF exec policy (OLDEST-FIRST, 
ROUND-ROBIN)", +) parser.add_argument( - "--gpu-voltage", action="store", type=str, - default='1.0V', - help = """CPU voltage domain""") + "--xact-cas-mode", + action="store_true", + help="enable load_compare mode (transactional CAS)", +) parser.add_argument( - "--CUExecPolicy", type=str, default="OLDEST-FIRST", - help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)") + "--SegFaultDebug", + action="store_true", + help="checks for GPU seg fault before TLB access", +) parser.add_argument( - "--xact-cas-mode", action="store_true", - help="enable load_compare mode (transactional CAS)") + "--LocalMemBarrier", + action="store_true", + help="Barrier does not wait for writethroughs to complete", +) parser.add_argument( - "--SegFaultDebug",action="store_true", - help="checks for GPU seg fault before TLB access") + "--countPages", + action="store_true", + help="Count Page Accesses and output in per-CU output files", +) parser.add_argument( - "--LocalMemBarrier",action="store_true", - help="Barrier does not wait for writethroughs to complete") + "--TLB-prefetch", type=int, help="prefetch depth for" "TLBs" +) parser.add_argument( - "--countPages", action="store_true", - help="Count Page Accesses and output in per-CU output files") -parser.add_argument( - "--TLB-prefetch", type=int, help = "prefetch depth for"\ - "TLBs") -parser.add_argument( - "--pf-type", type=str, help="type of prefetch: "\ - "PF_CU, PF_WF, PF_PHASE, PF_STRIDE") + "--pf-type", + type=str, + help="type of prefetch: " "PF_CU, PF_WF, PF_PHASE, PF_STRIDE", +) parser.add_argument("--pf-stride", type=int, help="set prefetch stride") parser.add_argument( - "--numLdsBanks", type=int, default=32, - help="number of physical banks per LDS module") + "--numLdsBanks", + type=int, + default=32, + help="number of physical banks per LDS module", +) parser.add_argument( - "--ldsBankConflictPenalty", type=int, default=1, - help="number of cycles per LDS bank conflict") + "--ldsBankConflictPenalty", + type=int, + default=1, + 
help="number of cycles per LDS bank conflict", +) # Add the ruby specific and protocol specific options Ruby.define_options(parser) @@ -168,16 +233,18 @@ args.access_backing_store = True # sharing sqc is the common usage) n_cu = args.num_compute_units num_sqc = int(math.ceil(float(n_cu) / args.cu_per_sqc)) -args.num_sqc = num_sqc # pass this to Ruby +args.num_sqc = num_sqc # pass this to Ruby ########################## Creating the GPU system ######################## # shader is the GPU -shader = Shader(n_wf = args.wfs_per_simd, - clk_domain = SrcClockDomain( - clock = args.GPUClock, - voltage_domain = VoltageDomain( - voltage = args.gpu_voltage)), - timing = True) +shader = Shader( + n_wf=args.wfs_per_simd, + clk_domain=SrcClockDomain( + clock=args.GPUClock, + voltage_domain=VoltageDomain(voltage=args.gpu_voltage), + ), + timing=True, +) # GPU_RfO(Read For Ownership) implements SC/TSO memory model. # Other GPU protocols implement release consistency at GPU side. @@ -187,7 +254,7 @@ shader = Shader(n_wf = args.wfs_per_simd, # the acquire/release operation depending on this impl_kern_boundary_sync # flag. This flag=true means pipeline initiates a acquire/release operation # at kernel boundary. 
-if buildEnv['PROTOCOL'] == 'GPU_RfO': +if buildEnv["PROTOCOL"] == "GPU_RfO": shader.impl_kern_boundary_sync = False else: shader.impl_kern_boundary_sync = True @@ -200,40 +267,42 @@ if args.TLB_config == "perLane": # List of compute units; one GPU can have multiple compute units compute_units = [] for i in range(n_cu): - compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane, - num_SIMDs = args.simds_per_cu, - wfSize = args.wf_size, - spbypass_pipe_length = \ - args.sp_bypass_path_length, - dpbypass_pipe_length = \ - args.dp_bypass_path_length, - issue_period = args.issue_period, - coalescer_to_vrf_bus_width = \ - args.glbmem_rd_bus_width, - vrf_to_coalescer_bus_width = \ - args.glbmem_wr_bus_width, - num_global_mem_pipes = \ - args.glb_mem_pipes_per_cu, - num_shared_mem_pipes = \ - args.shr_mem_pipes_per_cu, - n_wf = args.wfs_per_simd, - execPolicy = args.CUExecPolicy, - xactCasMode = args.xact_cas_mode, - debugSegFault = args.SegFaultDebug, - functionalTLB = True, - localMemBarrier = args.LocalMemBarrier, - countPages = args.countPages, - localDataStore = \ - LdsState(banks = args.numLdsBanks, - bankConflictPenalty = \ - args.ldsBankConflictPenalty))) + compute_units.append( + ComputeUnit( + cu_id=i, + perLaneTLB=per_lane, + num_SIMDs=args.simds_per_cu, + wfSize=args.wf_size, + spbypass_pipe_length=args.sp_bypass_path_length, + dpbypass_pipe_length=args.dp_bypass_path_length, + issue_period=args.issue_period, + coalescer_to_vrf_bus_width=args.glbmem_rd_bus_width, + vrf_to_coalescer_bus_width=args.glbmem_wr_bus_width, + num_global_mem_pipes=args.glb_mem_pipes_per_cu, + num_shared_mem_pipes=args.shr_mem_pipes_per_cu, + n_wf=args.wfs_per_simd, + execPolicy=args.CUExecPolicy, + xactCasMode=args.xact_cas_mode, + debugSegFault=args.SegFaultDebug, + functionalTLB=True, + localMemBarrier=args.LocalMemBarrier, + countPages=args.countPages, + localDataStore=LdsState( + banks=args.numLdsBanks, + bankConflictPenalty=args.ldsBankConflictPenalty, + ), + ) + ) 
wavefronts = [] vrfs = [] for j in range(args.simds_per_cu): for k in range(int(shader.n_wf)): - wavefronts.append(Wavefront(simdId = j, wf_slot_id = k)) - vrfs.append(VectorRegisterFile(simd_id=j, - num_regs_per_simd=args.vreg_file_size)) + wavefronts.append(Wavefront(simdId=j, wf_slot_id=k)) + vrfs.append( + VectorRegisterFile( + simd_id=j, num_regs_per_simd=args.vreg_file_size + ) + ) compute_units[-1].wavefronts = wavefronts compute_units[-1].vector_register_file = vrfs if args.TLB_prefetch: @@ -261,33 +330,38 @@ dispatcher = GpuDispatcher() # Currently does not test for command processors cpu_list = [cpu] + [shader] + [dispatcher] -system = System(cpu = cpu_list, - mem_ranges = [AddrRange(args.mem_size)], - mem_mode = 'timing', - workload = SEWorkload()) +system = System( + cpu=cpu_list, + mem_ranges=[AddrRange(args.mem_size)], + mem_mode="timing", + workload=SEWorkload(), +) # Dummy voltage domain for all our clock domains -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu[0].clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = \ - system.voltage_domain) +system.cpu[0].clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) # configure the TLB hierarchy GPUTLBConfig.config_tlb_hierarchy(args, system, shader_idx) # create Ruby system -system.piobus = IOXBar(width=32, response_latency=0, - frontend_latency=0, forward_latency=0) +system.piobus = IOXBar( + width=32, response_latency=0, frontend_latency=0, forward_latency=0 +) Ruby.create_system(args, None, system) # Create a separate clock for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - 
voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) # create the interrupt controller cpu.createInterruptController() @@ -299,7 +373,8 @@ cpu.createInterruptController() cpu.connectAllPorts( system.ruby._cpu_ports[0].in_ports, system.ruby._cpu_ports[0].in_ports, - system.ruby._cpu_ports[0].interrupt_out_port) + system.ruby._cpu_ports[0].interrupt_out_port, +) system.ruby._cpu_ports[0].mem_request_port = system.piobus.cpu_side_ports # attach CU ports to Ruby @@ -309,8 +384,9 @@ system.ruby._cpu_ports[0].mem_request_port = system.piobus.cpu_side_ports # the index as below, but note that this assumes there is one sequencer # per compute unit and one sequencer per SQC for the math to work out # correctly. -gpu_port_idx = len(system.ruby._cpu_ports) \ - - args.num_compute_units - args.num_sqc +gpu_port_idx = ( + len(system.ruby._cpu_ports) - args.num_compute_units - args.num_sqc +) gpu_port_idx = gpu_port_idx - args.num_cp * 2 wavefront_size = args.wf_size @@ -318,19 +394,21 @@ for i in range(n_cu): # The pipeline issues wavefront_size number of uncoalesced requests # in one GPU issue cycle. Hence wavefront_size mem ports. 
for j in range(wavefront_size): - system.cpu[shader_idx].CUs[i].memory_port[j] = \ - system.ruby._cpu_ports[gpu_port_idx].slave[j] + system.cpu[shader_idx].CUs[i].memory_port[j] = system.ruby._cpu_ports[ + gpu_port_idx + ].slave[j] gpu_port_idx += 1 for i in range(n_cu): if i > 0 and not i % args.cu_per_sqc: gpu_port_idx += 1 - system.cpu[shader_idx].CUs[i].sqc_port = \ - system.ruby._cpu_ports[gpu_port_idx].slave + system.cpu[shader_idx].CUs[i].sqc_port = system.ruby._cpu_ports[ + gpu_port_idx + ].slave gpu_port_idx = gpu_port_idx + 1 # Current regression tests do not support the command processor -assert(args.num_cp == 0) +assert args.num_cp == 0 # connect dispatcher to the system.piobus dispatcher.pio = system.piobus.mem_side_ports @@ -352,6 +430,6 @@ dispatcher.shader_pointer = shader # run simulation # ----------------------- -root = Root(full_system = False, system = system) -m5.ticks.setGlobalFrequency('1THz') -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +m5.ticks.setGlobalFrequency("1THz") +root.system.mem_mode = "timing" diff --git a/tests/configs/memcheck.py b/tests/configs/memcheck.py index 669c71b30a..25a48f9f9d 100644 --- a/tests/configs/memcheck.py +++ b/tests/configs/memcheck.py @@ -48,14 +48,13 @@ require_sim_object("TrafficGen") # For some reason, this is implicitly needed by run.py root = None + def run_test(root): - # Called from tests/run.py + # Called from tests/run.py - import sys - argv = [ - sys.argv[0], - '-m %d' % maxtick, - ] + import sys - # Execute the script we are wrapping - run_config('configs/example/memcheck.py', argv=argv) + argv = [sys.argv[0], "-m %d" % maxtick] + + # Execute the script we are wrapping + run_config("configs/example/memcheck.py", argv=argv) diff --git a/tests/configs/memtest-filter.py b/tests/configs/memtest-filter.py index 042b3cda3a..1080853f7b 100644 --- a/tests/configs/memtest-filter.py +++ b/tests/configs/memtest-filter.py @@ -26,30 +26,36 @@ import m5 from m5.objects 
import * -m5.util.addToPath('../configs/') + +m5.util.addToPath("../configs/") from common.Caches import * -#MAX CORES IS 8 with the fals sharing method +# MAX CORES IS 8 with the fals sharing method nb_cores = 8 -cpus = [ MemTest() for i in range(nb_cores) ] +cpus = [MemTest() for i in range(nb_cores)] # system simulated -system = System(cpu = cpus, - physmem = SimpleMemory(), - membus = SystemXBar(width=16, snoop_filter = SnoopFilter())) +system = System( + cpu=cpus, + physmem=SimpleMemory(), + membus=SystemXBar(width=16, snoop_filter=SnoopFilter()), +) # Dummy voltage domain for all our clock domains system.voltage_domain = VoltageDomain() -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = system.voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) -system.toL2Bus = L2XBar(clk_domain = system.cpu_clk_domain, - snoop_filter = SnoopFilter()) -system.l2c = L2Cache(clk_domain = system.cpu_clk_domain, size='64kB', assoc=8) +system.toL2Bus = L2XBar( + clk_domain=system.cpu_clk_domain, snoop_filter=SnoopFilter() +) +system.l2c = L2Cache(clk_domain=system.cpu_clk_domain, size="64kB", assoc=8) system.l2c.cpu_side = system.toL2Bus.mem_side_ports # connect l2c to membus @@ -59,7 +65,7 @@ system.l2c.mem_side = system.membus.cpu_side_ports for cpu in cpus: # All cpus are associated with cpu_clk_domain cpu.clk_domain = system.cpu_clk_domain - cpu.l1c = L1Cache(size = '32kB', assoc = 4) + cpu.l1c = L1Cache(size="32kB", assoc=4) cpu.l1c.cpu_side = cpu.port cpu.l1c.mem_side = system.toL2Bus.cpu_side_ports @@ -73,5 +79,5 @@ system.physmem.port = system.membus.mem_side_ports # run simulation # ----------------------- -root = Root( 
full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/memtest-ruby.py b/tests/configs/memtest-ruby.py index d6e1cf4f8d..dac165e288 100644 --- a/tests/configs/memtest-ruby.py +++ b/tests/configs/memtest-ruby.py @@ -31,7 +31,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -m5.util.addToPath('../configs/') +m5.util.addToPath("../configs/") from ruby import Ruby from common import Options @@ -48,69 +48,75 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. # -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 -args.ports=32 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 +args.ports = 32 -#MAX CORES IS 8 with the fals sharing method +# MAX CORES IS 8 with the fals sharing method nb_cores = 8 # ruby does not support atomic, functional, or uncacheable accesses -cpus = [ MemTest(percent_functional=50, - percent_uncacheable=0, suppress_func_errors=True) \ - for i in range(nb_cores) ] +cpus = [ + MemTest( + percent_functional=50, percent_uncacheable=0, suppress_func_errors=True + ) + for i in range(nb_cores) +] # overwrite args.num_cpus with the nb_cores value args.num_cpus = nb_cores # system simulated -system = System(cpu = cpus) +system = System(cpu=cpus) # Dummy voltage domain for all our clock domains system.voltage_domain = VoltageDomain() -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a seperate clock domain for components that should run 
at # CPUs frequency -system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = system.voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) # All cpus are associated with cpu_clk_domain for cpu in cpus: cpu.clk_domain = system.cpu_clk_domain -system.mem_ranges = AddrRange('256MB') +system.mem_ranges = AddrRange("256MB") Ruby.create_system(args, False, system) # Create a separate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) -assert(len(cpus) == len(system.ruby._cpu_ports)) +assert len(cpus) == len(system.ruby._cpu_ports) for (i, ruby_port) in enumerate(system.ruby._cpu_ports): - # - # Tie the cpu port to the ruby cpu ports and - # physmem, respectively - # - cpus[i].port = ruby_port.in_ports + # + # Tie the cpu port to the ruby cpu ports and + # physmem, respectively + # + cpus[i].port = ruby_port.in_ports - # - # Since the memtester is incredibly bursty, increase the deadlock - # threshold to 1 million cycles - # - ruby_port.deadlock_threshold = 1000000 + # + # Since the memtester is incredibly bursty, increase the deadlock + # threshold to 1 million cycles + # + ruby_port.deadlock_threshold = 1000000 # ----------------------- # run simulation # ----------------------- -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/memtest.py b/tests/configs/memtest.py index 01a5a46f09..10f3fbe50d 100644 --- a/tests/configs/memtest.py +++ b/tests/configs/memtest.py @@ -26,29 +26,30 @@ import m5 from m5.objects import * -m5.util.addToPath('../configs/') + +m5.util.addToPath("../configs/") from common.Caches import * -#MAX CORES IS 8 with the fals sharing method +# MAX CORES IS 8 
with the fals sharing method nb_cores = 8 -cpus = [ MemTest() for i in range(nb_cores) ] +cpus = [MemTest() for i in range(nb_cores)] # system simulated -system = System(cpu = cpus, - physmem = SimpleMemory(), - membus = SystemXBar()) +system = System(cpu=cpus, physmem=SimpleMemory(), membus=SystemXBar()) # Dummy voltage domain for all our clock domains system.voltage_domain = VoltageDomain() -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = system.voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) -system.toL2Bus = L2XBar(clk_domain = system.cpu_clk_domain) -system.l2c = L2Cache(clk_domain = system.cpu_clk_domain, size='64kB', assoc=8) +system.toL2Bus = L2XBar(clk_domain=system.cpu_clk_domain) +system.l2c = L2Cache(clk_domain=system.cpu_clk_domain, size="64kB", assoc=8) system.l2c.cpu_side = system.toL2Bus.mem_side_ports # connect l2c to membus @@ -58,7 +59,7 @@ system.l2c.mem_side = system.membus.cpu_side_ports for cpu in cpus: # All cpus are associated with cpu_clk_domain cpu.clk_domain = system.cpu_clk_domain - cpu.l1c = L1Cache(size = '32kB', assoc = 4) + cpu.l1c = L1Cache(size="32kB", assoc=4) cpu.l1c.cpu_side = cpu.port cpu.l1c.mem_side = system.toL2Bus.cpu_side_ports @@ -72,6 +73,5 @@ system.physmem.port = system.membus.mem_side_ports # run simulation # ----------------------- -root = Root( full_system = False, system = system ) -root.system.mem_mode = 'timing' - +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/minor-timing-mp.py b/tests/configs/minor-timing-mp.py index 4283c3fcbf..b6c56de512 100644 --- a/tests/configs/minor-timing-mp.py +++ 
b/tests/configs/minor-timing-mp.py @@ -40,5 +40,9 @@ from m5.objects import * from base_config import * nb_cores = 4 -root = BaseSESystem(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=MinorCPU, num_cpus=nb_cores).create_root() +root = BaseSESystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=MinorCPU, + num_cpus=nb_cores, +).create_root() diff --git a/tests/configs/minor-timing.py b/tests/configs/minor-timing.py index 6c1b51711c..e6680d7702 100644 --- a/tests/configs/minor-timing.py +++ b/tests/configs/minor-timing.py @@ -39,5 +39,6 @@ from m5.objects import * from base_config import * -root = BaseSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=MinorCPU).create_root() +root = BaseSESystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=MinorCPU +).create_root() diff --git a/tests/configs/o3-timing-checker.py b/tests/configs/o3-timing-checker.py index c82a6c86b1..9b328ce9e8 100644 --- a/tests/configs/o3-timing-checker.py +++ b/tests/configs/o3-timing-checker.py @@ -36,6 +36,9 @@ from m5.objects import * from base_config import * -root = BaseSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=DerivO3CPU, - checker=True).create_root() +root = BaseSESystemUniprocessor( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=DerivO3CPU, + checker=True, +).create_root() diff --git a/tests/configs/o3-timing-mp-ruby.py b/tests/configs/o3-timing-mp-ruby.py index bd7f07f996..10725e36ad 100644 --- a/tests/configs/o3-timing-mp-ruby.py +++ b/tests/configs/o3-timing-mp-ruby.py @@ -28,19 +28,24 @@ import m5 from m5.objects import * nb_cores = 4 -cpus = [ DerivO3CPU(cpu_id=i) for i in range(nb_cores) ] +cpus = [DerivO3CPU(cpu_id=i) for i in range(nb_cores)] import ruby_config + ruby_memory = ruby_config.generate("TwoLevel_SplitL1UnifiedL2.rb", nb_cores) # system simulated -system = System(cpu = cpus, physmem = ruby_memory, membus = SystemXBar(), - mem_mode = "timing", - 
clk_domain = SrcClockDomain(clock = '1GHz')) +system = System( + cpu=cpus, + physmem=ruby_memory, + membus=SystemXBar(), + mem_mode="timing", + clk_domain=SrcClockDomain(clock="1GHz"), +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu_clk_domain = SrcClockDomain(clock = '2GHz') +system.cpu_clk_domain = SrcClockDomain(clock="2GHz") for cpu in cpus: # create the interrupt controller @@ -59,5 +64,5 @@ system.system_port = system.membus.cpu_side_ports # run simulation # ----------------------- -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/o3-timing-mp.py b/tests/configs/o3-timing-mp.py index 5c68a607c0..9b58c9d416 100644 --- a/tests/configs/o3-timing-mp.py +++ b/tests/configs/o3-timing-mp.py @@ -40,5 +40,9 @@ from m5.objects import * from base_config import * nb_cores = 4 -root = BaseSESystem(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=DerivO3CPU, num_cpus=nb_cores).create_root() +root = BaseSESystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=DerivO3CPU, + num_cpus=nb_cores, +).create_root() diff --git a/tests/configs/o3-timing-mt.py b/tests/configs/o3-timing-mt.py index 5c98f332db..9fda80de12 100644 --- a/tests/configs/o3-timing-mt.py +++ b/tests/configs/o3-timing-mt.py @@ -41,15 +41,23 @@ from m5.defines import buildEnv from base_config import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa # If we are running ARM regressions, use a more sensible CPU # configuration. This makes the results more meaningful, and also # increases the coverage of the regressions. 
-if buildEnv['TARGET_ISA'] == "arm": - root = ArmSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3, - num_threads=2).create_root() +if get_runtime_isa() == ISA.ARM: + root = ArmSESystemUniprocessor( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=O3_ARM_v7a_3, + num_threads=2, + ).create_root() else: - root = BaseSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=DerivO3CPU, - num_threads=2).create_root() + root = BaseSESystemUniprocessor( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=DerivO3CPU, + num_threads=2, + ).create_root() diff --git a/tests/configs/o3-timing-ruby.py b/tests/configs/o3-timing-ruby.py index f06fe93bc3..30ee69ef23 100644 --- a/tests/configs/o3-timing-ruby.py +++ b/tests/configs/o3-timing-ruby.py @@ -28,19 +28,22 @@ import m5 from m5.objects import * import ruby_config + ruby_memory = ruby_config.generate("TwoLevel_SplitL1UnifiedL2.rb", 1) cpu = DerivO3CPU(cpu_id=0) -system = System(cpu = cpu, - physmem = ruby_memory, - membus = SystemXBar(), - mem_mode = "timing", - clk_domain = SrcClockDomain(clock = '1GHz')) +system = System( + cpu=cpu, + physmem=ruby_memory, + membus=SystemXBar(), + mem_mode="timing", + clk_domain=SrcClockDomain(clock="1GHz"), +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu.clk_domain = SrcClockDomain(clock = '2GHz') +system.cpu.clk_domain = SrcClockDomain(clock="2GHz") system.physmem.port = system.membus.mem_side_ports # create the interrupt controller @@ -50,4 +53,4 @@ cpu.connectBus(system.membus) # Connect the system port for loading of binaries etc system.system_port = system.membus.cpu_side_ports -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) diff --git a/tests/configs/o3-timing.py b/tests/configs/o3-timing.py index 3788df9f89..26efe466d4 100644 --- a/tests/configs/o3-timing.py +++ b/tests/configs/o3-timing.py @@ -41,13 +41,17 
@@ from m5.defines import buildEnv from base_config import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 +from gem5.isas import ISA +from gem5.runtime import get_runtime_isa # If we are running ARM regressions, use a more sensible CPU # configuration. This makes the results more meaningful, and also # increases the coverage of the regressions. -if buildEnv['TARGET_ISA'] == "arm": - root = ArmSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3).create_root() +if get_runtime_isa() == ISA.ARM: + root = ArmSESystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=O3_ARM_v7a_3 + ).create_root() else: - root = BaseSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, - cpu_class=DerivO3CPU).create_root() + root = BaseSESystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=DerivO3CPU + ).create_root() diff --git a/tests/configs/pc-o3-timing.py b/tests/configs/pc-o3-timing.py index 406678634b..24abcd2de6 100644 --- a/tests/configs/pc-o3-timing.py +++ b/tests/configs/pc-o3-timing.py @@ -36,6 +36,6 @@ from m5.objects import * from x86_generic import * -root = LinuxX86FSSystemUniprocessor(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=DerivO3CPU).create_root() +root = LinuxX86FSSystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=DerivO3CPU +).create_root() diff --git a/tests/configs/pc-simple-atomic.py b/tests/configs/pc-simple-atomic.py index cc509f3ed0..ac2c3c06b8 100644 --- a/tests/configs/pc-simple-atomic.py +++ b/tests/configs/pc-simple-atomic.py @@ -36,6 +36,6 @@ from m5.objects import * from x86_generic import * -root = LinuxX86FSSystemUniprocessor(mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU).create_root() +root = LinuxX86FSSystemUniprocessor( + mem_mode="atomic", mem_class=SimpleMemory, cpu_class=AtomicSimpleCPU +).create_root() diff --git a/tests/configs/pc-simple-timing-ruby.py 
b/tests/configs/pc-simple-timing-ruby.py index 9f73da9f2b..d0458b49cd 100644 --- a/tests/configs/pc-simple-timing-ruby.py +++ b/tests/configs/pc-simple-timing-ruby.py @@ -26,7 +26,8 @@ import m5, os, argparse, sys from m5.objects import * -m5.util.addToPath('../configs/') + +m5.util.addToPath("../configs/") from common.Benchmarks import SysConfig from common import FSConfig, SysPaths from ruby import Ruby @@ -40,35 +41,41 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. -args.l1d_size="32kB" -args.l1i_size="32kB" -args.l2_size="4MB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 +args.l1d_size = "32kB" +args.l1i_size = "32kB" +args.l2_size = "4MB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 args.num_cpus = 2 -#the system -mdesc = SysConfig(disks = ['linux-x86.img']) -system = FSConfig.makeLinuxX86System('timing', args.num_cpus, - mdesc=mdesc, Ruby=True) -system.kernel = SysPaths.binary('x86_64-vmlinux-2.6.22.9') +# the system +mdesc = SysConfig(disks=["linux-x86.img"]) +system = FSConfig.makeLinuxX86System( + "timing", args.num_cpus, mdesc=mdesc, Ruby=True +) +system.kernel = SysPaths.binary("x86_64-vmlinux-2.6.22.9") # Dummy voltage domain for all our clock domains -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) -system.kernel = FSConfig.binary('x86_64-vmlinux-2.6.22.9.smp') -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) -system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = system.voltage_domain) -system.cpu = [TimingSimpleCPU(cpu_id=i, clk_domain = system.cpu_clk_domain) - for i in range(args.num_cpus)] +system.kernel = FSConfig.binary("x86_64-vmlinux-2.6.22.9.smp") +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) +system.cpu_clk_domain = 
SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) +system.cpu = [ + TimingSimpleCPU(cpu_id=i, clk_domain=system.cpu_clk_domain) + for i in range(args.num_cpus) +] Ruby.create_system(args, True, system, system.iobus, system._dma_ports) # Create a seperate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) # Connect the ruby io port to the PIO bus, # assuming that there is just one such port. @@ -80,5 +87,5 @@ for (i, cpu) in enumerate(system.cpu): # Tie the cpu ports to the correct ruby system ports system.ruby._cpu_ports[i].connectCpuPorts(cpu) -root = Root(full_system = True, system = system) -m5.ticks.setGlobalFrequency('1THz') +root = Root(full_system=True, system=system) +m5.ticks.setGlobalFrequency("1THz") diff --git a/tests/configs/pc-simple-timing.py b/tests/configs/pc-simple-timing.py index a73e15c548..c095401381 100644 --- a/tests/configs/pc-simple-timing.py +++ b/tests/configs/pc-simple-timing.py @@ -36,7 +36,6 @@ from m5.objects import * from x86_generic import * -root = LinuxX86FSSystemUniprocessor(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU).create_root() - +root = LinuxX86FSSystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=TimingSimpleCPU +).create_root() diff --git a/tests/configs/pc-switcheroo-full.py b/tests/configs/pc-switcheroo-full.py index d5b3b701f2..a69f80c341 100644 --- a/tests/configs/pc-switcheroo-full.py +++ b/tests/configs/pc-switcheroo-full.py @@ -40,8 +40,8 @@ import switcheroo root = LinuxX86FSSwitcheroo( mem_class=DDR3_1600_8x8, - cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, DerivO3CPU) - ).create_root() + cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, DerivO3CPU), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU 
models. diff --git a/tests/configs/rubytest-ruby.py b/tests/configs/rubytest-ruby.py index b68ff92ad1..9a382cce6e 100644 --- a/tests/configs/rubytest-ruby.py +++ b/tests/configs/rubytest-ruby.py @@ -31,7 +31,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -m5.util.addToPath('../configs/') +m5.util.addToPath("../configs/") from ruby import Ruby from common import Options @@ -48,51 +48,57 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. # -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 -args.ports=32 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 +args.ports = 32 # Turn on flush check for the hammer protocol check_flush = False -if buildEnv['PROTOCOL'] == 'MOESI_hammer': +if buildEnv["PROTOCOL"] == "MOESI_hammer": check_flush = True # # create the tester and system, including ruby # -tester = RubyTester(check_flush = check_flush, checks_to_complete = 100, - wakeup_frequency = 10, num_cpus = args.num_cpus) +tester = RubyTester( + check_flush=check_flush, + checks_to_complete=100, + wakeup_frequency=10, + num_cpus=args.num_cpus, +) # We set the testers as cpu for ruby to find the correct clock domains # for the L1 Objects. 
-system = System(cpu = tester) +system = System(cpu=tester) # Dummy voltage domain for all our clock domains -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) -system.mem_ranges = AddrRange('256MB') +system.mem_ranges = AddrRange("256MB") # the ruby tester reuses num_cpus to specify the # number of cpu ports connected to the tester object, which # is stored in system.cpu. because there is only ever one # tester object, num_cpus is not necessarily equal to the # size of system.cpu -cpu_list = [ system.cpu ] * args.num_cpus +cpu_list = [system.cpu] * args.num_cpus Ruby.create_system(args, False, system, cpus=cpu_list) # Create a separate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) -assert(args.num_cpus == len(system.ruby._cpu_ports)) +assert args.num_cpus == len(system.ruby._cpu_ports) tester.num_cpus = len(system.ruby._cpu_ports) @@ -126,5 +132,5 @@ for ruby_port in system.ruby._cpu_ports: # run simulation # ----------------------- -root = Root(full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/simple-atomic-dummychecker.py b/tests/configs/simple-atomic-dummychecker.py index 42d1ceed1f..7ec004765a 100644 --- a/tests/configs/simple-atomic-dummychecker.py +++ b/tests/configs/simple-atomic-dummychecker.py @@ -36,6 +36,6 @@ from m5.objects import * from base_config import * -root = BaseSESystemUniprocessor(mem_mode='atomic', - cpu_class=AtomicSimpleCPU, - checker=True).create_root() +root = 
BaseSESystemUniprocessor( + mem_mode="atomic", cpu_class=AtomicSimpleCPU, checker=True +).create_root() diff --git a/tests/configs/simple-atomic-mp-ruby.py b/tests/configs/simple-atomic-mp-ruby.py index 117787a7dd..e3ac279022 100644 --- a/tests/configs/simple-atomic-mp-ruby.py +++ b/tests/configs/simple-atomic-mp-ruby.py @@ -28,18 +28,23 @@ import m5 from m5.objects import * nb_cores = 4 -cpus = [ AtomicSimpleCPU(cpu_id=i) for i in range(nb_cores) ] +cpus = [AtomicSimpleCPU(cpu_id=i) for i in range(nb_cores)] import ruby_config + ruby_memory = ruby_config.generate("TwoLevel_SplitL1UnifiedL2.rb", nb_cores) # system simulated -system = System(cpu = cpus, physmem = ruby_memory, membus = SystemXBar(), - clk_domain = SrcClockDomain(clock = '1GHz')) +system = System( + cpu=cpus, + physmem=ruby_memory, + membus=SystemXBar(), + clk_domain=SrcClockDomain(clock="1GHz"), +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu.clk_domain = SrcClockDomain(clock = '2GHz') +system.cpu.clk_domain = SrcClockDomain(clock="2GHz") # add L1 caches for cpu in cpus: @@ -57,5 +62,5 @@ system.system_port = system.membus.cpu_side_ports # run simulation # ----------------------- -root = Root(full_system = False, system = system) -root.system.mem_mode = 'atomic' +root = Root(full_system=False, system=system) +root.system.mem_mode = "atomic" diff --git a/tests/configs/simple-atomic-mp.py b/tests/configs/simple-atomic-mp.py index 5e9b465905..0d85b5af36 100644 --- a/tests/configs/simple-atomic-mp.py +++ b/tests/configs/simple-atomic-mp.py @@ -40,5 +40,6 @@ from m5.objects import * from base_config import * nb_cores = 4 -root = BaseSESystem(mem_mode='atomic', cpu_class=AtomicSimpleCPU, - num_cpus=nb_cores).create_root() +root = BaseSESystem( + mem_mode="atomic", cpu_class=AtomicSimpleCPU, num_cpus=nb_cores +).create_root() diff --git a/tests/configs/simple-atomic.py b/tests/configs/simple-atomic.py index 3c698f537a..6dd86ccf39 100644 --- 
a/tests/configs/simple-atomic.py +++ b/tests/configs/simple-atomic.py @@ -39,5 +39,6 @@ from m5.objects import * from base_config import * -root = BaseSESystemUniprocessor(mem_mode='atomic', - cpu_class=AtomicSimpleCPU).create_root() +root = BaseSESystemUniprocessor( + mem_mode="atomic", cpu_class=AtomicSimpleCPU +).create_root() diff --git a/tests/configs/simple-timing-mp-ruby.py b/tests/configs/simple-timing-mp-ruby.py index 899f18beeb..38488c409d 100644 --- a/tests/configs/simple-timing-mp-ruby.py +++ b/tests/configs/simple-timing-mp-ruby.py @@ -30,7 +30,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -m5.util.addToPath('../configs/') +m5.util.addToPath("../configs/") from common import Options from ruby import Ruby @@ -47,34 +47,34 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. # -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 nb_cores = 4 -cpus = [ TimingSimpleCPU(cpu_id=i) for i in range(nb_cores) ] +cpus = [TimingSimpleCPU(cpu_id=i) for i in range(nb_cores)] # overwrite the num_cpus to equal nb_cores args.num_cpus = nb_cores # system simulated -system = System(cpu = cpus, clk_domain = SrcClockDomain(clock = '1GHz')) +system = System(cpu=cpus, clk_domain=SrcClockDomain(clock="1GHz")) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu.clk_domain = SrcClockDomain(clock = '2GHz') +system.cpu.clk_domain = SrcClockDomain(clock="2GHz") Ruby.create_system(args, False, system) # Create a separate clock domain for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock) +system.ruby.clk_domain = 
SrcClockDomain(clock=args.ruby_clock) -assert(args.num_cpus == len(system.ruby._cpu_ports)) +assert args.num_cpus == len(system.ruby._cpu_ports) for (i, cpu) in enumerate(system.cpu): # create the interrupt controller @@ -86,11 +86,12 @@ for (i, cpu) in enumerate(system.cpu): cpu.connectAllPorts( system.ruby._cpu_ports[i].in_ports, system.ruby._cpu_ports[i].in_ports, - system.ruby._cpu_ports[i].interrupt_out_port) + system.ruby._cpu_ports[i].interrupt_out_port, + ) # ----------------------- # run simulation # ----------------------- -root = Root( full_system=False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/simple-timing-mp.py b/tests/configs/simple-timing-mp.py index 8712cb9be1..3988f4c2b9 100644 --- a/tests/configs/simple-timing-mp.py +++ b/tests/configs/simple-timing-mp.py @@ -40,5 +40,6 @@ from m5.objects import * from base_config import * nb_cores = 4 -root = BaseSESystem(mem_mode='timing', cpu_class=TimingSimpleCPU, - num_cpus=nb_cores).create_root() +root = BaseSESystem( + mem_mode="timing", cpu_class=TimingSimpleCPU, num_cpus=nb_cores +).create_root() diff --git a/tests/configs/simple-timing-ruby.py b/tests/configs/simple-timing-ruby.py index cc1697f169..eb0f4e9ac7 100644 --- a/tests/configs/simple-timing-ruby.py +++ b/tests/configs/simple-timing-ruby.py @@ -30,7 +30,7 @@ from m5.defines import buildEnv from m5.util import addToPath import os, argparse, sys -m5.util.addToPath('../configs/') +m5.util.addToPath("../configs/") from ruby import Ruby from common import Options @@ -47,38 +47,41 @@ args = parser.parse_args() # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. 
# -args.l1d_size="256B" -args.l1i_size="256B" -args.l2_size="512B" -args.l3_size="1kB" -args.l1d_assoc=2 -args.l1i_assoc=2 -args.l2_assoc=2 -args.l3_assoc=2 +args.l1d_size = "256B" +args.l1i_size = "256B" +args.l2_size = "512B" +args.l3_size = "1kB" +args.l1d_assoc = 2 +args.l1i_assoc = 2 +args.l2_assoc = 2 +args.l3_assoc = 2 # this is a uniprocessor only test args.num_cpus = 1 cpu = TimingSimpleCPU(cpu_id=0) -system = System(cpu = cpu) +system = System(cpu=cpu) # Dummy voltage domain for all our clock domains -system.voltage_domain = VoltageDomain(voltage = args.sys_voltage) -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.voltage_domain = VoltageDomain(voltage=args.sys_voltage) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu.clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = system.voltage_domain) +system.cpu.clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) -system.mem_ranges = AddrRange('256MB') +system.mem_ranges = AddrRange("256MB") Ruby.create_system(args, False, system) # Create a separate clock for Ruby -system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock, - voltage_domain = system.voltage_domain) +system.ruby.clk_domain = SrcClockDomain( + clock=args.ruby_clock, voltage_domain=system.voltage_domain +) -assert(len(system.ruby._cpu_ports) == 1) +assert len(system.ruby._cpu_ports) == 1 # create the interrupt controller cpu.createInterruptController() @@ -90,11 +93,12 @@ cpu.createInterruptController() cpu.connectAllPorts( system.ruby._cpu_ports[0].in_ports, system.ruby._cpu_ports[0].in_ports, - system.ruby._cpu_ports[0].interrupt_out_port) + system.ruby._cpu_ports[0].interrupt_out_port, +) # ----------------------- # run simulation # ----------------------- -root = Root(full_system = False, system = system) 
-root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" diff --git a/tests/configs/simple-timing.py b/tests/configs/simple-timing.py index d7d6171a6b..bf3ced4463 100644 --- a/tests/configs/simple-timing.py +++ b/tests/configs/simple-timing.py @@ -39,5 +39,6 @@ from m5.objects import * from base_config import * -root = BaseSESystemUniprocessor(mem_mode='timing', - cpu_class=TimingSimpleCPU).create_root() +root = BaseSESystemUniprocessor( + mem_mode="timing", cpu_class=TimingSimpleCPU +).create_root() diff --git a/tests/configs/t1000-simple-atomic.py b/tests/configs/t1000-simple-atomic.py index 99697212cc..76d39327d2 100644 --- a/tests/configs/t1000-simple-atomic.py +++ b/tests/configs/t1000-simple-atomic.py @@ -26,20 +26,23 @@ import m5 from m5.objects import * -m5.util.addToPath('../configs/') + +m5.util.addToPath("../configs/") from common import FSConfig try: - system = FSConfig.makeSparcSystem('atomic') + system = FSConfig.makeSparcSystem("atomic") except IOError as e: skip_test(reason=str(e)) system.voltage_domain = VoltageDomain() -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) -system.cpu_clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) -cpu = AtomicSimpleCPU(cpu_id=0, clk_domain = system.cpu_clk_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) +system.cpu_clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) +cpu = AtomicSimpleCPU(cpu_id=0, clk_domain=system.cpu_clk_domain) system.cpu = cpu # create the interrupt controller cpu.createInterruptController() @@ -47,11 +50,10 @@ cpu.connectBus(system.membus) # create the memory controllers and connect them, stick with # the physmem name to avoid bumping all the reference stats -system.physmem = [SimpleMemory(range = r) - for r in system.mem_ranges] +system.physmem = [SimpleMemory(range=r) for 
r in system.mem_ranges] for i in range(len(system.physmem)): system.physmem[i].port = system.membus.mem_side_ports root = Root(full_system=True, system=system) -m5.ticks.setGlobalFrequency('2GHz') +m5.ticks.setGlobalFrequency("2GHz") diff --git a/tests/configs/x86_generic.py b/tests/configs/x86_generic.py index 1ab5d3b060..3c590860de 100644 --- a/tests/configs/x86_generic.py +++ b/tests/configs/x86_generic.py @@ -37,12 +37,14 @@ from abc import ABCMeta, abstractmethod import m5 from m5.objects import * from m5.proxy import * -m5.util.addToPath('../configs/') + +m5.util.addToPath("../configs/") from common.Benchmarks import SysConfig from common import FSConfig, SysPaths from common.Caches import * from base_config import * + class LinuxX86SystemBuilder(object): """Mix-in that implements create_system. @@ -50,21 +52,22 @@ class LinuxX86SystemBuilder(object): X86-specific create_system method to a class deriving from one of the generic base systems. """ + def __init__(self): pass def create_system(self): - mdesc = SysConfig(disks = ['linux-x86.img']) - system = FSConfig.makeLinuxX86System(self.mem_mode, - numCPUs=self.num_cpus, - mdesc=mdesc) - system.kernel = SysPaths.binary('x86_64-vmlinux-2.6.22.9') + mdesc = SysConfig(disks=["linux-x86.img"]) + system = FSConfig.makeLinuxX86System( + self.mem_mode, numCPUs=self.num_cpus, mdesc=mdesc + ) + system.kernel = SysPaths.binary("x86_64-vmlinux-2.6.22.9") self.init_system(system) return system -class LinuxX86FSSystem(LinuxX86SystemBuilder, - BaseFSSystem): + +class LinuxX86FSSystem(LinuxX86SystemBuilder, BaseFSSystem): """Basic X86 full system builder.""" def __init__(self, **kwargs): @@ -80,13 +83,17 @@ class LinuxX86FSSystem(LinuxX86SystemBuilder, LinuxX86SystemBuilder.__init__(self) def create_caches_private(self, cpu): - cpu.addPrivateSplitL1Caches(L1_ICache(size='32kB', assoc=1), - L1_DCache(size='32kB', assoc=4), - PageTableWalkerCache(), - PageTableWalkerCache()) + cpu.addPrivateSplitL1Caches( + 
L1_ICache(size="32kB", assoc=1), + L1_DCache(size="32kB", assoc=4), + PageTableWalkerCache(), + PageTableWalkerCache(), + ) -class LinuxX86FSSystemUniprocessor(LinuxX86SystemBuilder, - BaseFSSystemUniprocessor): + +class LinuxX86FSSystemUniprocessor( + LinuxX86SystemBuilder, BaseFSSystemUniprocessor +): """Basic X86 full system builder for uniprocessor systems. Note: This class is a specialization of the X86FSSystem and is @@ -99,11 +106,13 @@ class LinuxX86FSSystemUniprocessor(LinuxX86SystemBuilder, LinuxX86SystemBuilder.__init__(self) def create_caches_private(self, cpu): - cpu.addTwoLevelCacheHierarchy(L1_ICache(size='32kB', assoc=1), - L1_DCache(size='32kB', assoc=4), - L2Cache(size='4MB', assoc=8), - PageTableWalkerCache(), - PageTableWalkerCache()) + cpu.addTwoLevelCacheHierarchy( + L1_ICache(size="32kB", assoc=1), + L1_DCache(size="32kB", assoc=4), + L2Cache(size="4MB", assoc=8), + PageTableWalkerCache(), + PageTableWalkerCache(), + ) class LinuxX86FSSwitcheroo(LinuxX86SystemBuilder, BaseFSSwitcheroo): diff --git a/tests/gem5/arm-boot-tests/test_linux_boot.py b/tests/gem5/arm-boot-tests/test_linux_boot.py new file mode 100644 index 0000000000..364125691c --- /dev/null +++ b/tests/gem5/arm-boot-tests/test_linux_boot.py @@ -0,0 +1,180 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import re + +from typing import Optional + +from testlib import * + +if config.bin_path: + resource_path = config.bin_path +else: + resource_path = joinpath(absdirpath(__file__), "..", "resources") + + +def test_boot( + cpu: str, + num_cpus: int, + mem_system: str, + memory_class: str, + length: str, + to_tick: Optional[int] = None, +): + + name = f"{cpu}-cpu_{num_cpus}-cores_{mem_system}_{memory_class}_\ +arm-boot-test" + + verifiers = [] + + config_args = [ + "--cpu", + cpu, + "--num-cpus", + str(num_cpus), + "--mem-system", + mem_system, + "--dram-class", + memory_class, + "--resource-directory", + resource_path, + ] + + if to_tick: + name += "_to-tick" + exit_regex = re.compile( + "Exiting @ tick {} because simulate\(\) limit reached".format( + str(to_tick) + ) + ) + verifiers.append(verifier.MatchRegex(exit_regex)) + config_args += ["--tick-exit", str(to_tick)] + else: + name += "_m5-exit" + + if mem_system == "chi": + protocol_to_use = "CHI" + elif mem_system == "mesi_two_level": + protocol_to_use = None + elif mem_system == "mi_example": + protocol_to_use = "MI_example" + else: + protocol_to_use = None + + gem5_verify_config( + name=name, + verifiers=verifiers, + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "configs", + "arm_boot_exit_run.py", + ), + config_args=config_args, + valid_isas=(constants.all_compiled,), + valid_hosts=constants.supported_hosts, + length=length, + protocol=protocol_to_use, + ) + + +#### The long (pre-submit/Kokoro) tests #### + +test_boot( + cpu="atomic", + num_cpus=1, + mem_system="classic", + memory_class="SingleChannelDDR3_1600", + length=constants.quick_tag, + to_tick=10000000000, +) + +test_boot( + cpu="timing", + num_cpus=1, + mem_system="classic", + memory_class="SingleChannelDDR3_2133", + length=constants.quick_tag, + to_tick=10000000000, +) + +test_boot( + cpu="o3", + num_cpus=1, + mem_system="classic", + memory_class="DualChannelDDR3_1600", + length=constants.quick_tag, + 
to_tick=10000000000, +) + +test_boot( + cpu="timing", + num_cpus=2, + mem_system="classic", + memory_class="DualChannelDDR4_2400", + length=constants.quick_tag, + to_tick=10000000000, +) + +test_boot( + cpu="timing", + num_cpus=2, + mem_system="no_cache", + memory_class="DualChannelDDR4_2400", + length=constants.quick_tag, + to_tick=10000000000, +) + + +test_boot( + cpu="timing", + num_cpus=2, + mem_system="mesi_two_level", + memory_class="DualChannelDDR4_2400", + length=constants.quick_tag, + to_tick=10000000000, +) + + +#### The long (nightly) tests #### + +test_boot( + cpu="atomic", + num_cpus=4, + mem_system="no_cache", + memory_class="HBM2Stack", + length=constants.long_tag, +) + +test_boot( + cpu="timing", + num_cpus=2, + mem_system="chi", + memory_class="DualChannelDDR4_2400", + length=constants.long_tag, +) diff --git a/tests/gem5/asmtest/tests.py b/tests/gem5/asmtest/tests.py old mode 100755 new mode 100644 index cd473b0baf..b2a5992da0 --- a/tests/gem5/asmtest/tests.py +++ b/tests/gem5/asmtest/tests.py @@ -1,246 +1,197 @@ -# Copyright (c) 2020 The Regents of the University of California -# All Rights Reserved. -# -# Copyright (c) 2018, Cornell University +# Copyright (c) 2022 The Regents of the University of California # All rights reserved. 
# -# Redistribution and use in source and binary forms, with or -# without modification, are permitted provided that the following -# conditions are met: +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. # -# Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# Neither the name of Cornell University nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND -# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF -# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED -# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import os from testlib import * -def asm_test(test, #The full path of the test - cpu_type, - num_cpus=4, - max_tick=None, - ruby=False, - debug_flags=None, # Debug flags passed to gem5 - full_system = False - ): - - if full_system: - config_file = os.path.join(config.base_dir, - 'configs', 'example', 'fs.py') - else: - config_file = os.path.join(config.base_dir, - 'configs', 'example', 'se.py') - - gem5_args = ['--listener-mode', 'off'] - - if not debug_flags is None: - gem5_args += ['--debug-flags', str(debug_flags)] - - config_args = ['--cpu-type', cpu_type] - - if max_tick: - config_args += ['-m', str(max_tick) ] - - if full_system: - config_args += [ - '--caches', - '--mem-size', '3072MB', - '--kernel', test - ] - else: - config_args += [ - '-n', str(num_cpus), - '--ruby' if ruby else '--caches', - '--cmd', test - ] - - gem5_verify_config( - name = 'asm-' + os.path.basename(test) + '-' + cpu_type, - fixtures = (program,), - verifiers = (), - gem5_args = gem5_args, - config = config_file, - config_args = config_args, - valid_isas = (constants.riscv_tag,), - valid_hosts = constants.supported_hosts - ) - -cpu_types = ('AtomicSimpleCPU', 'TimingSimpleCPU', 'MinorCPU', 'DerivO3CPU') +if config.bin_path: + resource_path = config.bin_path +else: + resource_path = joinpath(absdirpath(__file__), "..", "resources") # The following lists the RISCV binaries. Those commented out presently result # in a test failure. 
This is outlined in the following Jira issue: # https://gem5.atlassian.net/browse/GEM5-496 binaries = ( - 'rv64samt-ps-sysclone_d', - 'rv64samt-ps-sysfutex1_d', -# 'rv64samt-ps-sysfutex2_d', - 'rv64samt-ps-sysfutex3_d', -# 'rv64samt-ps-sysfutex_d', - 'rv64ua-ps-amoadd_d', - 'rv64ua-ps-amoadd_w', - 'rv64ua-ps-amoand_d', - 'rv64ua-ps-amoand_w', - 'rv64ua-ps-amomax_d', - 'rv64ua-ps-amomax_w', - 'rv64ua-ps-amomaxu_d', - 'rv64ua-ps-amomaxu_w', - 'rv64ua-ps-amomin_d', - 'rv64ua-ps-amomin_w', - 'rv64ua-ps-amominu_d', - 'rv64ua-ps-amominu_w', - 'rv64ua-ps-amoor_d', - 'rv64ua-ps-amoor_w', - 'rv64ua-ps-amoswap_d', - 'rv64ua-ps-amoswap_w', - 'rv64ua-ps-amoxor_d', - 'rv64ua-ps-amoxor_w', - 'rv64ua-ps-lrsc', - 'rv64uamt-ps-amoadd_d', - 'rv64uamt-ps-amoand_d', - 'rv64uamt-ps-amomax_d', - 'rv64uamt-ps-amomaxu_d', - 'rv64uamt-ps-amomin_d', - 'rv64uamt-ps-amominu_d', - 'rv64uamt-ps-amoor_d', - 'rv64uamt-ps-amoswap_d', - 'rv64uamt-ps-amoxor_d', - 'rv64uamt-ps-lrsc_d', - 'rv64ud-ps-fadd', - 'rv64ud-ps-fclass', - 'rv64ud-ps-fcmp', - 'rv64ud-ps-fcvt', - 'rv64ud-ps-fcvt_w', - 'rv64ud-ps-fdiv', - 'rv64ud-ps-fmadd', - 'rv64ud-ps-fmin', - 'rv64ud-ps-ldst', - 'rv64ud-ps-move', - 'rv64ud-ps-recoding', - 'rv64ud-ps-structural', - 'rv64uf-ps-fadd', - 'rv64uf-ps-fclass', - 'rv64uf-ps-fcmp', - 'rv64uf-ps-fcvt', - 'rv64uf-ps-fcvt_w', - 'rv64uf-ps-fdiv', - 'rv64uf-ps-fmadd', - 'rv64uf-ps-fmin', - 'rv64uf-ps-ldst', - 'rv64uf-ps-move', - 'rv64uf-ps-recoding', - 'rv64ui-ps-add', - 'rv64ui-ps-addi', - 'rv64ui-ps-addiw', - 'rv64ui-ps-addw', - 'rv64ui-ps-and', - 'rv64ui-ps-andi', - 'rv64ui-ps-auipc', - 'rv64ui-ps-beq', - 'rv64ui-ps-bge', - 'rv64ui-ps-bgeu', - 'rv64ui-ps-blt', - 'rv64ui-ps-bltu', - 'rv64ui-ps-bne', - 'rv64ui-ps-fence_i', - 'rv64ui-ps-jal', - 'rv64ui-ps-jalr', - 'rv64ui-ps-lb', - 'rv64ui-ps-lbu', - 'rv64ui-ps-ld', - 'rv64ui-ps-lh', - 'rv64ui-ps-lhu', - 'rv64ui-ps-lui', - 'rv64ui-ps-lw', - 'rv64ui-ps-lwu', - 'rv64ui-ps-or', - 'rv64ui-ps-ori', - 'rv64ui-ps-sb', - 'rv64ui-ps-sd', - 
'rv64ui-ps-sh', - 'rv64ui-ps-simple', - 'rv64ui-ps-sll', - 'rv64ui-ps-slli', - 'rv64ui-ps-slliw', - 'rv64ui-ps-sllw', - 'rv64ui-ps-slt', - 'rv64ui-ps-slti', - 'rv64ui-ps-sltiu', - 'rv64ui-ps-sltu', - 'rv64ui-ps-sra', - 'rv64ui-ps-srai', - 'rv64ui-ps-sraiw', - 'rv64ui-ps-sraw', - 'rv64ui-ps-srl', - 'rv64ui-ps-srli', - 'rv64ui-ps-srliw', - 'rv64ui-ps-srlw', - 'rv64ui-ps-sub', - 'rv64ui-ps-subw', - 'rv64ui-ps-sw', - 'rv64ui-ps-xor', - 'rv64ui-ps-xori', - 'rv64um-ps-div', - 'rv64um-ps-divu', - 'rv64um-ps-divuw', - 'rv64um-ps-divw', - 'rv64um-ps-mul', - 'rv64um-ps-mulh', - 'rv64um-ps-mulhsu', - 'rv64um-ps-mulhu', - 'rv64um-ps-mulw', - 'rv64um-ps-rem', - 'rv64um-ps-remu', - 'rv64um-ps-remuw', - 'rv64um-ps-remw', - 'rv64uzfh-ps-fadd', - 'rv64uzfh-ps-fclass', - 'rv64uzfh-ps-fcmp', - 'rv64uzfh-ps-fcvt', - 'rv64uzfh-ps-fcvt_w', - 'rv64uzfh-ps-fdiv', - 'rv64uzfh-ps-fmadd', - 'rv64uzfh-ps-fmin', - 'rv64uzfh-ps-ldst', - 'rv64uzfh-ps-move', - 'rv64uzfh-ps-recoding', + "rv64samt-ps-sysclone_d", + "rv64samt-ps-sysfutex1_d", + # 'rv64samt-ps-sysfutex2_d', + "rv64samt-ps-sysfutex3_d", + # 'rv64samt-ps-sysfutex_d', + "rv64ua-ps-amoadd_d", + "rv64ua-ps-amoadd_w", + "rv64ua-ps-amoand_d", + "rv64ua-ps-amoand_w", + "rv64ua-ps-amomax_d", + "rv64ua-ps-amomax_w", + "rv64ua-ps-amomaxu_d", + "rv64ua-ps-amomaxu_w", + "rv64ua-ps-amomin_d", + "rv64ua-ps-amomin_w", + "rv64ua-ps-amominu_d", + "rv64ua-ps-amominu_w", + "rv64ua-ps-amoor_d", + "rv64ua-ps-amoor_w", + "rv64ua-ps-amoswap_d", + "rv64ua-ps-amoswap_w", + "rv64ua-ps-amoxor_d", + "rv64ua-ps-amoxor_w", + "rv64ua-ps-lrsc", + "rv64uamt-ps-amoadd_d", + "rv64uamt-ps-amoand_d", + "rv64uamt-ps-amomax_d", + "rv64uamt-ps-amomaxu_d", + "rv64uamt-ps-amomin_d", + "rv64uamt-ps-amominu_d", + "rv64uamt-ps-amoor_d", + "rv64uamt-ps-amoswap_d", + "rv64uamt-ps-amoxor_d", + "rv64uamt-ps-lrsc_d", + "rv64ud-ps-fadd", + "rv64ud-ps-fclass", + "rv64ud-ps-fcmp", + "rv64ud-ps-fcvt", + "rv64ud-ps-fcvt_w", + "rv64ud-ps-fdiv", + "rv64ud-ps-fmadd", + "rv64ud-ps-fmin", + 
"rv64ud-ps-ldst", + "rv64ud-ps-move", + "rv64ud-ps-recoding", + "rv64ud-ps-structural", + "rv64uf-ps-fadd", + "rv64uf-ps-fclass", + "rv64uf-ps-fcmp", + "rv64uf-ps-fcvt", + "rv64uf-ps-fcvt_w", + "rv64uf-ps-fdiv", + "rv64uf-ps-fmadd", + "rv64uf-ps-fmin", + "rv64uf-ps-ldst", + "rv64uf-ps-move", + "rv64uf-ps-recoding", + "rv64ui-ps-add", + "rv64ui-ps-addi", + "rv64ui-ps-addiw", + "rv64ui-ps-addw", + "rv64ui-ps-and", + "rv64ui-ps-andi", + "rv64ui-ps-auipc", + "rv64ui-ps-beq", + "rv64ui-ps-bge", + "rv64ui-ps-bgeu", + "rv64ui-ps-blt", + "rv64ui-ps-bltu", + "rv64ui-ps-bne", + "rv64ui-ps-fence_i", + "rv64ui-ps-jal", + "rv64ui-ps-jalr", + "rv64ui-ps-lb", + "rv64ui-ps-lbu", + "rv64ui-ps-ld", + "rv64ui-ps-lh", + "rv64ui-ps-lhu", + "rv64ui-ps-lui", + "rv64ui-ps-lw", + "rv64ui-ps-lwu", + "rv64ui-ps-or", + "rv64ui-ps-ori", + "rv64ui-ps-sb", + "rv64ui-ps-sd", + "rv64ui-ps-sh", + "rv64ui-ps-simple", + "rv64ui-ps-sll", + "rv64ui-ps-slli", + "rv64ui-ps-slliw", + "rv64ui-ps-sllw", + "rv64ui-ps-slt", + "rv64ui-ps-slti", + "rv64ui-ps-sltiu", + "rv64ui-ps-sltu", + "rv64ui-ps-sra", + "rv64ui-ps-srai", + "rv64ui-ps-sraiw", + "rv64ui-ps-sraw", + "rv64ui-ps-srl", + "rv64ui-ps-srli", + "rv64ui-ps-srliw", + "rv64ui-ps-srlw", + "rv64ui-ps-sub", + "rv64ui-ps-subw", + "rv64ui-ps-sw", + "rv64ui-ps-xor", + "rv64ui-ps-xori", + "rv64um-ps-div", + "rv64um-ps-divu", + "rv64um-ps-divuw", + "rv64um-ps-divw", + "rv64um-ps-mul", + "rv64um-ps-mulh", + "rv64um-ps-mulhsu", + "rv64um-ps-mulhu", + "rv64um-ps-mulw", + "rv64um-ps-rem", + "rv64um-ps-remu", + "rv64um-ps-remuw", + "rv64um-ps-remw", + "rv64uzfh-ps-fadd", + "rv64uzfh-ps-fclass", + "rv64uzfh-ps-fcmp", + "rv64uzfh-ps-fcvt", + "rv64uzfh-ps-fcvt_w", + "rv64uzfh-ps-fdiv", + "rv64uzfh-ps-fmadd", + "rv64uzfh-ps-fmin", + "rv64uzfh-ps-ldst", + "rv64uzfh-ps-move", + "rv64uzfh-ps-recoding", ) +cpu_types = ("atomic", "timing", "minor", "o3") -if config.bin_path: - bin_path = config.bin_path -else: - bin_path = joinpath(absdirpath(__file__), '..', 'resources', 
'asmtest') - -urlbase = config.resource_url + '/test-progs/asmtest/bin/' - -for cpu in cpu_types: +for cpu_type in cpu_types: for binary in binaries: - url = urlbase + binary - path = joinpath(bin_path, binary) - try: - program = DownloadedProgram(url, path, binary) - except: - continue - asm_test(joinpath(bin_path, binary, binary), cpu) + gem5_verify_config( + name=f"asm-riscv-{binary}-{cpu_type}", + verifiers=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "configs", + "simple_binary_run.py", + ), + config_args=[ + binary, + cpu_type, + "riscv", + "--num-cores", + "4", + "--resource-directory", + resource_path, + ], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + ) diff --git a/tests/gem5/configs/arm_boot_exit_run.py b/tests/gem5/configs/arm_boot_exit_run.py new file mode 100644 index 0000000000..aea3c4160f --- /dev/null +++ b/tests/gem5/configs/arm_boot_exit_run.py @@ -0,0 +1,230 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This example runs a simple linux boot on the ArmBoard. + +Characteristics +--------------- + +* Runs exclusively on the ARM ISA with the classic caches +""" + +from gem5.isas import ISA +from m5.objects import ArmDefaultRelease +from gem5.utils.requires import requires +from gem5.resources.resource import Resource +from gem5.simulate.simulator import Simulator +from m5.objects import VExpress_GEM5_Foundation +from gem5.coherence_protocol import CoherenceProtocol +from gem5.components.boards.arm_board import ArmBoard +from gem5.components.processors.simple_processor import SimpleProcessor +from gem5.components.processors.cpu_types import ( + get_cpu_types_str_set, + get_cpu_type_from_str, + CPUTypes, +) + +import argparse +import importlib + +parser = argparse.ArgumentParser( + description="A script to run the ARM boot exit tests." 
+) + +parser.add_argument( + "-n", + "--num-cpus", + type=int, + required=True, + help="The number of CPUs.", +) + +parser.add_argument( + "-c", + "--cpu", + type=str, + choices=get_cpu_types_str_set(), + required=True, + help="The CPU type.", +) + +parser.add_argument( + "-m", + "--mem-system", + type=str, + choices=("no_cache", "classic", "chi", "mesi_two_level", "mi_example"), + required=True, + help="The memory system.", +) + +parser.add_argument( + "-d", + "--dram-class", + type=str, + required=False, + default="DualChannelDDR3_1600", + help="The python class for the memory interface to use", +) + +parser.add_argument( + "-t", + "--tick-exit", + type=int, + required=False, + help="The tick to exit the simulation.", +) + +parser.add_argument( + "-r", + "--resource-directory", + type=str, + required=False, + help="The directory in which resources will be downloaded or exist.", +) + +args = parser.parse_args() + +# Run a check to ensure the right version of gem5 is being used. +requires(isa_required=ISA.ARM) + +if args.mem_system == "no_cache": + from gem5.components.cachehierarchies.classic.no_cache import NoCache + + cache_hierarchy = NoCache() + +elif args.mem_system == "classic": + from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( + PrivateL1PrivateL2CacheHierarchy, + ) + + cache_hierarchy = PrivateL1PrivateL2CacheHierarchy( + l1d_size="32KiB", l1i_size="32KiB", l2_size="512KiB" + ) + +elif args.mem_system == "chi": + requires(coherence_protocol_required=CoherenceProtocol.CHI) + from gem5.components.cachehierarchies.chi.private_l1_cache_hierarchy import ( + PrivateL1CacheHierarchy, + ) + + cache_hierarchy = PrivateL1CacheHierarchy( + size="16kB", + assoc=4, + ) + +elif args.mem_system == "mesi_two_level": + requires(coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL) + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( + MESITwoLevelCacheHierarchy, + ) + + cache_hierarchy = 
MESITwoLevelCacheHierarchy( + l1d_size="32kB", + l1d_assoc=8, + l1i_size="32kB", + l1i_assoc=8, + l2_size="256kB", + l2_assoc=16, + num_l2_banks=2, + ) + +elif args.mem_system == "mi_example": + requires(coherence_protocol_required=CoherenceProtocol.MI_EXAMPLE) + from gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy import ( + MIExampleCacheHierarchy, + ) + + cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=4) +else: + raise NotImplementedError( + "Memory type '{}' is not supported in the boot tests.".format( + args.mem_system + ) + ) + +# Setup the system memory. +python_module = "gem5.components.memory" +memory_class = getattr(importlib.import_module(python_module), args.dram_class) +memory = memory_class(size="4GiB") + +# Setup a processor. + +cpu_type = get_cpu_type_from_str(args.cpu) + +processor = SimpleProcessor( + cpu_type=cpu_type, num_cores=args.num_cpus, isa=ISA.ARM +) + + +# The ArmBoard requires a `release` to be specified. + +release = ArmDefaultRelease() + +# The platform sets up the memory ranges of all the on-chip and off-chip +# devices present on the ARM system. + +platform = VExpress_GEM5_Foundation() + +# Setup the board. +board = ArmBoard( + clk_freq="1GHz", + processor=processor, + memory=memory, + cache_hierarchy=cache_hierarchy, + release=release, + platform=platform, +) + +# Set the Full System workload. 
+board.set_kernel_disk_workload( + kernel=Resource( + "arm64-linux-kernel-5.4.49", + resource_directory=args.resource_directory, + ), + bootloader=Resource( + "arm64-bootloader-foundation", + resource_directory=args.resource_directory, + ), + disk_image=Resource( + "arm64-ubuntu-20.04-img", + resource_directory=args.resource_directory, + ), +) + +simulator = Simulator(board=board) + +if args.tick_exit: + simulator.run(max_ticks=args.tick_exit) +else: + simulator.run() + +print( + "Exiting @ tick {} because {}.".format( + simulator.get_current_tick(), + simulator.get_last_exit_event_cause(), + ) +) diff --git a/tests/gem5/configs/arm_generic.py b/tests/gem5/configs/arm_generic.py index 5ea5e641d6..df118c7583 100644 --- a/tests/gem5/configs/arm_generic.py +++ b/tests/gem5/configs/arm_generic.py @@ -37,15 +37,17 @@ from abc import ABCMeta, abstractmethod import m5 from m5.objects import * from m5.proxy import * -m5.util.addToPath('../configs/') + +m5.util.addToPath("../configs/") from common import FSConfig -from common.Caches import * +from base_caches import * from base_config import * from common.cores.arm.O3_ARM_v7a import * from common.Benchmarks import SysConfig from common import SysPaths + class ArmSESystemUniprocessor(BaseSESystemUniprocessor): """Syscall-emulation builder for ARM uniprocessor systems. @@ -60,9 +62,10 @@ class ArmSESystemUniprocessor(BaseSESystemUniprocessor): # The atomic SE configurations do not use caches if self.mem_mode == "timing": # Use the more representative cache configuration - cpu.addTwoLevelCacheHierarchy(O3_ARM_v7a_ICache(), - O3_ARM_v7a_DCache(), - O3_ARM_v7aL2()) + cpu.addTwoLevelCacheHierarchy( + O3_ARM_v7a_ICache(), O3_ARM_v7a_DCache(), O3_ARM_v7aL2() + ) + class LinuxArmSystemBuilder(object): """Mix-in that implements create_system. @@ -71,6 +74,7 @@ class LinuxArmSystemBuilder(object): ARM-specific create_system method to a class deriving from one of the generic base systems. 
""" + def __init__(self, machine_type, aarch64_kernel, enable_dvm, **kwargs): """ Arguments: @@ -81,9 +85,9 @@ class LinuxArmSystemBuilder(object): self.machine_type = machine_type self.aarch64_kernel = aarch64_kernel self.enable_dvm = enable_dvm - self.num_cpus = kwargs.get('num_cpus', 1) - self.mem_size = kwargs.get('mem_size', '256MB') - self.use_ruby = kwargs.get('use_ruby', False) + self.num_cpus = kwargs.get("num_cpus", 1) + self.mem_size = kwargs.get("mem_size", "256MB") + self.use_ruby = kwargs.get("use_ruby", False) def init_kvm(self, system): """Do KVM-specific system initialization. @@ -92,6 +96,7 @@ class LinuxArmSystemBuilder(object): system -- System to work on. """ system.kvm_vm = KvmVM() + system.release = ArmDefaultRelease.for_kvm() # Arm KVM regressions will use a simulated GIC. This means that in # order to work we need to remove the system interface of the @@ -120,9 +125,13 @@ class LinuxArmSystemBuilder(object): } sc = SysConfig(None, self.mem_size, [disk_image], "/dev/sda") - system = FSConfig.makeArmSystem(self.mem_mode, - self.machine_type, self.num_cpus, - sc, ruby=self.use_ruby) + system = FSConfig.makeArmSystem( + self.mem_mode, + self.machine_type, + self.num_cpus, + sc, + ruby=self.use_ruby, + ) # TODO: This is removing SECURITY and VIRTUALIZATION extensions # from AArch32 runs to fix long regressions. 
Find a fix or @@ -137,7 +146,8 @@ class LinuxArmSystemBuilder(object): system.workload.panic_on_oops = True system.workload.object_file = SysPaths.binary( - default_kernels[self.machine_type]) + default_kernels[self.machine_type] + ) self.init_system(system) if self.enable_dvm: @@ -145,20 +155,23 @@ class LinuxArmSystemBuilder(object): for decoder in cpu.decoder: decoder.dvm_enabled = True - system.workload.dtb_filename = \ - os.path.join(m5.options.outdir, 'system.dtb') + system.workload.dtb_filename = os.path.join( + m5.options.outdir, "system.dtb" + ) system.generateDtb(system.workload.dtb_filename) return system -class LinuxArmFSSystem(LinuxArmSystemBuilder, - BaseFSSystem): + +class LinuxArmFSSystem(LinuxArmSystemBuilder, BaseFSSystem): """Basic ARM full system builder.""" - def __init__(self, - machine_type='VExpress_GEM5_Foundation', - aarch64_kernel=True, - enable_dvm=False, - **kwargs): + def __init__( + self, + machine_type="VExpress_GEM5_Foundation", + aarch64_kernel=True, + enable_dvm=False, + **kwargs + ): """Initialize an ARM system that supports full system simulation. Note: Keyword arguments that are not listed below will be @@ -169,16 +182,19 @@ class LinuxArmFSSystem(LinuxArmSystemBuilder, """ BaseFSSystem.__init__(self, **kwargs) LinuxArmSystemBuilder.__init__( - self, machine_type, aarch64_kernel, enable_dvm, **kwargs) + self, machine_type, aarch64_kernel, enable_dvm, **kwargs + ) def create_caches_private(self, cpu): # Use the more representative cache configuration - cpu.addTwoLevelCacheHierarchy(O3_ARM_v7a_ICache(), - O3_ARM_v7a_DCache(), - O3_ARM_v7aL2()) + cpu.addTwoLevelCacheHierarchy( + O3_ARM_v7a_ICache(), O3_ARM_v7a_DCache(), O3_ARM_v7aL2() + ) -class LinuxArmFSSystemUniprocessor(LinuxArmSystemBuilder, - BaseFSSystemUniprocessor): + +class LinuxArmFSSystemUniprocessor( + LinuxArmSystemBuilder, BaseFSSystemUniprocessor +): """Basic ARM full system builder for uniprocessor systems. 
Note: This class is a specialization of the ArmFSSystem and is @@ -186,21 +202,28 @@ class LinuxArmFSSystemUniprocessor(LinuxArmSystemBuilder, test cases. """ - def __init__(self, - machine_type='VExpress_GEM5_Foundation', - aarch64_kernel=True, - **kwargs): + def __init__( + self, + machine_type="VExpress_GEM5_Foundation", + aarch64_kernel=True, + **kwargs + ): BaseFSSystemUniprocessor.__init__(self, **kwargs) LinuxArmSystemBuilder.__init__( - self, machine_type, aarch64_kernel, False, **kwargs) + self, machine_type, aarch64_kernel, False, **kwargs + ) + class LinuxArmFSSwitcheroo(LinuxArmSystemBuilder, BaseFSSwitcheroo): """Uniprocessor ARM system prepared for CPU switching""" - def __init__(self, - machine_type='VExpress_GEM5_Foundation', - aarch64_kernel=True, - **kwargs): + def __init__( + self, + machine_type="VExpress_GEM5_Foundation", + aarch64_kernel=True, + **kwargs + ): BaseFSSwitcheroo.__init__(self, **kwargs) LinuxArmSystemBuilder.__init__( - self, machine_type, aarch64_kernel, False, **kwargs) + self, machine_type, aarch64_kernel, False, **kwargs + ) diff --git a/tests/gem5/configs/base_caches.py b/tests/gem5/configs/base_caches.py new file mode 100644 index 0000000000..3b5f55870d --- /dev/null +++ b/tests/gem5/configs/base_caches.py @@ -0,0 +1,85 @@ +# Copyright (c) 2012 ARM Limited +# Copyright (c) 2020 Barkhausen Institut +# All rights reserved. +# +# The license below extends only to copyright in the software and shall +# not be construed as granting a license to any other intellectual +# property including but not limited to intellectual property relating +# to a hardware implementation of the functionality of the software +# licensed hereunder. You may use the software subject to the license +# terms below provided that you ensure that this notice is replicated +# unmodified and in its entirety in all distributions of the software, +# modified or unmodified, in source code or in binary form. 
+# +# Copyright (c) 2006-2007 The Regents of The University of Michigan +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from m5.objects import * + +# Base implementations of L1, L2, IO and TLB-walker caches. There are +# used in the regressions and also as base components in the +# system-configuration scripts. The values are meant to serve as a +# starting point, and specific parameters can be overridden in the +# specific instantiations. 
+ + +class L1Cache(Cache): + assoc = 2 + tag_latency = 2 + data_latency = 2 + response_latency = 2 + mshrs = 4 + tgts_per_mshr = 20 + + +class L1_ICache(L1Cache): + is_read_only = True + # Writeback clean lines as well + writeback_clean = True + + +class L1_DCache(L1Cache): + pass + + +class L2Cache(Cache): + assoc = 8 + tag_latency = 20 + data_latency = 20 + response_latency = 20 + mshrs = 20 + tgts_per_mshr = 12 + write_buffers = 8 + + +class IOCache(Cache): + assoc = 8 + tag_latency = 50 + data_latency = 50 + response_latency = 50 + mshrs = 20 + size = "1kB" + tgts_per_mshr = 12 diff --git a/tests/gem5/configs/base_config.py b/tests/gem5/configs/base_config.py index e148467151..22987d5eff 100644 --- a/tests/gem5/configs/base_config.py +++ b/tests/gem5/configs/base_config.py @@ -40,10 +40,11 @@ from m5.objects import * from m5.proxy import * from common import FSConfig from common import Options -from common.Caches import * +from base_caches import * from ruby import Ruby -_have_kvm_support = 'BaseKvmCPU' in globals() +_have_kvm_support = "BaseKvmCPU" in globals() + class BaseSystem(object, metaclass=ABCMeta): """Base system builder. @@ -54,9 +55,17 @@ class BaseSystem(object, metaclass=ABCMeta): the initialization process. """ - def __init__(self, mem_mode='timing', mem_class=SimpleMemory, - cpu_class=TimingSimpleCPU, num_cpus=1, num_threads=1, - checker=False, mem_size=None, use_ruby=False): + def __init__( + self, + mem_mode="timing", + mem_class=SimpleMemory, + cpu_class=TimingSimpleCPU, + num_cpus=1, + num_threads=1, + checker=False, + mem_size=None, + use_ruby=False, + ): """Initialize a simple base system. 
Keyword Arguments: @@ -78,10 +87,14 @@ class BaseSystem(object, metaclass=ABCMeta): def create_cpus(self, cpu_clk_domain): """Return a list of CPU objects to add to a system.""" - cpus = [ self.cpu_class(clk_domain=cpu_clk_domain, - numThreads=self.num_threads, - cpu_id=i) - for i in range(self.num_cpus) ] + cpus = [ + self.cpu_class( + clk_domain=cpu_clk_domain, + numThreads=self.num_threads, + cpu_id=i, + ) + for i in range(self.num_cpus) + ] if self.checker: for c in cpus: c.addCheckerCpu() @@ -93,8 +106,9 @@ class BaseSystem(object, metaclass=ABCMeta): Arguments: cpu -- CPU instance to work on. """ - cpu.addPrivateSplitL1Caches(L1_ICache(size='32kB', assoc=1), - L1_DCache(size='32kB', assoc=4)) + cpu.addPrivateSplitL1Caches( + L1_ICache(size="32kB", assoc=1), L1_DCache(size="32kB", assoc=4) + ) def create_caches_shared(self, system): """Add shared caches to a system. @@ -106,8 +120,9 @@ class BaseSystem(object, metaclass=ABCMeta): A bus that CPUs should use to connect to the shared cache. 
""" system.toL2Bus = L2XBar(clk_domain=system.cpu_clk_domain) - system.l2c = L2Cache(clk_domain=system.cpu_clk_domain, - size='4MB', assoc=8) + system.l2c = L2Cache( + clk_domain=system.cpu_clk_domain, size="4MB", assoc=8 + ) system.l2c.cpu_side = system.toL2Bus.mem_side_ports system.l2c.mem_side = system.membus.cpu_side_ports return system.toL2Bus @@ -123,9 +138,11 @@ class BaseSystem(object, metaclass=ABCMeta): self.create_caches_private(cpu) cpu.createInterruptController() cached_bus = sha_bus if sha_bus != None else system.membus - cpu.connectAllPorts(cached_bus.cpu_side_ports, - system.membus.cpu_side_ports, - system.membus.mem_side_ports) + cpu.connectAllPorts( + cached_bus.cpu_side_ports, + system.membus.cpu_side_ports, + system.membus.mem_side_ports, + ) def init_kvm_cpus(self, cpus): """ @@ -173,23 +190,24 @@ class BaseSystem(object, metaclass=ABCMeta): # Set the default cache size and associativity to be very # small to encourage races between requests and writebacks. - args.l1d_size="32kB" - args.l1i_size="32kB" - args.l2_size="4MB" - args.l1d_assoc=4 - args.l1i_assoc=2 - args.l2_assoc=8 + args.l1d_size = "32kB" + args.l1i_size = "32kB" + args.l2_size = "4MB" + args.l1d_assoc = 4 + args.l1i_assoc = 2 + args.l2_assoc = 8 args.num_cpus = self.num_cpus args.num_dirs = 2 - bootmem = getattr(system, '_bootmem', None) - Ruby.create_system(args, True, system, system.iobus, - system._dma_ports, bootmem) + bootmem = getattr(system, "_bootmem", None) + Ruby.create_system( + args, True, system, system.iobus, system._dma_ports, bootmem + ) # Create a seperate clock domain for Ruby system.ruby.clk_domain = SrcClockDomain( - clock = args.ruby_clock, - voltage_domain = system.voltage_domain) + clock=args.ruby_clock, voltage_domain=system.voltage_domain + ) for i, cpu in enumerate(system.cpu): if not cpu.switched_out: cpu.createInterruptController() @@ -199,25 +217,26 @@ class BaseSystem(object, metaclass=ABCMeta): for cpu in system.cpu: self.init_cpu(system, cpu, 
sha_bus) - if _have_kvm_support and \ - any([isinstance(c, BaseKvmCPU) for c in system.cpu]): + if _have_kvm_support and any( + [isinstance(c, BaseKvmCPU) for c in system.cpu] + ): self.init_kvm(system) self.init_kvm_cpus(system.cpu) - def create_clk_src(self,system): + def create_clk_src(self, system): # Create system clock domain. This provides clock value to every # clocked object that lies beneath it unless explicitly overwritten # by a different clock domain. system.voltage_domain = VoltageDomain() - system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = - system.voltage_domain) + system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain + ) # Create a seperate clock domain for components that should # run at CPUs frequency - system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = - system.voltage_domain) + system.cpu_clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain + ) @abstractmethod def create_system(self): @@ -230,6 +249,7 @@ class BaseSystem(object, metaclass=ABCMeta): defined by this class.""" pass + class BaseSESystem(BaseSystem): """Basic syscall-emulation builder.""" @@ -245,10 +265,12 @@ class BaseSESystem(BaseSystem): mem_ctrl.dram = self.mem_class() else: mem_ctrl = self.mem_class() - system = System(physmem = mem_ctrl, - membus = SystemXBar(), - mem_mode = self.mem_mode, - multi_thread = (self.num_threads > 1)) + system = System( + physmem=mem_ctrl, + membus=SystemXBar(), + mem_mode=self.mem_mode, + multi_thread=(self.num_threads > 1), + ) if not self.use_ruby: system.system_port = system.membus.cpu_side_ports system.physmem.port = system.membus.mem_side_ports @@ -257,9 +279,10 @@ class BaseSESystem(BaseSystem): def create_root(self): system = self.create_system() - m5.ticks.setGlobalFrequency('1THz') + m5.ticks.setGlobalFrequency("1THz") return Root(full_system=False, system=system) + class BaseSESystemUniprocessor(BaseSESystem): """Basic 
syscall-emulation builder for uniprocessor systems. @@ -274,13 +297,16 @@ class BaseSESystemUniprocessor(BaseSESystem): # The atomic SE configurations do not use caches if self.mem_mode == "timing": # @todo We might want to revisit these rather enthusiastic L1 sizes - cpu.addTwoLevelCacheHierarchy(L1_ICache(size='128kB'), - L1_DCache(size='256kB'), - L2Cache(size='2MB')) + cpu.addTwoLevelCacheHierarchy( + L1_ICache(size="128kB"), + L1_DCache(size="256kB"), + L2Cache(size="2MB"), + ) def create_caches_shared(self, system): return None + class BaseFSSystem(BaseSystem): """Basic full system builder.""" @@ -301,12 +327,13 @@ class BaseFSSystem(BaseSystem): mem_ctrls = [] for r in system.mem_ranges: mem_ctrl = MemCtrl() - mem_ctrl.dram = self.mem_class(range = r) + mem_ctrl.dram = self.mem_class(range=r) mem_ctrls.append(mem_ctrl) system.physmem = mem_ctrls else: - system.physmem = [self.mem_class(range = r) - for r in system.mem_ranges] + system.physmem = [ + self.mem_class(range=r) for r in system.mem_ranges + ] for i in range(len(system.physmem)): system.physmem[i].port = system.membus.mem_side_ports @@ -317,9 +344,10 @@ class BaseFSSystem(BaseSystem): def create_root(self): system = self.create_system() - m5.ticks.setGlobalFrequency('1THz') + m5.ticks.setGlobalFrequency("1THz") return Root(full_system=True, system=system) + class BaseFSSystemUniprocessor(BaseFSSystem): """Basic full system builder for uniprocessor systems. 
@@ -331,13 +359,16 @@ class BaseFSSystemUniprocessor(BaseFSSystem): super(BaseFSSystemUniprocessor, self).__init__(**kwargs) def create_caches_private(self, cpu): - cpu.addTwoLevelCacheHierarchy(L1_ICache(size='32kB', assoc=1), - L1_DCache(size='32kB', assoc=4), - L2Cache(size='4MB', assoc=8)) + cpu.addTwoLevelCacheHierarchy( + L1_ICache(size="32kB", assoc=1), + L1_DCache(size="32kB", assoc=4), + L2Cache(size="4MB", assoc=8), + ) def create_caches_shared(self, system): return None + class BaseFSSwitcheroo(BaseFSSystem): """Uniprocessor system prepared for CPU switching""" @@ -346,9 +377,9 @@ class BaseFSSwitcheroo(BaseFSSystem): self.cpu_classes = tuple(cpu_classes) def create_cpus(self, cpu_clk_domain): - cpus = [ cclass(clk_domain = cpu_clk_domain, - cpu_id=0, - switched_out=True) - for cclass in self.cpu_classes ] + cpus = [ + cclass(clk_domain=cpu_clk_domain, cpu_id=0, switched_out=True) + for cclass in self.cpu_classes + ] cpus[0].switched_out = False return cpus diff --git a/tests/gem5/configs/boot_kvm_fork_run.py b/tests/gem5/configs/boot_kvm_fork_run.py index c4160fdd35..18f6e9d416 100644 --- a/tests/gem5/configs/boot_kvm_fork_run.py +++ b/tests/gem5/configs/boot_kvm_fork_run.py @@ -47,7 +47,7 @@ from gem5.components.boards.x86_board import X86Board from gem5.coherence_protocol import CoherenceProtocol from gem5.isas import ISA from gem5.components.memory import SingleChannelDDR3_1600 -from gem5.components.processors.cpu_types import( +from gem5.components.processors.cpu_types import ( CPUTypes, get_cpu_types_str_set, get_cpu_type_from_str, @@ -124,15 +124,13 @@ requires( cache_hierarchy = None if args.mem_system == "mi_example": - from gem5.components.cachehierarchies.ruby.\ - mi_example_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy import ( MIExampleCacheHierarchy, ) cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8) elif args.mem_system == "mesi_two_level": - from 
gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -146,8 +144,7 @@ elif args.mem_system == "mesi_two_level": num_l2_banks=1, ) elif args.mem_system == "classic": - from gem5.components.cachehierarchies.classic.\ - private_l1_cache_hierarchy import ( + from gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy import ( PrivateL1CacheHierarchy, ) @@ -185,12 +182,10 @@ kernel_args = motherboard.get_default_kernel_args() + [args.kernel_args] # Set the Full System workload. motherboard.set_kernel_disk_workload( kernel=Resource( - "x86-linux-kernel-5.4.49", - resource_directory=args.resource_directory, + "x86-linux-kernel-5.4.49", resource_directory=args.resource_directory ), disk_image=Resource( - "x86-ubuntu-18.04-img", - resource_directory=args.resource_directory, + "x86-ubuntu-18.04-img", resource_directory=args.resource_directory ), readfile_contents=dedent( """ @@ -198,7 +193,7 @@ motherboard.set_kernel_disk_workload( m5 exit # exit in children and parent """ ), - kernel_args=kernel_args + kernel_args=kernel_args, ) @@ -216,7 +211,7 @@ root.sim_quantum = int(1e9) # Disable the gdb ports. Required for forking. m5.disableAllListeners() - +motherboard._pre_instantiate() m5.instantiate() # Simulate the inital boot with the starting KVM cpu @@ -240,7 +235,7 @@ for i in range(args.num_forks): print("Waiting for children...") for pid in pids: - print (os.waitpid(pid, 0)) + print(os.waitpid(pid, 0)) print("Children finished! 
Running to completion in parent.") exit_event = m5.simulate() diff --git a/tests/gem5/configs/boot_kvm_switch_exit.py b/tests/gem5/configs/boot_kvm_switch_exit.py index a807f84822..25f5808e13 100644 --- a/tests/gem5/configs/boot_kvm_switch_exit.py +++ b/tests/gem5/configs/boot_kvm_switch_exit.py @@ -37,7 +37,7 @@ from gem5.isas import ISA from gem5.components.boards.x86_board import X86Board from gem5.coherence_protocol import CoherenceProtocol from gem5.components.memory import SingleChannelDDR3_1600 -from gem5.components.processors.cpu_types import( +from gem5.components.processors.cpu_types import ( CPUTypes, get_cpu_types_str_set, get_cpu_type_from_str, @@ -110,15 +110,13 @@ requires( cache_hierarchy = None if args.mem_system == "mi_example": - from gem5.components.cachehierarchies.ruby.\ - mi_example_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy import ( MIExampleCacheHierarchy, ) cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8) elif args.mem_system == "mesi_two_level": - from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -132,8 +130,7 @@ elif args.mem_system == "mesi_two_level": num_l2_banks=1, ) elif args.mem_system == "classic": - from gem5.components.cachehierarchies.classic.\ - private_l1_cache_hierarchy import ( + from gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy import ( PrivateL1CacheHierarchy, ) @@ -171,12 +168,10 @@ kernal_args = motherboard.get_default_kernel_args() + [args.kernel_args] # Set the Full System workload. 
motherboard.set_kernel_disk_workload( kernel=Resource( - "x86-linux-kernel-5.4.49", - resource_directory=args.resource_directory, + "x86-linux-kernel-5.4.49", resource_directory=args.resource_directory ), disk_image=Resource( - "x86-ubuntu-18.04-img", - resource_directory=args.resource_directory, + "x86-ubuntu-18.04-img", resource_directory=args.resource_directory ), # The first exit signals to switch processors. readfile_contents="m5 exit\nm5 exit\n", @@ -195,7 +190,7 @@ simulator = Simulator( on_exit_event={ # When we reach the first exit, we switch cores. For the second exit we # simply exit the simulation (default behavior). - ExitEvent.EXIT : (i() for i in [processor.switch]), + ExitEvent.EXIT: (i() for i in [processor.switch]) }, # This parameter allows us to state the expected order-of-execution. # That is, we expect two exit events. If anyother event is triggered, an @@ -207,7 +202,6 @@ simulator.run() print( "Exiting @ tick {} because {}.".format( - simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) ) diff --git a/tests/gem5/configs/checkpoint.py b/tests/gem5/configs/checkpoint.py index 3545095817..d5d58922a3 100644 --- a/tests/gem5/configs/checkpoint.py +++ b/tests/gem5/configs/checkpoint.py @@ -39,14 +39,9 @@ import os import m5 -_exit_normal = ( - "target called exit()", - "m5_exit instruction encountered", - ) +_exit_normal = ("target called exit()", "m5_exit instruction encountered") -_exit_limit = ( - "simulate() limit reached", - ) +_exit_limit = ("simulate() limit reached",) _exitcode_done = 0 _exitcode_fail = 1 @@ -81,6 +76,7 @@ def _run_step(name, restore=None, interval=0.5): print("Test failed: Unknown exit cause: %s" % cause) sys.exit(_exitcode_fail) + def run_test(root, interval=0.5, max_checkpoints=5): """ Run the simulated system for a fixed amount of time and take a @@ -96,12 +92,11 @@ def run_test(root, interval=0.5, max_checkpoints=5): # Create a 
checkpoint from a separate child process. This enables # us to get back to a (mostly) pristine state and restart # simulation from the checkpoint. - p = Process(target=_run_step, - args=(cpt_name, ), - kwargs={ - "restore" : restore, - "interval" : interval, - }) + p = Process( + target=_run_step, + args=(cpt_name,), + kwargs={"restore": restore, "interval": interval}, + ) p.start() # Wait for the child to return @@ -115,8 +110,10 @@ def run_test(root, interval=0.5, max_checkpoints=5): print("Test done.", file=sys.stderr) sys.exit(0) else: - print("Test done, but no checkpoint was created.", - file=sys.stderr) + print( + "Test done, but no checkpoint was created.", + file=sys.stderr, + ) sys.exit(1) elif p.exitcode == _exitcode_checkpoint: checkpointed = True diff --git a/tests/gem5/configs/download_check.py b/tests/gem5/configs/download_check.py index 613a1c4d3b..decc62c2d7 100644 --- a/tests/gem5/configs/download_check.py +++ b/tests/gem5/configs/download_check.py @@ -86,8 +86,10 @@ for id in ids: download_path = os.path.join(args.download_directory, id) try: get_resource(resource_name=id, to_path=download_path) - except Exception: + except Exception as e: errors += f"Failure to download resource '{id}'.{os.linesep}" + errors += f"Exception message:{os.linesep}{str(e)}" + errors += f"{os.linesep}{os.linesep}" continue if md5(Path(download_path)) != resource_json["md5sum"]: diff --git a/tests/gem5/configs/parsec_disk_run.py b/tests/gem5/configs/parsec_disk_run.py index 456fce09b2..fbe1cd3688 100644 --- a/tests/gem5/configs/parsec_disk_run.py +++ b/tests/gem5/configs/parsec_disk_run.py @@ -43,7 +43,7 @@ from gem5.components.memory import SingleChannelDDR3_1600 from gem5.components.processors.simple_switchable_processor import ( SimpleSwitchableProcessor, ) -from gem5.components.processors.cpu_types import( +from gem5.components.processors.cpu_types import ( get_cpu_types_str_set, get_cpu_type_from_str, ) @@ -148,19 +148,15 @@ args = parser.parse_args() if 
args.mem_system == "classic": - from gem5.components.cachehierarchies.classic.\ - private_l1_private_l2_cache_hierarchy import ( + from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( PrivateL1PrivateL2CacheHierarchy, ) cache_hierarchy = PrivateL1PrivateL2CacheHierarchy( - l1d_size="32kB", - l1i_size="32kB", - l2_size="256kB", + l1d_size="32kB", l1i_size="32kB", l2_size="256kB" ) elif args.mem_system == "mesi_two_level": - from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -210,21 +206,14 @@ command = ( board.set_kernel_disk_workload( kernel=Resource( - "x86-linux-kernel-5.4.49", - resource_directory=args.resource_directory, + "x86-linux-kernel-5.4.49", resource_directory=args.resource_directory ), disk_image=Resource( - "x86-parsec", - resource_directory=args.resource_directory, + "x86-parsec", resource_directory=args.resource_directory ), readfile_contents=command, ) -print("Running with ISA: " + get_runtime_isa().name) -print("Running with protocol: " + get_runtime_coherence_protocol().name) -print() - - # Here we define some custom workbegin/workend exit event generators. Here we # want to switch to detailed CPUs at the beginning of the ROI, then continue to # the end of of the ROI. Then we exit the simulation. 
@@ -232,13 +221,15 @@ def workbegin(): processor.switch() yield False + def workend(): yield True + simulator = Simulator( board=board, on_exit_event={ - ExitEvent.WORKBEGIN : workbegin(), + ExitEvent.WORKBEGIN: workbegin(), ExitEvent.WORKEND: workend(), }, ) diff --git a/tests/gem5/configs/realview-minor-dual.py b/tests/gem5/configs/realview-minor-dual.py index bb4fe172e9..c6b4e45e9d 100644 --- a/tests/gem5/configs/realview-minor-dual.py +++ b/tests/gem5/configs/realview-minor-dual.py @@ -36,9 +36,11 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=MinorCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=MinorCPU, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview-minor.py b/tests/gem5/configs/realview-minor.py index 0bee6cafb2..a6351628fd 100644 --- a/tests/gem5/configs/realview-minor.py +++ b/tests/gem5/configs/realview-minor.py @@ -36,8 +36,10 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=MinorCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmMinorCPU, +).create_root() diff --git a/tests/gem5/configs/realview-o3-checker.py b/tests/gem5/configs/realview-o3-checker.py index 4809581a7b..89e5c66fd3 100644 --- a/tests/gem5/configs/realview-o3-checker.py +++ b/tests/gem5/configs/realview-o3-checker.py @@ -37,9 +37,11 @@ from m5.objects import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - 
machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3, - checker=True).create_root() +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=O3_ARM_v7a_3, + checker=True, +).create_root() diff --git a/tests/gem5/configs/realview-o3-dual.py b/tests/gem5/configs/realview-o3-dual.py index 52695e8740..f4326dbda1 100644 --- a/tests/gem5/configs/realview-o3-dual.py +++ b/tests/gem5/configs/realview-o3-dual.py @@ -37,9 +37,11 @@ from m5.objects import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 -root = LinuxArmFSSystem(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=O3_ARM_v7a_3, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview-o3.py b/tests/gem5/configs/realview-o3.py index e984049a77..6a1b757300 100644 --- a/tests/gem5/configs/realview-o3.py +++ b/tests/gem5/configs/realview-o3.py @@ -37,8 +37,10 @@ from m5.objects import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3).create_root() +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=O3_ARM_v7a_3, +).create_root() diff --git a/tests/gem5/configs/realview-simple-atomic-checkpoint.py b/tests/gem5/configs/realview-simple-atomic-checkpoint.py index d994ca5052..a60fd96b27 100644 --- a/tests/gem5/configs/realview-simple-atomic-checkpoint.py +++ 
b/tests/gem5/configs/realview-simple-atomic-checkpoint.py @@ -39,10 +39,12 @@ from m5.objects import * from arm_generic import * import checkpoint -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="atomic", + mem_class=SimpleMemory, + cpu_class=ArmAtomicSimpleCPU, +).create_root() run_test = functools.partial(checkpoint.run_test, interval=0.2) diff --git a/tests/gem5/configs/realview-simple-atomic-dual.py b/tests/gem5/configs/realview-simple-atomic-dual.py index 47bbcc772b..c02f8727ea 100644 --- a/tests/gem5/configs/realview-simple-atomic-dual.py +++ b/tests/gem5/configs/realview-simple-atomic-dual.py @@ -36,9 +36,11 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="atomic", + mem_class=SimpleMemory, + cpu_class=ArmAtomicSimpleCPU, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview-simple-atomic.py b/tests/gem5/configs/realview-simple-atomic.py index 659c18e1b7..9c782ad97a 100644 --- a/tests/gem5/configs/realview-simple-atomic.py +++ b/tests/gem5/configs/realview-simple-atomic.py @@ -36,9 +36,10 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU).create_root() - +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="atomic", + mem_class=SimpleMemory, + cpu_class=ArmAtomicSimpleCPU, +).create_root() diff 
--git a/tests/gem5/configs/realview-simple-timing-dual-ruby.py b/tests/gem5/configs/realview-simple-timing-dual-ruby.py index 63860c5b5b..741ededdff 100644 --- a/tests/gem5/configs/realview-simple-timing-dual-ruby.py +++ b/tests/gem5/configs/realview-simple-timing-dual-ruby.py @@ -36,11 +36,12 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU, - num_cpus=2, - use_ruby=True).create_root() - +root = LinuxArmFSSystem( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, + num_cpus=2, + use_ruby=True, +).create_root() diff --git a/tests/gem5/configs/realview-simple-timing-dual.py b/tests/gem5/configs/realview-simple-timing-dual.py index 59ce2c70e0..aaea9deb83 100644 --- a/tests/gem5/configs/realview-simple-timing-dual.py +++ b/tests/gem5/configs/realview-simple-timing-dual.py @@ -36,9 +36,11 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview-simple-timing-ruby.py b/tests/gem5/configs/realview-simple-timing-ruby.py index 94e4274668..fcca94361e 100644 --- a/tests/gem5/configs/realview-simple-timing-ruby.py +++ b/tests/gem5/configs/realview-simple-timing-ruby.py @@ -36,10 +36,11 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU, - 
use_ruby=True).create_root() - +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, + use_ruby=True, +).create_root() diff --git a/tests/gem5/configs/realview-simple-timing.py b/tests/gem5/configs/realview-simple-timing.py index e130e20a9f..2afbdd0a0e 100644 --- a/tests/gem5/configs/realview-simple-timing.py +++ b/tests/gem5/configs/realview-simple-timing.py @@ -36,8 +36,10 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', - mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + aarch64_kernel=False, + machine_type="VExpress_GEM5_V1", + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, +).create_root() diff --git a/tests/gem5/configs/realview-switcheroo-atomic.py b/tests/gem5/configs/realview-switcheroo-atomic.py index 092ac8e336..d2f2100f52 100644 --- a/tests/gem5/configs/realview-switcheroo-atomic.py +++ b/tests/gem5/configs/realview-switcheroo-atomic.py @@ -39,8 +39,8 @@ import switcheroo root = LinuxArmFSSwitcheroo( mem_class=SimpleMemory, - cpu_classes=(AtomicSimpleCPU, AtomicSimpleCPU) - ).create_root() + cpu_classes=(ArmAtomicSimpleCPU, ArmAtomicSimpleCPU), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. 
diff --git a/tests/gem5/configs/realview-switcheroo-full.py b/tests/gem5/configs/realview-switcheroo-full.py index 9d8ac5784e..6ed99a3772 100644 --- a/tests/gem5/configs/realview-switcheroo-full.py +++ b/tests/gem5/configs/realview-switcheroo-full.py @@ -38,11 +38,16 @@ from arm_generic import * import switcheroo root = LinuxArmFSSwitcheroo( - machine_type='VExpress_GEM5_V1', + machine_type="VExpress_GEM5_V1", aarch64_kernel=False, mem_class=DDR3_1600_8x8, - cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, MinorCPU, DerivO3CPU) - ).create_root() + cpu_classes=( + ArmAtomicSimpleCPU, + ArmTimingSimpleCPU, + ArmMinorCPU, + ArmO3CPU, + ), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. diff --git a/tests/gem5/configs/realview-switcheroo-noncaching-timing.py b/tests/gem5/configs/realview-switcheroo-noncaching-timing.py index ddaea8be16..cc77f440ab 100644 --- a/tests/gem5/configs/realview-switcheroo-noncaching-timing.py +++ b/tests/gem5/configs/realview-switcheroo-noncaching-timing.py @@ -38,8 +38,8 @@ from arm_generic import * import switcheroo root = LinuxArmFSSwitcheroo( - cpu_classes=(NonCachingSimpleCPU, TimingSimpleCPU), - ).create_root() + cpu_classes=(ArmNonCachingSimpleCPU, ArmTimingSimpleCPU) +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. 
diff --git a/tests/gem5/configs/realview-switcheroo-o3.py b/tests/gem5/configs/realview-switcheroo-o3.py index 7424f40353..4fca57ea1e 100644 --- a/tests/gem5/configs/realview-switcheroo-o3.py +++ b/tests/gem5/configs/realview-switcheroo-o3.py @@ -39,10 +39,10 @@ import switcheroo root = LinuxArmFSSwitcheroo( aarch64_kernel=False, - machine_type='VExpress_GEM5_V1', + machine_type="VExpress_GEM5_V1", mem_class=DDR3_1600_8x8, - cpu_classes=(DerivO3CPU, DerivO3CPU) - ).create_root() + cpu_classes=(ArmO3CPU, ArmO3CPU), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. diff --git a/tests/gem5/configs/realview-switcheroo-timing.py b/tests/gem5/configs/realview-switcheroo-timing.py index a1ee1c92a9..5157da6dac 100644 --- a/tests/gem5/configs/realview-switcheroo-timing.py +++ b/tests/gem5/configs/realview-switcheroo-timing.py @@ -39,8 +39,8 @@ import switcheroo root = LinuxArmFSSwitcheroo( mem_class=DDR3_1600_8x8, - cpu_classes=(TimingSimpleCPU, TimingSimpleCPU) - ).create_root() + cpu_classes=(ArmTimingSimpleCPU, ArmTimingSimpleCPU), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. 
diff --git a/tests/gem5/configs/realview64-kvm-dual.py b/tests/gem5/configs/realview64-kvm-dual.py index c97240ac46..e64ba2fbf5 100644 --- a/tests/gem5/configs/realview64-kvm-dual.py +++ b/tests/gem5/configs/realview64-kvm-dual.py @@ -37,10 +37,12 @@ from m5.objects import * from arm_generic import * from m5.ticks import fixGlobalFrequency, fromSeconds -root = LinuxArmFSSystem(mem_mode='atomic_noncaching', - machine_type='VExpress_GEM5_V1', - mem_class=SimpleMemory, - cpu_class=ArmV8KvmCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + mem_mode="atomic_noncaching", + machine_type="VExpress_GEM5_V1", + mem_class=SimpleMemory, + cpu_class=ArmV8KvmCPU, + num_cpus=2, +).create_root() fixGlobalFrequency() root.sim_quantum = fromSeconds(m5.util.convert.anyToLatency("1ms")) diff --git a/tests/gem5/configs/realview64-kvm.py b/tests/gem5/configs/realview64-kvm.py index f69008d34d..8fa6997da8 100644 --- a/tests/gem5/configs/realview64-kvm.py +++ b/tests/gem5/configs/realview64-kvm.py @@ -36,7 +36,9 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(mem_mode='atomic_noncaching', - machine_type='VExpress_GEM5_V1', - mem_class=SimpleMemory, - cpu_class=ArmV8KvmCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + mem_mode="atomic_noncaching", + machine_type="VExpress_GEM5_V1", + mem_class=SimpleMemory, + cpu_class=ArmV8KvmCPU, +).create_root() diff --git a/tests/gem5/configs/realview64-minor-dual.py b/tests/gem5/configs/realview64-minor-dual.py index 7b0165c547..aed5d83c71 100644 --- a/tests/gem5/configs/realview64-minor-dual.py +++ b/tests/gem5/configs/realview64-minor-dual.py @@ -36,7 +36,9 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=MinorCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmMinorCPU, + num_cpus=2, +).create_root() diff --git 
a/tests/gem5/configs/realview64-minor.py b/tests/gem5/configs/realview64-minor.py index 2d189c1abe..7bad3c52ed 100644 --- a/tests/gem5/configs/realview64-minor.py +++ b/tests/gem5/configs/realview64-minor.py @@ -36,6 +36,6 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=MinorCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=ArmMinorCPU +).create_root() diff --git a/tests/gem5/configs/realview64-o3-checker.py b/tests/gem5/configs/realview64-o3-checker.py index ffbb7c669b..00d9a5773a 100644 --- a/tests/gem5/configs/realview64-o3-checker.py +++ b/tests/gem5/configs/realview64-o3-checker.py @@ -37,8 +37,10 @@ from m5.objects import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 -root = LinuxArmFSSystemUniprocessor(mem_mode='timing', - machine_type='VExpress_GEM5_V1', - mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3, - checker=True).create_root() +root = LinuxArmFSSystemUniprocessor( + mem_mode="timing", + machine_type="VExpress_GEM5_V1", + mem_class=DDR3_1600_8x8, + cpu_class=O3_ARM_v7a_3, + checker=True, +).create_root() diff --git a/tests/gem5/configs/realview64-o3-dual-ruby.py b/tests/gem5/configs/realview64-o3-dual-ruby.py index 83c38e22e8..a4bffe902e 100644 --- a/tests/gem5/configs/realview64-o3-dual-ruby.py +++ b/tests/gem5/configs/realview64-o3-dual-ruby.py @@ -36,9 +36,11 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=O3CPU, - num_cpus=2, - enable_dvm=True, - use_ruby=True).create_root() +root = LinuxArmFSSystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmO3CPU, + num_cpus=2, + enable_dvm=True, + use_ruby=True, +).create_root() diff --git a/tests/gem5/configs/realview64-o3-dual.py b/tests/gem5/configs/realview64-o3-dual.py index 4648808a27..75fe10e316 
100644 --- a/tests/gem5/configs/realview64-o3-dual.py +++ b/tests/gem5/configs/realview64-o3-dual.py @@ -37,7 +37,9 @@ from m5.objects import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 -root = LinuxArmFSSystem(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=O3_ARM_v7a_3, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview64-o3.py b/tests/gem5/configs/realview64-o3.py index 4a49cc887e..c8ae8ec5af 100644 --- a/tests/gem5/configs/realview64-o3.py +++ b/tests/gem5/configs/realview64-o3.py @@ -37,6 +37,6 @@ from m5.objects import * from arm_generic import * from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3 -root = LinuxArmFSSystemUniprocessor(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=O3_ARM_v7a_3).create_root() +root = LinuxArmFSSystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=O3_ARM_v7a_3 +).create_root() diff --git a/tests/gem5/configs/realview64-simple-atomic-checkpoint.py b/tests/gem5/configs/realview64-simple-atomic-checkpoint.py index 37dcbc6bbd..fa73a0ee6e 100644 --- a/tests/gem5/configs/realview64-simple-atomic-checkpoint.py +++ b/tests/gem5/configs/realview64-simple-atomic-checkpoint.py @@ -39,9 +39,10 @@ from m5.objects import * from arm_generic import * import checkpoint -root = LinuxArmFSSystemUniprocessor(mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + mem_mode="atomic", mem_class=SimpleMemory, cpu_class=ArmAtomicSimpleCPU +).create_root() -run_test = functools.partial(checkpoint.run_test, interval=0.2, - max_checkpoints=3) +run_test = functools.partial( + checkpoint.run_test, interval=0.2, max_checkpoints=3 +) diff --git a/tests/gem5/configs/realview64-simple-atomic-dual.py b/tests/gem5/configs/realview64-simple-atomic-dual.py index 
d1ab0bf71d..19cf751603 100644 --- a/tests/gem5/configs/realview64-simple-atomic-dual.py +++ b/tests/gem5/configs/realview64-simple-atomic-dual.py @@ -36,7 +36,9 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + mem_mode="atomic", + mem_class=SimpleMemory, + cpu_class=ArmAtomicSimpleCPU, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview64-simple-atomic.py b/tests/gem5/configs/realview64-simple-atomic.py index 42a9a9220c..299dd7b0a7 100644 --- a/tests/gem5/configs/realview64-simple-atomic.py +++ b/tests/gem5/configs/realview64-simple-atomic.py @@ -36,7 +36,6 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(mem_mode='atomic', - mem_class=SimpleMemory, - cpu_class=AtomicSimpleCPU).create_root() - +root = LinuxArmFSSystemUniprocessor( + mem_mode="atomic", mem_class=SimpleMemory, cpu_class=ArmAtomicSimpleCPU +).create_root() diff --git a/tests/gem5/configs/realview64-simple-timing-dual-ruby.py b/tests/gem5/configs/realview64-simple-timing-dual-ruby.py index b29d548f4f..96ad96355c 100644 --- a/tests/gem5/configs/realview64-simple-timing-dual-ruby.py +++ b/tests/gem5/configs/realview64-simple-timing-dual-ruby.py @@ -36,9 +36,10 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU, - num_cpus=2, - use_ruby=True).create_root() - +root = LinuxArmFSSystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, + num_cpus=2, + use_ruby=True, +).create_root() diff --git a/tests/gem5/configs/realview64-simple-timing-dual.py b/tests/gem5/configs/realview64-simple-timing-dual.py index e9c37cd37f..8b62cd3414 100644 --- a/tests/gem5/configs/realview64-simple-timing-dual.py +++ b/tests/gem5/configs/realview64-simple-timing-dual.py @@ -36,7 
+36,9 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystem(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU, - num_cpus=2).create_root() +root = LinuxArmFSSystem( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, + num_cpus=2, +).create_root() diff --git a/tests/gem5/configs/realview64-simple-timing-ruby.py b/tests/gem5/configs/realview64-simple-timing-ruby.py index 22e15cdb4d..f6024537a2 100644 --- a/tests/gem5/configs/realview64-simple-timing-ruby.py +++ b/tests/gem5/configs/realview64-simple-timing-ruby.py @@ -36,8 +36,9 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU, - use_ruby=True).create_root() - +root = LinuxArmFSSystemUniprocessor( + mem_mode="timing", + mem_class=DDR3_1600_8x8, + cpu_class=ArmTimingSimpleCPU, + use_ruby=True, +).create_root() diff --git a/tests/gem5/configs/realview64-simple-timing.py b/tests/gem5/configs/realview64-simple-timing.py index 9cf063c3ec..6897f3b1ad 100644 --- a/tests/gem5/configs/realview64-simple-timing.py +++ b/tests/gem5/configs/realview64-simple-timing.py @@ -36,6 +36,6 @@ from m5.objects import * from arm_generic import * -root = LinuxArmFSSystemUniprocessor(mem_mode='timing', - mem_class=DDR3_1600_8x8, - cpu_class=TimingSimpleCPU).create_root() +root = LinuxArmFSSystemUniprocessor( + mem_mode="timing", mem_class=DDR3_1600_8x8, cpu_class=ArmTimingSimpleCPU +).create_root() diff --git a/tests/gem5/configs/realview64-switcheroo-atomic.py b/tests/gem5/configs/realview64-switcheroo-atomic.py index c135ea1c68..c2f67f0553 100644 --- a/tests/gem5/configs/realview64-switcheroo-atomic.py +++ b/tests/gem5/configs/realview64-switcheroo-atomic.py @@ -39,8 +39,8 @@ import switcheroo root = LinuxArmFSSwitcheroo( mem_class=SimpleMemory, - cpu_classes=(AtomicSimpleCPU, AtomicSimpleCPU) - ).create_root() + cpu_classes=(ArmAtomicSimpleCPU, 
ArmAtomicSimpleCPU), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. diff --git a/tests/gem5/configs/realview64-switcheroo-full.py b/tests/gem5/configs/realview64-switcheroo-full.py index 2b12873102..020957875d 100644 --- a/tests/gem5/configs/realview64-switcheroo-full.py +++ b/tests/gem5/configs/realview64-switcheroo-full.py @@ -39,8 +39,13 @@ import switcheroo root = LinuxArmFSSwitcheroo( mem_class=DDR3_1600_8x8, - cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, MinorCPU, DerivO3CPU) - ).create_root() + cpu_classes=( + ArmAtomicSimpleCPU, + ArmTimingSimpleCPU, + ArmMinorCPU, + ArmO3CPU, + ), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. diff --git a/tests/gem5/configs/realview64-switcheroo-o3.py b/tests/gem5/configs/realview64-switcheroo-o3.py index f7a1493d4c..f899337a8f 100644 --- a/tests/gem5/configs/realview64-switcheroo-o3.py +++ b/tests/gem5/configs/realview64-switcheroo-o3.py @@ -38,9 +38,8 @@ from arm_generic import * import switcheroo root = LinuxArmFSSwitcheroo( - mem_class=DDR3_1600_8x8, - cpu_classes=(DerivO3CPU, DerivO3CPU) - ).create_root() + mem_class=DDR3_1600_8x8, cpu_classes=(ArmO3CPU, ArmO3CPU) +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. diff --git a/tests/gem5/configs/realview64-switcheroo-timing.py b/tests/gem5/configs/realview64-switcheroo-timing.py index aafd1f4717..4ccce5d953 100644 --- a/tests/gem5/configs/realview64-switcheroo-timing.py +++ b/tests/gem5/configs/realview64-switcheroo-timing.py @@ -39,8 +39,8 @@ import switcheroo root = LinuxArmFSSwitcheroo( mem_class=DDR3_1600_8x8, - cpu_classes=(TimingSimpleCPU, TimingSimpleCPU) - ).create_root() + cpu_classes=(ArmTimingSimpleCPU, ArmTimingSimpleCPU), +).create_root() # Setup a custom test method that uses the switcheroo tester that # switches between CPU models. 
diff --git a/tests/gem5/configs/requires_check.py b/tests/gem5/configs/requires_check.py index 8ec566a908..eb29f32aa3 100644 --- a/tests/gem5/configs/requires_check.py +++ b/tests/gem5/configs/requires_check.py @@ -36,7 +36,7 @@ import argparse parser = argparse.ArgumentParser( description="A simple script used to check the behavior of " - "`gem5.utils.requires`." + "`gem5.utils.requires`." ) parser.add_argument( @@ -46,7 +46,7 @@ parser.add_argument( choices=get_isas_str_set(), required=True, help="The required ISA. A non-zero exit code is returned if the " - "requirements are not met." , + "requirements are not met.", ) args = parser.parse_args() diff --git a/tests/gem5/configs/riscv_boot_exit_run.py b/tests/gem5/configs/riscv_boot_exit_run.py index fd57e4507a..4424868112 100644 --- a/tests/gem5/configs/riscv_boot_exit_run.py +++ b/tests/gem5/configs/riscv_boot_exit_run.py @@ -40,6 +40,7 @@ from gem5.components.processors.cpu_types import CPUTypes from gem5.components.boards.riscv_board import RiscvBoard from gem5.components.processors.simple_processor import SimpleProcessor from gem5.simulate.simulator import Simulator +from gem5.resources.workload import Workload import argparse import importlib @@ -49,11 +50,7 @@ parser = argparse.ArgumentParser( ) parser.add_argument( - "-n", - "--num-cpus", - type=int, - required=True, - help="The number of CPUs.", + "-n", "--num-cpus", type=int, required=True, help="The number of CPUs." 
) parser.add_argument( @@ -69,7 +66,7 @@ parser.add_argument( "-m", "--mem-system", type=str, - choices=("classic", "mi_example",), + choices=("classic", "mesi_two_level"), required=True, help="The memory system.", ) @@ -80,7 +77,7 @@ parser.add_argument( type=str, required=False, default="DualChannelDDR3_1600", - help="The python class for the memory interface to use" + help="The python class for the memory interface to use", ) parser.add_argument( @@ -105,29 +102,33 @@ args = parser.parse_args() requires(isa_required=ISA.RISCV) if args.mem_system == "classic": - from gem5.components.cachehierarchies.classic.\ - private_l1_private_l2_cache_hierarchy import \ - PrivateL1PrivateL2CacheHierarchy + from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( + PrivateL1PrivateL2CacheHierarchy, + ) # Setup the cache hierarchy. cache_hierarchy = PrivateL1PrivateL2CacheHierarchy( l1d_size="32KiB", l1i_size="32KiB", l2_size="512KiB" ) -elif args.mem_system == "mi_example": - from gem5.components.cachehierarchies.ruby.\ - mi_example_cache_hierarchy import \ - MIExampleCacheHierarchy +elif args.mem_system == "mesi_two_level": + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( + MESITwoLevelCacheHierarchy, + ) # Setup the cache hierarchy. - cache_hierarchy = MIExampleCacheHierarchy( - size="32KiB", assoc=8 + cache_hierarchy = MESITwoLevelCacheHierarchy( + l1d_size="16kB", + l1d_assoc=8, + l1i_size="16kB", + l1i_assoc=8, + l2_size="256kB", + l2_assoc=16, + num_l2_banks=1, ) # Setup the system memory. python_module = "gem5.components.memory" -memory_class = getattr( - importlib.import_module(python_module), args.dram_class -) +memory_class = getattr(importlib.import_module(python_module), args.dram_class) memory = memory_class(size="4GiB") # Setup a processor. 
@@ -147,9 +148,7 @@ else: ) processor = SimpleProcessor( - cpu_type=cpu_type, - isa=ISA.RISCV, - num_cores=args.num_cpus, + cpu_type=cpu_type, isa=ISA.RISCV, num_cores=args.num_cpus ) # Setup the board. @@ -160,28 +159,22 @@ board = RiscvBoard( cache_hierarchy=cache_hierarchy, ) -# Set the Full System workload. -board.set_kernel_disk_workload( - kernel=Resource( - "riscv-bootloader-vmlinux-5.10", - resource_directory=args.resource_directory, - ), - disk_image=Resource( - "riscv-ubuntu-20.04-img", - resource_directory=args.resource_directory, - ), +# Set the workload. +workload = Workload( + "riscv-ubuntu-20.04-boot", resource_directory=args.resource_directory ) +board.set_workload(workload) + simulator = Simulator(board=board) if args.tick_exit: - simulator.run(max_ticks = args.tick_exit) + simulator.run(max_ticks=args.tick_exit) else: simulator.run() print( "Exiting @ tick {} because {}.".format( - simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) -) \ No newline at end of file +) diff --git a/tests/gem5/configs/runtime_isa_check.py b/tests/gem5/configs/runtime_isa_check.py index 8f1769343b..1076e99335 100644 --- a/tests/gem5/configs/runtime_isa_check.py +++ b/tests/gem5/configs/runtime_isa_check.py @@ -36,7 +36,7 @@ import argparse parser = argparse.ArgumentParser( description="A simple script used to check the output of " - "`gem5.runtime.get_runtime_isa`" + "`gem5.runtime.get_runtime_isa`" ) parser.add_argument( @@ -46,7 +46,7 @@ parser.add_argument( choices=get_isas_str_set(), required=True, help="The expected ISA. 
If not returned by `get_runtime_isa`, a " - "non-zero exit code will be returned by the script" , + "non-zero exit code will be returned by the script", ) args = parser.parse_args() diff --git a/tests/gem5/configs/simple_binary_run.py b/tests/gem5/configs/simple_binary_run.py index ebe833a985..d69e1a1169 100644 --- a/tests/gem5/configs/simple_binary_run.py +++ b/tests/gem5/configs/simple_binary_run.py @@ -31,7 +31,7 @@ gem5 while still being functinal. """ from gem5.resources.resource import Resource -from gem5.components.processors.cpu_types import( +from gem5.components.processors.cpu_types import ( get_cpu_types_str_set, get_cpu_type_from_str, ) @@ -39,33 +39,38 @@ from gem5.components.memory import SingleChannelDDR3_1600 from gem5.components.boards.simple_board import SimpleBoard from gem5.components.cachehierarchies.classic.no_cache import NoCache from gem5.components.processors.simple_processor import SimpleProcessor +from gem5.components.processors.base_cpu_processor import BaseCPUProcessor +from gem5.components.processors.simple_core import SimpleCore +from gem5.components.boards.mem_mode import MemMode +from gem5.components.processors.cpu_types import CPUTypes from gem5.simulate.simulator import Simulator from gem5.isas import get_isa_from_str, get_isas_str_set import argparse +from python.gem5.components.processors.base_cpu_core import BaseCPUCore + parser = argparse.ArgumentParser( description="A gem5 script for running simple binaries in SE mode." ) parser.add_argument( - "resource", - type=str, - help="The gem5 resource binary to run.", + "resource", type=str, help="The gem5 resource binary to run." ) parser.add_argument( - "cpu", - type=str, - choices=get_cpu_types_str_set(), - help="The CPU type used.", + "cpu", type=str, choices=get_cpu_types_str_set(), help="The CPU type used." 
) parser.add_argument( - "isa", - type=str, - choices=get_isas_str_set(), - help="The ISA used", + "isa", type=str, choices=get_isas_str_set(), help="The ISA used" +) + +parser.add_argument( + "-b", + "--base-cpu-processor", + action="store_true", + help="Use the BaseCPUProcessor instead of the SimpleProcessor.", ) parser.add_argument( @@ -76,16 +81,52 @@ parser.add_argument( help="The directory in which resources will be downloaded or exist.", ) +parser.add_argument( + "--arguments", + type=str, + action="append", + default=[], + required=False, + help="The input arguments for the binary.", +) + +parser.add_argument( + "-n", + "--num-cores", + type=int, + default=1, + required=False, + help="The number of CPU cores to run.", +) + args = parser.parse_args() # Setup the system. cache_hierarchy = NoCache() memory = SingleChannelDDR3_1600() -processor = SimpleProcessor( - cpu_type=get_cpu_type_from_str(args.cpu), - isa=get_isa_from_str(args.isa), - num_cores=1, -) + +if args.base_cpu_processor: + cores = [ + BaseCPUCore( + core=SimpleCore.cpu_simobject_factory( + cpu_type=get_cpu_type_from_str(args.cpu), + isa=get_isa_from_str(args.isa), + core_id=i, + ), + isa=get_isa_from_str(args.isa), + ) + for i in range(args.num_cores) + ] + + processor = BaseCPUProcessor( + cores=cores, + ) +else: + processor = SimpleProcessor( + cpu_type=get_cpu_type_from_str(args.cpu), + isa=get_isa_from_str(args.isa), + num_cores=args.num_cores, + ) motherboard = SimpleBoard( clk_freq="3GHz", @@ -95,9 +136,8 @@ motherboard = SimpleBoard( ) # Set the workload -binary = Resource(args.resource, - resource_directory=args.resource_directory) -motherboard.set_se_binary_workload(binary) +binary = Resource(args.resource, resource_directory=args.resource_directory) +motherboard.set_se_binary_workload(binary, arguments=args.arguments) # Run the simulation simulator = Simulator(board=motherboard) @@ -105,7 +145,6 @@ simulator.run() print( "Exiting @ tick {} because {}.".format( - 
simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) ) diff --git a/tests/gem5/configs/switcheroo.py b/tests/gem5/configs/switcheroo.py index fb1db81322..5f38543c52 100644 --- a/tests/gem5/configs/switcheroo.py +++ b/tests/gem5/configs/switcheroo.py @@ -36,8 +36,10 @@ import m5 import _m5 from m5.objects import * -m5.util.addToPath('../configs/') -from common.Caches import * + +m5.util.addToPath("../configs/") +from base_caches import * + class Sequential: """Sequential CPU switcher. @@ -48,12 +50,13 @@ class Sequential: in. base_config.BaseFSSwitcheroo can be used to create such a system. """ + def __init__(self, cpus): self.first_cpu = None for (cpuno, cpu) in enumerate(cpus): if not cpu.switched_out: if self.first_cpu != None: - fatal("More than one CPU is switched in"); + fatal("More than one CPU is switched in") self.first_cpu = cpuno if self.first_cpu == None: @@ -69,6 +72,7 @@ class Sequential: def first(self): return self.cpus[self.first_cpu] + def run_test(root, switcher=None, freq=1000, verbose=False): """Test runner for CPU switcheroo tests. @@ -124,14 +128,19 @@ def run_test(root, switcher=None, freq=1000, verbose=False): print("Next CPU: %s" % type(next_cpu)) m5.drain() if current_cpu != next_cpu: - m5.switchCpus(system, [ (current_cpu, next_cpu) ], - verbose=verbose) + m5.switchCpus( + system, [(current_cpu, next_cpu)], verbose=verbose + ) else: - print("Source CPU and destination CPU are the same," - " skipping...") + print( + "Source CPU and destination CPU are the same," + " skipping..." 
+ ) current_cpu = next_cpu - elif exit_cause == "target called exit()" or \ - exit_cause == "m5_exit instruction encountered": + elif ( + exit_cause == "target called exit()" + or exit_cause == "m5_exit instruction encountered" + ): sys.exit(0) else: diff --git a/tests/gem5/configs/x86_boot_exit_run.py b/tests/gem5/configs/x86_boot_exit_run.py index 358276c3c7..b1cbc647b2 100644 --- a/tests/gem5/configs/x86_boot_exit_run.py +++ b/tests/gem5/configs/x86_boot_exit_run.py @@ -33,19 +33,23 @@ import m5 from gem5.runtime import get_runtime_coherence_protocol from gem5.isas import ISA from gem5.utils.requires import requires -from gem5.resources.resource import Resource from gem5.coherence_protocol import CoherenceProtocol from gem5.components.boards.x86_board import X86Board -from gem5.components.processors.cpu_types import( +from gem5.components.processors.cpu_types import ( get_cpu_types_str_set, get_cpu_type_from_str, ) from gem5.components.processors.simple_processor import SimpleProcessor from gem5.simulate.simulator import Simulator +from gem5.resources.workload import Workload import argparse import importlib +from python.gem5.components.boards.kernel_disk_workload import ( + KernelDiskWorkload, +) + parser = argparse.ArgumentParser( description="A script to run the gem5 boot test. This test boots the " "linux kernel." 
@@ -80,7 +84,7 @@ parser.add_argument( type=str, required=False, default="DualChannelDDR3_1600", - help="The python class for the memory interface to use" + help="The python class for the memory interface to use", ) parser.add_argument( "-b", @@ -116,21 +120,21 @@ if args.mem_system == "mi_example": elif args.mem_system == "mesi_two_level": coherence_protocol_required = CoherenceProtocol.MESI_TWO_LEVEL -requires(isa_required=ISA.X86, - coherence_protocol_required=coherence_protocol_required, - kvm_required=(args.cpu == "kvm")) +requires( + isa_required=ISA.X86, + coherence_protocol_required=coherence_protocol_required, + kvm_required=(args.cpu == "kvm"), +) cache_hierarchy = None if args.mem_system == "mi_example": - from gem5.components.cachehierarchies.ruby.\ - mi_example_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy import ( MIExampleCacheHierarchy, ) cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8) elif args.mem_system == "mesi_two_level": - from gem5.components.cachehierarchies.ruby.\ - mesi_two_level_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -144,8 +148,7 @@ elif args.mem_system == "mesi_two_level": num_l2_banks=1, ) elif args.mem_system == "classic": - from gem5.components.cachehierarchies.classic.\ - private_l1_cache_hierarchy import ( + from gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy import ( PrivateL1CacheHierarchy, ) @@ -163,9 +166,7 @@ assert cache_hierarchy != None # Warning: This must be kept at 3GB for now. X86Motherboard does not support # anything else right now! python_module = "gem5.components.memory" -memory_class = getattr( - importlib.import_module(python_module), args.dram_class -) +memory_class = getattr(importlib.import_module(python_module), args.dram_class) memory = memory_class(size="3GiB") # Setup a Processor. 
@@ -187,19 +188,12 @@ kernal_args = motherboard.get_default_kernel_args() if args.boot_type == "init": kernal_args.append("init=/root/exit.sh") -# Set the Full System workload. -motherboard.set_kernel_disk_workload( - kernel=Resource( - "x86-linux-kernel-5.4.49", - resource_directory=args.resource_directory, - ), - disk_image=Resource( - "x86-ubuntu-18.04-img", - resource_directory=args.resource_directory, - ), - kernel_args=kernal_args, +# Set the workload. +workload = Workload( + "x86-ubuntu-18.04-boot", resource_directory=args.resource_directory ) - +workload.set_parameter("kernel_args", kernal_args) +motherboard.set_workload(workload) # Begin running of the simulation. This will exit once the Linux system boot # is complete. @@ -211,13 +205,12 @@ print("Beginning simulation!") simulator = Simulator(board=motherboard) if args.tick_exit: - simulator.run(max_ticks = args.tick_exit) + simulator.run(max_ticks=args.tick_exit) else: simulator.run() print( "Exiting @ tick {} because {}.".format( - simulator.get_current_tick(), - simulator.get_last_exit_event_cause(), + simulator.get_current_tick(), simulator.get_last_exit_event_cause() ) -) \ No newline at end of file +) diff --git a/tests/gem5/cpu_tests/run.py b/tests/gem5/cpu_tests/run.py index c17956ee87..fb528e5e8c 100644 --- a/tests/gem5/cpu_tests/run.py +++ b/tests/gem5/cpu_tests/run.py @@ -31,6 +31,7 @@ import argparse import m5 from m5.objects import * + class L1Cache(Cache): """Simple L1 Cache with default values""" @@ -47,34 +48,37 @@ class L1Cache(Cache): def connectCPU(self, cpu): """Connect this cache's port to a CPU-side port - This must be defined in a subclass""" + This must be defined in a subclass""" raise NotImplementedError + class L1ICache(L1Cache): """Simple L1 instruction cache with default values""" # Set the default size - size = '32kB' + size = "32kB" def connectCPU(self, cpu): """Connect this cache's port to a CPU icache port""" self.cpu_side = cpu.icache_port + class L1DCache(L1Cache): 
"""Simple L1 data cache with default values""" # Set the default size - size = '32kB' + size = "32kB" def connectCPU(self, cpu): """Connect this cache's port to a CPU dcache port""" self.cpu_side = cpu.dcache_port + class L2Cache(Cache): """Simple L2 Cache with default values""" # Default parameters - size = '512kB' + size = "512kB" assoc = 16 tag_latency = 10 data_latency = 10 @@ -90,30 +94,29 @@ class L2Cache(Cache): class MySimpleMemory(SimpleMemory): - latency = '1ns' + latency = "1ns" -if buildEnv['TARGET_ISA'] == 'x86': - valid_cpu = {'AtomicSimpleCPU': AtomicSimpleCPU, - 'TimingSimpleCPU': TimingSimpleCPU, - 'DerivO3CPU': DerivO3CPU - } -else: - valid_cpu = {'AtomicSimpleCPU': AtomicSimpleCPU, - 'TimingSimpleCPU': TimingSimpleCPU, - 'MinorCPU': MinorCPU, - 'DerivO3CPU': DerivO3CPU, - } -valid_mem = {'SimpleMemory': MySimpleMemory, - 'DDR3_1600_8x8': DDR3_1600_8x8 - } +valid_cpu = { + "X86AtomicSimpleCPU": X86AtomicSimpleCPU, + "X86TimingSimpleCPU": X86TimingSimpleCPU, + "X86DerivO3CPU": X86O3CPU, + "ArmAtomicSimpleCPU": ArmAtomicSimpleCPU, + "ArmTimingSimpleCPU": ArmTimingSimpleCPU, + "ArmMinorCPU": ArmMinorCPU, + "ArmDerivO3CPU": ArmO3CPU, + "RiscvAtomicSimpleCPU": RiscvAtomicSimpleCPU, + "RiscvTimingSimpleCPU": RiscvTimingSimpleCPU, + "RiscvMinorCPU": RiscvMinorCPU, + "RiscvDerivO3CPU": RiscvO3CPU, +} + +valid_mem = {"SimpleMemory": MySimpleMemory, "DDR3_1600_8x8": DDR3_1600_8x8} parser = argparse.ArgumentParser() -parser.add_argument('binary', type = str) -parser.add_argument('--cpu', choices = valid_cpu.keys(), - default = 'TimingSimpleCPU') -parser.add_argument('--mem', choices = valid_mem.keys(), - default = 'SimpleMemory') +parser.add_argument("binary", type=str) +parser.add_argument("--cpu") +parser.add_argument("--mem", choices=valid_mem.keys(), default="SimpleMemory") args = parser.parse_args() @@ -122,17 +125,25 @@ system = System() system.workload = SEWorkload.init_compatible(args.binary) system.clk_domain = SrcClockDomain() 
-system.clk_domain.clock = '1GHz' +system.clk_domain.clock = "1GHz" system.clk_domain.voltage_domain = VoltageDomain() -if args.cpu != "AtomicSimpleCPU": - system.mem_mode = 'timing' +if args.cpu not in ( + "X86AtomicSimpleCPU", + "ArmAtomicSimpleCPU", + "RiscvAtomicSimpleCPU", +): + system.mem_mode = "timing" -system.mem_ranges = [AddrRange('512MB')] +system.mem_ranges = [AddrRange("512MB")] system.cpu = valid_cpu[args.cpu]() -if args.cpu == "AtomicSimpleCPU": +if args.cpu in ( + "X86AtomicSimpleCPU", + "ArmAtomicSimpleCPU", + "RiscvAtomicSimpleCPU", +): system.membus = SystemXBar() system.cpu.icache_port = system.membus.cpu_side_ports system.cpu.dcache_port = system.membus.cpu_side_ports @@ -150,7 +161,7 @@ else: system.l2cache.connectMemSideBus(system.membus) system.cpu.createInterruptController() -if m5.defines.buildEnv['TARGET_ISA'] == "x86": +if args.cpu in ("X86AtomicSimpleCPU", "X86TimingSimpleCPU", "X86DerivO3CPU"): system.cpu.interrupts[0].pio = system.membus.mem_side_ports system.cpu.interrupts[0].int_master = system.membus.cpu_side_ports system.cpu.interrupts[0].int_slave = system.membus.mem_side_ports @@ -165,10 +176,10 @@ process.cmd = [args.binary] system.cpu.workload = process system.cpu.createThreads() -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) m5.instantiate() exit_event = m5.simulate() -if exit_event.getCause() != 'exiting with last active thread context': +if exit_event.getCause() != "exiting with last active thread context": exit(1) diff --git a/tests/gem5/cpu_tests/test.py b/tests/gem5/cpu_tests/test.py index c0322b2abb..bbdb492c82 100644 --- a/tests/gem5/cpu_tests/test.py +++ b/tests/gem5/cpu_tests/test.py @@ -36,53 +36,62 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -''' +""" Test file containing simple workloads to run on CPU models. Each test takes ~10 seconds to run. 
-''' +""" from testlib import * -workloads = ('Bubblesort','FloatMM') +workloads = ("Bubblesort", "FloatMM") valid_isas = { - constants.vega_x86_tag : - ('AtomicSimpleCPU', 'TimingSimpleCPU', 'DerivO3CPU'), - constants.arm_tag: - ('AtomicSimpleCPU', 'TimingSimpleCPU', 'MinorCPU', 'DerivO3CPU'), - constants.riscv_tag: - ('AtomicSimpleCPU', 'TimingSimpleCPU', 'MinorCPU', 'DerivO3CPU'), + constants.vega_x86_tag: ( + "X86AtomicSimpleCPU", + "X86TimingSimpleCPU", + "X86DerivO3CPU", + ), + constants.arm_tag: ( + "ArmAtomicSimpleCPU", + "ArmTimingSimpleCPU", + "ArmMinorCPU", + "ArmDerivO3CPU", + ), + constants.riscv_tag: ( + "RiscvAtomicSimpleCPU", + "RiscvTimingSimpleCPU", + "RiscvMinorCPU", + "RiscvDerivO3CPU", + ), } -base_path = joinpath(config.bin_path, 'cpu_tests') +base_path = joinpath(config.bin_path, "cpu_tests") -base_url = config.resource_url + '/test-progs/cpu-tests/bin/' +base_url = config.resource_url + "/test-progs/cpu-tests/bin/" isa_url = { - constants.vega_x86_tag : base_url + "x86", - constants.arm_tag : base_url + "arm", - constants.riscv_tag : base_url + "riscv", + constants.vega_x86_tag: base_url + "x86", + constants.arm_tag: base_url + "arm", + constants.riscv_tag: base_url + "riscv", } for isa in valid_isas: path = joinpath(base_path, isa.lower()) for workload in workloads: - ref_path = joinpath(getcwd(), 'ref', workload) - verifiers = ( - verifier.MatchStdout(ref_path), - ) + ref_path = joinpath(getcwd(), "ref", workload) + verifiers = (verifier.MatchStdout(ref_path),) - url = isa_url[isa] + '/' + workload + url = isa_url[isa] + "/" + workload workload_binary = DownloadedProgram(url, path, workload) binary = joinpath(workload_binary.path, workload) for cpu in valid_isas[isa]: gem5_verify_config( - name='cpu_test_{}_{}'.format(cpu,workload), - verifiers=verifiers, - config=joinpath(getcwd(), 'run.py'), - config_args=['--cpu={}'.format(cpu), binary], - valid_isas=(isa,), - fixtures=[workload_binary] + name="cpu_test_{}_{}".format(cpu, workload), + 
verifiers=verifiers, + config=joinpath(getcwd(), "run.py"), + config_args=["--cpu={}".format(cpu), binary], + valid_isas=(constants.all_compiled_tag,), + fixtures=[workload_binary], ) diff --git a/tests/gem5/dram-lowp/test_dram_lowp.py b/tests/gem5/dram-lowp/test_dram_lowp.py index b90fca4b51..2e146bbe46 100644 --- a/tests/gem5/dram-lowp/test_dram_lowp.py +++ b/tests/gem5/dram-lowp/test_dram_lowp.py @@ -26,26 +26,24 @@ from testlib import * -verifiers = ( - verifier.MatchStdoutNoPerf(joinpath(getcwd(), 'ref', 'simout')), -) +verifiers = (verifier.MatchStdoutNoPerf(joinpath(getcwd(), "ref", "simout")),) gem5_verify_config( - name='test-low_power-close_adaptive', + name="test-low_power-close_adaptive", fixtures=(), verifiers=verifiers, - config=joinpath(config.base_dir, 'configs', 'dram','low_power_sweep.py'), - config_args=['-p', 'close_adaptive', '-r', '2'], - valid_isas=(constants.null_tag,), + config=joinpath(config.base_dir, "configs", "dram", "low_power_sweep.py"), + config_args=["-p", "close_adaptive", "-r", "2"], + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, ) gem5_verify_config( - name='test-low_power-open_adaptive', + name="test-low_power-open_adaptive", fixtures=(), verifiers=verifiers, - config=joinpath(config.base_dir, 'configs', 'dram','low_power_sweep.py'), - config_args=['-p', 'open_adaptive', '-r', '2'], - valid_isas=(constants.null_tag,), + config=joinpath(config.base_dir, "configs", "dram", "low_power_sweep.py"), + config_args=["-p", "open_adaptive", "-r", "2"], + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, ) diff --git a/tests/gem5/fixture.py b/tests/gem5/fixture.py index a6b2881e8b..65b5454cae 100644 --- a/tests/gem5/fixture.py +++ b/tests/gem5/fixture.py @@ -64,17 +64,18 @@ class TempdirFixture(Fixture): def __init__(self): self.path = None super(TempdirFixture, self).__init__( - name=constants.tempdir_fixture_name) + name=constants.tempdir_fixture_name + ) def setup(self, 
testitem): - self.path = tempfile.mkdtemp(prefix='gem5out') + self.path = tempfile.mkdtemp(prefix="gem5out") def post_test_procedure(self, testitem): suiteUID = testitem.metadata.uid.suite testUID = testitem.metadata.name - testing_result_folder = os.path.join(config.result_path, - "SuiteUID:" + suiteUID, - "TestUID:" + testUID) + testing_result_folder = os.path.join( + config.result_path, "SuiteUID:" + suiteUID, "TestUID:" + testUID + ) # Copy the output files of the run from /tmp to testing-results # We want to wipe the entire result folder for this test first. Why? @@ -90,8 +91,9 @@ class TempdirFixture(Fixture): if testitem.result == Result.Passed: shutil.rmtree(self.path) + class UniqueFixture(Fixture): - ''' + """ Base class for fixtures that generate a target in the filesystem. If the same fixture is used by more than one test/suite, rather than creating a copy of the fixture, it returns @@ -101,7 +103,8 @@ class UniqueFixture(Fixture): :param target: The absolute path of the target in the filesystem. - ''' + """ + fixtures = {} def __new__(cls, target): @@ -116,7 +119,7 @@ class UniqueFixture(Fixture): def __init__(self, *args, **kwargs): with self.lock: - if hasattr(self, '_init_done'): + if hasattr(self, "_init_done"): return super(UniqueFixture, self).__init__(self, **kwargs) self._init(*args, **kwargs) @@ -124,21 +127,21 @@ class UniqueFixture(Fixture): def setup(self, testitem): with self.lock: - if hasattr(self, '_setup_done'): + if hasattr(self, "_setup_done"): return self._setup_done = True self._setup(testitem) class SConsFixture(UniqueFixture): - ''' + """ Fixture will wait until all SCons targets are collected and tests are about to be ran, then will invocate a single instance of SCons for all targets. :param directory: The directory which scons will -C (cd) into before executing. If None is provided, will choose the config base_dir. 
- ''' + """ def __new__(cls, target): obj = super(SConsFixture, cls).__new__(cls, target) @@ -149,38 +152,44 @@ class SConsFixture(UniqueFixture): return command = [ - 'scons', '-C', self.directory, - '-j', str(config.threads), - '--ignore-style', - '--no-compress-debug' + "scons", + "-C", + self.directory, + "-j", + str(config.threads), + "--ignore-style", + "--no-compress-debug", ] if not self.targets: log.test_log.warn( - 'No SCons targets specified, this will' - ' build the default all target.\n' - 'This is likely unintended, and you' - ' may wish to kill testlib and reconfigure.') + "No SCons targets specified, this will" + " build the default all target.\n" + "This is likely unintended, and you" + " may wish to kill testlib and reconfigure." + ) else: log.test_log.message( - 'Building the following targets.' - ' This may take a while.') - log.test_log.message('%s' % (', '.join(self.targets))) + "Building the following targets." " This may take a while." + ) + log.test_log.message("%s" % (", ".join(self.targets))) log.test_log.message( - "You may want to run with only a single ISA" - "(--isa=), use --skip-build, or use 'rerun'.") + "You may want to run with only a single ISA" + "(--isa=), use --skip-build, or use 'rerun'." 
+ ) command.extend(self.targets) if self.options: command.extend(self.options) log_call(log.test_log, command, time=None, stderr=sys.stderr) + class Gem5Fixture(SConsFixture): def __new__(cls, isa, variant, protocol=None): target_dir = joinpath(config.build_dir, isa.upper()) if protocol: - target_dir += '_' + protocol - target = joinpath(target_dir, 'gem5.%s' % variant) + target_dir += "_" + protocol + target = joinpath(target_dir, "gem5.%s" % variant) obj = super(Gem5Fixture, cls).__new__(cls, target) return obj @@ -193,43 +202,42 @@ class Gem5Fixture(SConsFixture): self.options = [] if protocol: - self.options = [ '--default=' + isa.upper(), - 'PROTOCOL=' + protocol ] + self.options = ["--default=" + isa.upper(), "PROTOCOL=" + protocol] self.set_global() + class MakeFixture(Fixture): def __init__(self, directory, *args, **kwargs): - name = 'make -C %s' % directory - super(MakeFixture, self).__init__(build_once=True, lazy_init=False, - name=name, - *args, **kwargs) + name = "make -C %s" % directory + super(MakeFixture, self).__init__( + build_once=True, lazy_init=False, name=name, *args, **kwargs + ) self.targets = [] self.directory = directory def setup(self): super(MakeFixture, self).setup() targets = set(self.required_by) - command = ['make', '-C', self.directory] + command = ["make", "-C", self.directory] command.extend([target.target for target in targets]) log_call(log.test_log, command, time=None, stderr=sys.stderr) class MakeTarget(Fixture): def __init__(self, target, make_fixture=None, *args, **kwargs): - ''' + """ :param make_fixture: The make invocation we will be attached to. Since we don't have a single global instance of make in gem5 like we do scons we need to know what invocation to attach to. If none given, creates its own. 
- ''' + """ super(MakeTarget, self).__init__(name=target, *args, **kwargs) self.target = self.name if make_fixture is None: make_fixture = MakeFixture( - absdirpath(target), - lazy_init=True, - build_once=False) + absdirpath(target), lazy_init=True, build_once=False + ) self.make_fixture = make_fixture @@ -241,11 +249,12 @@ class MakeTarget(Fixture): self.make_fixture.setup() return self + class TestProgram(MakeTarget): def __init__(self, program, isa, os, recompile=False): make_dir = joinpath(config.bin_dir, program) make_fixture = MakeFixture(make_dir) - target = joinpath('bin', isa, os, program) + target = joinpath("bin", isa, os, program) super(TestProgram, self).__init__(target, make_fixture) self.path = joinpath(make_dir, target) self.recompile = recompile @@ -258,9 +267,10 @@ class TestProgram(MakeTarget): elif not os.path.exists(self.path): super(MakeTarget, self).setup() + class DownloadedProgram(UniqueFixture): - """ Like TestProgram, but checks the version in the gem5 binary repository - and downloads an updated version if it is needed. + """Like TestProgram, but checks the version in the gem5 binary repository + and downloads an updated version if it is needed. 
""" def __new__(cls, url, path, filename, gzip_decompress=False): @@ -288,6 +298,7 @@ class DownloadedProgram(UniqueFixture): def _download(self): import errno + log.test_log.debug("Downloading " + self.url + " to " + self.path) if not os.path.exists(self.path): try: @@ -299,8 +310,8 @@ class DownloadedProgram(UniqueFixture): gzipped_filename = self.filename + ".gz" urllib.request.urlretrieve(self.url, gzipped_filename) - with open(self.filename, 'wb') as outfile: - with gzip.open(gzipped_filename, 'r') as infile: + with open(self.filename, "wb") as outfile: + with gzip.open(gzipped_filename, "r") as infile: shutil.copyfileobj(infile, outfile) os.remove(gzipped_filename) @@ -309,13 +320,15 @@ class DownloadedProgram(UniqueFixture): def _getremotetime(self): import datetime, time - import _strptime # Needed for python threading bug + import _strptime # Needed for python threading bug u = urllib.request.urlopen(self.url, timeout=10) - return time.mktime(datetime.datetime.strptime( \ - u.info()["Last-Modified"], - "%a, %d %b %Y %X GMT").timetuple()) + return time.mktime( + datetime.datetime.strptime( + u.info()["Last-Modified"], "%a, %d %b %Y %X GMT" + ).timetuple() + ) def _setup(self, testitem): # Check to see if there is a file downloaded @@ -326,21 +339,46 @@ class DownloadedProgram(UniqueFixture): t = self._getremotetime() except (urllib.error.URLError, socket.timeout): # Problem checking the server, use the old files. - log.test_log.debug("Could not contact server. Binaries may be old.") + log.test_log.debug( + "Could not contact server. Binaries may be old." + ) return # If the server version is more recent, download it if t > os.path.getmtime(self.filename): self._download() + class DownloadedArchive(DownloadedProgram): - """ Like TestProgram, but checks the version in the gem5 binary repository - and downloads an updated version if it is needed. 
+ """Like TestProgram, but checks the version in the gem5 binary repository + and downloads an updated version if it is needed. """ def _extract(self): import tarfile + with tarfile.open(self.filename) as tf: - tf.extractall(self.path) + + def is_within_directory(directory, target): + + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + + return prefix == abs_directory + + def safe_extract( + tar, path=".", members=None, *, numeric_owner=False + ): + + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise Exception("Attempted Path Traversal in Tar File") + + tar.extractall(path, members, numeric_owner=numeric_owner) + + safe_extract(tf, self.path) def _setup(self, testitem): # Check to see if there is a file downloaded @@ -352,8 +390,9 @@ class DownloadedArchive(DownloadedProgram): t = self._getremotetime() except (urllib.error.URLError, socket.timeout): # Problem checking the server, use the old files. - log.test_log.debug("Could not contact server. " - "Binaries may be old.") + log.test_log.debug( + "Could not contact server. " "Binaries may be old." + ) return # If the server version is more recent, download it if t > os.path.getmtime(self.filename): diff --git a/tests/gem5/fs/linux/arm/run.py b/tests/gem5/fs/linux/arm/run.py index 3dccebb8ac..18a4e5e268 100644 --- a/tests/gem5/fs/linux/arm/run.py +++ b/tests/gem5/fs/linux/arm/run.py @@ -43,6 +43,7 @@ from os.path import join as joinpath import m5 + def run_test(root): """Default run_test implementations. 
Scripts can override it.""" @@ -51,26 +52,27 @@ def run_test(root): # simulate until program terminates exit_event = m5.simulate() - print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) + print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) + config = sys.argv[1] -os.environ['M5_PATH'] = sys.argv[2] +os.environ["M5_PATH"] = sys.argv[2] gem5_root = sys.argv[3] # path setup -sys.path.append(joinpath(gem5_root, 'configs')) -tests_root = joinpath(gem5_root, 'tests') -sys.path.append(joinpath(tests_root, 'gem5', 'configs')) +sys.path.append(joinpath(gem5_root, "configs")) +tests_root = joinpath(gem5_root, "tests") +sys.path.append(joinpath(tests_root, "gem5", "configs")) -exec(compile(open(config).read(), config, 'exec')) +exec(compile(open(config).read(), config, "exec")) system = root.system -system.readfile = os.path.join(gem5_root, 'configs', 'boot', 'halt.sh') +system.readfile = os.path.join(gem5_root, "configs", "boot", "halt.sh") # The CPU can either be a list of CPUs or a single object. if isinstance(system.cpu, list): - [ cpu.createThreads() for cpu in system.cpu ] + [cpu.createThreads() for cpu in system.cpu] else: system.cpu.createThreads() diff --git a/tests/gem5/fs/linux/arm/test.py b/tests/gem5/fs/linux/arm/test.py index facff57bb9..870024760e 100644 --- a/tests/gem5/fs/linux/arm/test.py +++ b/tests/gem5/fs/linux/arm/test.py @@ -33,9 +33,9 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-''' +""" Arm FS simulation tests -''' +""" from os.path import join as joinpath @@ -43,45 +43,36 @@ from testlib import * import re -arm_fs_kvm_tests = [ - 'realview64-kvm', - 'realview64-kvm-dual', -] +arm_fs_kvm_tests = ["realview64-kvm", "realview64-kvm-dual"] arm_fs_quick_tests = [ - 'realview64-simple-atomic', - 'realview64-simple-atomic-dual', - 'realview64-simple-atomic-checkpoint', - 'realview64-simple-timing', - 'realview64-simple-timing-dual', - 'realview64-switcheroo-atomic', - 'realview64-switcheroo-timing', + "realview64-simple-atomic", + "realview64-simple-atomic-dual", + "realview64-simple-atomic-checkpoint", + "realview64-simple-timing", + "realview64-simple-timing-dual", + "realview64-switcheroo-atomic", + "realview64-switcheroo-timing", ] + arm_fs_kvm_tests arm_fs_long_tests = [ - 'realview-simple-atomic', - 'realview-simple-atomic-checkpoint', - 'realview-simple-timing', - 'realview-switcheroo-atomic', - 'realview-switcheroo-timing', - 'realview-o3', - 'realview-minor', - 'realview-switcheroo-noncaching-timing', - 'realview-switcheroo-o3', - 'realview-switcheroo-full', - 'realview64-o3', - 'realview64-o3-checker', - 'realview64-o3-dual', - 'realview64-minor', - 'realview64-minor-dual', - 'realview64-switcheroo-o3', - 'realview64-switcheroo-full', - 'realview-simple-timing-ruby', - 'realview64-simple-timing-ruby', - 'realview64-simple-timing-dual-ruby', - 'realview64-o3-dual-ruby', - - + "realview-simple-atomic", + "realview-simple-atomic-checkpoint", + "realview-simple-timing", + "realview-switcheroo-atomic", + "realview-switcheroo-timing", + "realview-o3", + "realview-minor", + "realview-switcheroo-noncaching-timing", + "realview-switcheroo-o3", + "realview-switcheroo-full", + "realview64-o3", + "realview64-o3-checker", + "realview64-o3-dual", + "realview64-minor", + "realview64-minor-dual", + "realview64-switcheroo-o3", + "realview64-switcheroo-full", # The following tests fail. These are recorded in the GEM5-640 # Jira issue. 
# @@ -93,24 +84,38 @@ arm_fs_long_tests = [ #'realview-simple-timing-dual-ruby', ] -tarball = 'aarch-system-20220505.tar.bz2' +# These tests are Ruby-based and Ruby does not support multiple ISAs +arm_fs_long_tests_arm_target = [ + "realview-simple-timing-ruby", + "realview64-simple-timing-ruby", + "realview64-simple-timing-dual-ruby", + "realview64-o3-dual-ruby", +] + +tarball = "aarch-system-20220707.tar.bz2" url = config.resource_url + "/arm/" + tarball filepath = os.path.dirname(os.path.abspath(__file__)) -path = joinpath(config.bin_path, 'arm') +path = joinpath(config.bin_path, "arm") arm_fs_binaries = DownloadedArchive(url, path, tarball) + def support_kvm(): return os.access("/dev/kvm", os.R_OK | os.W_OK) + def verifier_list(name): - verifiers=[] + verifiers = [] if "dual" in name: - verifiers.append(verifier.MatchFileRegex( - re.compile(r'.*CPU1: Booted secondary processor.*'), - ["system.terminal"])) + verifiers.append( + verifier.MatchFileRegex( + re.compile(r".*CPU1: Booted secondary processor.*"), + ["system.terminal"], + ) + ) return verifiers + for name in arm_fs_quick_tests: if name in arm_fs_kvm_tests: # The current host might not be supporting KVM @@ -124,35 +129,52 @@ for name in arm_fs_quick_tests: valid_hosts = constants.supported_hosts args = [ - joinpath(config.base_dir, 'tests', 'gem5', 'configs', name + '.py'), + joinpath(config.base_dir, "tests", "gem5", "configs", name + ".py"), path, - config.base_dir + config.base_dir, ] gem5_verify_config( name=name, - verifiers=verifier_list(name), # Add basic stat verifiers - config=joinpath(filepath, 'run.py'), + verifiers=verifier_list(name), # Add basic stat verifiers + config=joinpath(filepath, "run.py"), config_args=args, - valid_isas=(constants.arm_tag,), + valid_isas=(constants.all_compiled_tag,), length=constants.quick_tag, valid_hosts=valid_hosts, fixtures=(arm_fs_binaries,), - uses_kvm= name in arm_fs_kvm_tests, + uses_kvm=name in arm_fs_kvm_tests, ) for name in arm_fs_long_tests: args = [ - 
joinpath(config.base_dir, 'tests', 'gem5', 'configs', name + '.py'), + joinpath(config.base_dir, "tests", "gem5", "configs", name + ".py"), path, - config.base_dir + config.base_dir, ] gem5_verify_config( name=name, - verifiers=verifier_list(name), # TODO: Add basic stat verifiers - config=joinpath(filepath, 'run.py'), + verifiers=verifier_list(name), # TODO: Add basic stat verifiers + config=joinpath(filepath, "run.py"), + config_args=args, + valid_isas=(constants.all_compiled_tag,), + length=constants.long_tag, + fixtures=(arm_fs_binaries,), + uses_kvm=name in arm_fs_kvm_tests, + ) + +for name in arm_fs_long_tests_arm_target: + args = [ + joinpath(config.base_dir, "tests", "gem5", "configs", name + ".py"), + path, + config.base_dir, + ] + gem5_verify_config( + name=name, + verifiers=verifier_list(name), # TODO: Add basic stat verifiers + config=joinpath(filepath, "run.py"), config_args=args, valid_isas=(constants.arm_tag,), length=constants.long_tag, fixtures=(arm_fs_binaries,), - uses_kvm= name in arm_fs_kvm_tests, + uses_kvm=name in arm_fs_kvm_tests, ) diff --git a/tests/gem5/gem5-resources/test_download_resources.py b/tests/gem5/gem5-resources/test_download_resources.py index 55b57db095..c0efc8baad 100644 --- a/tests/gem5/gem5-resources/test_download_resources.py +++ b/tests/gem5/gem5-resources/test_download_resources.py @@ -41,6 +41,6 @@ gem5_verify_config( config.base_dir, "tests", "gem5", "configs", "download_check.py" ), config_args=["--download-directory", resource_path], - valid_isas=(constants.null_tag,), + valid_isas=(constants.all_compiled_tag,), length=constants.very_long_tag, ) diff --git a/tests/gem5/gem5_library_example_tests/test_gem5_library_examples.py b/tests/gem5/gem5_library_example_tests/test_gem5_library_examples.py index 805f942e16..9b5c2c67ff 100644 --- a/tests/gem5/gem5_library_example_tests/test_gem5_library_examples.py +++ b/tests/gem5/gem5_library_example_tests/test_gem5_library_examples.py @@ -32,23 +32,25 @@ from testlib import * 
import re import os +if config.bin_path: + resource_path = config.bin_path +else: + resource_path = joinpath(absdirpath(__file__), "..", "resources") + hello_verifier = verifier.MatchRegex(re.compile(r"Hello world!")) save_checkpoint_verifier = verifier.MatchRegex( - re.compile(r"Done taking a checkpoint")) + re.compile(r"Done taking a checkpoint") +) gem5_verify_config( name="test-gem5-library-example-arm-hello", fixtures=(), verifiers=(hello_verifier,), config=joinpath( - config.base_dir, - "configs", - "example", - "gem5_library", - "arm-hello.py", + config.base_dir, "configs", "example", "gem5_library", "arm-hello.py" ), config_args=[], - valid_isas=(constants.arm_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.quick_tag, ) @@ -63,10 +65,13 @@ gem5_verify_config( "example", "gem5_library", "checkpoints", - "riscv-hello-save-checkpoint.py" + "riscv-hello-save-checkpoint.py", ), - config_args=[], - valid_isas=(constants.riscv_tag,), + config_args=[ + "--checkpoint-path", + joinpath(resource_path, "riscv-hello-checkpoint-save"), + ], + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.quick_tag, ) @@ -81,10 +86,49 @@ gem5_verify_config( "example", "gem5_library", "checkpoints", - "riscv-hello-restore-checkpoint.py" + "riscv-hello-restore-checkpoint.py", ), config_args=[], - valid_isas=(constants.riscv_tag,), + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, +) + +gem5_verify_config( + name="test-simpoints-se-checkpoint", + fixtures=(), + verifiers=(), + config=joinpath( + config.base_dir, + "configs", + "example", + "gem5_library", + "checkpoints", + "simpoints-se-checkpoint.py", + ), + config_args=[ + "--checkpoint-path", + joinpath(resource_path, "se_checkpoint_folder-save"), + ], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, 
+) + +gem5_verify_config( + name="test-simpoints-se-restore", + fixtures=(), + verifiers=(), + config=joinpath( + config.base_dir, + "configs", + "example", + "gem5_library", + "checkpoints", + "simpoints-se-restore.py", + ), + config_args=[], + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.quick_tag, ) @@ -104,7 +148,7 @@ if os.access("/dev/kvm", mode=os.R_OK | os.W_OK): "x86-ubuntu-run-with-kvm.py", ), config_args=[], - valid_isas=(constants.x86_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=(constants.host_x86_64_tag,), length=constants.long_tag, uses_kvm=True, @@ -122,7 +166,7 @@ gem5_verify_config( "x86-ubuntu-run.py", ), config_args=[], - valid_isas=(constants.x86_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.long_tag, ) @@ -141,8 +185,8 @@ if os.access("/dev/kvm", mode=os.R_OK | os.W_OK): "gem5_library", "x86-parsec-benchmarks.py", ), - config_args=["--benchmark","blackscholes","--size","simsmall"], - valid_isas=(constants.x86_tag,), + config_args=["--benchmark", "blackscholes", "--size", "simsmall"], + valid_isas=(constants.all_compiled_tag,), protocol="MESI_Two_Level", valid_hosts=(constants.host_x86_64_tag,), length=constants.long_tag, @@ -163,14 +207,15 @@ if os.access("/dev/kvm", mode=os.R_OK | os.W_OK): "gem5_library", "x86-npb-benchmarks.py", ), - config_args=["--benchmark", + config_args=[ + "--benchmark", "bt", "--size", "A", "--ticks", - "5000000000" + "5000000000", ], - valid_isas=(constants.x86_tag,), + valid_isas=(constants.all_compiled_tag,), protocol="MESI_Two_Level", valid_hosts=(constants.host_x86_64_tag,), length=constants.long_tag, @@ -191,8 +236,8 @@ if os.access("/dev/kvm", mode=os.R_OK | os.W_OK): "gem5_library", "x86-gapbs-benchmarks.py", ), - config_args=["--benchmark","bfs","--synthetic","1","--size","1"], - valid_isas=(constants.x86_tag,), + config_args=["--benchmark", "bfs", "--synthetic", "1", "--size", 
"1"], + valid_isas=(constants.all_compiled_tag,), protocol="MESI_Two_Level", valid_hosts=(constants.host_x86_64_tag,), length=constants.long_tag, @@ -211,7 +256,7 @@ gem5_verify_config( "riscv-ubuntu-run.py", ), config_args=[], - valid_isas=(constants.riscv_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.long_tag, ) @@ -221,20 +266,16 @@ gem5_verify_config( fixtures=(), verifiers=(), config=joinpath( - config.base_dir, - "configs", - "example", - "lupv", - "run_lupv.py", + config.base_dir, "configs", "example", "lupv", "run_lupv.py" ), config_args=["timing", "1", "--max-ticks", "1000000000"], - valid_isas=(constants.riscv_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.long_tag, ) gem5_verify_config( - name="test-gem5-library-example-arm-ubuntu-boot-test", + name="test-gem5-library-example-arm-ubuntu-run-test", fixtures=(), verifiers=(), config=joinpath( @@ -242,10 +283,44 @@ gem5_verify_config( "configs", "example", "gem5_library", - "arm-ubuntu-boot-exit.py", + "arm-ubuntu-run.py", ), config_args=[], - valid_isas=(constants.arm_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=constants.long_tag, ) + +gem5_verify_config( + name="test-gem5-library-example-riscvmatched-hello", + fixtures=(), + verifiers=(), + config=joinpath( + config.base_dir, + "configs", + "example", + "gem5_library", + "riscvmatched-hello.py", + ), + config_args=[], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.long_tag, +) + +gem5_verify_config( + name="test-gem5-library-example-riscvmatched-fs", + fixtures=(), + verifiers=(), + config=joinpath( + config.base_dir, + "configs", + "example", + "gem5_library", + "riscvmatched-fs.py", + ), + config_args=["--to-init"], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + 
length=constants.very_long_tag, +) diff --git a/tests/gem5/gpu/test_gpu_ruby_random.py b/tests/gem5/gpu/test_gpu_ruby_random.py index 83f0e9f642..e29ecf24b1 100644 --- a/tests/gem5/gpu/test_gpu_ruby_random.py +++ b/tests/gem5/gpu/test_gpu_ruby_random.py @@ -48,20 +48,12 @@ gem5_verify_config( fixtures=(), verifiers=(), config=joinpath( - config.base_dir, - "configs", - "example", - "ruby_gpu_random_test.py", + config.base_dir, "configs", "example", "ruby_gpu_random_test.py" ), - config_args=[ - "--test-length", - "50000", - "--num-dmas", - "0", - ], + config_args=["--test-length", "50000", "--num-dmas", "0"], valid_isas=(constants.vega_x86_tag,), valid_hosts=constants.supported_hosts, - length=constants.quick_tag, + length=constants.long_tag, ) @@ -83,17 +75,9 @@ gem5_verify_config( fixtures=(), verifiers=(), config=joinpath( - config.base_dir, - "configs", - "example", - "ruby_gpu_random_test.py", + config.base_dir, "configs", "example", "ruby_gpu_random_test.py" ), - config_args=[ - "--test-length", - "5000000", - "--num-dmas", - "0", - ], + config_args=["--test-length", "5000000", "--num-dmas", "0"], valid_isas=(constants.vega_x86_tag,), valid_hosts=constants.supported_hosts, length=constants.long_tag, diff --git a/tests/gem5/insttest_se/test.py b/tests/gem5/insttest_se/test.py index dfae7ce6db..4dde9d6e94 100644 --- a/tests/gem5/insttest_se/test.py +++ b/tests/gem5/insttest_se/test.py @@ -41,9 +41,7 @@ else: for isa in test_progs: for binary in test_progs[isa]: ref_path = joinpath(getcwd(), "ref") - verifiers = ( - verifier.MatchStdoutNoPerf(joinpath(ref_path, "simout")), - ) + verifiers = (verifier.MatchStdoutNoPerf(joinpath(ref_path, "simout")),) for cpu in cpu_types[isa]: gem5_verify_config( @@ -64,6 +62,6 @@ for isa in test_progs: resource_path, "sparc", ], - valid_isas=(isa,), + valid_isas=(constants.all_compiled_tag,), length=constants.long_tag, ) diff --git a/tests/gem5/kvm-fork-tests/test_kvm_fork_run.py b/tests/gem5/kvm-fork-tests/test_kvm_fork_run.py 
index b7986c7cd7..7467c02763 100644 --- a/tests/gem5/kvm-fork-tests/test_kvm_fork_run.py +++ b/tests/gem5/kvm-fork-tests/test_kvm_fork_run.py @@ -53,24 +53,20 @@ def test_kvm_fork_run(cpu: str, num_cpus: int, mem_system: str, length: str): if mem_system == "mesi_two_level": protocol_to_use = None - isa_to_use = constants.x86_tag + isa_to_use = constants.all_compiled_tag elif mem_system == "mi_example": protocol_to_use = "MI_example" isa_to_use = constants.x86_tag else: protocol_to_use = None - isa_to_use = constants.vega_x86_tag + isa_to_use = constants.all_compiled_tag gem5_verify_config( name=name, verifiers=verifiers, fixtures=(), config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "boot_kvm_fork_run.py", + config.base_dir, "tests", "gem5", "configs", "boot_kvm_fork_run.py" ), config_args=[ "--cpu", diff --git a/tests/gem5/kvm-switch-tests/test_kvm_cpu_switch.py b/tests/gem5/kvm-switch-tests/test_kvm_cpu_switch.py index ede90db251..222c26b9e2 100644 --- a/tests/gem5/kvm-switch-tests/test_kvm_cpu_switch.py +++ b/tests/gem5/kvm-switch-tests/test_kvm_cpu_switch.py @@ -53,13 +53,13 @@ def test_kvm_switch(cpu: str, num_cpus: int, mem_system: str, length: str): if mem_system == "mesi_two_level": protocol_to_use = None - isa_to_use = constants.x86_tag + isa_to_use = constants.all_compiled_tag elif mem_system == "mi_example": protocol_to_use = "MI_example" isa_to_use = constants.x86_tag else: protocol_to_use = None - isa_to_use = constants.vega_x86_tag + isa_to_use = constants.all_compiled_tag gem5_verify_config( name=name, @@ -133,4 +133,4 @@ test_kvm_switch( num_cpus=2, mem_system="mesi_two_level", length=constants.long_tag, -) \ No newline at end of file +) diff --git a/tests/gem5/learning_gem5/part1_test.py b/tests/gem5/learning_gem5/part1_test.py index 32dc666520..5e44b0fc1a 100644 --- a/tests/gem5/learning_gem5/part1_test.py +++ b/tests/gem5/learning_gem5/part1_test.py @@ -26,32 +26,42 @@ from testlib import * -config_path = 
joinpath(config.base_dir, 'configs', 'learning_gem5', 'part1') +config_path = joinpath(config.base_dir, "configs", "learning_gem5", "part1") # The "quick" simple tests. gem5_verify_config( - name='simple_test', - verifiers = (), - config=joinpath(config_path, 'simple.py'), - config_args = [], - length = constants.quick_tag, - valid_isas=( - constants.vega_x86_tag, - constants.riscv_tag, - constants.arm_tag, - ), + name="simple_test", + verifiers=(), + config=joinpath(config_path, "simple.py"), + config_args=[], + length=constants.quick_tag, + valid_isas=(constants.all_compiled_tag,), +) + +gem5_verify_config( + name="simple_test_arm", + verifiers=(), + config=joinpath(config_path, "simple-arm.py"), + config_args=[], + length=constants.quick_tag, + valid_isas=(constants.all_compiled_tag,), +) + +gem5_verify_config( + name="simple_test_riscv", + verifiers=(), + config=joinpath(config_path, "simple-riscv.py"), + config_args=[], + length=constants.quick_tag, + valid_isas=(constants.all_compiled_tag,), ) # The "quick" two level tests. 
gem5_verify_config( - name='two_level_test', - verifiers = (), - config=joinpath(config_path, 'two_level.py'), - config_args = [], - length = constants.quick_tag, - valid_isas=( - constants.vega_x86_tag, - constants.riscv_tag, - constants.arm_tag - ), + name="two_level_test", + verifiers=(), + config=joinpath(config_path, "two_level.py"), + config_args=[], + length=constants.quick_tag, + valid_isas=(constants.all_compiled_tag,), ) diff --git a/tests/gem5/learning_gem5/part2_test.py b/tests/gem5/learning_gem5/part2_test.py index f3658b7501..916bdfdae8 100644 --- a/tests/gem5/learning_gem5/part2_test.py +++ b/tests/gem5/learning_gem5/part2_test.py @@ -26,45 +26,43 @@ from testlib import * -config_path = joinpath(config.base_dir, 'configs', 'learning_gem5', 'part2') -ref_path = joinpath(getcwd(), 'ref') +config_path = joinpath(config.base_dir, "configs", "learning_gem5", "part2") +ref_path = joinpath(getcwd(), "ref") get_verifier = lambda file: verifier.MatchStdout(joinpath(ref_path, file)) gem5_verify_config( - name='run_simple_test', - verifiers = (get_verifier('simple'),), - config=joinpath(config_path, 'run_simple.py'), - config_args = [], - valid_isas=(constants.null_tag,), + name="run_simple_test", + verifiers=(get_verifier("simple"),), + config=joinpath(config_path, "run_simple.py"), + config_args=[], + valid_isas=(constants.all_compiled_tag,), ) gem5_verify_config( - name='hello_goodbye_test', - verifiers =(get_verifier('hello_goodbye'),), - config=joinpath(config_path, 'hello_goodbye.py'), - config_args = [], - valid_isas=(constants.null_tag,), + name="hello_goodbye_test", + verifiers=(get_verifier("hello_goodbye"),), + config=joinpath(config_path, "hello_goodbye.py"), + config_args=[], + valid_isas=(constants.all_compiled_tag,), ) gem5_verify_config( - name='simple_memobj_test', - verifiers =(verifier.MatchStdoutNoPerf(joinpath(ref_path, 'hello')),), - config=joinpath(config_path, 'simple_memobj.py'), - config_args = [], + name="simple_memobj_test", + 
verifiers=(verifier.MatchStdoutNoPerf(joinpath(ref_path, "hello")),), + config=joinpath(config_path, "simple_memobj.py"), + config_args=[], # note: by default the above script uses x86 - valid_isas=(constants.vega_x86_tag,), + valid_isas=(constants.all_compiled_tag,), ) gem5_verify_config( - name='simple_cache_test', - verifiers =(verifier.MatchStdoutNoPerf(joinpath(ref_path, 'hello')),), - config=joinpath(config_path, 'simple_cache.py'), - config_args = [], + name="simple_cache_test", + verifiers=(verifier.MatchStdoutNoPerf(joinpath(ref_path, "hello")),), + config=joinpath(config_path, "simple_cache.py"), + config_args=[], # note: by default the above script uses x86 - valid_isas=(constants.vega_x86_tag,), + valid_isas=(constants.all_compiled_tag,), ) # Note: for simple memobj and simple cache I want to use the traffic generator # as well as the scripts above. - - diff --git a/tests/gem5/learning_gem5/part3_test.py b/tests/gem5/learning_gem5/part3_test.py index ad9ea8d53f..668f57dc25 100644 --- a/tests/gem5/learning_gem5/part3_test.py +++ b/tests/gem5/learning_gem5/part3_test.py @@ -30,29 +30,29 @@ from testlib import * # think more about this. Maybe we should have another parameter to # gem5_verify_config... 
-config_path = joinpath(config.base_dir, 'configs', 'learning_gem5', 'part3') -ref_path = joinpath(getcwd(), 'ref') +config_path = joinpath(config.base_dir, "configs", "learning_gem5", "part3") +ref_path = joinpath(getcwd(), "ref") gem5_verify_config( - name='simple_ruby_test', - verifiers = (verifier.MatchStdoutNoPerf(joinpath(ref_path, 'threads')),), - config=joinpath(config_path, 'simple_ruby.py'), - config_args = [], - protocol = 'MSI', + name="simple_ruby_test", + verifiers=(verifier.MatchStdoutNoPerf(joinpath(ref_path, "threads")),), + config=joinpath(config_path, "simple_ruby.py"), + config_args=[], + protocol="MSI", # Currently only x86 has the threads test - valid_isas=(constants.x86_tag,), + valid_isas=(constants.all_compiled_tag,), # dynamically linked - valid_hosts=constants.target_host[constants.x86_tag], + valid_hosts=(constants.x86_tag,), length=constants.long_tag, ) gem5_verify_config( - name='ruby_test_test', - verifiers = (verifier.MatchStdout(joinpath(ref_path, 'test')),), - config=joinpath(config_path, 'ruby_test.py'), - config_args = [], - protocol = 'MSI', + name="ruby_test_test", + verifiers=(verifier.MatchStdout(joinpath(ref_path, "test")),), + config=joinpath(config_path, "ruby_test.py"), + config_args=[], + protocol="MSI", # Currently only x86 has the threads test - valid_isas=(constants.x86_tag,), + valid_isas=(constants.all_compiled_tag,), length=constants.long_tag, ) diff --git a/tests/gem5/m5_util/test_exit.py b/tests/gem5/m5_util/test_exit.py index d06e703113..b79a8fadc2 100644 --- a/tests/gem5/m5_util/test_exit.py +++ b/tests/gem5/m5_util/test_exit.py @@ -57,11 +57,7 @@ gem5_verify_config( verifiers=[a], fixtures=(), config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "simple_binary_run.py", + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" ), config_args=[ "x86-m5-exit", @@ -70,5 +66,5 @@ gem5_verify_config( resource_path, "x86", ], - valid_isas=(constants.vega_x86_tag,), + 
valid_isas=(constants.all_compiled_tag,), ) diff --git a/tests/gem5/m5threads_test_atomic/atomic_system.py b/tests/gem5/m5threads_test_atomic/atomic_system.py index a7c6535b59..b7bd67db10 100644 --- a/tests/gem5/m5threads_test_atomic/atomic_system.py +++ b/tests/gem5/m5threads_test_atomic/atomic_system.py @@ -30,30 +30,32 @@ from caches import * import sys import argparse -parser = argparse.ArgumentParser(description='m5threads atomic tester') -parser.add_argument('--cpu-type', default='DerivO3CPU') -parser.add_argument('--num-cores', default='8') -parser.add_argument('--cmd') +parser = argparse.ArgumentParser(description="m5threads atomic tester") +parser.add_argument("--cpu-type", default="DerivO3CPU") +parser.add_argument("--num-cores", default="8") +parser.add_argument("--cmd") args = parser.parse_args() -root = Root(full_system = False) +root = Root(full_system=False) root.system = System() root.system.workload = SEWorkload.init_compatible(args.cmd) root.system.clk_domain = SrcClockDomain() -root.system.clk_domain.clock = '3GHz' +root.system.clk_domain.clock = "3GHz" root.system.clk_domain.voltage_domain = VoltageDomain() -root.system.mem_mode = 'timing' -root.system.mem_ranges = [AddrRange('512MB')] +root.system.mem_mode = "timing" +root.system.mem_ranges = [AddrRange("512MB")] -if args.cpu_type == 'DerivO3CPU': - root.system.cpu = [DerivO3CPU(cpu_id = i) - for i in range (int(args.num_cores))] -elif args.cpu_type == 'TimingSimpleCPU': - root.system.cpu = [TimingSimpleCPU(cpu_id=i) - for i in range(int(args.num_cores))] +if args.cpu_type == "DerivO3CPU": + root.system.cpu = [ + SparcDerivO3CPU(cpu_id=i) for i in range(int(args.num_cores)) + ] +elif args.cpu_type == "TimingSimpleCPU": + root.system.cpu = [ + SparcTimingSimpleCPU(cpu_id=i) for i in range(int(args.num_cores)) + ] else: print("ERROR: CPU Type '" + args.cpu_type + "' not supported") sys.exit(1) @@ -64,8 +66,7 @@ root.system.membus.default = root.system.membus.badaddr_responder.pio 
root.system.system_port = root.system.membus.cpu_side_ports -process = Process(executable = args.cmd, - cmd = [args.cmd, str(args.num_cores)]) +process = Process(executable=args.cmd, cmd=[args.cmd, str(args.num_cores)]) for cpu in root.system.cpu: cpu.workload = process diff --git a/tests/gem5/m5threads_test_atomic/caches.py b/tests/gem5/m5threads_test_atomic/caches.py index af303a171c..fd87d0484d 100755 --- a/tests/gem5/m5threads_test_atomic/caches.py +++ b/tests/gem5/m5threads_test_atomic/caches.py @@ -60,14 +60,15 @@ class L1Cache(PrefetchCache): def connectCPU(self, cpu): """Connect this cache's port to a CPU-side port - This must be defined in a subclass""" + This must be defined in a subclass""" raise NotImplementedError + class L1ICache(L1Cache): """Simple L1 instruction cache with default values""" # Set the size - size = '32kB' + size = "32kB" def __init__(self, opts=None): super(L1ICache, self).__init__(opts) @@ -76,11 +77,12 @@ class L1ICache(L1Cache): """Connect this cache's port to a CPU icache port""" self.cpu_side = cpu.icache_port + class L1DCache(L1Cache): """Simple L1 data cache with default values""" # Set the size - size = '32kB' + size = "32kB" def __init__(self, opts=None): super(L1DCache, self).__init__(opts) @@ -89,11 +91,12 @@ class L1DCache(L1Cache): """Connect this cache's port to a CPU dcache port""" self.cpu_side = cpu.dcache_port + class L2Cache(PrefetchCache): """Simple L2 Cache with default values""" # Default parameters - size = '256kB' + size = "256kB" assoc = 16 tag_latency = 10 data_latency = 10 diff --git a/tests/gem5/m5threads_test_atomic/test.py b/tests/gem5/m5threads_test_atomic/test.py index 9596d2f99e..531de83b2f 100644 --- a/tests/gem5/m5threads_test_atomic/test.py +++ b/tests/gem5/m5threads_test_atomic/test.py @@ -24,9 +24,9 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-''' +""" Test file for the m5threads atomic test -''' +""" from testlib import * cpu_types = ( @@ -38,26 +38,31 @@ cpu_types = ( # 'TimingSimpleCPU', ) -base_path = joinpath(config.bin_path, 'pthreads', 'sparc64') +base_path = joinpath(config.bin_path, "pthreads", "sparc64") -binary = 'test_atomic' -url = config.resource_url + '/test-progs/pthreads/sparc64/' + binary +binary = "test_atomic" +url = config.resource_url + "/test-progs/pthreads/sparc64/" + binary test_atomic = DownloadedProgram(url, base_path, binary) verifiers = ( - verifier.MatchStdoutNoPerf(joinpath(getcwd(), 'ref/sparc64/simout')), + verifier.MatchStdoutNoPerf(joinpath(getcwd(), "ref/sparc64/simout")), ) for cpu in cpu_types: gem5_verify_config( - name='test-atomic-' + cpu, + name="test-atomic-" + cpu, verifiers=verifiers, fixtures=(test_atomic,), - config=joinpath(getcwd(), 'atomic_system.py'), - config_args=['--cpu-type', cpu, - '--num-cores', '8', - '--cmd', joinpath(base_path, binary)], + config=joinpath(getcwd(), "atomic_system.py"), + config_args=[ + "--cpu-type", + cpu, + "--num-cores", + "8", + "--cmd", + joinpath(base_path, binary), + ], valid_isas=(constants.sparc_tag,), valid_hosts=constants.supported_hosts, - length = constants.long_tag, + length=constants.long_tag, ) diff --git a/tests/gem5/memory/memtest-run.py b/tests/gem5/memory/memtest-run.py index 9b6625c09d..d133b46e8e 100644 --- a/tests/gem5/memory/memtest-run.py +++ b/tests/gem5/memory/memtest-run.py @@ -26,30 +26,30 @@ import m5 from m5.objects import * -m5.util.addToPath('../../../configs/') + +m5.util.addToPath("../../../configs/") from common.Caches import * -#MAX CORES IS 8 with the fals sharing method +# MAX CORES IS 8 with the fals sharing method nb_cores = 8 -cpus = [MemTest(max_loads = 1e5, progress_interval = 1e4) - for i in range(nb_cores) ] +cpus = [MemTest(max_loads=1e5, progress_interval=1e4) for i in range(nb_cores)] # system simulated -system = System(cpu = cpus, - physmem = SimpleMemory(), - membus = 
SystemXBar()) +system = System(cpu=cpus, physmem=SimpleMemory(), membus=SystemXBar()) # Dummy voltage domain for all our clock domains system.voltage_domain = VoltageDomain() -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a seperate clock domain for components that should run at # CPUs frequency -system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', - voltage_domain = system.voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock="2GHz", voltage_domain=system.voltage_domain +) -system.toL2Bus = L2XBar(clk_domain = system.cpu_clk_domain) -system.l2c = L2Cache(clk_domain = system.cpu_clk_domain, size='64kB', assoc=8) +system.toL2Bus = L2XBar(clk_domain=system.cpu_clk_domain) +system.l2c = L2Cache(clk_domain=system.cpu_clk_domain, size="64kB", assoc=8) system.l2c.cpu_side = system.toL2Bus.mem_side_ports # connect l2c to membus @@ -59,7 +59,7 @@ system.l2c.mem_side = system.membus.cpu_side_ports for cpu in cpus: # All cpus are associated with cpu_clk_domain cpu.clk_domain = system.cpu_clk_domain - cpu.l1c = L1Cache(size = '32kB', assoc = 4) + cpu.l1c = L1Cache(size="32kB", assoc=4) cpu.l1c.cpu_side = cpu.port cpu.l1c.mem_side = system.toL2Bus.cpu_side_ports @@ -73,11 +73,10 @@ system.physmem.port = system.membus.mem_side_ports # run simulation # ----------------------- -root = Root( full_system = False, system = system ) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() exit_event = m5.simulate() if exit_event.getCause() != "maximum number of loads reached": exit(1) - diff --git a/tests/gem5/memory/simple-run.py b/tests/gem5/memory/simple-run.py index 16349536ae..ec5b2d3385 100644 --- a/tests/gem5/memory/simple-run.py +++ b/tests/gem5/memory/simple-run.py @@ -38,10 +38,10 @@ from m5.objects import * import argparse -parser = 
argparse.ArgumentParser(description='Simple memory tester') -parser.add_argument('--bandwidth', default=None) -parser.add_argument('--latency', default=None) -parser.add_argument('--latency_var', default=None) +parser = argparse.ArgumentParser(description="Simple memory tester") +parser.add_argument("--bandwidth", default=None) +parser.add_argument("--latency", default=None) +parser.add_argument("--latency_var", default=None) args = parser.parse_args() @@ -49,11 +49,14 @@ args = parser.parse_args() # the scripts are happy try: cpu = TrafficGen( - config_file=os.path.join(os.path.dirname(os.path.abspath(__file__)), - "tgen-simple-mem.cfg")) + config_file=os.path.join( + os.path.dirname(os.path.abspath(__file__)), "tgen-simple-mem.cfg" + ) + ) except NameError: m5.fatal("protobuf required for simple memory test") + class MyMem(SimpleMemory): if args.bandwidth: bandwidth = args.bandwidth @@ -62,18 +65,20 @@ class MyMem(SimpleMemory): if args.latency_var: latency_var = args.latency_var + # system simulated -system = System(cpu = cpu, physmem = MyMem(), - membus = IOXBar(width = 16), - clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = - VoltageDomain())) +system = System( + cpu=cpu, + physmem=MyMem(), + membus=IOXBar(width=16), + clk_domain=SrcClockDomain(clock="1GHz", voltage_domain=VoltageDomain()), +) # add a communication monitor, and also trace all the packets and # calculate and verify stack distance system.monitor = CommMonitor() -system.monitor.trace = MemTraceProbe(trace_file = "monitor.ptrc.gz") -system.monitor.stackdist = StackDistProbe(verify = True) +system.monitor.trace = MemTraceProbe(trace_file="monitor.ptrc.gz") +system.monitor.stackdist = StackDistProbe(verify=True) # connect the traffic generator to the bus via a communication monitor system.cpu.port = system.monitor.cpu_side_port @@ -89,8 +94,8 @@ system.physmem.port = system.membus.mem_side_ports # run simulation # ----------------------- -root = Root(full_system = False, system = 
system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() exit_event = m5.simulate(100000000000) diff --git a/tests/gem5/memory/test.py b/tests/gem5/memory/test.py index 01bd68b9b5..d76463be7b 100644 --- a/tests/gem5/memory/test.py +++ b/tests/gem5/memory/test.py @@ -24,67 +24,102 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -''' +""" Test file for simple memory test TODO: Add stats checking -''' +""" from testlib import * gem5_verify_config( - name='simple_mem_default', - verifiers=(), # No need for verfiers this will return non-zero on fail - config=joinpath(getcwd(), 'simple-run.py'), - config_args = [], + name="simple_mem_default", + verifiers=(), # No need for verfiers this will return non-zero on fail + config=joinpath(getcwd(), "simple-run.py"), + config_args=[], valid_isas=(constants.null_tag,), + length=constants.long_tag, ) simple_mem_params = [ - ('inf-bandwidth', {'bandwidth': '0GB/s'}), - ('low-latency', {'latency': '1ns'}), - ('high-latency', {'latency': '1us'}), - ('low-bandwidth', {'bandwidth': '1MB/s'}), - ('high-var', {'latency_var': '100ns'}) - ] + ("inf-bandwidth", {"bandwidth": "0GB/s"}), + ("low-latency", {"latency": "1ns"}), + ("high-latency", {"latency": "1us"}), + ("low-bandwidth", {"bandwidth": "1MB/s"}), + ("high-var", {"latency_var": "100ns"}), +] for name, params in simple_mem_params: - args = ['--' + key + '=' + val for key,val in params.items()] + args = ["--" + key + "=" + val for key, val in params.items()] gem5_verify_config( - name='simple_mem_' + name, - verifiers=(), # No need for verfiers this will return non-zero on fail - config=joinpath(getcwd(), 'simple-run.py'), - config_args = args, + name="simple_mem_" + name, + verifiers=(), # No need for verfiers this will return non-zero on fail + config=joinpath(getcwd(), "simple-run.py"), + 
config_args=args, valid_isas=(constants.null_tag,), - ) # This tests for validity as well as performance + length=constants.long_tag, + ) # This tests for validity as well as performance gem5_verify_config( - name='memtest', - verifiers=(), # No need for verfiers this will return non-zero on fail - config=joinpath(getcwd(), 'memtest-run.py'), - config_args = [], + name="memtest", + verifiers=(), # No need for verfiers this will return non-zero on fail + config=joinpath(getcwd(), "memtest-run.py"), + config_args=[], valid_isas=(constants.null_tag,), + length=constants.long_tag, ) null_tests = [ - ('garnet_synth_traffic', None, ['--sim-cycles', '5000000']), - ('memcheck', None, ['--maxtick', '2000000000', '--prefetchers']), - ('ruby_mem_test-garnet', 'ruby_mem_test', - ['--abs-max-tick', '20000000', '--functional', '10', \ - '--network=garnet']), - ('ruby_mem_test-simple', 'ruby_mem_test', - ['--abs-max-tick', '20000000', '--functional', '10', \ - '--network=simple']), - ('ruby_mem_test-simple-extra', 'ruby_mem_test', - ['--abs-max-tick', '20000000', '--functional', '10', \ - '--network=simple', '--simple-physical-channels']), - ('ruby_mem_test-simple-extra-multicore', 'ruby_mem_test', - ['--abs-max-tick', '20000000', '--functional', '10', \ - '--network=simple', '--simple-physical-channels', - '--num-cpus=4']), - ('ruby_random_test', None, ['--maxloads', '5000']), - ('ruby_direct_test', None, ['--requests', '50000']), + ("garnet_synth_traffic", None, ["--sim-cycles", "5000000"]), + ("memcheck", None, ["--maxtick", "2000000000", "--prefetchers"]), + ( + "ruby_mem_test-garnet", + "ruby_mem_test", + [ + "--abs-max-tick", + "20000000", + "--network=garnet", + ], + ), + ( + "ruby_mem_test-simple", + "ruby_mem_test", + [ + "--abs-max-tick", + "20000000", + "--functional", + "10", + "--network=simple", + ], + ), + ( + "ruby_mem_test-simple-extra", + "ruby_mem_test", + [ + "--abs-max-tick", + "20000000", + "--functional", + "10", + "--network=simple", + 
"--simple-physical-channels", + ], + ), + ( + "ruby_mem_test-simple-extra-multicore", + "ruby_mem_test", + [ + "--abs-max-tick", + "20000000", + "--functional", + "10", + "--network=simple", + "--simple-physical-channels", + "--num-cpus=4", + ], + ), + ("ruby_random_test", None, ["--maxloads", "5000"]), + ("ruby_direct_test", None, ["--requests", "50000"]), ] for test_name, basename_noext, args in null_tests: @@ -94,9 +129,11 @@ for test_name, basename_noext, args in null_tests: name=test_name, fixtures=(), verifiers=(), - config=joinpath(config.base_dir, 'configs', - 'example', basename_noext + '.py'), + config=joinpath( + config.base_dir, "configs", "example", basename_noext + ".py" + ), config_args=args, valid_isas=(constants.null_tag,), valid_hosts=constants.supported_hosts, + length=constants.long_tag, ) diff --git a/tests/gem5/multi_isa/test_multi_isa.py b/tests/gem5/multi_isa/test_multi_isa.py index 2f1f67ccec..7d278b75ea 100644 --- a/tests/gem5/multi_isa/test_multi_isa.py +++ b/tests/gem5/multi_isa/test_multi_isa.py @@ -36,54 +36,44 @@ isa_map = { "riscv": constants.riscv_tag, } -length_map = { - "sparc": constants.long_tag, - "mips": constants.long_tag, - "null": constants.quick_tag, - "arm": constants.quick_tag, - "x86": constants.quick_tag, - "power": constants.long_tag, - "riscv": constants.long_tag, -} for isa in isa_map.keys(): - gem5_verify_config( - name=f"runtime-isa-check_{isa}-compiled-alone", - verifiers=(), - fixtures=(), - config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "runtime_isa_check.py", - ), - config_args=["-e", isa], - valid_isas=(isa_map[isa],), - valid_hosts=constants.supported_hosts, - length=length_map[isa], - ) + if isa in ("x86", "arm", "riscv"): + # We only do these checks for X86, ARM, and RISCV to save compiling + # other ISAs. 
+ gem5_verify_config( + name=f"runtime-isa-check_{isa}-compiled-alone", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "configs", + "runtime_isa_check.py", + ), + config_args=["-e", isa], + valid_isas=(isa_map[isa],), + valid_hosts=constants.supported_hosts, + length=constants.long_tag, + ) - gem5_verify_config( - name=f"supported-isas-check_{isa}-compiled-alone", - verifiers=(), - fixtures=(), - config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "supported_isa_check.py", - ), - config_args=["-e", isa], - valid_isas=(isa_map[isa],), - valid_hosts=constants.supported_hosts, - length=length_map[isa], - ) - - # Remove this when the muli-isa work is incorporated. `build/ALL/gem5.opt` - # must be compilable. - continue + gem5_verify_config( + name=f"supported-isas-check_{isa}-compiled-alone", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "configs", + "supported_isa_check.py", + ), + config_args=["-e", isa], + valid_isas=(isa_map[isa],), + valid_hosts=constants.supported_hosts, + length=constants.long_tag, + ) if isa != "null": # The null isa is not "supported" in a case where other ISAs are @@ -102,5 +92,5 @@ for isa in isa_map.keys(): config_args=["-e", isa], valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, - length=constants.long_tag, + length=constants.quick_tag, ) diff --git a/tests/gem5/parsec-benchmarks/test_parsec.py b/tests/gem5/parsec-benchmarks/test_parsec.py index 1104cf10b2..11735ab43f 100644 --- a/tests/gem5/parsec-benchmarks/test_parsec.py +++ b/tests/gem5/parsec-benchmarks/test_parsec.py @@ -51,23 +51,13 @@ def test_parsec( return gem5_verify_config( - name="{}-boot-cpu_{}-detailed-cpu_{}-cores_{}_{}_{}_parsec-test"\ - .format( - boot_cpu, - detailed_cpu, - str(num_cpus), - mem_system, - benchmark, - size, - ), + name="{}-boot-cpu_{}-detailed-cpu_{}-cores_{}_{}_{}_parsec-test".format( + boot_cpu, detailed_cpu, 
str(num_cpus), mem_system, benchmark, size + ), verifiers=(), fixtures=(), config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "parsec_disk_run.py", + config.base_dir, "tests", "gem5", "configs", "parsec_disk_run.py" ), config_args=[ "--cpu", @@ -85,7 +75,7 @@ def test_parsec( "--resource-directory", resource_path, ], - valid_isas=(constants.x86_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=(constants.host_x86_64_tag,), length=length, uses_kvm=True, @@ -112,7 +102,7 @@ test_parsec( length=constants.very_long_tag, ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="timing", # num_cpus=1, @@ -120,7 +110,7 @@ test_parsec( # benchmark="bodytrack", # size="simsmall", # length=constants.very_long_tag, -#) +# ) test_parsec( boot_cpu="kvm", @@ -132,7 +122,7 @@ test_parsec( length=constants.very_long_tag, ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="kvm", # num_cpus=8, @@ -140,7 +130,7 @@ test_parsec( # benchmark="dedup", # size="simsmall", # length=constants.very_long_tag, -#) +# ) test_parsec( boot_cpu="kvm", @@ -152,7 +142,7 @@ test_parsec( length=constants.very_long_tag, ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="timing", # num_cpus=1, @@ -160,7 +150,7 @@ test_parsec( # benchmark="ferret", # size="simsmall", # length=constants.very_long_tag, -#) +# ) test_parsec( boot_cpu="kvm", @@ -172,7 +162,7 @@ test_parsec( length=constants.very_long_tag, ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="kvm", # num_cpus=8, @@ -180,7 +170,7 @@ test_parsec( # benchmark="freqmine", # size="simsmall", # length=constants.very_long_tag, -#) +# ) test_parsec( @@ -193,7 +183,7 @@ test_parsec( length=constants.very_long_tag, ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="timing", # num_cpus=1, @@ -201,7 +191,7 @@ test_parsec( # benchmark="streamcluster", # size="simsmall", # length=constants.very_long_tag, -#) +# ) test_parsec( boot_cpu="kvm", @@ -213,7 
+203,7 @@ test_parsec( length=constants.very_long_tag, ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="kvm", # num_cpus=8, @@ -221,9 +211,9 @@ test_parsec( # benchmark="vips", # size="simsmall", # length=constants.very_long_tag, -#) +# ) -#test_parsec( +# test_parsec( # boot_cpu="kvm", # detailed_cpu="timing", # num_cpus=1, @@ -231,4 +221,4 @@ test_parsec( # benchmark="x264", # size="simsmall", # length=constants.very_long_tag, -#) +# ) diff --git a/tests/gem5/riscv-boot-tests/test_linux_boot.py b/tests/gem5/riscv-boot-tests/test_linux_boot.py index e39c409387..5ba4fa5dc0 100644 --- a/tests/gem5/riscv-boot-tests/test_linux_boot.py +++ b/tests/gem5/riscv-boot-tests/test_linux_boot.py @@ -46,7 +46,8 @@ def test_boot( ): name = "{}-cpu_{}-cores_{}_{}_riscv-boot-test".format( - cpu, str(num_cpus), cache_type, memory_class) + cpu, str(num_cpus), cache_type, memory_class + ) verifiers = [] exit_regex = re.compile( @@ -56,7 +57,7 @@ def test_boot( ) verifiers.append(verifier.MatchRegex(exit_regex)) - config_args=[ + config_args = [ "--cpu", cpu, "--num-cpus", @@ -85,7 +86,7 @@ def test_boot( "riscv_boot_exit_run.py", ), config_args=config_args, - valid_isas=(constants.riscv_tag,), + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, length=length, ) @@ -132,7 +133,7 @@ test_boot( test_boot( cpu="minor", num_cpus=1, - cache_type="mi_example", + cache_type="mesi_two_level", memory_class="SingleChannelDDR3_2133", length=constants.quick_tag, to_tick=10000000000, @@ -141,18 +142,17 @@ test_boot( test_boot( cpu="minor", num_cpus=8, - cache_type="mi_example", + cache_type="mesi_two_level", memory_class="SingleChannelDDR3_2133", length=constants.quick_tag, to_tick=10000000000, ) - test_boot( cpu="timing", num_cpus=1, - cache_type="mi_example", + cache_type="mesi_two_level", memory_class="SingleChannelDDR4_2400", length=constants.quick_tag, to_tick=10000000000, @@ -179,7 +179,7 @@ test_boot( test_boot( cpu="timing", num_cpus=4, - 
cache_type="mi_example", + cache_type="mesi_two_level", memory_class="DualChannelDDR4_2400", length=constants.quick_tag, to_tick=10000000000, @@ -202,7 +202,7 @@ test_boot( # test_boot( # cpu="timing", # num_cpus=1, -# cache_type="mi_example", +# cache_type="mesi_two_level", # memory_class="SingleChannelLPDDR3_1600", # length=constants.long_tag, # ) @@ -210,7 +210,7 @@ test_boot( # test_boot( # cpu="timing", # num_cpus=4, -# cache_type="mi_example", +# cache_type="mesi_two_level", # memory_class="DualChannelDDR4_2400", # length=constants.long_tag, # ) @@ -226,7 +226,7 @@ test_boot( # test_boot( # cpu="o3", # num_cpus=8, -# cache_type="mi_example", +# cache_type="mesi_two_level", # memory_class="HBM2Stack", # length=constants.long_tag, # ) diff --git a/tests/gem5/hello_se/test_hello_se.py b/tests/gem5/se_mode/hello_se/test_hello_se.py similarity index 69% rename from tests/gem5/hello_se/test_hello_se.py rename to tests/gem5/se_mode/hello_se/test_hello_se.py index 9cbfa520cb..1aaac4a435 100644 --- a/tests/gem5/hello_se/test_hello_se.py +++ b/tests/gem5/se_mode/hello_se/test_hello_se.py @@ -40,7 +40,8 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ -Test file for the util m5 exit assembly instruction. +Tests which run simple binaries in gem5's SE mode. The stdlib's SimpleBoard +is used to run these tests. 
""" from testlib import * @@ -52,22 +53,22 @@ isa_str_map = { constants.mips_tag: "mips", constants.riscv_tag: "riscv", constants.sparc_tag: "sparc", + constants.vega_x86_tag: "x86", } static_progs = { - constants.vega_x86_tag: ( - "x86-hello64-static", - "x86-hello32-static", - ), - constants.arm_tag: ( - "arm-hello64-static", - "arm-hello32-static", - ), + constants.vega_x86_tag: ("x86-hello64-static", "x86-hello32-static"), + constants.arm_tag: ("arm-hello64-static", "arm-hello32-static"), constants.mips_tag: ("mips-hello",), constants.riscv_tag: ("riscv-hello",), constants.sparc_tag: ("sparc-hello",), } +take_params_progs = { + constants.vega_x86_tag: ("x86-print-this",), + constants.riscv_tag: ("riscv-print-this",), +} + dynamic_progs = {constants.vega_x86_tag: ("x86-hello64-dynamic",)} cpu_types = { @@ -78,39 +79,24 @@ cpu_types = { constants.sparc_tag: ("timing", "atomic"), } -# We only want to test x86, arm, and riscv on quick. Mips and sparc will be -# left for long. -os_length = { - constants.vega_x86_tag: constants.quick_tag, - constants.arm_tag: constants.quick_tag, - constants.mips_tag: constants.long_tag, - constants.riscv_tag: constants.quick_tag, - constants.sparc_tag: constants.long_tag, -} - - if config.bin_path: resource_path = config.bin_path else: - resource_path = joinpath(absdirpath(__file__), "..", "resources") + resource_path = joinpath(absdirpath(__file__), "..", "..", "resources") regex = re.compile(r"Hello world!") stdout_verifier = verifier.MatchRegex(regex) -def verify_config(isa, binary, cpu, hosts): +def verify_config(isa, binary, cpu, hosts, verifier, input): gem5_verify_config( name="test-" + binary + "-" + cpu, fixtures=(), - verifiers=(stdout_verifier,), + verifiers=(verifier,), config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "simple_binary_run.py", + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" ), config_args=[ binary, @@ -118,20 +104,53 @@ def verify_config(isa, binary, cpu, 
hosts): "--resource-directory", resource_path, isa_str_map[isa], - ], - valid_isas=(isa,), + ] + + input, + valid_isas=(constants.all_compiled_tag,), valid_hosts=hosts, - length=os_length[isa], + length=constants.quick_tag, ) + # Run statically linked hello worlds for isa in static_progs: for binary in static_progs[isa]: for cpu in cpu_types[isa]: - verify_config(isa, binary, cpu, constants.supported_hosts) + verify_config( + isa, + binary, + cpu, + constants.supported_hosts, + stdout_verifier, + [], + ) # Run dynamically linked hello worlds for isa in dynamic_progs: for binary in dynamic_progs[isa]: for cpu in cpu_types[isa]: - verify_config(isa, binary, cpu, constants.target_host[isa]) + verify_config( + isa, + binary, + cpu, + constants.target_host[isa], + stdout_verifier, + [], + ) + +regex = re.compile(r"1 print this") +stdout_verifier = verifier.MatchRegex(regex) + +args = ["--arguments", "print this", "--arguments", "2000"] + +for isa in take_params_progs: + for binary in take_params_progs[isa]: + for cpu in cpu_types[isa]: + verify_config( + isa, + binary, + cpu, + constants.target_host[isa], + stdout_verifier, + args, + ) diff --git a/tests/gem5/se_mode/hello_se/test_se_multicore.py b/tests/gem5/se_mode/hello_se/test_se_multicore.py new file mode 100644 index 0000000000..55fc61fbf8 --- /dev/null +++ b/tests/gem5/se_mode/hello_se/test_se_multicore.py @@ -0,0 +1,56 @@ +# Copyright (c) 2022 The Regents of the University of California +# All Rights Reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Tests which test SE mode's functionality when running workloads on multiple +core setups. 
+""" +from testlib import * + +if config.bin_path: + resource_path = config.bin_path +else: + resource_path = joinpath(absdirpath(__file__), "..", "..", "resources") + +gem5_verify_config( + name="test-x86-hello-4-atomic-core-se-mode", + fixtures=(), + verifiers=(), + config=joinpath( + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" + ), + config_args=[ + "x86-hello64-static", + "atomic", + "x86", + "--num-cores", + "4", + "--resource-directory", + resource_path, + ], + valid_isas=(constants.all_compiled_tag,), + length=constants.quick_tag, +) diff --git a/tests/gem5/stats/test_hdf5.py b/tests/gem5/stats/test_hdf5.py index b5662358d5..993534a61d 100644 --- a/tests/gem5/stats/test_hdf5.py +++ b/tests/gem5/stats/test_hdf5.py @@ -47,7 +47,8 @@ from testlib import * if config.bin_path: resource_path = config.bin_path else: - resource_path = joinpath(absdirpath(__file__), '..', 'resources') + resource_path = joinpath(absdirpath(__file__), "..", "resources") + def have_hdf5(): have_hdf5_file = os.path.join( @@ -72,7 +73,7 @@ if have_hdf5(): # FIXME: flaky, should check return code instead... 
# See: https://gem5.atlassian.net/browse/GEM5-1099 err_regex = re.compile( - r'RuntimeError: Failed creating H5::DataSet \w+; .*' + r"RuntimeError: Failed creating H5::DataSet \w+; .*" ) err_verifier = verifier.NoMatchRegex(err_regex, True, False) @@ -83,11 +84,7 @@ if have_hdf5(): verifiers=[ok_verifier, err_verifier, h5_verifier], fixtures=(), config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "simple_binary_run.py", + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" ), config_args=[ "arm-hello64-static", @@ -97,6 +94,5 @@ if have_hdf5(): "arm", ], gem5_args=["--stats-file=h5://stats.h5"], - valid_isas=(constants.arm_tag,), + valid_isas=(constants.all_compiled_tag,), ) - diff --git a/tests/gem5/stdlib/test_base_cpu_processor.py b/tests/gem5/stdlib/test_base_cpu_processor.py new file mode 100644 index 0000000000..cbc6767481 --- /dev/null +++ b/tests/gem5/stdlib/test_base_cpu_processor.py @@ -0,0 +1,69 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from testlib import * + +""" +These tests are designed to test the BaseCPUProcessor. It utilizes the +tests/gem5/configs/simple_binary_run.py to run a simple SE-mode simualation +with different configurations of the BaseCPUProcessor. +""" + +gem5_verify_config( + name=f"BaseCPUProcessor-x86-hello", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" + ), + config_args=["x86-hello64-static", "timing", "x86", "-b"], + valid_isas=(constants.all_compiled_tag,), + length=constants.quick_tag, +) + +gem5_verify_config( + name=f"BaseCPUProcessor-riscv-hello", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" + ), + config_args=["riscv-hello", "atomic", "riscv", "-b"], + valid_isas=(constants.all_compiled_tag,), + length=constants.quick_tag, +) + +gem5_verify_config( + name=f"BaseCPUProcessor-arm-hello", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py" + ), + config_args=["arm-hello64-static", "o3", "arm", "-b"], + valid_isas=(constants.all_compiled_tag,), + length=constants.quick_tag, +) diff --git a/tests/gem5/stdlib/test_requires.py b/tests/gem5/stdlib/test_requires.py index 0a7f12399f..b729050b47 100644 --- a/tests/gem5/stdlib/test_requires.py +++ b/tests/gem5/stdlib/test_requires.py @@ -39,26 +39,46 @@ isa_map = { length_map = { 
"sparc": constants.long_tag, "mips": constants.long_tag, - "null": constants.quick_tag, - "arm": constants.quick_tag, - "x86": constants.quick_tag, + "null": constants.long_tag, + "arm": constants.long_tag, + "x86": constants.long_tag, "power": constants.long_tag, "riscv": constants.long_tag, } for isa in isa_map.keys(): - gem5_verify_config( - name=f"requires-isa-{isa}", - verifiers=(), - fixtures=(), - config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "requires_check.py", - ), - config_args=["-i", isa], - valid_isas=(isa_map[isa],), - length=length_map[isa], - ) \ No newline at end of file + if isa in ("x86", "arm", "riscv"): + # We only do these checks for X86, ARM, and RISCV to save compiling + # other ISAs. + gem5_verify_config( + name=f"requires-isa-{isa}", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "configs", + "requires_check.py", + ), + config_args=["-i", isa], + valid_isas=(isa_map[isa],), + length=length_map[isa], + ) + + if isa != "null": + gem5_verify_config( + name=f"requires-isa-{isa}-with-all-compiled", + verifiers=(), + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "configs", + "requires_check.py", + ), + config_args=["-i", isa], + valid_isas=(constants.all_compiled_tag,), + length=constants.quick_tag, + ) diff --git a/tests/gem5/suite.py b/tests/gem5/suite.py index 354c8efc98..36532aa9f7 100644 --- a/tests/gem5/suite.py +++ b/tests/gem5/suite.py @@ -49,20 +49,22 @@ from .fixture import TempdirFixture, Gem5Fixture, VariableFixture from . 
import verifier -def gem5_verify_config(name, - config, - config_args, - verifiers, - gem5_args=tuple(), - fixtures=[], - valid_isas=constants.supported_isas, - valid_variants=constants.supported_variants, - length=constants.supported_lengths[0], - valid_hosts=constants.supported_hosts, - protocol=None, - uses_kvm=False, - ): - ''' + +def gem5_verify_config( + name, + config, + config_args, + verifiers, + gem5_args=tuple(), + fixtures=[], + valid_isas=constants.supported_isas, + valid_variants=constants.supported_variants, + length=constants.supported_lengths[0], + valid_hosts=constants.supported_hosts, + protocol=None, + uses_kvm=False, +): + """ Helper class to generate common gem5 tests using verifiers. The generated TestSuite will run gem5 with the provided config and @@ -89,7 +91,7 @@ def gem5_verify_config(name, :param uses_kvm: States if this verifier uses KVM. If so, the "kvm" tag will be included. - ''' + """ fixtures = list(fixtures) testsuites = [] @@ -100,24 +102,24 @@ def gem5_verify_config(name, # Create a tempdir fixture to be shared throughout the test. tempdir = TempdirFixture() gem5_returncode = VariableFixture( - name=constants.gem5_returncode_fixture_name) + name=constants.gem5_returncode_fixture_name + ) # Common name of this generated testcase. - _name = '{given_name}-{isa}-{host}-{opt}'.format( - given_name=name, - isa=isa, - host=host, - opt=opt) + _name = "{given_name}-{isa}-{host}-{opt}".format( + given_name=name, isa=isa, host=host, opt=opt + ) if protocol: - _name += '-'+protocol + _name += "-" + protocol # Create the running of gem5 subtest. NOTE: We specifically # create this test before our verifiers so this is listed # first. 
tests = [] gem5_execution = TestFunction( - _create_test_run_gem5(config, config_args, gem5_args), - name=_name) + _create_test_run_gem5(config, config_args, gem5_args), + name=_name, + ) tests.append(gem5_execution) # Create copies of the verifier subtests for this isa and @@ -140,20 +142,21 @@ def gem5_verify_config(name, # Finally construct the self contained TestSuite out of our # tests. - testsuites.append(TestSuite( - name=_name, - fixtures=_fixtures, - tags=tags, - tests=tests)) + testsuites.append( + TestSuite( + name=_name, fixtures=_fixtures, tags=tags, tests=tests + ) + ) return testsuites + def _create_test_run_gem5(config, config_args, gem5_args): def test_run_gem5(params): - ''' + """ Simple \'test\' which runs gem5 and saves the result into a tempdir. NOTE: Requires fixtures: tempdir, gem5 - ''' + """ fixtures = params.fixtures if gem5_args is None: @@ -176,16 +179,21 @@ def _create_test_run_gem5(config, config_args, gem5_args): gem5 = fixtures[constants.gem5_binary_fixture_name].path command = [ gem5, - '-d', # Set redirect dir to tempdir. + "-d", # Set redirect dir to tempdir. tempdir, - '-re', # TODO: Change to const. Redirect stdout and stderr - '--silent-redirect', + "-re", # TODO: Change to const. Redirect stdout and stderr + "--silent-redirect", ] command.extend(_gem5_args) command.append(config) # Config_args should set up the program args. command.extend(config_args) - log_call(params.log, command, time=params.time, - stdout=sys.stdout, stderr=sys.stderr) + log_call( + params.log, + command, + time=params.time, + stdout=sys.stdout, + stderr=sys.stderr, + ) return test_run_gem5 diff --git a/tests/gem5/to_tick/configs/tick-exit.py b/tests/gem5/to_tick/configs/tick-exit.py new file mode 100644 index 0000000000..9b412cbfb6 --- /dev/null +++ b/tests/gem5/to_tick/configs/tick-exit.py @@ -0,0 +1,100 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" + +""" + +from gem5.resources.resource import Resource +from gem5.isas import ISA +from gem5.components.memory import SingleChannelDDR3_1600 +from gem5.components.boards.simple_board import SimpleBoard +from gem5.components.cachehierarchies.classic.no_cache import NoCache +from gem5.components.processors.simple_processor import SimpleProcessor +from gem5.components.processors.cpu_types import CPUTypes +from gem5.simulate.simulator import Simulator +from gem5.simulate.exit_event import ExitEvent + +import m5 + +import argparse + +parser = argparse.ArgumentParser() + +parser.add_argument( + "-t", + "--tick-exits", + type=int, + nargs="+", + required=True, + help="Set the tick exits to exit.", +) + +parser.add_argument( + "-r", + "--resource-directory", + type=str, + required=False, + help="The directory in which resources will be downloaded or exist.", +) + +args = parser.parse_args() + +# Setup the system. +motherboard = SimpleBoard( + clk_freq="3GHz", + processor=SimpleProcessor( + cpu_type=CPUTypes.TIMING, + isa=ISA.X86, + num_cores=1, + ), + memory=SingleChannelDDR3_1600(), + cache_hierarchy=NoCache(), +) + +# Set the workload +binary = Resource( + "x86-hello64-static", resource_directory=args.resource_directory +) +motherboard.set_se_binary_workload(binary) + + +def scheduled_tick_generator(): + while True: + print(f"Exiting at: {m5.curTick()}") + yield False + + +# Run the simulation +simulator = Simulator( + board=motherboard, + on_exit_event={ExitEvent.SCHEDULED_TICK: scheduled_tick_generator()}, +) + +for tick in args.tick_exits: + m5.scheduleTickExitFromCurrent(tick) + +simulator.run() diff --git a/tests/gem5/to_tick/configs/tick-to-max.py b/tests/gem5/to_tick/configs/tick-to-max.py new file mode 100644 index 0000000000..2b679df412 --- /dev/null +++ b/tests/gem5/to_tick/configs/tick-to-max.py @@ -0,0 +1,123 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This configuration script is used to test running a simulation to a specified +maximum tick. This script was setup to test setting the number of ticks to +run before, at, or after the running of `simulator.run`. + +**Note:** There can only ever be one MAX_TICK exit event scheduled at any one +time. 
+""" + +from gem5.resources.resource import Resource +from gem5.isas import ISA +from gem5.components.memory import SingleChannelDDR3_1600 +from gem5.components.boards.simple_board import SimpleBoard +from gem5.components.cachehierarchies.classic.no_cache import NoCache +from gem5.components.processors.simple_processor import SimpleProcessor +from gem5.components.processors.cpu_types import CPUTypes +from gem5.simulate.simulator import Simulator + +import m5 + +import argparse + +parser = argparse.ArgumentParser() + +parser.add_argument( + "-b", + "--set-ticks-before", + type=int, + required=False, + help="Set the number of ticks to run to prior to executing " + "`simulator.run`.", +) + +parser.add_argument( + "-e", + "--set-ticks-at-execution", + type=int, + required=False, + help="Set the number of ticks to run via `simulator.run`.", +) + +parser.add_argument( + "-a", + "--set-ticks-after", + type=int, + required=False, + help="Set the number of ticks to run after `simulator.run` has ceased " + "execution.", +) + +parser.add_argument( + "-r", + "--resource-directory", + type=str, + required=False, + help="The directory in which resources will be downloaded or exist.", +) + +args = parser.parse_args() + +# Setup the system. +motherboard = SimpleBoard( + clk_freq="3GHz", + processor=SimpleProcessor( + cpu_type=CPUTypes.TIMING, + isa=ISA.X86, + num_cores=1, + ), + memory=SingleChannelDDR3_1600(), + cache_hierarchy=NoCache(), +) + +# Set the workload +binary = Resource( + "x86-hello64-static", resource_directory=args.resource_directory +) +motherboard.set_se_binary_workload(binary) + +# Set the max ticks before setting up the simulation, if applicable. +if args.set_ticks_before: + m5.setMaxTick(args.set_ticks_before) + +# Run the simulation +simulator = Simulator(board=motherboard) + +if args.set_ticks_at_execution: + simulator.run(max_ticks=args.set_ticks_at_execution) +else: + simulator.run() + +# Set the max ticks after the simulator run. 
+if args.set_ticks_after: + m5.setMaxTick(args.set_ticks_after) + +print(f"Current Tick: {m5.curTick()}") +print(f"Current Max Tick: {m5.getMaxTick()}") +print(f"Ticks until max: {m5.getTicksUntilMax()}") diff --git a/tests/gem5/to_tick/ref/tick-exit-10-20-30-40.txt b/tests/gem5/to_tick/ref/tick-exit-10-20-30-40.txt new file mode 100644 index 0000000000..05f8159065 --- /dev/null +++ b/tests/gem5/to_tick/ref/tick-exit-10-20-30-40.txt @@ -0,0 +1,6 @@ +Global frequency set at 1000000000000 ticks per second +Exiting at: 10 +Exiting at: 20 +Exiting at: 30 +Exiting at: 40 +Hello world! diff --git a/tests/gem5/to_tick/ref/tick-exit-100.txt b/tests/gem5/to_tick/ref/tick-exit-100.txt new file mode 100644 index 0000000000..62f9330e13 --- /dev/null +++ b/tests/gem5/to_tick/ref/tick-exit-100.txt @@ -0,0 +1,3 @@ +Global frequency set at 1000000000000 ticks per second +Exiting at: 100 +Hello world! diff --git a/tests/gem5/to_tick/ref/tick-to-max-at-execution-100.txt b/tests/gem5/to_tick/ref/tick-to-max-at-execution-100.txt new file mode 100644 index 0000000000..1507716e42 --- /dev/null +++ b/tests/gem5/to_tick/ref/tick-to-max-at-execution-100.txt @@ -0,0 +1,4 @@ +Global frequency set at 1000000000000 ticks per second +Current Tick: 100 +Current Max Tick: 100 +Ticks until max: 0 diff --git a/tests/gem5/to_tick/ref/tick-to-max-at-execution-and-after-100-200.txt b/tests/gem5/to_tick/ref/tick-to-max-at-execution-and-after-100-200.txt new file mode 100644 index 0000000000..b1cde8ae4c --- /dev/null +++ b/tests/gem5/to_tick/ref/tick-to-max-at-execution-and-after-100-200.txt @@ -0,0 +1,4 @@ +Global frequency set at 1000000000000 ticks per second +Current Tick: 100 +Current Max Tick: 200 +Ticks until max: 100 diff --git a/tests/gem5/to_tick/ref/tick-to-max-before-execution-250.txt b/tests/gem5/to_tick/ref/tick-to-max-before-execution-250.txt new file mode 100644 index 0000000000..b26e9ebee2 --- /dev/null +++ b/tests/gem5/to_tick/ref/tick-to-max-before-execution-250.txt @@ -0,0 +1,4 @@ 
+Global frequency set at 1000000000000 ticks per second +Current Tick: 250 +Current Max Tick: 250 +Ticks until max: 0 diff --git a/tests/gem5/to_tick/test_to_tick.py b/tests/gem5/to_tick/test_to_tick.py new file mode 100644 index 0000000000..ba5bcbf9b9 --- /dev/null +++ b/tests/gem5/to_tick/test_to_tick.py @@ -0,0 +1,174 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from testlib import * + +if config.bin_path: + resource_path = config.bin_path +else: + resource_path = joinpath(absdirpath(__file__), "..", "resources") + +# This test sets the tick to max tick via the `simulator.run` function. This is +# set to 100. Therefore, at the end of the execution the expected current tick +# should be 100, with the max tick still 100. The number of expected ticks to +# max is therefore 0. +gem5_verify_config( + name="test-to-max-tick-at-execution-100", + verifiers=[ + verifier.MatchStdoutNoPerf( + joinpath(getcwd(), "ref", "tick-to-max-at-execution-100.txt") + ) + ], + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "to_tick", + "configs", + "tick-to-max.py", + ), + config_args=[ + "--resource-directory", + resource_path, + "--set-ticks-at-execution", + "100", + ], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, +) + +# This test sets the max tick via the `simulator.run` function at tick 100. +# The `m5.setMaxTick` function is then called after, passing the value 200 . +# This means at the end of execution the current tick is 100, and the max tick +# is 200. The number of expected ticks to max is therefore 100. +gem5_verify_config( + name="test-to-max-tick-at-execution-and-after-100-200", + verifiers=[ + verifier.MatchStdoutNoPerf( + joinpath( + getcwd(), + "ref", + "tick-to-max-at-execution-and-after-100-200.txt", + ) + ) + ], + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "to_tick", + "configs", + "tick-to-max.py", + ), + config_args=[ + "--resource-directory", + resource_path, + "--set-ticks-at-execution", + "100", + "--set-ticks-after", + "200", + ], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, +) + +# This test sets the max tick to 250 via the `m5.setMaxTick` prior to running +# `simulator.run`. 
This means at the end of execution the current tick is 250 +# and the max tick is 250. The expected number of ticks to max is therefore 0. +gem5_verify_config( + name="test-to-max-tick-before-execution-250", + verifiers=[ + verifier.MatchStdoutNoPerf( + joinpath(getcwd(), "ref", "tick-to-max-before-execution-250.txt") + ) + ], + fixtures=(), + config=joinpath( + config.base_dir, + "tests", + "gem5", + "to_tick", + "configs", + "tick-to-max.py", + ), + config_args=[ + "--resource-directory", + resource_path, + "--set-ticks-before", + "250", + ], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, +) + +# Tests the scheduling of a tick exit event at tick 100. +gem5_verify_config( + name="test-to-tick-exit-100", + verifiers=[ + verifier.MatchStdoutNoPerf( + joinpath(getcwd(), "ref", "tick-exit-100.txt") + ) + ], + fixtures=(), + config=joinpath( + config.base_dir, "tests", "gem5", "to_tick", "configs", "tick-exit.py" + ), + config_args=["--resource-directory", resource_path, "--tick-exits", "100"], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, +) + +# Tests the scheduling of a tick exit event at tick 10, 20, 30, and 40. 
+gem5_verify_config( + name="test-to-tick-exit-10-20-30-40", + verifiers=[ + verifier.MatchStdoutNoPerf( + joinpath(getcwd(), "ref", "tick-exit-10-20-30-40.txt") + ) + ], + fixtures=(), + config=joinpath( + config.base_dir, "tests", "gem5", "to_tick", "configs", "tick-exit.py" + ), + config_args=[ + "--resource-directory", + resource_path, + "--tick-exits", + "10", + "20", + "30", + "40", + ], + valid_isas=(constants.all_compiled_tag,), + valid_hosts=constants.supported_hosts, + length=constants.quick_tag, +) diff --git a/tests/gem5/traffic_gen/simple_traffic_run.py b/tests/gem5/traffic_gen/simple_traffic_run.py index ede49937ac..4e38155070 100644 --- a/tests/gem5/traffic_gen/simple_traffic_run.py +++ b/tests/gem5/traffic_gen/simple_traffic_run.py @@ -32,10 +32,13 @@ set of statistics. """ import m5 + import argparse import importlib +from pathlib import Path from m5.objects import Root, MemorySize +from m5.stats.gem5stats import get_simstat from gem5.components.boards.test_board import TestBoard @@ -70,9 +73,7 @@ def generator_factory( from gem5.components.processors.gups_generator import GUPSGenerator table_size = f"{int(mem_size / 2)}B" - return GUPSGenerator( - 0, table_size, update_limit=1000, clk_freq="2GHz" - ) + return GUPSGenerator(0, table_size, update_limit=1000, clk_freq="2GHz") elif generator_class == "GUPSGeneratorEP": from gem5.components.processors.gups_generator_ep import ( GUPSGeneratorEP, @@ -102,15 +103,13 @@ def cache_factory(cache_class: str): return NoCache() elif cache_class == "PrivateL1": - from gem5.components.cachehierarchies\ - .classic.private_l1_cache_hierarchy import ( + from gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy import ( PrivateL1CacheHierarchy, ) return PrivateL1CacheHierarchy(l1d_size="32KiB", l1i_size="32KiB") elif cache_class == "PrivateL1PrivateL2": - from gem5.components.cachehierarchies\ - .classic.private_l1_private_l2_cache_hierarchy import ( + from 
gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import ( PrivateL1PrivateL2CacheHierarchy, ) @@ -118,8 +117,7 @@ def cache_factory(cache_class: str): l1d_size="32KiB", l1i_size="32KiB", l2_size="256KiB" ) elif cache_class == "MESITwoLevel": - from gem5.components.cachehierarchies\ - .ruby.mesi_two_level_cache_hierarchy import ( + from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import ( MESITwoLevelCacheHierarchy, ) @@ -166,9 +164,7 @@ parser.add_argument( ) parser.add_argument( - "mem_module", - type=str, - help="The python module to import for memory.", + "mem_module", type=str, help="The python module to import for memory." ) parser.add_argument( @@ -181,7 +177,6 @@ parser.add_argument( help="The arguments needed to instantiate the memory class.", ) - args = parser.parse_args() cache_hierarchy = cache_factory(args.cache_class) @@ -199,13 +194,14 @@ generator = generator_factory( # tasks motherboard = TestBoard( clk_freq="3GHz", - processor=generator, # We pass the traffic generator as the processor. + generator=generator, memory=memory, cache_hierarchy=cache_hierarchy, ) root = Root(full_system=False, system=motherboard) +motherboard._pre_instantiate() m5.instantiate() generator.start_traffic() @@ -214,3 +210,8 @@ exit_event = m5.simulate() print( "Exiting @ tick {} because {}.".format(m5.curTick(), exit_event.getCause()) ) + +simstats = get_simstat(root, prepare_stats=True) +json_output = Path(m5.options.outdir) / "output.json" +with open(json_output, "w") as stats_file: + simstats.dump(stats_file, indent=2) diff --git a/tests/gem5/traffic_gen/test_memory_traffic_gen.py b/tests/gem5/traffic_gen/test_memory_traffic_gen.py index 5910f96fc1..122204e3e9 100644 --- a/tests/gem5/traffic_gen/test_memory_traffic_gen.py +++ b/tests/gem5/traffic_gen/test_memory_traffic_gen.py @@ -31,6 +31,8 @@ TODO: At present all the Single Channel memory components are tested. 
This should be expanded to included DRAMSIM3 memory systems. """ +import os + from testlib import * @@ -42,18 +44,6 @@ def test_memory( memory: str, *args, ) -> None: - protocol_map = { - "NoCache": None, - "PrivateL1": None, - "PrivateL1PrivateL2": None, - "MESITwoLevel": "MESI_Two_Level", - } - tag_map = { - "NoCache": constants.quick_tag, - "PrivateL1": constants.quick_tag, - "PrivateL1PrivateL2": constants.quick_tag, - "MESITwoLevel": constants.long_tag, - } name = ( "test-memory-" @@ -62,10 +52,21 @@ def test_memory( for arg in args: name += "-" + arg + stats_verifier = verifier.MatchJSONStats( + os.path.join( + os.path.dirname(__file__), + "trusted_stats", + f"{generator}-{generator_cores}-{cache}-{module}-{memory}", + "trusted_stats.json", + ), + "output.json", + True, + ) + gem5_verify_config( name=name, fixtures=(), - verifiers=(), + verifiers=(stats_verifier,), config=joinpath( config.base_dir, "tests", @@ -73,18 +74,12 @@ def test_memory( "traffic_gen", "simple_traffic_run.py", ), - config_args=[ - generator, - generator_cores, - cache, - module, - memory, - ] + config_args=[generator, generator_cores, cache, module] + + [memory] + list(args), - valid_isas=(constants.null_tag,), - protocol=protocol_map[cache], + valid_isas=(constants.all_compiled_tag,), valid_hosts=constants.supported_hosts, - length=tag_map[cache], + length=constants.quick_tag, ) @@ -132,11 +127,6 @@ def create_dual_core_tests(module, memory_classes): "512MiB", ) -create_single_core_tests( - "gem5.components.memory", - memory_classes, -) -create_dual_core_tests( - "gem5.components.memory", - memory_classes, -) + +create_single_core_tests("gem5.components.memory", memory_classes) +create_dual_core_tests("gem5.components.memory", memory_classes) diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..8f0565dc46 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7542118, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 102360837.0 + }, + "totalWriteLat": { + "value": 25013163.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..e93812f9f6 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7216444, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 96921243.0 + }, + "totalWriteLat": { + "value": 24910986.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..d739884408 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + 
"simulated_begin_time": 0, + "simulated_end_time": 6875119, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 91450343.0 + }, + "totalWriteLat": { + "value": 24581211.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..0504a13ec0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10279045, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 147098860.0 + }, + "totalWriteLat": { + "value": 26170348.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..50f2ea2831 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 6731596, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 87694067.0 + }, + "totalWriteLat": { + "value": 25894514.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ab94fee2db --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8350309, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 115620241.0 + }, + "totalWriteLat": { + "value": 25519547.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..5e9e4cae39 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8113879, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 108988873.0 + }, + "totalWriteLat": { + "value": 25628551.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..4a1e8c9fc4 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7707952, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 105036398.0 + }, + "totalWriteLat": { + "value": 25154639.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..7f05d79d51 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10359631, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 150230146.0 + }, + "totalWriteLat": { + "value": 24992647.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..b3d282b1de --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 14549437, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 220671117.0 + }, + "totalWriteLat": { + 
"value": 25791304.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..147b8be25c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 5489505, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 206638506.0 + }, + "totalWriteLat": { + "value": 16200850.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..b2e974a1b9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4678650, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 182618315.0 + }, + "totalWriteLat": { + "value": 15897317.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..b551de7922 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3758571, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 265521377.0 + }, + "totalWriteLat": { + "value": 15676732.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..bd08886f86 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 12585735, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 359411591.0 + }, + "totalWriteLat": { + "value": 20076558.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..02672c1bb1 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 1137195, + 
"system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 136012202.0 + }, + "totalWriteLat": { + "value": 60025754.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..c2e7ac3061 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 11296026, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 219317997.0 + }, + "totalWriteLat": { + "value": 19375973.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..478f30308f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9474516, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 188865306.0 + }, + "totalWriteLat": { + "value": 18532527.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..a622ce6f02 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7567425, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 302737167.0 + }, + "totalWriteLat": { + "value": 17422026.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..151904d7b1 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16397253, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 289867606.0 + }, + "totalWriteLat": { + "value": 22414383.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..9dd0db6e8a --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 25226082, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 453326817.0 + }, + "totalWriteLat": { + "value": 26266904.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..3963fa50d0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 12536118, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 99249350.0 + }, + "totalWriteLat": { + "value": 111088113.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..e7808aa6dd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 11642679, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 93359598.0 + }, + "totalWriteLat": { + "value": 102539041.0 + }, + 
"totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..ebd199ce53 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10706616, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 87577523.0 + }, + "totalWriteLat": { + "value": 93490111.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..21de3066cb --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 19123191, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 144504806.0 + }, + "totalWriteLat": { + "value": 177877743.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json new file 
mode 100644 index 0000000000..ce877d807b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8144847, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 67175287.0 + }, + "totalWriteLat": { + "value": 65933938.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..cb1b16043e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16486830, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 113060338.0 + }, + "totalWriteLat": { + "value": 163786312.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..317303b622 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 14557095, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 
105259241.0 + }, + "totalWriteLat": { + "value": 139761076.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..12d9673e2e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 13267053, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 99883815.0 + }, + "totalWriteLat": { + "value": 124093423.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..cf3da2b0d1 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 20975337, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 149941781.0 + }, + "totalWriteLat": { + "value": 202474301.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..4af69cf4e9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 32503464, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 221315733.0 + }, + "totalWriteLat": { + "value": 327211830.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..d354616901 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7033959, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 102466724.0 + }, + "totalWriteLat": { + "value": 17025363.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..6b499d152e --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 6729930, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 97324026.0 + }, + "totalWriteLat": { + "value": 16992685.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..233fca3f36 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 6350643, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 91246045.0 + }, + "totalWriteLat": { + "value": 16600704.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..00bef6dd16 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9694962, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 
147518849.0 + }, + "totalWriteLat": { + "value": 17212298.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..d71307592e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 5206122, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 72134772.0 + }, + "totalWriteLat": { + "value": 16254809.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..8db6b2b6cb --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7844481, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 116009709.0 + }, + "totalWriteLat": { + "value": 17169293.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..ed5b41e82e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7472187, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 109732745.0 + }, + "totalWriteLat": { + "value": 17232837.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..21e759ec9f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7061931, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 102627364.0 + }, + "totalWriteLat": { + "value": 17305038.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..5959b04b1c --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9880110, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 150899685.0 + }, + "totalWriteLat": { + "value": 16959847.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..38c693a788 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 14043942, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLat": { + "value": 221282327.0 + }, + "totalWriteLat": { + "value": 17269182.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..5c61a67745 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10032292, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 
125079302.0 + }, + "totalWriteLat": { + "value": 43399791.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 124423340.0 + }, + "totalWriteLat": { + "value": 44808114.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..b29fe68984 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9374617, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 113302038.0 + }, + "totalWriteLat": { + "value": 42751738.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 114152300.0 + }, + "totalWriteLat": { + "value": 42395818.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..49b22e4f2a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + 
"simulated_end_time": 8712946, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 105293872.0 + }, + "totalWriteLat": { + "value": 41707917.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 106340610.0 + }, + "totalWriteLat": { + "value": 40665541.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7784.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..72b218158d --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16205113, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 229811326.0 + }, + "totalWriteLat": { + "value": 43640974.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7744.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 232126411.0 + }, + "totalWriteLat": { + "value": 40656963.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..922a4e8918 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9195130, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 112369875.0 + }, + "totalWriteLat": { + "value": 42859175.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 113513817.0 + }, + "totalWriteLat": { + "value": 41689073.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7720.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..f6ac330d39 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 14136184, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 202116524.0 + }, + "totalWriteLat": { + "value": 36988784.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7272.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 198271792.0 + }, + "totalWriteLat": { + "value": 39762676.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..543a8aef25 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 12383938, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 165853933.0 + }, + "totalWriteLat": { + "value": 42651962.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 166782957.0 + }, + "totalWriteLat": { + "value": 41953442.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7968.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..66f1181589 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 11649340, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 154972644.0 + }, + "totalWriteLat": { + "value": 41929582.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 157104997.0 + }, + "totalWriteLat": { + "value": 39853795.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7568.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..146690ef2b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 18375940, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 277766238.0 + }, + "totalWriteLat": { + "value": 33571548.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 6640.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 264745168.0 + }, + "totalWriteLat": { + "value": 45518381.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ccc7a4fdfe --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 28556749, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 439656866.0 + }, + "totalWriteLat": { + "value": 38018008.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 443997697.0 + }, + "totalWriteLat": { + "value": 37138389.0 + }, + 
"totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 5968.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..d535d14471 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 11358963, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 205446750.0 + }, + "totalWriteLat": { + "value": 18470775.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7592.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 203556726.0 + }, + "totalWriteLat": { + "value": 19539472.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..d8094b3112 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9521802, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 179440423.0 + }, + "totalWriteLat": { + "value": 17826759.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7608.0 + } + } + }, + "cores1": { + "generator": { + 
"totalReadLat": { + "value": 178149943.0 + }, + "totalWriteLat": { + "value": 18747555.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..e11fbdf453 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7599060, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 280944605.0 + }, + "totalWriteLat": { + "value": 17611648.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 293396774.0 + }, + "totalWriteLat": { + "value": 17327059.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7872.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..3fe4f1837a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 25206768, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 387325734.0 + }, + "totalWriteLat": { + "value": 26448385.0 + }, + "totalBytesRead": { + "value": 8000.0 + 
}, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 391267577.0 + }, + "totalWriteLat": { + "value": 25420307.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7512.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..afd839eb8a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 1260405, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 149348443.0 + }, + "totalWriteLat": { + "value": 35160194.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 149852067.0 + }, + "totalWriteLat": { + "value": 34279970.0 + }, + "totalBytesRead": { + "value": 7992.0 + }, + "totalBytesWritten": { + "value": 7984.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..50e874d293 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 22856121, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 226463248.0 + }, + "totalWriteLat": { + "value": 
25151322.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 226526014.0 + }, + "totalWriteLat": { + "value": 24866222.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7888.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..237de79209 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 18915399, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 190655286.0 + }, + "totalWriteLat": { + "value": 23431220.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 190276732.0 + }, + "totalWriteLat": { + "value": 23084634.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7872.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..994e1acf74 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16433883, + "system": { + "processor": { + "cores0": 
{ + "generator": { + "totalReadLat": { + "value": 312415658.0 + }, + "totalWriteLat": { + "value": 22008771.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 318444166.0 + }, + "totalWriteLat": { + "value": 21645172.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7840.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..fb1ccd2444 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 33782184, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 303963986.0 + }, + "totalWriteLat": { + "value": 31454953.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7904.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 297979992.0 + }, + "totalWriteLat": { + "value": 31727275.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..429c7ee766 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + 
"simulated_begin_time": 0, + "simulated_end_time": 51539076, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 467420382.0 + }, + "totalWriteLat": { + "value": 39698708.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7936.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 465345572.0 + }, + "totalWriteLat": { + "value": 39824502.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ed802346a0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16667316, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 120857294.0 + }, + "totalWriteLat": { + "value": 160238288.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7664.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 117690896.0 + }, + "totalWriteLat": { + "value": 163831383.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..4f9181bcb4 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 14808510, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 108706323.0 + }, + "totalWriteLat": { + "value": 141682431.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 110968412.0 + }, + "totalWriteLat": { + "value": 138865086.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7856.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..e5fa110554 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 13181139, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 98498058.0 + }, + "totalWriteLat": { + "value": 123499961.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 99528067.0 + }, + "totalWriteLat": { + "value": 122525896.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7920.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..816534d267 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 33292674, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 232455752.0 + }, + "totalWriteLat": { + "value": 326697262.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 233058856.0 + }, + "totalWriteLat": { + "value": 325041046.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7888.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..3a91033bec --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8290701, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 69616430.0 + }, + "totalWriteLat": { + "value": 70041646.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 70127890.0 + }, + "totalWriteLat": { + "value": 70182752.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7744.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..1d6ba498c5 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 29294343, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 203059208.0 + }, + "totalWriteLat": { + "value": 292118938.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7936.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 196853786.0 + }, + "totalWriteLat": { + "value": 294552791.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..b8418783d3 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 24559083, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 166499867.0 + }, + "totalWriteLat": { + "value": 247218737.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7976.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 165835669.0 + }, + "totalWriteLat": { + "value": 247716211.0 + }, + 
"totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..d7e2968c61 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 21580065, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 158912796.0 + }, + "totalWriteLat": { + "value": 206068225.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7792.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 156486900.0 + }, + "totalWriteLat": { + "value": 204549052.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..549f9bbdbd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 40737888, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 272582084.0 + }, + "totalWriteLat": { + "value": 409834875.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7960.0 + } + } + }, + "cores1": { + "generator": { + 
"totalReadLat": { + "value": 275117608.0 + }, + "totalWriteLat": { + "value": 408572543.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..4ea2041cbd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 65319615, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 436683124.0 + }, + "totalWriteLat": { + "value": 630245015.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 436289856.0 + }, + "totalWriteLat": { + "value": 643489503.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7960.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..1d29c4e520 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8299359, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 123740827.0 + }, + "totalWriteLat": { + "value": 17246404.0 
+ }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 124324545.0 + }, + "totalWriteLat": { + "value": 16651388.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7664.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..b21dd02b84 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7678980, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 113431279.0 + }, + "totalWriteLat": { + "value": 16990832.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 113250349.0 + }, + "totalWriteLat": { + "value": 17187735.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7968.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..cb3db39d8d --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7090236, + 
"system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 104312866.0 + }, + "totalWriteLat": { + "value": 16081183.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7608.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 103574904.0 + }, + "totalWriteLat": { + "value": 16845822.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..0a3d2dc45d --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 14566419, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 234424781.0 + }, + "totalWriteLat": { + "value": 13064037.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 6240.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 230187504.0 + }, + "totalWriteLat": { + "value": 17322803.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..4f55c33192 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 5326668, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 74182256.0 + }, + "totalWriteLat": { + "value": 16302050.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 74300398.0 + }, + "totalWriteLat": { + "value": 16122081.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7928.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..5118c42efd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 12810843, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 201348316.0 + }, + "totalWriteLat": { + "value": 16340128.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7624.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 200333788.0 + }, + "totalWriteLat": { + "value": 17329778.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..7ba25a51c4 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10765224, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 165592990.0 + }, + "totalWriteLat": { + "value": 17306703.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 166834905.0 + }, + "totalWriteLat": { + "value": 16095377.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7464.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..e2cffc795b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10171152, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 155401717.0 + }, + "totalWriteLat": { + "value": 17389049.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 156114883.0 + }, + "totalWriteLat": { + "value": 16665669.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7760.0 + } + } + 
} + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..03072a5447 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16748235, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 275096528.0 + }, + "totalWriteLat": { + "value": 9529147.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 5200.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 269605000.0 + }, + "totalWriteLat": { + "value": 15020674.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..c750b56e7f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorEP-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 26927046, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 441529337.0 + }, + "totalWriteLat": { + "value": 16184556.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 8000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 441891507.0 
+ }, + "totalWriteLat": { + "value": 15772300.0 + }, + "totalBytesRead": { + "value": 8000.0 + }, + "totalBytesWritten": { + "value": 7712.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..1614c26ae5 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3929068, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 62582430.0 + }, + "totalWriteLat": { + "value": 3019146.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3056.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 61097728.0 + }, + "totalWriteLat": { + "value": 4833274.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..b234250b09 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3773890, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 57118920.0 + }, + "totalWriteLat": { + "value": 5765666.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + 
"totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 57292919.0 + }, + "totalWriteLat": { + "value": 6039702.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..0e494d0b7b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3474856, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 51737536.0 + }, + "totalWriteLat": { + "value": 5377921.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 53042654.0 + }, + "totalWriteLat": { + "value": 4987792.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3904.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ef540f04a9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7051276, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + 
"value": 112347402.0 + }, + "totalWriteLat": { + "value": 5541203.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 112213557.0 + }, + "totalWriteLat": { + "value": 4729017.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..5dfcd22075 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3579418, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 54655744.0 + }, + "totalWriteLat": { + "value": 5255714.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3744.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 54622490.0 + }, + "totalWriteLat": { + "value": 5172617.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..2da58299cf --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 
6122539, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 97891496.0 + }, + "totalWriteLat": { + "value": 4712488.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 99223280.0 + }, + "totalWriteLat": { + "value": 4129203.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3408.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..d794b8bf1c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 5129866, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 80518890.0 + }, + "totalWriteLat": { + "value": 5592085.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 81737927.0 + }, + "totalWriteLat": { + "value": 4381919.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3896.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..a4ff532108 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4627036, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 73343485.0 + }, + "totalWriteLat": { + "value": 4212627.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 74803403.0 + }, + "totalWriteLat": { + "value": 3120385.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 2408.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..1ce61c41a8 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8151841, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 130081625.0 + }, + "totalWriteLat": { + "value": 5169002.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 132125187.0 + }, + "totalWriteLat": { + "value": 4276582.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3984.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..b44f001660 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 13304683, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 218711419.0 + }, + "totalWriteLat": { + "value": 4569008.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 219850253.0 + }, + "totalWriteLat": { + "value": 5039520.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3768.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..66d15f5db0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 5260401, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 94999695.0 + }, + "totalWriteLat": { + "value": 9577121.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 102943715.0 + }, + "totalWriteLat": { + "value": 8416702.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3400.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..eed390d535 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4450545, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 80386471.0 + }, + "totalWriteLat": { + "value": 9222295.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 86773744.0 + }, + "totalWriteLat": { + "value": 8758955.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3736.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..fd26595270 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3683646, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 126151572.0 + }, + "totalWriteLat": { + "value": 8866999.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 129192246.0 + }, + "totalWriteLat": { + "value": 7524074.0 + }, + "totalBytesRead": { + "value": 
4000.0 + }, + "totalBytesWritten": { + "value": 3296.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..f5912071c3 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 12075246, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 179802122.0 + }, + "totalWriteLat": { + "value": 13069986.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 195270245.0 + }, + "totalWriteLat": { + "value": 11548476.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3296.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..02b0c8ea9f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 625707, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 58314857.0 + }, + "totalWriteLat": { + "value": 21008357.0 + }, + "totalBytesRead": { + "value": 3992.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 60223087.0 + }, + "totalWriteLat": 
{ + "value": 21864967.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..b058015111 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 11160162, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 105909685.0 + }, + "totalWriteLat": { + "value": 12671298.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 110632117.0 + }, + "totalWriteLat": { + "value": 12429535.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3912.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..5d4239a382 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9348642, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 90040100.0 + }, + "totalWriteLat": { + "value": 11940890.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } 
+ }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 91700301.0 + }, + "totalWriteLat": { + "value": 11696123.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3896.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..39eba12745 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 7627698, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 148107292.0 + }, + "totalWriteLat": { + "value": 10754974.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 150953636.0 + }, + "totalWriteLat": { + "value": 10327220.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3808.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..122e8b090e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 16296021, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 141827083.0 + }, + "totalWriteLat": { + "value": 15734259.0 + }, + 
"totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 146737433.0 + }, + "totalWriteLat": { + "value": 15584985.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3944.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..b5f1e6ff7f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 25157151, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 230106635.0 + }, + "totalWriteLat": { + "value": 19696036.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 235243087.0 + }, + "totalWriteLat": { + "value": 19271053.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3928.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..fbfef901cd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4983678, + "system": { + "processor": { + "cores0": { + 
"generator": { + "totalReadLat": { + "value": 61708682.0 + }, + "totalWriteLat": { + "value": 21982514.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3320.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 57877984.0 + }, + "totalWriteLat": { + "value": 24626039.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..7db73bbdf9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4701294, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 55347103.0 + }, + "totalWriteLat": { + "value": 22128249.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 54535719.0 + }, + "totalWriteLat": { + "value": 21552438.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..6e3202ef9c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + 
"simulated_begin_time": 0, + "simulated_end_time": 4140522, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 50841660.0 + }, + "totalWriteLat": { + "value": 18354413.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3944.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 50279508.0 + }, + "totalWriteLat": { + "value": 18907663.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..e054d0a82e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 9487170, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 112907184.0 + }, + "totalWriteLat": { + "value": 42686997.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 112122744.0 + }, + "totalWriteLat": { + "value": 44712587.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3832.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..80b7227079 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 2648682, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 33703872.0 + }, + "totalWriteLat": { + "value": 10568908.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3104.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 33359985.0 + }, + "totalWriteLat": { + "value": 9520358.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..7f4a5da3fd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8495829, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 100369508.0 + }, + "totalWriteLat": { + "value": 40139702.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3056.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 96769640.0 + }, + "totalWriteLat": { + "value": 37932707.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..f6829043f0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 6760899, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 81394785.0 + }, + "totalWriteLat": { + "value": 30604076.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3224.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 79331183.0 + }, + "totalWriteLat": { + "value": 31751636.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..5956a5c6c7 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 6211116, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 75549864.0 + }, + "totalWriteLat": { + "value": 29137912.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3744.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 74863829.0 + }, + "totalWriteLat": { + "value": 28665570.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..2717b695e3 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 10909413, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 133959920.0 + }, + "totalWriteLat": { + "value": 48198718.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 2928.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 128748260.0 + }, + "totalWriteLat": { + "value": 48958293.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..c844737a02 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 20256057, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 220792316.0 + }, + "totalWriteLat": { + "value": 115545610.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3592.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 216299901.0 + }, + "totalWriteLat": { + "value": 110736504.0 + }, + 
"totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..c552cb108d --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3834495, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 61520210.0 + }, + "totalWriteLat": { + "value": 1714011.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 61995779.0 + }, + "totalWriteLat": { + "value": 1873603.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3656.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..ea25c2a954 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3542121, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 56720161.0 + }, + "totalWriteLat": { + "value": 1436388.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + 
"value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 58108613.0 + }, + "totalWriteLat": { + "value": 1715428.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 2440.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..7ae4460732 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 3257406, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 51561324.0 + }, + "totalWriteLat": { + "value": 2586043.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 51789918.0 + }, + "totalWriteLat": { + "value": 2711635.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3592.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..0cf3ec1d96 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 6836490, + "system": { + "processor": { + "cores0": { + "generator": { + 
"totalReadLat": { + "value": 112853531.0 + }, + "totalWriteLat": { + "value": 1493403.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 113249481.0 + }, + "totalWriteLat": { + "value": 2012613.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3608.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..400812fdf3 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 2306025, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 35423987.0 + }, + "totalWriteLat": { + "value": 1663711.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 35652872.0 + }, + "totalWriteLat": { + "value": 1442250.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3920.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..8e5c7b75e5 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + 
"simulated_begin_time": 0, + "simulated_end_time": 5911416, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 97633555.0 + }, + "totalWriteLat": { + "value": 1037658.0 + }, + "totalBytesRead": { + "value": 3976.0 + }, + "totalBytesWritten": { + "value": 2832.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 96903004.0 + }, + "totalWriteLat": { + "value": 2591346.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..1d812776a0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4923405, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 80478448.0 + }, + "totalWriteLat": { + "value": 1534192.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 4000.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 81391012.0 + }, + "totalWriteLat": { + "value": 996400.0 + }, + "totalBytesRead": { + "value": 3976.0 + }, + "totalBytesWritten": { + "value": 2272.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..0c45850a6f --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 4530798, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 73209572.0 + }, + "totalWriteLat": { + "value": 2080533.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 75742418.0 + }, + "totalWriteLat": { + "value": 835244.0 + }, + "totalBytesRead": { + "value": 3992.0 + }, + "totalBytesWritten": { + "value": 1432.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..e15464943e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 8157834, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 130488540.0 + }, + "totalWriteLat": { + "value": 2049895.0 + }, + "totalBytesRead": { + "value": 3992.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 131591437.0 + }, + "totalWriteLat": { + "value": 1941688.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..40ffd5f46f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/GUPSGeneratorPAR-2-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,40 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 13135518, + "system": { + "processor": { + "cores0": { + "generator": { + "totalReadLat": { + "value": 219112030.0 + }, + "totalWriteLat": { + "value": 1782217.0 + }, + "totalBytesRead": { + "value": 3992.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + }, + "cores1": { + "generator": { + "totalReadLat": { + "value": 219615304.0 + }, + "totalWriteLat": { + "value": 2072225.0 + }, + "totalBytesRead": { + "value": 4000.0 + }, + "totalBytesWritten": { + "value": 3992.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..bba02ddeb0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4245920643.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4182976.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..6ef3fe3a56 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001415, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248361173.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4141376.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..0b1116cfd8 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000083, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4245772705.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4148544.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..727cfcc450 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 
@@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250006077, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4246690359.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2151936.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..ae08e85c76 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001748, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248256552.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2953984.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..984b52b96c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002414, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248723076.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2317696.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..9c3942acd9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000083, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248827748.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2857536.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..a954f8b663 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003746, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248800773.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2281984.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..a7438fa8e1 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002414, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4249248263.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3002624.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..616e6ef5a9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250007409, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4247691213.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1088256.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..35785c1ef1 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 18043000926.0 + }, + "totalWriteLatency": { + 
"value": 0.0 + }, + "bytesRead": { + "value": 4994560.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..95c27005ad --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250025006, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 18503578414.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 6252672.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..0ba34a4350 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000908, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 34701634682.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 6915200.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ceadf990a7 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002500, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 9098236064.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2344960.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..ccf769e46a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 18249803880.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 10734272.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..a095b9099c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 
250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 9166054204.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2497280.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..5adac450c0 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250025006, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 9402800995.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3126336.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..674fcded08 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000908, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 17508433581.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3457600.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..af8be8d268 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 9652577939.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3800768.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..968f25c2ad --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002500, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4683075845.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1172480.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ba0f235f08 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001749, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4252050791.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4436480.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..e173cf01ef --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4246491883.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4920832.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..d1d9a01e4e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001749, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4243889193.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, 
+ "bytesRead": { + "value": 4405120.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..96309baa7b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250008409, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4245927491.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2152704.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..5124a01f66 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4166094112.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4085120.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 
index 0000000000..6cbabf0bec --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002082, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260704266.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2317696.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..18b2f4df7a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4263740796.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2856704.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..266a0008e8 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001416, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 
4260738293.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2292480.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..9cafd92c8a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250029388, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4264679197.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3005440.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..ddc071f944 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250007077, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4253313893.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1088256.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..803a6bb329 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4255602694.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4543872.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..f1179b4aed --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250007410, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4256499112.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 5162368.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..48ce27595c --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250020397, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4255252272.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4470784.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..79349e2732 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250017067, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4251488807.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2150400.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..cfb7bacec3 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000750, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4222122030.0 + }, + 
"totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4595136.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..bb5cdabb57 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002082, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260734567.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2317824.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..f0dc495b32 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250027390, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4263940608.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2855936.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..0aab437234 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000417, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260785572.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2292928.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..84b81150dd --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4264812036.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3007232.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..8a3d299ba0 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/LinearGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250008076, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4253329544.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1088256.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..4f12b9c24c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250028721, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248284253.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2547392.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..8a5515ae28 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248222582.0 + }, + 
"totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2668032.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..65c78b8db8 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000416, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248158907.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2833408.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..938cc8451b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250020729, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4247199836.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1809024.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..f73b4cecaa --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000749, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248765604.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3032448.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..15eae6344d --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250006743, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248823191.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2224896.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..ff30b6aec1 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + 
"simulated_begin_time": 0, + "simulated_end_time": 250004412, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248763435.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2364544.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..55917cab2c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4249078903.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2478592.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..4848501c34 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003746, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4248671768.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1726144.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..4633015d8e --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-MESITwoLevel-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250004745, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4247981174.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1209792.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..0cadb72796 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 15775379567.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 5207232.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..53036dce0c --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001788, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 16402568630.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 6375232.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..f875e2b37c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002304, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 29346214582.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 7040320.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..bbdb654cc9 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 7786959933.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + 
"bytesRead": { + "value": 2300480.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..798e418799 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 12794362311.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 10735424.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..b0acda1e16 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003500, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 9243261890.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2703168.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 
0000000000..03be3eef9c --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003518, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 9477626381.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3332352.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..0fdebdbff8 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002138, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 17575337411.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3628864.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..97b034fb48 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250005000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 8977276714.0 + }, + 
"totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1988160.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..95bde29175 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-NoCache-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001000, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4696557408.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1214208.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..0ba34c3595 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250003081, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4259738109.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2654336.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..4188e2fb3b --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250004080, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260124984.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2798400.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..dd004fc3bf --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002082, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260914446.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2987008.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..613c90095f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + 
"simulated_begin_time": 0, + "simulated_end_time": 250001083, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4255661095.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1832384.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..8563af8f12 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001749, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4250934366.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 4001536.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..b3e1542c68 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000417, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260599998.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2277568.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git 
a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..7acc064541 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002415, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4261668833.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2475456.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..fc21ba2f85 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250015069, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4262131078.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2600704.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..eab94702b7 --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250007410, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4257102233.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1752768.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..5e8ae5871a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250008409, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4254267025.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1210624.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..fcf3425f08 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000417, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260420343.0 + }, + 
"totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2554688.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..342a4ad38f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250015402, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260623570.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2679232.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..e4e3e25ae5 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002748, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4261676990.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2855744.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json 
b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..9d68663dbc --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-DualChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250000084, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4254186811.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1811392.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json new file mode 100644 index 0000000000..1a7b12667f --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-HBM2Stack/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250007743, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4256212088.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 3741568.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..30182e03de --- /dev/null +++ 
b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250007077, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260335421.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2224832.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json new file mode 100644 index 0000000000..ec9647fa2a --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR3_2133/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250002082, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4260501928.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2373568.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json new file mode 100644 index 0000000000..6cf6f0a637 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelDDR4_2400/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250009075, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": 
{ + "value": 4261831387.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 2493568.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json new file mode 100644 index 0000000000..fa9406f550 --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelHBM/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250001749, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4256949904.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1730496.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json new file mode 100644 index 0000000000..00fce128ac --- /dev/null +++ b/tests/gem5/traffic_gen/trusted_stats/RandomGenerator-1-PrivateL1PrivateL2-gem5.components.memory-SingleChannelLPDDR3_1600/trusted_stats.json @@ -0,0 +1,24 @@ +{ + "simulated_begin_time": 0, + "simulated_end_time": 250015069, + "system": { + "processor": { + "cores": { + "generator": { + "totalReadLatency": { + "value": 4254233261.0 + }, + "totalWriteLatency": { + "value": 0.0 + }, + "bytesRead": { + "value": 1209984.0 + }, + "bytesWritten": { + "value": 0.0 + } + } + } + } + } +} diff --git a/tests/gem5/verifier.py b/tests/gem5/verifier.py index c947a62825..075cec15d2 100644 --- a/tests/gem5/verifier.py +++ 
b/tests/gem5/verifier.py @@ -37,16 +37,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -''' +""" Built in test cases that verify particular details about a gem5 run. -''' +""" import re import os +import json from testlib import test_util from testlib.configuration import constants from testlib.helper import joinpath, diff_out_file + class Verifier(object): def __init__(self, fixtures=tuple()): self.fixtures = fixtures @@ -57,12 +59,14 @@ class Verifier(object): self.test(*args, **kwargs) def instantiate_test(self, name_pfx): - name = '-'.join([name_pfx, self.__class__.__name__]) - return test_util.TestFunction(self._test, - name=name, fixtures=self.fixtures) + name = "-".join([name_pfx, self.__class__.__name__]) + return test_util.TestFunction( + self._test, name=name, fixtures=self.fixtures + ) + class CheckH5StatsExist(Verifier): - def __init__(self, stats_file='stats.h5'): + def __init__(self, stats_file="stats.h5"): super(CheckH5StatsExist, self).__init__() self.stats_file = stats_file @@ -70,23 +74,26 @@ class CheckH5StatsExist(Verifier): tempdir = params.fixtures[constants.tempdir_fixture_name].path h5_file = joinpath(tempdir, self.stats_file) if not os.path.isfile(h5_file): - test_util.fail('Could not find h5 stats file %s', h5_file) + test_util.fail("Could not find h5 stats file %s", h5_file) + class MatchGoldStandard(Verifier): - ''' + """ Compares a standard output to the test output and passes if they match, fails if they do not. - ''' - def __init__(self, standard_filename, ignore_regex=None, - test_filename='simout'): - ''' + """ + + def __init__( + self, standard_filename, ignore_regex=None, test_filename="simout" + ): + """ :param standard_filename: The path of the standard file to compare output to. :param ignore_regex: A string, compiled regex, or iterable containing either which will be ignored in 'standard' and test output files when diffing. 
- ''' + """ super(MatchGoldStandard, self).__init__() self.standard_filename = standard_filename self.test_filename = test_filename @@ -100,31 +107,39 @@ class MatchGoldStandard(Verifier): tempdir = fixtures[constants.tempdir_fixture_name].path self.test_filename = joinpath(tempdir, self.test_filename) - diff = diff_out_file(self.standard_filename, - self.test_filename, - ignore_regexes=self.ignore_regex, - logger=params.log) + diff = diff_out_file( + self.standard_filename, + self.test_filename, + ignore_regexes=self.ignore_regex, + logger=params.log, + ) if diff is not None: - test_util.fail('Stdout did not match:\n%s\nSee %s for full results' - % (diff, tempdir)) + test_util.fail( + "Stdout did not match:\n%s\nSee %s for full results" + % (diff, tempdir) + ) def _generic_instance_warning(self, kwargs): - ''' + """ Method for helper classes to tell users to use this more generic class if they are going to manually override the test_filename param. - ''' - if 'test_filename' in kwargs: - raise ValueError('If you are setting test_filename use the more' - ' generic %s' - ' instead' % MatchGoldStandard.__name__) + """ + if "test_filename" in kwargs: + raise ValueError( + "If you are setting test_filename use the more" + " generic %s" + " instead" % MatchGoldStandard.__name__ + ) + class DerivedGoldStandard(MatchGoldStandard): __ignore_regex_sentinel = object() _file = None _default_ignore_regex = [] - def __init__(self, standard_filename, - ignore_regex=__ignore_regex_sentinel, **kwargs): + def __init__( + self, standard_filename, ignore_regex=__ignore_regex_sentinel, **kwargs + ): if ignore_regex == self.__ignore_regex_sentinel: ignore_regex = self._default_ignore_regex @@ -135,58 +150,66 @@ class DerivedGoldStandard(MatchGoldStandard): standard_filename, test_filename=self._file, ignore_regex=ignore_regex, - **kwargs) + **kwargs, + ) + class MatchStdout(DerivedGoldStandard): _file = constants.gem5_simulation_stdout _default_ignore_regex = [ - re.compile('^\s+$'), # 
Remove blank lines. - re.compile('^gem5 Simulator System'), - re.compile('^gem5 is copyrighted software'), - re.compile('^Redirecting (stdout|stderr) to'), - re.compile('^gem5 version '), - re.compile('^gem5 compiled '), - re.compile('^gem5 started '), - re.compile('^gem5 executing on '), - re.compile('^command line:'), - re.compile("^Couldn't import dot_parser,"), - re.compile("^info: kernel located at:"), - re.compile("^info: Standard input is not a terminal"), - re.compile("^Couldn't unlink "), - re.compile("^Using GPU kernel code file\(s\) "), - re.compile("^.* not found locally\. Downloading"), - re.compile("^Finished downloading"), - ] + re.compile("^\s+$"), # Remove blank lines. + re.compile("^gem5 Simulator System"), + re.compile("^gem5 is copyrighted software"), + re.compile("^Redirecting (stdout|stderr) to"), + re.compile("^gem5 version "), + re.compile("^gem5 compiled "), + re.compile("^gem5 started "), + re.compile("^gem5 executing on "), + re.compile("^command line:"), + re.compile("^Couldn't import dot_parser,"), + re.compile("^info: kernel located at:"), + re.compile("^info: Standard input is not a terminal"), + re.compile("^Couldn't unlink "), + re.compile("^Using GPU kernel code file\(s\) "), + re.compile("^.* not found locally\. Downloading"), + re.compile("^Finished downloading"), + ] + class MatchStdoutNoPerf(MatchStdout): _file = constants.gem5_simulation_stdout _default_ignore_regex = MatchStdout._default_ignore_regex + [ - re.compile('^Exiting @ tick'), - ] + re.compile("^Exiting @ tick") + ] + class MatchStderr(DerivedGoldStandard): _file = constants.gem5_simulation_stderr _default_ignore_regex = [] + class MatchStats(DerivedGoldStandard): # TODO: Likely will want to change this verifier since we have the weird # perl script right now. A simple diff probably isn't going to work. 
_file = constants.gem5_simulation_stats _default_ignore_regex = [] + class MatchConfigINI(DerivedGoldStandard): _file = constants.gem5_simulation_config_ini _default_ignore_regex = ( - re.compile("^(executable|readfile|kernel|image_file)="), - re.compile("^(cwd|input|codefile)="), - ) + re.compile("^(executable|readfile|kernel|image_file)="), + re.compile("^(cwd|input|codefile)="), + ) + class MatchConfigJSON(DerivedGoldStandard): _file = constants.gem5_simulation_config_json _default_ignore_regex = ( - re.compile(r'''^\s*"(executable|readfile|kernel|image_file)":'''), - re.compile(r'''^\s*"(cwd|input|codefile)":'''), - ) + re.compile(r"""^\s*"(executable|readfile|kernel|image_file)":"""), + re.compile(r"""^\s*"(cwd|input|codefile)":"""), + ) + class MatchFileRegex(Verifier): """ @@ -194,13 +217,14 @@ class MatchFileRegex(Verifier): of files. Verifier will pass as long as the pattern is found in at least one of the files. """ + def __init__(self, regex, filenames): super(MatchFileRegex, self).__init__() self.regex = _iterable_regex(regex) self.filenames = filenames def parse_file(self, fname): - with open(fname, 'r') as file_: + with open(fname, "r") as file_: for line in file_: for regex in self.regex: if re.match(regex, line): @@ -213,14 +237,16 @@ class MatchFileRegex(Verifier): for fname in self.filenames: if self.parse_file(joinpath(tempdir, fname)): - return # Success + return # Success + + test_util.fail("Could not match regex.") - test_util.fail('Could not match regex.') class MatchRegex(MatchFileRegex): """ Looking for a match between a regex pattern and stdout/stderr. 
""" + def __init__(self, regex, match_stderr=True, match_stdout=True): filenames = list() if match_stdout: @@ -229,10 +255,12 @@ class MatchRegex(MatchFileRegex): filenames.append(constants.gem5_simulation_stderr) super(MatchRegex, self).__init__(regex, filenames) + class NoMatchRegex(MatchRegex): """ Checks that the given pattern does *not* match """ + def __init__(self, regex, match_stderr=True, match_stdout=True): super(NoMatchRegex, self).__init__(regex, match_stderr, match_stdout) @@ -242,10 +270,72 @@ class NoMatchRegex(MatchRegex): for fname in self.filenames: if self.parse_file(joinpath(tempdir, fname)): - test_util.fail('Could not match regex.') + test_util.fail("Could not match regex.") + + +class MatchJSONStats(Verifier): + """ + Verifer to check the correctness of stats reported by gem5. It uses + gem5stats to store the stastistics as json files and does the comparison. + """ + + def __init__( + self, + truth_name: str, + test_name: str, + test_name_in_outdir: bool = False, + ): + """ + :param truth_dir: The path to the directory including the trusted_stats + for this test. + :param test_name_in_m5out: True if the 'test_name' dir is to found in + the `m5.options.outdir`. 
+ """ + super(MatchJSONStats, self).__init__() + self.truth_name = truth_name + self.test_name = test_name + self.test_name_in_outdir = test_name_in_outdir + + def _compare_stats(self, trusted_file, test_file): + trusted_stats = json.load(trusted_file) + test_stats = json.load(test_file) + is_subset = trusted_stats.items() <= test_stats.items() + if is_subset: + err = ( + "Following differences found between " + + f"{self.truth_name} and {self.test_name}.\n" + ) + diffs = set(trusted_stats.items()) - set(test_stats.items()) + for diff in diffs: + trusted_value = trusted_stats[diff[0]] + test_value = None + if diff[0] in test_stats.keys(): + test_value = test_stats[diff[0]] + err += f"{diff[0]}:\n" + err += ( + f"trusted_value: {trusted_value}, " + + f"test_value: {test_value}" + ) + test_util.fail(err) + + def test(self, params): + trusted_file = open(self.truth_name, "r") + if self.test_name_in_outdir: + fixtures = params.fixtures + tempdir = fixtures[constants.tempdir_fixture_name].path + test_file = open(joinpath(tempdir, self.test_name), "r") + else: + test_file = open(self.test_name, "r") + + return self._compare_stats(trusted_file, test_file) + + +_re_type = type(re.compile("")) + -_re_type = type(re.compile('')) def _iterable_regex(regex): + if not regex: + return () # If no regex we return an empty tuple. 
if isinstance(regex, _re_type) or isinstance(regex, str): regex = (regex,) return regex diff --git a/tests/gem5/x86-boot-tests/test_linux_boot.py b/tests/gem5/x86-boot-tests/test_linux_boot.py index 8b517beed5..76d593bd3a 100644 --- a/tests/gem5/x86-boot-tests/test_linux_boot.py +++ b/tests/gem5/x86-boot-tests/test_linux_boot.py @@ -64,24 +64,20 @@ def test_boot( if mem_system == "mesi_two_level": protocol_to_use = None - isa_to_use=constants.x86_tag + isa_to_use = constants.all_compiled_tag elif mem_system == "mi_example": protocol_to_use = "MI_example" - isa_to_use=constants.x86_tag + isa_to_use = constants.x86_tag else: - protocol_to_use=None - isa_to_use=constants.vega_x86_tag + protocol_to_use = None + isa_to_use = constants.all_compiled_tag gem5_verify_config( name=name, verifiers=verifiers, fixtures=(), config=joinpath( - config.base_dir, - "tests", - "gem5", - "configs", - "x86_boot_exit_run.py", + config.base_dir, "tests", "gem5", "configs", "x86_boot_exit_run.py" ), config_args=[ "--cpu", @@ -112,7 +108,7 @@ test_boot( num_cpus=1, mem_system="classic", memory_class="SingleChannelDDR3_1600", - to_tick=10000000000, #Simulates 1/100th of a second. + to_tick=10000000000, # Simulates 1/100th of a second. 
length=constants.quick_tag, ) @@ -154,39 +150,13 @@ test_boot( #### The long (Nightly) tests #### -test_boot( - cpu="atomic", - num_cpus=1, - mem_system="classic", - memory_class="SingleChannelHBM", - boot_type="init", - length=constants.long_tag, -) test_boot( cpu="timing", num_cpus=1, mem_system="mesi_two_level", memory_class="DualChannelDDR3_1600", - boot_type="init", - length=constants.long_tag, -) - -test_boot( - cpu="timing", - num_cpus=1, - mem_system="mi_example", - memory_class="DualChannelDDR3_2133", - boot_type="init", - length=constants.long_tag, -) - -test_boot( - cpu="timing", - num_cpus=4, - mem_system="classic", - memory_class="DualChannelDDR3_2133", - boot_type="init", + boot_type="systemd", length=constants.long_tag, ) @@ -204,23 +174,15 @@ test_boot( # https://gem5.atlassian.net/browse/GEM5-1120, this test has been disabled # until the exact error causing the Nightly tests to timeout is established. -#test_boot( +# test_boot( # cpu="o3", # num_cpus=2, # mem_system="mesi_two_level", # memory_class="DualChannelDDR4_2400" # boot_type="init", # length=constants.long_tag, -#) +# ) -test_boot( - cpu="atomic", - num_cpus=4, - mem_system="classic", - memory_class="HBM2Stack", - boot_type="systemd", - length=constants.long_tag, -) #### The very-long (Weekly) tests #### @@ -283,7 +245,7 @@ run_map = { "timing": { 1: True, 2: False, # Disabled due to - # https://gem5.atlassian.net/browse/GEM5-1219. + # https://gem5.atlassian.net/browse/GEM5-1219. 4: True, 8: True, }, @@ -307,7 +269,7 @@ for mem_system in run_map: memory_class="DualChannelDDR4_2400", boot_type="systemd", length=constants.very_long_tag, - ) + ) # To ensure the O3 CPU is working correctly, we include some "init" tests here. 
# There were not included above as booting to "systemd" takes too long with @@ -347,4 +309,3 @@ test_boot( boot_type="init", length=constants.very_long_tag, ) - diff --git a/tests/jenkins/gem5art-tests.sh b/tests/jenkins/gem5art-tests.sh index b9dee556b6..c655fbbc8c 100755 --- a/tests/jenkins/gem5art-tests.sh +++ b/tests/jenkins/gem5art-tests.sh @@ -43,14 +43,6 @@ mkdir -p .pyenv python3 -m venv .pyenv source .pyenv/bin/activate -# The 20.04_all-dependencies image has a slightly outdated version of pip which -# causes problems when trying to install the celery package. The error -# is `invalid command 'bdist_wheel'`, which can be resolved by upgrading pip -# prior to installing the modules. More information on this error is found -# here: -# https://stackoverflow.com/questions/34819221/why-is-python-setup-py-saying-invalid-command-bdist-wheel-on-travis-ci - pip install --upgrade pip - # Install the packages pip install -e util/gem5art/artifact pip install -e util/gem5art/run diff --git a/tests/jenkins/presubmit-stage2.sh b/tests/jenkins/presubmit-stage2.sh index 6e73394a0d..745a7bbeb2 100755 --- a/tests/jenkins/presubmit-stage2.sh +++ b/tests/jenkins/presubmit-stage2.sh @@ -37,6 +37,10 @@ set -e +# Run pre-commit style checks +PRE_COMMIT_HOME=/tmp/pre-commit-cache pre-commit run \ + --from-ref HEAD~ --to-ref HEAD + # Use ccache with the default directory for caching #XXX Not available in docker image. #export PATH="/usr/lib/ccache:$PATH" @@ -47,4 +51,4 @@ set -e # Once complete, run the Google Tests cd tests ./main.py run -j4 -t4 gem5 -vv && scons -C .. 
--no-compress-debug \ - build/ARM/unittests.opt + --ignore-style build/ARM/unittests.opt diff --git a/tests/jenkins/presubmit.sh b/tests/jenkins/presubmit.sh index 2883bf7d33..36da3facd1 100755 --- a/tests/jenkins/presubmit.sh +++ b/tests/jenkins/presubmit.sh @@ -37,8 +37,8 @@ set -e -DOCKER_IMAGE_ALL_DEP=gcr.io/gem5-test/ubuntu-20.04_all-dependencies:latest -DOCKER_IMAGE_CLANG_COMPILE=gcr.io/gem5-test/clang-version-11:latest +DOCKER_IMAGE_ALL_DEP=gcr.io/gem5-test/ubuntu-22.04_all-dependencies:v22-1 +DOCKER_IMAGE_CLANG_COMPILE=gcr.io/gem5-test/clang-version-14:v22-1 PRESUBMIT_STAGE2=tests/jenkins/presubmit-stage2.sh GEM5ART_TESTS=tests/jenkins/gem5art-tests.sh @@ -61,11 +61,9 @@ docker run -u $UID:$GID --volume $(pwd):$(pwd) -w $(pwd) --rm \ # DOCKER_IMAGE_ALL_DEP compiles gem5.opt with GCC. We run a compilation of # gem5.fast on the Clang compiler to ensure changes are compilable with the -# clang compiler. Due to the costs of compilation, we only compile -# ARM_MESI_Three_Level_HTM at this point. Further compiler tests are carried -# out as part of our weekly "Compiler Checks" tests: -# http://jenkins.gem5.org/job/Compiler-Checks. +# clang compiler. rm -rf build docker run -u $UID:$GID --volume $(pwd):$(pwd) -w $(pwd) --rm \ "${DOCKER_IMAGE_CLANG_COMPILE}" /usr/bin/env python3 /usr/bin/scons \ - build/ARM_MESI_Three_Level_HTM/gem5.fast -j4 --no-compress-debug + build/ALL/gem5.fast -j4 --no-compress-debug \ + --ignore-style diff --git a/tests/main.py b/tests/main.py index 81e476448a..77ab73d3c0 100755 --- a/tests/main.py +++ b/tests/main.py @@ -1,10 +1,10 @@ #!/usr/bin/env python3 -''' +""" The main source for testlib. Ties together the default test runners and loaders. Discovers and runs all tests from a given root directory. 
-''' +""" import sys import os @@ -12,7 +12,7 @@ import os os.environ["PYTHONUNBUFFERED"] = "1" base_dir = os.path.dirname(os.path.abspath(__file__)) -ext_path = os.path.join(base_dir, os.pardir, 'ext') +ext_path = os.path.join(base_dir, os.pardir, "ext") sys.path.insert(0, base_dir) sys.path.insert(0, ext_path) diff --git a/tests/nightly.sh b/tests/nightly.sh index 4ee895142d..bf05154fe7 100755 --- a/tests/nightly.sh +++ b/tests/nightly.sh @@ -35,6 +35,10 @@ gem5_root="${dir}/.." # The per-container Docker memory limit. docker_mem_limit="18g" +# The docker tag to use (varies between develop, and versions on the staging +# branch) +tag="v22-1" + # The first argument is the number of threads to be used for compilation. If no # argument is given we default to one. compile_threads=1 @@ -61,39 +65,18 @@ if [[ "$gpu_isa" != "GCN3_X86" ]] && [[ "$gpu_isa" != "VEGA_X86" ]]; then exit 1 fi -build_target () { - isa=$1 - - # Try to build. If not, delete the build directory and try again. - # SCons is not perfect, and occasionally does not catch a necessary - # compilation: https://gem5.atlassian.net/browse/GEM5-753 - docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ - "${gem5_root}" --memory="${docker_mem_limit}" --rm \ - gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-0 \ - bash -c "scons build/${isa}/gem5.opt -j${compile_threads} \ - || (rm -rf build && scons build/${isa}/gem5.opt -j${compile_threads})" -} - unit_test () { build=$1 docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" --rm \ - gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-0 \ - scons build/NULL/unittests.${build} -j${compile_threads} + gcr.io/gem5-test/ubuntu-22.04_all-dependencies:${tag} \ + scons build/ALL/unittests.${build} -j${compile_threads} \ + --ignore-style } # Ensure we have the latest docker images. -docker pull gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-0 - -# Try to build the ISA targets. 
-build_target NULL -build_target RISCV -build_target X86 -build_target ARM -build_target SPARC -build_target MIPS -build_target POWER +docker pull gcr.io/gem5-test/ubuntu-22.04_all-dependencies:${tag} # Run the unit tests. unit_test opt @@ -102,7 +85,7 @@ unit_test debug # Run the gem5 long tests. docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}"/tests --memory="${docker_mem_limit}" --rm \ - gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-0 \ + gcr.io/gem5-test/ubuntu-22.04_all-dependencies:${tag} \ ./main.py run --length long -j${compile_threads} -t${run_threads} -vv # Unfortunately, due docker being unable run KVM, we do so separately. @@ -110,19 +93,25 @@ docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ # removes all those part of the 'very-long' (weekly) tests, or for compilation # to '.debug' or '.fast'. We also remove ARM targets as our Jenkins is an X86 # system. Users wishing to run this script elsewhere should be aware of this. +# Note: we delete the build directory here. It was build in the +# "ubuntu-22.04_all-dependencies" docker image which may not be compatible with +# the host environment. +rm -rf "${gem5_root}/build" cd "${gem5_root}/tests" ./main.py run -j${compile_threads} -vv \ --exclude-tags ".*" --include-tags kvm --exclude-tags very\-long \ - --exclude-tags debug --exclude-tags fast --exclude-tags ARM + --exclude-tags debug --exclude-tags fast --exclude-tags ARM \ + --exclude-tags aarch64 cd "${gem5_root}" # For the GPU tests we compile and run the GPU ISA inside a gcn-gpu container. 
-docker pull gcr.io/gem5-test/gcn-gpu:v22-0 +docker pull gcr.io/gem5-test/gcn-gpu:${tag} docker run --rm -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" \ - gcr.io/gem5-test/gcn-gpu:v22-0 bash -c \ - "scons build/${gpu_isa}/gem5.opt -j${compile_threads} \ - || (rm -rf build && scons build/${gpu_isa}/gem5.opt -j${compile_threads})" + gcr.io/gem5-test/gcn-gpu:${tag} bash -c \ + "scons build/${gpu_isa}/gem5.opt -j${compile_threads} --ignore-style \ + || (rm -rf build && scons build/${gpu_isa}/gem5.opt \ + -j${compile_threads} --ignore-style)" # get square wget -qN http://dist.gem5.org/dist/develop/test-progs/square/square @@ -134,7 +123,7 @@ mkdir -p tests/testing-results # basic GPU functionality is working. docker run --rm -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" \ - gcr.io/gem5-test/gcn-gpu:v22-0 build/${gpu_isa}/gem5.opt \ + gcr.io/gem5-test/gcn-gpu:${tag} build/${gpu_isa}/gem5.opt \ configs/example/apu_se.py --reg-alloc-policy=dynamic -n3 -c square # get HeteroSync @@ -146,7 +135,7 @@ wget -qN http://dist.gem5.org/dist/develop/test-progs/heterosync/gcn3/allSyncPri # atomics are tested. docker run --rm -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" \ - gcr.io/gem5-test/gcn-gpu:v22-0 build/${gpu_isa}/gem5.opt \ + gcr.io/gem5-test/gcn-gpu:${tag} build/${gpu_isa}/gem5.opt \ configs/example/apu_se.py --reg-alloc-policy=dynamic -n3 -c \ allSyncPrims-1kernel --options="sleepMutex 10 16 4" @@ -157,7 +146,7 @@ docker run --rm -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ # atomics are tested. 
docker run --rm -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" \ - gcr.io/gem5-test/gcn-gpu:v22-0 build/${gpu_isa}/gem5.opt \ + gcr.io/gem5-test/gcn-gpu:${tag} build/${gpu_isa}/gem5.opt \ configs/example/apu_se.py --reg-alloc-policy=dynamic -n3 -c \ allSyncPrims-1kernel --options="lfTreeBarrUniq 10 16 4" @@ -168,8 +157,9 @@ build_and_run_SST () { docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --rm --memory="${docker_mem_limit}" \ - gcr.io/gem5-test/sst-env:v22-0 bash -c "\ -scons build/${isa}/libgem5_${variant}.so -j${compile_threads} --without-tcmalloc; \ + gcr.io/gem5-test/sst-env:${tag} bash -c "\ +scons build/${isa}/libgem5_${variant}.so -j${compile_threads} \ +--without-tcmalloc --ignore-style; \ cd ext/sst; \ make clean; make -j ${compile_threads}; \ sst --add-lib-path=./ sst/example.py; \ @@ -182,15 +172,15 @@ build_and_run_systemc () { rm -rf "${gem5_root}/build/ARM" docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" --rm \ - gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-0 bash -c "\ -scons -j${compile_threads} build/ARM/gem5.opt; \ + gcr.io/gem5-test/ubuntu-22.04_all-dependencies:${tag} bash -c "\ +scons -j${compile_threads} --ignore-style build/ARM/gem5.opt; \ scons --with-cxx-config --without-python --without-tcmalloc USE_SYSTEMC=0 \ -j${compile_threads} build/ARM/libgem5_opt.so \ " docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" --rm \ - gcr.io/gem5-test/systemc-env:v22-0 bash -c "\ + gcr.io/gem5-test/systemc-env:${tag} bash -c "\ cd util/systemc/gem5_within_systemc; \ make -j${compile_threads}; \ ../../../build/ARM/gem5.opt ../../../configs/example/se.py -c \ diff --git a/tests/pyunit/__init__.py b/tests/pyunit/__init__.py index 8b13789179..e69de29bb2 100644 --- a/tests/pyunit/__init__.py +++ b/tests/pyunit/__init__.py @@ -1 +0,0 
@@ - diff --git a/tests/pyunit/pyunit_jsonserializable_check.py b/tests/pyunit/pyunit_jsonserializable_check.py new file mode 100644 index 0000000000..8d5d2fa857 --- /dev/null +++ b/tests/pyunit/pyunit_jsonserializable_check.py @@ -0,0 +1,71 @@ +# Copyright (c) 2022 The Regents of The University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest +from m5.ext.pystats.serializable_stat import SerializableStat + + +class MockSerializable(SerializableStat): + def __init__(self): + self.child_1 = MockSerializableChild() + self.child_1.stat1 = 2 + self.child_1.stat2 = "3" + self.child_list = [] + + child_list_1 = MockSerializableChild() + child_list_1.stat1 = "hello" + self.child_list.append(child_list_1) + child_list_2 = MockSerializableChild() + child_list_2.list_stat2 = ["1", 2, "3", 4, 5.2, None] + self.child_list.append(child_list_2) + + +class MockSerializableChild(SerializableStat): + def __init__(self): + pass + + +class JsonSerializableTestSuite(unittest.TestCase): + def test_to_json(self): + obj = MockSerializable() + obj_json = obj.to_json() + self.assertTrue("child_1" in obj_json) + self.assertTrue("stat1" in obj_json["child_1"]) + self.assertEquals(2, obj_json["child_1"]["stat1"]) + self.assertTrue("stat2" in obj_json["child_1"]) + self.assertEquals("3", obj_json["child_1"]["stat2"]) + self.assertTrue("child_list" in obj_json) + self.assertEquals(2, len(obj_json["child_list"])) + self.assertTrue("stat1" in obj_json["child_list"][0]) + self.assertEqual("hello", obj_json["child_list"][0]["stat1"]) + self.assertTrue("list_stat2" in obj_json["child_list"][1]) + self.assertEquals(6, len(obj_json["child_list"][1]["list_stat2"])) + self.assertEquals("1", obj_json["child_list"][1]["list_stat2"][0]) + self.assertEquals(2, obj_json["child_list"][1]["list_stat2"][1]) + self.assertEquals("3", obj_json["child_list"][1]["list_stat2"][2]) + self.assertEquals(4, obj_json["child_list"][1]["list_stat2"][3]) + self.assertEquals(5.2, obj_json["child_list"][1]["list_stat2"][4]) + self.assertEquals(None, obj_json["child_list"][1]["list_stat2"][5]) diff --git a/tests/pyunit/stdlib/resources/pyunit_downloader_checks.py b/tests/pyunit/stdlib/resources/pyunit_downloader_checks.py index 02f9b9b3db..08736bbc32 100644 --- a/tests/pyunit/stdlib/resources/pyunit_downloader_checks.py +++ 
b/tests/pyunit/stdlib/resources/pyunit_downloader_checks.py @@ -29,22 +29,25 @@ import tempfile import os from typing import Dict -from gem5.resources.downloader import( +from gem5.resources.downloader import ( _get_resources_json_at_path, _get_resources_json, _resources_json_version_required, ) -class MD5FileTestSuite(unittest.TestCase): + +class ResourceDownloaderTestSuite(unittest.TestCase): """Test cases for gem5.resources.downloader""" - def create_temp_resources_json(self) -> str: + @classmethod + def setUpClass(cls) -> str: """ This creates a simple resource.json temp file for testing purposes. """ - file_contents = \ - "{" + f"\"version\" : \"{_resources_json_version_required()}\"," \ + file_contents = ( + "{" + + f'"version" : "{_resources_json_version_required()}",' + """ "url_base" : "http://dist.gem5.org/dist/v21-2", "previous-versions" : {}, @@ -78,10 +81,18 @@ class MD5FileTestSuite(unittest.TestCase): ] } """ + ) file = tempfile.NamedTemporaryFile(mode="w", delete=False) file.write(file_contents) file.close() - return file.name + cls.file_path = file.name + + os.environ["GEM5_RESOURCE_JSON"] = cls.file_path + + @classmethod + def tearDownClass(cls) -> None: + os.remove(cls.file_path) + del os.environ["GEM5_RESOURCE_JSON"] def verify_json(self, json: Dict) -> None: """ @@ -94,35 +105,23 @@ class MD5FileTestSuite(unittest.TestCase): self.assertTrue("name" in json["resources"][0]) self.assertEquals("riscv-disk-img", json["resources"][0]["name"]) self.assertTrue("name" in json["resources"][1]) - self.assertEquals("riscv-lupio-busybox-img", - json["resources"][1]["name"]) + self.assertEquals( + "riscv-lupio-busybox-img", json["resources"][1]["name"] + ) def test_get_resources_json_at_path(self) -> None: # Tests the gem5.resources.downloader._get_resources_json_at_path() # function. 
- path = self.create_temp_resources_json() - json = _get_resources_json_at_path(path = path) - + json = _get_resources_json_at_path(path=self.file_path) self.verify_json(json=json) - # Cleanup the temp file - os.remove(path) - def test_get_resources_json(self) -> None: # Tests the gem5.resources.downloader._get_resources_json() function. - path = self.create_temp_resources_json() - - # We set the "GEM5_RESOURCE_JSON" environment variable to allow using - # our test temp resources.json. - os.environ["GEM5_RESOURCE_JSON"] = path json = _get_resources_json() self.verify_json(json=json) - # Cleanup the temp file - os.remove(path) - def test_get_resources_json_invalid_url(self) -> None: # Tests the gem5.resources.downloader._get_resources_json() function in # case where an invalid url is passed as the URL/PATH of the @@ -134,6 +133,9 @@ class MD5FileTestSuite(unittest.TestCase): _get_resources_json() self.assertTrue( - f"Resources location '{path}' is not a valid path or URL." in \ - str(context.exception) - ) \ No newline at end of file + f"Resources location '{path}' is not a valid path or URL." + in str(context.exception) + ) + + # Set back to the old path + os.environ["GEM5_RESOURCE_JSON"] = self.file_path diff --git a/tests/pyunit/stdlib/resources/pyunit_workload_checks.py b/tests/pyunit/stdlib/resources/pyunit_workload_checks.py new file mode 100644 index 0000000000..9620289446 --- /dev/null +++ b/tests/pyunit/stdlib/resources/pyunit_workload_checks.py @@ -0,0 +1,264 @@ +# Copyright (c) 2022 The Regents of the University of California +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest +import tempfile +import os + +from gem5.resources.workload import Workload, CustomWorkload +from gem5.resources.resource import Resource +from gem5.resources.downloader import _resources_json_version_required + +from typing import Dict + + +class CustomWorkloadTestSuite(unittest.TestCase): + """ + Tests the `gem5.resources.workload.CustomWorkload` class. 
+ """ + + @classmethod + def setUpClass(cls) -> None: + file_contents = ( + "{" + + f'"version" : "{_resources_json_version_required()}",' + + """ + "url_base" : "http://dist.gem5.org/dist/v22-0", + "previous-versions" : {}, + "resources": [ + { + "type" : "resource", + "name" : "x86-hello64-static", + "documentation" : "A 'Hello World!' binary.", + "architecture" : "X86", + "is_zipped" : false, + "md5sum" : "dbf120338b37153e3334603970cebd8c", + "url" : "{url_base}/test-progs/hello/bin/x86/linux/hello64-static", + "source" : "src/simple" + } + ] +} + """ + ) + file = tempfile.NamedTemporaryFile(mode="w", delete=False) + file.write(file_contents) + file.close() + + cls.test_json = file.name + os.environ["GEM5_RESOURCE_JSON"] = cls.test_json + + cls.custom_workload = CustomWorkload( + function="set_se_binary_workload", + parameters={ + "binary": Resource("x86-hello64-static"), + "arguments": ["hello", 6], + }, + ) + + @classmethod + def tearDownClass(cls): + # Remove the test json file and unset the environment variable so this + # test does not interfere with others. 
+        os.remove(cls.test_json)
+        del os.environ["GEM5_RESOURCE_JSON"]
+
+    def test_get_function_str(self) -> None:
+        # Tests `CustomResource.get_function_str`
+
+        self.assertEqual(
+            "set_se_binary_workload", self.custom_workload.get_function_str()
+        )
+
+    def test_get_parameters(self) -> None:
+        # Tests `CustomResource.get_parameter`
+
+        parameters = self.custom_workload.get_parameters()
+        self.assertTrue(isinstance(parameters, Dict))
+        self.assertEquals(2, len(parameters))
+
+        self.assertTrue("binary" in parameters)
+        self.assertTrue(isinstance(parameters["binary"], Resource))
+
+        self.assertTrue("arguments" in parameters)
+        self.assertTrue(isinstance(parameters["arguments"], list))
+        self.assertEquals(2, len(parameters["arguments"]))
+        self.assertEquals("hello", parameters["arguments"][0])
+        self.assertEquals(6, parameters["arguments"][1])
+
+    def test_add_parameters(self) -> None:
+        # Tests `CustomResource.set_parameter` for the case where we add a new
+        # parameter value.
+
+        self.custom_workload.set_parameter("test_param", 10)
+
+        self.assertTrue("test_param" in self.custom_workload.get_parameters())
+        self.assertEquals(
+            10, self.custom_workload.get_parameters()["test_param"]
+        )
+
+        # Cleanup
+        del self.custom_workload.get_parameters()["test_param"]
+
+    def test_override_parameter(self) -> None:
+        # Tests `CustomResource.set_parameter` for the case where we override
+        # a parameter's value.
+
+        old_value = self.custom_workload.get_parameters()["binary"]
+
+        self.custom_workload.set_parameter("binary", "test")
+        self.assertTrue("binary" in self.custom_workload.get_parameters())
+        self.assertEquals(
+            "test", self.custom_workload.get_parameters()["binary"]
+        )
+
+        # We set the overridden parameter back to its old value.
+        self.custom_workload.set_parameter("binary", old_value)
+
+
+class WorkloadTestSuite(unittest.TestCase):
+    """
+    Tests the `gem5.resources.workload.Workload` class.
+ """ + + @classmethod + def setUpClass(cls): + # In this constructor we create a json file to load then create a test + # workload. + + file_contents = ( + "{" + + f'"version" : "{_resources_json_version_required()}",' + + """ + "url_base" : "http://dist.gem5.org/dist/v22-0", + "previous-versions" : {}, + "resources": [ + { + "type" : "resource", + "name" : "x86-linux-kernel-5.2.3", + "documentation" : "The linux kernel (v5.2.3), compiled to X86.", + "architecture" : "X86", + "is_zipped" : false, + "md5sum" : "4838c99b77d33c8307b939c16624e4ac", + "url" : "{url_base}/kernels/x86/static/vmlinux-5.2.3", + "source" : "src/linux-kernel" + }, + { + "type" : "resource", + "name" : "x86-ubuntu-18.04-img", + "documentation" : "A disk image containing Ubuntu 18.04 for x86..", + "architecture" : "X86", + "is_zipped" : true, + "md5sum" : "90e363abf0ddf22eefa2c7c5c9391c49", + "url" : "{url_base}/images/x86/ubuntu-18-04/x86-ubuntu.img.gz", + "source" : "src/x86-ubuntu", + "additional_metadata" : { + "root_partition": "1" + } + }, + { + "type" : "workload", + "name" : "simple-boot", + "documentation" : "Description of workload here", + "function" : "set_kernel_disk_workload", + "resources" : { + "kernel" : "x86-linux-kernel-5.2.3", + "disk_image" : "x86-ubuntu-18.04-img" + }, + "additional_params" : { + "readfile_contents" : "echo 'Boot successful'; m5 exit" + } + } + ] +} + """ + ) + file = tempfile.NamedTemporaryFile(mode="w", delete=False) + file.write(file_contents) + file.close() + + cls.test_json = file.name + os.environ["GEM5_RESOURCE_JSON"] = cls.test_json + cls.workload = Workload("simple-boot") + + @classmethod + def tearDownClass(cls): + # Remove the test json file and unset the environment variable so this + # test does not interfere with others. 
+ os.remove(cls.test_json) + os.environ["GEM5_RESOURCE_JSON"] + + def test_get_function_str(self) -> None: + # Tests `Resource.get_function_str` + + self.assertEquals( + "set_kernel_disk_workload", self.workload.get_function_str() + ) + + def test_get_parameters(self) -> None: + # Tests `Resource.get_parameters` + + parameters = self.workload.get_parameters() + + self.assertTrue(isinstance(parameters, Dict)) + self.assertEqual(3, len(parameters)) + + self.assertTrue("kernel" in parameters) + self.assertTrue(isinstance(parameters["kernel"], Resource)) + + self.assertTrue("disk_image" in parameters) + self.assertTrue(isinstance(parameters["disk_image"], Resource)) + + self.assertTrue("readfile_contents" in parameters) + self.assertTrue( + "echo 'Boot successful'; m5 exit", parameters["readfile_contents"] + ) + + def test_add_parameters(self) -> None: + # Tests `Resource.set_parameter` for the case where we add a new + # parameter value. + + self.workload.set_parameter("test_param", 10) + + self.assertTrue("test_param" in self.workload.get_parameters()) + self.assertEquals(10, self.workload.get_parameters()["test_param"]) + + # Cleanup + del self.workload.get_parameters()["test_param"] + + def test_override_parameter(self) -> None: + # Tests `Resource.set_parameter` for the case where we override + # a parameter's value. + + old_value = self.workload.get_parameters()["readfile_contents"] + + self.workload.set_parameter("readfile_contents", "test") + self.assertTrue("readfile_contents" in self.workload.get_parameters()) + self.assertEquals( + "test", self.workload.get_parameters()["readfile_contents"] + ) + + # We set the overridden parameter back to it's old value. 
+ self.workload.set_parameter("readfile_contents", old_value) diff --git a/tests/pyunit/test_run.py b/tests/pyunit/test_run.py index 2accd4cc18..76cd5f70cc 100644 --- a/tests/pyunit/test_run.py +++ b/tests/pyunit/test_run.py @@ -29,20 +29,20 @@ import os from testlib.configuration import constants from gem5.suite import * -''' +""" As the filename begins with `test_`, it will be added to the TestLib testsuite when `../main.py` is run. The purpose of this file is to ensure the pyunit tests are executed as part of a typical TestLib execution. These have been added as part of the "quick" tests and will run with NULL/gem5.* -''' +""" gem5_verify_config( - name="pyunit-tests", - config=os.path.join(os.getcwd(), os.pardir, 'run_pyunit.py'), - verifiers=(), - config_args=[], - valid_isas=(constants.null_tag,), - length = constants.quick_tag, -) \ No newline at end of file + name="pyunit-tests", + config=os.path.join(os.getcwd(), os.pardir, "run_pyunit.py"), + verifiers=(), + config_args=[], + valid_isas=(constants.all_compiled_tag,), + length=constants.quick_tag, +) diff --git a/tests/pyunit/util/__init__.py b/tests/pyunit/util/__init__.py index 8b13789179..e69de29bb2 100644 --- a/tests/pyunit/util/__init__.py +++ b/tests/pyunit/util/__init__.py @@ -1 +0,0 @@ - diff --git a/tests/pyunit/util/pyunit_convert_check.py b/tests/pyunit/util/pyunit_convert_check.py index da618436bf..91b89e64ad 100644 --- a/tests/pyunit/util/pyunit_convert_check.py +++ b/tests/pyunit/util/pyunit_convert_check.py @@ -39,176 +39,177 @@ import unittest from m5.util import convert + def _ip(*args): return (args[0] << 24) | (args[1] << 16) | (args[2] << 8) | args[3] + class ConvertTestSuite(unittest.TestCase): """Test cases for unit conversion""" def test_toMetricFloat(self): def conv(x): - return convert.toMetricFloat(x, 'value', 'X') + return convert.toMetricFloat(x, "value", "X") - self.assertEqual(conv('42'), 42e0) - self.assertEqual(conv('42.5'), 42.5e0) - self.assertEqual(conv('42kX'), 42e3) - 
self.assertEqual(conv('42.5kX'), 42.5e3) - self.assertEqual(conv('42MX'), 42e6) - self.assertEqual(conv('42GX'), 42e9) - self.assertEqual(conv('42TX'), 42e12) - self.assertEqual(conv('42PX'), 42e15) - self.assertEqual(conv('42EX'), 42e18) + self.assertEqual(conv("42"), 42e0) + self.assertEqual(conv("42.5"), 42.5e0) + self.assertEqual(conv("42kX"), 42e3) + self.assertEqual(conv("42.5kX"), 42.5e3) + self.assertEqual(conv("42MX"), 42e6) + self.assertEqual(conv("42GX"), 42e9) + self.assertEqual(conv("42TX"), 42e12) + self.assertEqual(conv("42PX"), 42e15) + self.assertEqual(conv("42EX"), 42e18) - self.assertEqual(conv('42KiX'), 42 * 2**10) - self.assertEqual(conv('42MiX'), 42 * 2**20) - self.assertEqual(conv('42GiX'), 42 * 2**30) - self.assertEqual(conv('42TiX'), 42 * 2**40) - self.assertEqual(conv('42PiX'), 42 * 2**50) - self.assertEqual(conv('42EiX'), 42 * 2**60) + self.assertEqual(conv("42KiX"), 42 * 2**10) + self.assertEqual(conv("42MiX"), 42 * 2**20) + self.assertEqual(conv("42GiX"), 42 * 2**30) + self.assertEqual(conv("42TiX"), 42 * 2**40) + self.assertEqual(conv("42PiX"), 42 * 2**50) + self.assertEqual(conv("42EiX"), 42 * 2**60) - self.assertRaises(ValueError, conv, '42k') - self.assertRaises(ValueError, conv, '42KX') - self.assertRaises(ValueError, conv, '42kiX') + self.assertRaises(ValueError, conv, "42k") + self.assertRaises(ValueError, conv, "42KX") + self.assertRaises(ValueError, conv, "42kiX") - self.assertEqual(convert.toMetricFloat('42'), 42) + self.assertEqual(convert.toMetricFloat("42"), 42) # Prefixes not allowed without a unit - self.assertRaises(ValueError, convert.toMetricFloat, '42k') + self.assertRaises(ValueError, convert.toMetricFloat, "42k") def test_toMetricInteger(self): def conv(x): - return convert.toMetricInteger(x, 'value', 'X') + return convert.toMetricInteger(x, "value", "X") - self.assertEqual(conv('42'), 42 * 10**0) - self.assertEqual(conv('42kX'), 42 * 10**3) - self.assertEqual(conv('42MX'), 42 * 10**6) - 
self.assertEqual(conv('42GX'), 42 * 10**9) - self.assertEqual(conv('42TX'), 42 * 10**12) - self.assertEqual(conv('42PX'), 42 * 10**15) - self.assertEqual(conv('42EX'), 42 * 10**18) + self.assertEqual(conv("42"), 42 * 10**0) + self.assertEqual(conv("42kX"), 42 * 10**3) + self.assertEqual(conv("42MX"), 42 * 10**6) + self.assertEqual(conv("42GX"), 42 * 10**9) + self.assertEqual(conv("42TX"), 42 * 10**12) + self.assertEqual(conv("42PX"), 42 * 10**15) + self.assertEqual(conv("42EX"), 42 * 10**18) - self.assertEqual(conv('42KiX'), 42 * 2**10) - self.assertEqual(conv('42MiX'), 42 * 2**20) - self.assertEqual(conv('42GiX'), 42 * 2**30) - self.assertEqual(conv('42TiX'), 42 * 2**40) - self.assertEqual(conv('42PiX'), 42 * 2**50) - self.assertEqual(conv('42EiX'), 42 * 2**60) + self.assertEqual(conv("42KiX"), 42 * 2**10) + self.assertEqual(conv("42MiX"), 42 * 2**20) + self.assertEqual(conv("42GiX"), 42 * 2**30) + self.assertEqual(conv("42TiX"), 42 * 2**40) + self.assertEqual(conv("42PiX"), 42 * 2**50) + self.assertEqual(conv("42EiX"), 42 * 2**60) - self.assertRaises(ValueError, conv, '42.1') - self.assertRaises(ValueError, conv, '42.1kX') + self.assertRaises(ValueError, conv, "42.1") + self.assertRaises(ValueError, conv, "42.1kX") - self.assertRaises(ValueError, conv, '42k') - self.assertRaises(ValueError, conv, '42KX') - self.assertRaises(ValueError, conv, '42kiX') + self.assertRaises(ValueError, conv, "42k") + self.assertRaises(ValueError, conv, "42KX") + self.assertRaises(ValueError, conv, "42kiX") - self.assertEqual(convert.toMetricInteger('42'), 42) + self.assertEqual(convert.toMetricInteger("42"), 42) # Prefixes not allowed without a unit - self.assertRaises(ValueError, convert.toMetricInteger, '42k') + self.assertRaises(ValueError, convert.toMetricInteger, "42k") def test_toBool(self): conv = convert.toBool - self.assertEqual(conv('TRUE'), True) - self.assertEqual(conv('true'), True) - self.assertEqual(conv('t'), True) - self.assertEqual(conv('yes'), True) - 
self.assertEqual(conv('y'), True) - self.assertEqual(conv('1'), True) + self.assertEqual(conv("TRUE"), True) + self.assertEqual(conv("true"), True) + self.assertEqual(conv("t"), True) + self.assertEqual(conv("yes"), True) + self.assertEqual(conv("y"), True) + self.assertEqual(conv("1"), True) - self.assertEqual(conv('FALSE'), False) - self.assertEqual(conv('false'), False) - self.assertEqual(conv('f'), False) - self.assertEqual(conv('no'), False) - self.assertEqual(conv('n'), False) - self.assertEqual(conv('0'), False) + self.assertEqual(conv("FALSE"), False) + self.assertEqual(conv("false"), False) + self.assertEqual(conv("f"), False) + self.assertEqual(conv("no"), False) + self.assertEqual(conv("n"), False) + self.assertEqual(conv("0"), False) - self.assertRaises(ValueError, conv, 'not a bool') - self.assertRaises(ValueError, conv, '2') + self.assertRaises(ValueError, conv, "not a bool") + self.assertRaises(ValueError, conv, "2") def test_toFrequency(self): conv = convert.toFrequency - self.assertEqual(conv('42'), 42.0) - self.assertEqual(conv('42Hz'), 42) - self.assertEqual(conv('42kHz'), 42e3) + self.assertEqual(conv("42"), 42.0) + self.assertEqual(conv("42Hz"), 42) + self.assertEqual(conv("42kHz"), 42e3) # Prefixes need a unit - self.assertRaises(ValueError, conv, '42k') + self.assertRaises(ValueError, conv, "42k") # Seconds isn't a valid unit unless using anyToFrequency. - self.assertRaises(ValueError, conv, '42s') + self.assertRaises(ValueError, conv, "42s") def test_toLatency(self): conv = convert.toLatency - self.assertEqual(conv('42'), 42.0) - self.assertEqual(conv('42s'), 42.0) + self.assertEqual(conv("42"), 42.0) + self.assertEqual(conv("42s"), 42.0) # We allow prefixes for seconds. 
- self.assertEqual(conv('42ks'), 42e3) + self.assertEqual(conv("42ks"), 42e3) # Prefixe need a unit - self.assertRaises(ValueError, conv, '42k') + self.assertRaises(ValueError, conv, "42k") # Hz shouldn't be converted unless using anyToLatency - self.assertRaises(ValueError, conv, '42Hz') + self.assertRaises(ValueError, conv, "42Hz") def test_anyToLatency(self): conv = convert.anyToLatency - self.assertEqual(conv('42s'), 42.0) + self.assertEqual(conv("42s"), 42.0) # We currently allow prefixes for seconds. - self.assertEqual(conv('42ks'), 42e3) + self.assertEqual(conv("42ks"), 42e3) - self.assertEqual(conv('10Hz'), 0.1) - self.assertEqual(conv('1kHz'), 1e-3) + self.assertEqual(conv("10Hz"), 0.1) + self.assertEqual(conv("1kHz"), 1e-3) - self.assertRaises(ValueError, conv, '42k') - self.assertRaises(ValueError, conv, '42') + self.assertRaises(ValueError, conv, "42k") + self.assertRaises(ValueError, conv, "42") def test_anyToFrequency(self): conv = convert.anyToFrequency - self.assertEqual(conv('42kHz'), 42e3) + self.assertEqual(conv("42kHz"), 42e3) - self.assertEqual(conv('0.1s'), 10.0) - self.assertEqual(conv('1ms'), 1000.0) + self.assertEqual(conv("0.1s"), 10.0) + self.assertEqual(conv("1ms"), 1000.0) - self.assertRaises(ValueError, conv, '42k') - self.assertRaises(ValueError, conv, '42') + self.assertRaises(ValueError, conv, "42k") + self.assertRaises(ValueError, conv, "42") def test_toNetworkBandwidth(self): conv = convert.toNetworkBandwidth - self.assertEqual(conv('42'), 42.0) - self.assertEqual(conv('42bps'), 42.0) - self.assertEqual(conv('42kbps'), 42e3) + self.assertEqual(conv("42"), 42.0) + self.assertEqual(conv("42bps"), 42.0) + self.assertEqual(conv("42kbps"), 42e3) - self.assertRaises(ValueError, conv, '42Kbps') + self.assertRaises(ValueError, conv, "42Kbps") def test_toMemoryBandwidth(self): conv = convert.toMemoryBandwidth - self.assertEqual(conv('42'), 42.0) - self.assertEqual(conv('42B/s'), 42.0) + self.assertEqual(conv("42"), 42.0) + 
self.assertEqual(conv("42B/s"), 42.0) - self.assertEqual(conv('42MB/s'), 42 * 2 ** 20) - self.assertEqual(conv('42MiB/s'), 42 * 2 ** 20) + self.assertEqual(conv("42MB/s"), 42 * 2**20) + self.assertEqual(conv("42MiB/s"), 42 * 2**20) - self.assertRaises(ValueError, conv, '42KB/s') - self.assertRaises(ValueError, conv, '42Mi') + self.assertRaises(ValueError, conv, "42KB/s") + self.assertRaises(ValueError, conv, "42Mi") def test_toMemorySize(self): conv = convert.toMemorySize - self.assertEqual(conv('42'), 42.0) - self.assertEqual(conv('42B'), 42.0) + self.assertEqual(conv("42"), 42.0) + self.assertEqual(conv("42B"), 42.0) - self.assertEqual(conv('42kB'), 42 * 2**10) - self.assertEqual(conv('42MB'), 42 * 2**20) - - self.assertEqual(conv('42KiB'), 42 * 2**10) - self.assertEqual(conv('42MiB'), 42 * 2**20) + self.assertEqual(conv("42kB"), 42 * 2**10) + self.assertEqual(conv("42MB"), 42 * 2**20) + self.assertEqual(conv("42KiB"), 42 * 2**10) + self.assertEqual(conv("42MiB"), 42 * 2**20) def test_toIpAddress(self): conv = convert.toIpAddress @@ -225,12 +226,10 @@ class ConvertTestSuite(unittest.TestCase): conv = convert.toIpNetmask self.assertEqual(conv("1.2.3.4/24"), (_ip(1, 2, 3, 4), 24)) - self.assertEqual(conv("1.2.3.4/255.255.255.0"), - (_ip(1, 2, 3, 4), 24)) + self.assertEqual(conv("1.2.3.4/255.255.255.0"), (_ip(1, 2, 3, 4), 24)) self.assertEqual(conv("1.2.3.4/0"), (_ip(1, 2, 3, 4), 0)) - self.assertEqual(conv("1.2.3.4/0.0.0.0"), - (_ip(1, 2, 3, 4), 0)) + self.assertEqual(conv("1.2.3.4/0.0.0.0"), (_ip(1, 2, 3, 4), 0)) self.assertRaises(ValueError, conv, "0.0.0.0") self.assertRaises(ValueError, conv, "0.0.0.0/") @@ -248,23 +247,23 @@ class ConvertTestSuite(unittest.TestCase): def test_toVoltage(self): conv = convert.toVoltage - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('42V'), 42) - self.assertEqual(conv('42kV'), 42e3) + self.assertEqual(conv("42"), 42) + self.assertEqual(conv("42V"), 42) + self.assertEqual(conv("42kV"), 42e3) def test_toCurrent(self): 
conv = convert.toCurrent - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('42A'), 42) - self.assertEqual(conv('42kA'), 42e3) + self.assertEqual(conv("42"), 42) + self.assertEqual(conv("42A"), 42) + self.assertEqual(conv("42kA"), 42e3) def test_toEnergy(self): conv = convert.toEnergy - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('42J'), 42) - self.assertEqual(conv('42kJ'), 42e3) + self.assertEqual(conv("42"), 42) + self.assertEqual(conv("42J"), 42) + self.assertEqual(conv("42kJ"), 42e3) def test_temperature(self): conv = convert.toTemperature diff --git a/tests/run.py b/tests/run.py index c3360acbd4..e24d4b3bdd 100644 --- a/tests/run.py +++ b/tests/run.py @@ -47,6 +47,7 @@ import os import m5 + def skip_test(reason=""): """Signal that a test should be skipped and optionally print why. @@ -58,6 +59,7 @@ def skip_test(reason=""): print("Skipping test: %s" % reason) sys.exit(2) + def has_sim_object(name): """Test if a SimObject exists in the simulator. @@ -73,6 +75,7 @@ def has_sim_object(name): except AttributeError: return False + def require_sim_object(name, fatal=False): """Test if a SimObject exists and abort/skip test if not. @@ -121,6 +124,7 @@ def require_file(path, fatal=False, mode=os.F_OK): else: skip_test(msg) + def require_kvm(kvm_dev="/dev/kvm", fatal=False): """Test if KVM is available. @@ -133,6 +137,7 @@ def require_kvm(kvm_dev="/dev/kvm", fatal=False): require_sim_object("BaseKvmCPU", fatal=fatal) require_file(kvm_dev, fatal=fatal, mode=os.R_OK | os.W_OK) + def run_test(root): """Default run_test implementations. 
Scripts can override it.""" @@ -141,49 +146,50 @@ def run_test(root): # simulate until program terminates exit_event = m5.simulate(maxtick) - print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()) + print("Exiting @ tick", m5.curTick(), "because", exit_event.getCause()) + # Since we're in batch mode, dont allow tcp socket connections m5.disableAllListeners() # single "path" arg encodes everything we need to know about test -(category, mode, name, isa, opsys, config) = sys.argv[1].split('/')[-6:] +(category, mode, name, isa, opsys, config) = sys.argv[1].split("/")[-6:] # find path to directory containing this file tests_root = os.path.dirname(__file__) -test_progs = os.environ.get('M5_TEST_PROGS', '/dist/m5/regression/test-progs') +test_progs = os.environ.get("M5_TEST_PROGS", "/dist/m5/regression/test-progs") if not os.path.isdir(test_progs): - test_progs = joinpath(tests_root, 'test-progs') + test_progs = joinpath(tests_root, "test-progs") # generate path to binary file def binpath(app, file=None): # executable has same name as app unless specified otherwise if not file: file = app - return joinpath(test_progs, app, 'bin', isa, opsys, file) + return joinpath(test_progs, app, "bin", isa, opsys, file) + # generate path to input file def inputpath(app, file=None): # input file has same name as app unless specified otherwise if not file: file = app - return joinpath(test_progs, app, 'input', file) + return joinpath(test_progs, app, "input", file) + def srcpath(path): """Path to file in gem5's source tree""" return joinpath(os.path.dirname(__file__), "..", path) + def run_config(config, argv=None): """Execute a configuration script that is external to the test system""" src_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) abs_path = joinpath(src_root, config) - code = compile(open(abs_path, 'r').read(), abs_path, 'exec') - scope = { - '__file__' : config, - '__name__' : '__m5_main__', - } + code = compile(open(abs_path, 
"r").read(), abs_path, "exec") + scope = {"__file__": config, "__name__": "__m5_main__"} # Set the working directory in case we are executing from # outside gem5's source tree @@ -191,23 +197,28 @@ def run_config(config, argv=None): # gem5 normally adds the script's directory to the path to make # script-relative imports work. - sys.path = [ os.path.dirname(abs_path), ] + sys.path + sys.path = [os.path.dirname(abs_path)] + sys.path if argv is None: - sys.argv = [ config, ] + sys.argv = [config] else: sys.argv = argv exec(code, scope) + # build configuration -sys.path.append(joinpath(tests_root, 'configs')) +sys.path.append(joinpath(tests_root, "configs")) test_filename = config # for ruby configurations, remove the protocol name from the test filename -if re.search('-ruby', test_filename): - test_filename = test_filename.split('-ruby')[0]+'-ruby' -exec(compile( \ - open(joinpath(tests_root, 'configs', test_filename + '.py')).read(), \ - joinpath(tests_root, 'configs', test_filename + '.py'), 'exec')) +if re.search("-ruby", test_filename): + test_filename = test_filename.split("-ruby")[0] + "-ruby" +exec( + compile( + open(joinpath(tests_root, "configs", test_filename + ".py")).read(), + joinpath(tests_root, "configs", test_filename + ".py"), + "exec", + ) +) # set default maxtick... script can override # -1 means run forever @@ -215,9 +226,13 @@ maxtick = m5.MaxTick # tweak configuration for specific test sys.path.append(joinpath(tests_root, category, mode, name)) -exec(compile( \ - open(joinpath(tests_root, category, mode, name, 'test.py')).read(), \ - joinpath(tests_root, category, mode, name, 'test.py'), 'exec')) +exec( + compile( + open(joinpath(tests_root, category, mode, name, "test.py")).read(), + joinpath(tests_root, category, mode, name, "test.py"), + "exec", + ) +) # Initialize all CPUs in a system def initCPUs(sys): @@ -236,13 +251,14 @@ def initCPUs(sys): # The CPU can either be a list of CPUs or a single object. 
if isinstance(sys.cpu, list): - [ initCPU(cpu) for cpu in sys.cpu ] + [initCPU(cpu) for cpu in sys.cpu] else: initCPU(sys.cpu) + # We might be creating a single system or a dual system. Try # initializing the CPUs in all known system attributes. -for sysattr in [ "system", "testsys", "drivesys" ]: +for sysattr in ["system", "testsys", "drivesys"]: if hasattr(root, sysattr): initCPUs(getattr(root, sysattr)) diff --git a/tests/run_pyunit.py b/tests/run_pyunit.py index 8bc4f17468..6d8a5201b9 100644 --- a/tests/run_pyunit.py +++ b/tests/run_pyunit.py @@ -45,7 +45,7 @@ if __name__ == "__m5_main__": import unittest loader = unittest.TestLoader() - tests = loader.discover("pyunit", pattern='pyunit*.py') + tests = loader.discover("pyunit", pattern="pyunit*.py") runner = unittest.runner.TextTestRunner(verbosity=2) result = runner.run(tests) @@ -53,4 +53,3 @@ if __name__ == "__m5_main__": sys.exit(0) else: sys.exit(1) - diff --git a/tests/test-progs/hello/src/hello.c b/tests/test-progs/hello/src/hello.c index 9bf4ed5174..866e5622eb 100644 --- a/tests/test-progs/hello/src/hello.c +++ b/tests/test-progs/hello/src/hello.c @@ -33,4 +33,3 @@ int main(int argc, char* argv[]) printf("Hello world!\n"); return 0; } - diff --git a/tests/weekly.sh b/tests/weekly.sh index 838ccd4d45..9b400b9a83 100755 --- a/tests/weekly.sh +++ b/tests/weekly.sh @@ -35,6 +35,10 @@ gem5_root="${dir}/.." # The per-container Docker memory limit. docker_mem_limit="24g" +# The docker tag to use (varies between develop, and versions on the staging +# branch) +tag="v22-1" + # We assume the first two arguments are the number of threads followed by the # GPU ISA to test. These default to 1 and GCN3_X86 is no argument is given. threads=1 @@ -59,7 +63,7 @@ fi # Run the gem5 very-long tests. 
docker run -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}"/tests --memory="${docker_mem_limit}" --rm \ - gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-0 \ + gcr.io/gem5-test/ubuntu-22.04_all-dependencies:${tag} \ ./main.py run --length very-long -j${threads} -t${threads} -vv mkdir -p tests/testing-results @@ -68,7 +72,7 @@ mkdir -p tests/testing-results # before pulling gem5 resources, make sure it doesn't exist already docker run --rm --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" \ - gcr.io/gem5-test/gcn-gpu:v22-0 bash -c \ + gcr.io/gem5-test/gcn-gpu:${tag} bash -c \ "rm -rf ${gem5_root}/gem5-resources" # delete Pannotia datasets and output files in case a failed regression run left # them around @@ -100,12 +104,12 @@ git clone https://gem5.googlesource.com/public/gem5-resources \ cd "${gem5_root}/gem5-resources" git checkout develop -version_tag=$(git tag | grep "v22.0") - -if [[ ${version_tag} != "" ]]; then - git checkout "${version_tag}" -fi - +#version_tag=$(git tag | grep "v21.2") +# +#if [[ ${version_tag} != "" ]]; then +# git checkout "${version_tag}" +#fi +# cd "${gem5_root}" # For the GPU tests we compile and run the GPU ISA inside a gcn-gpu container. 
@@ -113,13 +117,14 @@ cd "${gem5_root}" # avoid needing to set all of these, we instead build a docker for it, which # has all these variables pre-set in its Dockerfile # To avoid compiling gem5 multiple times, all GPU benchmarks will use this -docker pull gcr.io/gem5-test/gcn-gpu:v22-0 +docker pull gcr.io/gem5-test/gcn-gpu:${tag} docker build -t hacc-test-weekly ${gem5_root}/gem5-resources/src/gpu/halo-finder docker run --rm -u $UID:$GID --volume "${gem5_root}":"${gem5_root}" -w \ "${gem5_root}" --memory="${docker_mem_limit}" hacc-test-weekly bash -c \ - "scons build/${gpu_isa}/gem5.opt -j${threads} \ - || rm -rf build && scons build/${gpu_isa}/gem5.opt -j${threads}" + "scons build/${gpu_isa}/gem5.opt -j${threads} --ignore-style \ + || rm -rf build && scons build/${gpu_isa}/gem5.opt -j${threads} \ + --ignore-style" # Some of the apps we test use m5ops (and x86), so compile them for x86 # Note: setting TERM in the environment is necessary as scons fails for m5ops if @@ -240,7 +245,7 @@ docker run --rm -v ${PWD}:${PWD} \ "export GEM5_PATH=${gem5_root} ; make gem5-fusion" # # get input dataset for BC test -wget http://dist.gem5.org/dist/v22-0/datasets/pannotia/bc/1k_128k.gr +wget http://dist.gem5.org/dist/develop/datasets/pannotia/bc/1k_128k.gr # run BC docker run --rm -v ${gem5_root}:${gem5_root} -w ${gem5_root} -u $UID:$GID \ --memory="${docker_mem_limit}" hacc-test-weekly \ @@ -317,7 +322,7 @@ docker run --rm -v ${gem5_root}:${gem5_root} -w \ "export GEM5_PATH=${gem5_root} ; make gem5-fusion" # get PageRank input dataset -wget http://dist.gem5.org/dist/v22-0/datasets/pannotia/pagerank/coAuthorsDBLP.graph +wget http://dist.gem5.org/dist/develop/datasets/pannotia/pagerank/coAuthorsDBLP.graph # run PageRank (Default) docker run --rm -v ${gem5_root}:${gem5_root} -w ${gem5_root} -u $UID:$GID \ --memory="${docker_mem_limit}" hacc-test-weekly \ diff --git a/util/checkpoint-tester.py b/util/checkpoint-tester.py index 58174693c6..6bc636ac18 100755 --- 
a/util/checkpoint-tester.py +++ b/util/checkpoint-tester.py @@ -70,39 +70,39 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument('-i', '--interval', type=int) -parser.add_argument('-d', '--directory', default='checkpoint-test') -parser.add_argument('cmdline', nargs='+', help='gem5 command line') +parser.add_argument("-i", "--interval", type=int) +parser.add_argument("-d", "--directory", default="checkpoint-test") +parser.add_argument("cmdline", nargs="+", help="gem5 command line") args = parser.parse_args() interval = args.interval if os.path.exists(args.directory): - print('Error: test directory', args.directory, 'exists') - print(' Tester needs to create directory from scratch') + print("Error: test directory", args.directory, "exists") + print(" Tester needs to create directory from scratch") sys.exit(1) top_dir = args.directory os.mkdir(top_dir) -cmd_echo = open(os.path.join(top_dir, 'command'), 'w') -print(' '.join(sys.argv), file=cmd_echo) +cmd_echo = open(os.path.join(top_dir, "command"), "w") +print(" ".join(sys.argv), file=cmd_echo) cmd_echo.close() m5_binary = args.cmdline[0] args = args.cmdline[1:] -checkpoint_args = ['--take-checkpoints', '%d,%d' % (interval, interval)] +checkpoint_args = ["--take-checkpoints", "%d,%d" % (interval, interval)] -cptdir = os.path.join(top_dir, 'm5out') +cptdir = os.path.join(top_dir, "m5out") -print('===> Running initial simulation.') -subprocess.call([m5_binary] + ['-red', cptdir] + args + checkpoint_args) +print("===> Running initial simulation.") +subprocess.call([m5_binary] + ["-red", cptdir] + args + checkpoint_args) dirs = os.listdir(cptdir) -expr = re.compile('cpt\.([0-9]*)') +expr = re.compile("cpt\.([0-9]*)") cpts = [] for dir in dirs: match = expr.match(dir) @@ -116,22 +116,39 @@ cpts.sort() # original checkpoint N+1. Thus the number of tests we can run is one # less than tha number of checkpoints. for i in range(1, len(cpts)): - print('===> Running test %d of %d.' 
% (i, len(cpts)-1)) - checkpoint_args = ['--take-checkpoints', '%d,%d' % (cpts[i], interval)] - mydir = os.path.join(top_dir, 'test.%d' % i) - subprocess.call([m5_binary] + ['-red', mydir] + args + checkpoint_args + - ['--max-checkpoints' , '1', '--checkpoint-dir', cptdir, - '--checkpoint-restore', str(i)]) - cpt_name = 'cpt.%d' % cpts[i] - diff_name = os.path.join(mydir, 'diffout') - diffout = open(diff_name, 'w') - subprocess.call(['diff', '-ru', '-I', '^##.*', - '%s/%s' % (cptdir, cpt_name), - '%s/%s' % (mydir, cpt_name)], stdout=diffout) + print("===> Running test %d of %d." % (i, len(cpts) - 1)) + checkpoint_args = ["--take-checkpoints", "%d,%d" % (cpts[i], interval)] + mydir = os.path.join(top_dir, "test.%d" % i) + subprocess.call( + [m5_binary] + + ["-red", mydir] + + args + + checkpoint_args + + [ + "--max-checkpoints", + "1", + "--checkpoint-dir", + cptdir, + "--checkpoint-restore", + str(i), + ] + ) + cpt_name = "cpt.%d" % cpts[i] + diff_name = os.path.join(mydir, "diffout") + diffout = open(diff_name, "w") + subprocess.call( + [ + "diff", + "-ru", + "-I", + "^##.*", + "%s/%s" % (cptdir, cpt_name), + "%s/%s" % (mydir, cpt_name), + ], + stdout=diffout, + ) diffout.close() # print out the diff diffout = open(diff_name) - print(diffout.read(), end=' ') + print(diffout.read(), end=" ") diffout.close() - - diff --git a/util/checkpoint_aggregator.py b/util/checkpoint_aggregator.py index 60f015d70b..86892c87b1 100755 --- a/util/checkpoint_aggregator.py +++ b/util/checkpoint_aggregator.py @@ -31,6 +31,7 @@ import gzip import sys, re, os + class myCP(ConfigParser): def __init__(self): ConfigParser.__init__(self) @@ -38,6 +39,7 @@ class myCP(ConfigParser): def optionxform(self, optionstr): return optionstr + def aggregate(output_dir, cpts, no_compress, memory_size): merged_config = None page_ptr = 0 @@ -50,10 +52,10 @@ def aggregate(output_dir, cpts, no_compress, memory_size): agg_config_file = open(output_path + "/m5.cpt", "wb+") if not no_compress: - merged_mem = 
gzip.GzipFile(fileobj= agg_mem_file, mode="wb") + merged_mem = gzip.GzipFile(fileobj=agg_mem_file, mode="wb") max_curtick = 0 - num_digits = len(str(len(cpts)-1)) + num_digits = len(str(len(cpts) - 1)) for (i, arg) in enumerate(cpts): print(arg) @@ -69,7 +71,9 @@ def aggregate(output_dir, cpts, no_compress, memory_size): items = config.items(sec) for item in items: if item[0] == "paddr": - merged_config.set(newsec, item[0], int(item[1]) + (page_ptr << 12)) + merged_config.set( + newsec, item[0], int(item[1]) + (page_ptr << 12) + ) continue merged_config.set(newsec, item[0], item[1]) @@ -83,12 +87,12 @@ def aggregate(output_dir, cpts, no_compress, memory_size): if tick > max_curtick: max_curtick = tick else: - if i == len(cpts)-1: + if i == len(cpts) - 1: merged_config.add_section(sec) for item in config.items(sec): merged_config.set(sec, item[0], item[1]) - if i != len(cpts)-1: + if i != len(cpts) - 1: merged_config.write(agg_config_file) ### memory stuff @@ -126,9 +130,13 @@ def aggregate(output_dir, cpts, no_compress, memory_size): page_ptr += 1 print("WARNING: ") - print("Make sure the simulation using this checkpoint has at least ", end=' ') + print( + "Make sure the simulation using this checkpoint has at least ", end=" " + ) print(page_ptr, "x 4K of memory") - merged_config.set("system.physmem.store0", "range_size", page_ptr * 4 * 1024) + merged_config.set( + "system.physmem.store0", "range_size", page_ptr * 4 * 1024 + ) merged_config.add_section("Globals") merged_config.set("Globals", "curTick", max_curtick) @@ -141,14 +149,19 @@ def aggregate(output_dir, cpts, no_compress, memory_size): else: agg_mem_file.close() + if __name__ == "__main__": from argparse import ArgumentParser - parser = ArgumentParser(usage="%(prog)s [options] ") - parser.add_argument("-o", "--output-dir", action="store", - help="Output directory") + + parser = ArgumentParser( + usage="%(prog)s [options] " + ) + parser.add_argument( + "-o", "--output-dir", action="store", help="Output 
directory" + ) parser.add_argument("-c", "--no-compress", action="store_true") - parser.add_argument("--cpts", nargs='+') + parser.add_argument("--cpts", nargs="+") parser.add_argument("--memory-size", action="store", type=int) # Assume x86 ISA. Any other ISAs would need extra stuff in this script @@ -156,8 +169,14 @@ if __name__ == "__main__": options = parser.parse_args() print(options.cpts, len(options.cpts)) if len(options.cpts) <= 1: - parser.error("You must specify atleast two checkpoint files that "\ - "need to be combined.") + parser.error( + "You must specify atleast two checkpoint files that " + "need to be combined." + ) - aggregate(options.output_dir, options.cpts, options.no_compress, - options.memory_size) + aggregate( + options.output_dir, + options.cpts, + options.no_compress, + options.memory_size, + ) diff --git a/util/checktrace.sh b/util/checktrace.sh index a63cdd4b6b..b3f1aeaa2f 100755 --- a/util/checktrace.sh +++ b/util/checktrace.sh @@ -29,7 +29,7 @@ do bad=`tethereal -r $trace -q -z "io,stat,100,tcp.analysis.retransmission||tcp.analysis.fast_retransmission||tcp.analysis.out_of_order||tcp.analysis.lost_segment||tcp.analysis.ack_lost_segment||tcp.analysis.window_full||tcp.analysis.duplicate_ack||tcp.analysis.duplicate_ack_num||tcp.analysis.duplicate_ack_frame" | grep 000.000 | awk '{print $2}'` name=`dirname $trace` - if [ "$bad" != "0" ] + if [ "$bad" != "0" ] then echo "Run $name had problems." 
fi diff --git a/util/cpt_upgrader.py b/util/cpt_upgrader.py index abbaba211d..06f98d8a74 100755 --- a/util/cpt_upgrader.py +++ b/util/cpt_upgrader.py @@ -75,25 +75,28 @@ import os.path as osp verbose_print = False + def verboseprint(*args): if not verbose_print: return for arg in args: - print(arg, end=' ') + print(arg, end=" ") print("\n") + class Upgrader: tag_set = set() - untag_set = set() # tags to remove by downgrading + untag_set = set() # tags to remove by downgrading by_tag = {} legacy = {} + def __init__(self, filename): self.filename = filename exec(open(filename).read(), {}, self.__dict__) - if not hasattr(self, 'tag'): + if not hasattr(self, "tag"): self.tag = osp.basename(filename)[:-3] - if not hasattr(self, 'depends'): + if not hasattr(self, "depends"): self.depends = [] elif isinstance(self.depends, str): self.depends = [self.depends] @@ -102,35 +105,47 @@ class Upgrader: print("Error: 'depends' for {} is the wrong type".format(self.tag)) sys.exit(1) - if hasattr(self, 'fwd_depends'): + if hasattr(self, "fwd_depends"): if isinstance(self.fwd_depends, str): self.fwd_depends = [self.fwd_depends] else: self.fwd_depends = [] if not isinstance(self.fwd_depends, list): - print("Error: 'fwd_depends' for {} is the wrong type".format( - self.tag)) + print( + "Error: 'fwd_depends' for {} is the wrong type".format( + self.tag + ) + ) sys.exit(1) - if hasattr(self, 'upgrader'): + if hasattr(self, "upgrader"): if not isinstance(self.upgrader, types.FunctionType): - print("Error: 'upgrader' for {} is {}, not function".format( - self.tag, type(self))) + print( + "Error: 'upgrader' for {} is {}, not function".format( + self.tag, type(self) + ) + ) sys.exit(1) Upgrader.tag_set.add(self.tag) - elif hasattr(self, 'downgrader'): + elif hasattr(self, "downgrader"): if not isinstance(self.downgrader, types.FunctionType): - print("Error: 'downgrader' for {} is {}, not function".format( - self.tag, type(self))) + print( + "Error: 'downgrader' for {} is {}, not 
function".format( + self.tag, type(self) + ) + ) sys.exit(1) Upgrader.untag_set.add(self.tag) else: - print("Error: no upgrader or downgrader method for {}".format( - self.tag)) + print( + "Error: no upgrader or downgrader method for {}".format( + self.tag + ) + ) sys.exit(1) - if hasattr(self, 'legacy_version'): + if hasattr(self, "legacy_version"): Upgrader.legacy[self.legacy_version] = self Upgrader.by_tag[self.tag] = self @@ -142,7 +157,7 @@ class Upgrader: return True def update(self, cpt, tags): - if hasattr(self, 'upgrader'): + if hasattr(self, "upgrader"): self.upgrader(cpt) tags.add(self.tag) verboseprint("applied upgrade for", self.tag) @@ -159,39 +174,46 @@ class Upgrader: def load_all(): util_dir = osp.dirname(osp.abspath(__file__)) - for py in glob.glob(util_dir + '/cpt_upgraders/*.py'): + for py in glob.glob(util_dir + "/cpt_upgraders/*.py"): Upgrader(py) # make linear dependences for legacy versions i = 3 while i in Upgrader.legacy: - Upgrader.legacy[i].depends = [Upgrader.legacy[i-1].tag] + Upgrader.legacy[i].depends = [Upgrader.legacy[i - 1].tag] i = i + 1 # resolve forward dependencies and audit normal dependencies for tag, upg in list(Upgrader.by_tag.items()): for fd in upg.fwd_depends: if fd not in Upgrader.by_tag: - print("Error: '{}' cannot (forward) depend on " - "nonexistent tag '{}'".format(fd, tag)) + print( + "Error: '{}' cannot (forward) depend on " + "nonexistent tag '{}'".format(fd, tag) + ) sys.exit(1) Upgrader.by_tag[fd].depends.append(tag) for dep in upg.depends: if dep not in Upgrader.by_tag: - print("Error: '{}' cannot depend on " - "nonexistent tag '{}'".format(tag, dep)) + print( + "Error: '{}' cannot depend on " + "nonexistent tag '{}'".format(tag, dep) + ) sys.exit(1) + def process_file(path, **kwargs): if not osp.isfile(path): import errno + raise IOError(errno.ENOENT, "No such file", path) verboseprint("Processing file %s...." 
% path) - if kwargs.get('backup', True): + if kwargs.get("backup", True): import shutil - shutil.copyfile(path, path + '.bak') + + shutil.copyfile(path, path + ".bak") cpt = configparser.ConfigParser() @@ -199,51 +221,54 @@ def process_file(path, **kwargs): cpt.optionxform = str # Read the current data - cpt_file = open(path, 'r') + cpt_file = open(path, "r") cpt.read_file(cpt_file) cpt_file.close() change = False # Make sure we know what we're starting from - if cpt.has_option('root','cpt_ver'): - cpt_ver = cpt.getint('root','cpt_ver') + if cpt.has_option("root", "cpt_ver"): + cpt_ver = cpt.getint("root", "cpt_ver") # Legacy linear checkpoint version # convert to list of tags before proceeding tags = set([]) - for i in range(2, cpt_ver+1): + for i in range(2, cpt_ver + 1): tags.add(Upgrader.legacy[i].tag) verboseprint("performed legacy version -> tags conversion") change = True - cpt.remove_option('root', 'cpt_ver') + cpt.remove_option("root", "cpt_ver") # @todo The 'Globals' option is deprecated, and should be removed in the # future - elif cpt.has_option('Globals','version_tags'): - tags = set((''.join(cpt.get('Globals','version_tags'))).split()) - elif cpt.has_option('root.globals','version_tags'): - tags = set((''.join(cpt.get('root.globals','version_tags'))).split()) + elif cpt.has_option("Globals", "version_tags"): + tags = set(("".join(cpt.get("Globals", "version_tags"))).split()) + elif cpt.has_option("root.globals", "version_tags"): + tags = set(("".join(cpt.get("root.globals", "version_tags"))).split()) else: print("fatal: no version information in checkpoint") exit(1) - verboseprint("has tags", ' '.join(tags)) + verboseprint("has tags", " ".join(tags)) # If the current checkpoint has a tag we don't know about, we have # a divergence that (in general) must be addressed by (e.g.) merging # simulator support for its changes. 
unknown_tags = tags - (Upgrader.tag_set | Upgrader.untag_set) if unknown_tags: - print("warning: upgrade script does not recognize the following " - "tags in this checkpoint:", ' '.join(unknown_tags)) + print( + "warning: upgrade script does not recognize the following " + "tags in this checkpoint:", + " ".join(unknown_tags), + ) # Apply migrations for tags not in checkpoint and tags present for which # downgraders are present, respecting dependences to_apply = (Upgrader.tag_set - tags) | (Upgrader.untag_set & tags) while to_apply: - ready = set([ t for t in to_apply if Upgrader.get(t).ready(tags) ]) + ready = set([t for t in to_apply if Upgrader.get(t).ready(tags)]) if not ready: - print("could not apply these upgrades:", ' '.join(to_apply)) + print("could not apply these upgrades:", " ".join(to_apply)) print("update dependences impossible to resolve; aborting") exit(1) @@ -257,31 +282,45 @@ def process_file(path, **kwargs): verboseprint("...nothing to do") return - cpt.set('root.globals', 'version_tags', ' '.join(tags)) + cpt.set("root.globals", "version_tags", " ".join(tags)) # Write the old data back verboseprint("...completed") - cpt.write(open(path, 'w')) + cpt.write(open(path, "w")) -if __name__ == '__main__': + +if __name__ == "__main__": from argparse import ArgumentParser, SUPPRESS + parser = ArgumentParser(usage="%(prog)s [args] ") parser.add_argument( - "-r", "--recurse", action="store_true", - help="Recurse through all subdirectories modifying "\ - "each checkpoint that is found") + "-r", + "--recurse", + action="store_true", + help="Recurse through all subdirectories modifying " + "each checkpoint that is found", + ) parser.add_argument( - "-N", "--no-backup", action="store_false", - dest="backup", default=True, - help="Do no backup each checkpoint before modifying it") + "-N", + "--no-backup", + action="store_false", + dest="backup", + default=True, + help="Do no backup each checkpoint before modifying it", + ) parser.add_argument( - "-v", 
"--verbose", action="store_true", - help="Print out debugging information as") + "-v", + "--verbose", + action="store_true", + help="Print out debugging information as", + ) parser.add_argument( - "--get-cc-file", action="store_true", + "--get-cc-file", + action="store_true", # used during build; generate src/sim/tags.cc and exit - help=SUPPRESS) - parser.add_argument("checkpoint", nargs='?') + help=SUPPRESS, + ) + parser.add_argument("checkpoint", nargs="?") args = parser.parse_args() verbose_print = args.verbose @@ -298,14 +337,16 @@ if __name__ == '__main__': print() print("std::set version_tags = {") for tag in sorted(Upgrader.tag_set): - print(" \"{}\",".format(tag)) + print(' "{}",'.format(tag)) print("};") print() print("} // namespace gem5") exit(0) elif not args.checkpoint: - parser.error("You must specify a checkpoint file to modify or a " - "directory of checkpoints to recursively update") + parser.error( + "You must specify a checkpoint file to modify or a " + "directory of checkpoints to recursively update" + ) # Deal with shell variables and ~ path = osp.expandvars(osp.expanduser(args.checkpoint)) @@ -315,13 +356,13 @@ if __name__ == '__main__': process_file(path, **vars(args)) # Process an entire directory elif osp.isdir(path): - cpt_file = osp.join(path, 'm5.cpt') + cpt_file = osp.join(path, "m5.cpt") if args.recurse: # Visit very file and see if it matches - for root,dirs,files in os.walk(path): + for root, dirs, files in os.walk(path): for name in files: - if name == 'm5.cpt': - process_file(osp.join(root,name), **vars(args)) + if name == "m5.cpt": + process_file(osp.join(root, name), **vars(args)) for dir in dirs: pass # Maybe someone passed a cpt.XXXXXXX directory and not m5.cpt @@ -332,4 +373,3 @@ if __name__ == '__main__': print("and recurse not specified") sys.exit(1) sys.exit(0) - diff --git a/util/cpt_upgraders/arm-ccregs.py b/util/cpt_upgraders/arm-ccregs.py index 3bce03608d..435be7b0cb 100644 --- a/util/cpt_upgraders/arm-ccregs.py +++ 
b/util/cpt_upgraders/arm-ccregs.py @@ -1,28 +1,29 @@ # Use condition code registers for the ARM architecture. # Previously the integer register file was used for these registers. def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re - re_cpu_match = re.match('^(.*sys.*\.cpu[^.]*)\.xc\.(.+)$', sec) + re_cpu_match = re.match("^(.*sys.*\.cpu[^.]*)\.xc\.(.+)$", sec) # Search for all the execution contexts if not re_cpu_match: continue items = [] - for (item,value) in cpt.items(sec): + for (item, value) in cpt.items(sec): items.append(item) - if 'ccRegs' not in items: - intRegs = cpt.get(sec, 'intRegs').split() + if "ccRegs" not in items: + intRegs = cpt.get(sec, "intRegs").split() # Move those 5 integer registers to the ccRegs register file ccRegs = intRegs[38:43] - del intRegs[38:43] + del intRegs[38:43] - ccRegs.append('0') # CCREG_ZERO + ccRegs.append("0") # CCREG_ZERO + + cpt.set(sec, "intRegs", " ".join(intRegs)) + cpt.set(sec, "ccRegs", " ".join(ccRegs)) - cpt.set(sec, 'intRegs', ' '.join(intRegs)) - cpt.set(sec, 'ccRegs', ' '.join(ccRegs)) legacy_version = 13 diff --git a/util/cpt_upgraders/arm-contextidr-el2.py b/util/cpt_upgraders/arm-contextidr-el2.py index 87d7ab670b..891fec5e0d 100644 --- a/util/cpt_upgraders/arm-contextidr-el2.py +++ b/util/cpt_upgraders/arm-contextidr-el2.py @@ -1,13 +1,15 @@ # Add the ARM CONTEXTIDR_EL2 miscreg. 
def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re + # Search for all ISA sections - if re.search('.*sys.*\.cpu.*\.isa$', sec): - miscRegs = cpt.get(sec, 'miscRegs').split() + if re.search(".*sys.*\.cpu.*\.isa$", sec): + miscRegs = cpt.get(sec, "miscRegs").split() # CONTEXTIDR_EL2 defaults to 0b11111100000000000001 miscRegs[599:599] = [0xFC001] - cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in miscRegs)) + cpt.set(sec, "miscRegs", " ".join(str(x) for x in miscRegs)) + legacy_version = 14 diff --git a/util/cpt_upgraders/arm-gem5-gic-ext.py b/util/cpt_upgraders/arm-gem5-gic-ext.py index d4d588042e..fea852ff13 100644 --- a/util/cpt_upgraders/arm-gem5-gic-ext.py +++ b/util/cpt_upgraders/arm-gem5-gic-ext.py @@ -33,12 +33,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + def upgrader(cpt): """The gem5 GIC extensions change the size of many GIC data structures. 
Resize them to match the new GIC.""" import re - if cpt.get('root', 'isa', fallback='') != 'arm': + + if cpt.get("root", "isa", fallback="") != "arm": return old_cpu_max = 8 @@ -46,29 +48,27 @@ def upgrader(cpt): sgi_max = 16 ppi_max = 16 per_cpu_regs = ( - ("iccrpr", [ "0xff", ]), - ("cpuEnabled", [ "false", ]), - ("cpuPriority", [ "0xff", ]), - ("cpuBpr", [ "0", ]), - ("cpuHighestInt", [ "1023", ]), - ("cpuPpiPending", [ "0", ]), - ("cpuPpiActive", [ "0", ] ), - ("interrupt_time", [ "0", ]), - ("*bankedIntPriority", ["0", ] * (sgi_max + ppi_max)), - ) - new_per_cpu_regs = ( - ("cpuSgiPendingExt", "0"), - ("cpuSgiActiveExt", "0"), + ("iccrpr", ["0xff"]), + ("cpuEnabled", ["false"]), + ("cpuPriority", ["0xff"]), + ("cpuBpr", ["0"]), + ("cpuHighestInt", ["1023"]), + ("cpuPpiPending", ["0"]), + ("cpuPpiActive", ["0"]), + ("interrupt_time", ["0"]), + ("*bankedIntPriority", ["0"] * (sgi_max + ppi_max)), ) + new_per_cpu_regs = (("cpuSgiPendingExt", "0"), ("cpuSgiActiveExt", "0")) for sec in cpt.sections(): - if re.search('.*\.gic$', sec): + if re.search(".*\.gic$", sec): for reg, default in per_cpu_regs: value = cpt.get(sec, reg).split(" ") - assert len(value) / len(default) == old_cpu_max, \ - "GIC register size mismatch" - value += [ " ".join(default), ] * (new_cpu_max - old_cpu_max) + assert ( + len(value) / len(default) == old_cpu_max + ), "GIC register size mismatch" + value += [" ".join(default)] * (new_cpu_max - old_cpu_max) cpt.set(sec, reg, " ".join(value)) for reg, default in new_per_cpu_regs: - cpt.set(sec, reg, " ".join([ default, ] * new_cpu_max)) + cpt.set(sec, reg, " ".join([default] * new_cpu_max)) diff --git a/util/cpt_upgraders/arm-gicv2-banked-regs.py b/util/cpt_upgraders/arm-gicv2-banked-regs.py index e6437e62df..44a6146b58 100644 --- a/util/cpt_upgraders/arm-gicv2-banked-regs.py +++ b/util/cpt_upgraders/arm-gicv2-banked-regs.py @@ -35,45 +35,45 @@ # duplicate banked registers into new per-cpu arrays. 
def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re - if not re.search('\.gic$', sec): + if not re.search("\.gic$", sec): continue - cpuEnabled = cpt.get(sec, 'cpuEnabled' ).split() + cpuEnabled = cpt.get(sec, "cpuEnabled").split() - intEnabled = cpt.get(sec, 'intEnabled' ).split() - pendingInt = cpt.get(sec, 'pendingInt' ).split() - activeInt = cpt.get(sec, 'activeInt' ).split() - intPriority = cpt.get(sec, 'intPriority').split() - cpuTarget = cpt.get(sec, 'cpuTarget' ).split() + intEnabled = cpt.get(sec, "intEnabled").split() + pendingInt = cpt.get(sec, "pendingInt").split() + activeInt = cpt.get(sec, "activeInt").split() + intPriority = cpt.get(sec, "intPriority").split() + cpuTarget = cpt.get(sec, "cpuTarget").split() b_intEnabled = intEnabled[0] b_pendingInt = pendingInt[0] - b_activeInt = activeInt[0] + b_activeInt = activeInt[0] del intEnabled[0] del pendingInt[0] del activeInt[0] - del intPriority[0:32] # unused; overlapped with bankedIntPriority + del intPriority[0:32] # unused; overlapped with bankedIntPriority del cpuTarget[0:32] - cpt.set(sec, 'intEnabled', ' '.join(intEnabled)) - cpt.set(sec, 'pendingInt', ' '.join(pendingInt)) - cpt.set(sec, 'activeInt', ' '.join(activeInt)) - cpt.set(sec, 'intPriority',' '.join(intPriority)) - cpt.set(sec, 'cpuTarget', ' '.join(cpuTarget)) + cpt.set(sec, "intEnabled", " ".join(intEnabled)) + cpt.set(sec, "pendingInt", " ".join(pendingInt)) + cpt.set(sec, "activeInt", " ".join(activeInt)) + cpt.set(sec, "intPriority", " ".join(intPriority)) + cpt.set(sec, "cpuTarget", " ".join(cpuTarget)) - b_intPriority = cpt.get(sec, '*bankedIntPriority').split() - cpt.remove_option(sec, '*bankedIntPriority') + b_intPriority = cpt.get(sec, "*bankedIntPriority").split() + cpt.remove_option(sec, "*bankedIntPriority") for cpu in range(255): - if cpuEnabled[cpu] == 'true': - intPriority = b_intPriority[cpu*32 : (cpu+1)*32] + if 
cpuEnabled[cpu] == "true": + intPriority = b_intPriority[cpu * 32 : (cpu + 1) * 32] new_sec = "%s.bankedRegs%u" % (sec, cpu) cpt.add_section(new_sec) - cpt.set(new_sec, 'intEnabled', b_intEnabled) - cpt.set(new_sec, 'pendingInt', b_pendingInt) - cpt.set(new_sec, 'activeInt', b_activeInt) - cpt.set(new_sec, 'intPriority',' '.join(intPriority)) + cpt.set(new_sec, "intEnabled", b_intEnabled) + cpt.set(new_sec, "pendingInt", b_pendingInt) + cpt.set(new_sec, "activeInt", b_activeInt) + cpt.set(new_sec, "intPriority", " ".join(intPriority)) diff --git a/util/cpt_upgraders/arm-hdlcd-upgrade.py b/util/cpt_upgraders/arm-hdlcd-upgrade.py index a7885a295c..bbd2b9c79e 100644 --- a/util/cpt_upgraders/arm-hdlcd-upgrade.py +++ b/util/cpt_upgraders/arm-hdlcd-upgrade.py @@ -33,45 +33,43 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + def upgrader(cpt): """HDLCD controller rewrite. Converted checkpoints cause the HDLCD model to start a new screen refresh and FIFO buffer fill immediately after they are loaded. 
Expect some timing differences.""" import re - if cpt.get('root', 'isa', fallback='') != 'arm': + + if cpt.get("root", "isa", fallback="") != "arm": return option_names = { - "int_rawstat" : "int_rawstat_serial", - "int_mask" : "int_mask_serial", - "fb_base" : "fb_base", - "fb_line_length" : "fb_line_length", - "fb_line_count" : "fb_line_count_serial", - "fb_line_pitch" : "fb_line_pitch", - "bus_options" : "bus_options_serial", - - "v_sync" : "v_sync_serial", - "v_back_porch" : "v_back_porch_serial", - "v_data" : "v_data_serial", - "v_front_porch" : "v_front_porch_serial", - - "h_sync" : "h_sync_serial", - "h_back_porch" : "h_back_porch_serial", - "h_data" : "h_data_serial", - "h_front_porch" : "h_front_porch_serial", - - "polarities" : "polarities_serial", - - "command" : "command_serial", - "pixel_format" : "pixel_format_serial", - "red_select" : "red_select_serial", - "green_select" : "green_select_serial", - "blue_select" : "blue_select_serial", + "int_rawstat": "int_rawstat_serial", + "int_mask": "int_mask_serial", + "fb_base": "fb_base", + "fb_line_length": "fb_line_length", + "fb_line_count": "fb_line_count_serial", + "fb_line_pitch": "fb_line_pitch", + "bus_options": "bus_options_serial", + "v_sync": "v_sync_serial", + "v_back_porch": "v_back_porch_serial", + "v_data": "v_data_serial", + "v_front_porch": "v_front_porch_serial", + "h_sync": "h_sync_serial", + "h_back_porch": "h_back_porch_serial", + "h_data": "h_data_serial", + "h_front_porch": "h_front_porch_serial", + "polarities": "polarities_serial", + "command": "command_serial", + "pixel_format": "pixel_format_serial", + "red_select": "red_select_serial", + "green_select": "green_select_serial", + "blue_select": "blue_select_serial", } for sec in cpt.sections(): - if re.search('.*\.hdlcd$', sec): + if re.search(".*\.hdlcd$", sec): options = {} for new, old in list(option_names.items()): options[new] = cpt.get(sec, old) @@ -93,12 +91,13 @@ def upgrader(cpt): cpt.set(sec_dma, "nextAddr", "0") 
cpt.set(sec_dma, "buffer", "") - - print("Warning: Assuming that the HDLCD pixel clock and global frequency " - "are still using their default values.") + print( + "Warning: Assuming that the HDLCD pixel clock and global frequency " + "are still using their default values." + ) sec_osc = "system.realview.realview_io.osc_pxl" - global_tick = 1E12 - pxl_freq = 137E6 + global_tick = 1e12 + pxl_freq = 137e6 pxl_ticks = global_tick / pxl_freq if not cpt.has_section(sec_osc): cpt.add_section(sec_osc) diff --git a/util/cpt_upgraders/arm-miscreg-teehbr.py b/util/cpt_upgraders/arm-miscreg-teehbr.py index 1717d40208..d6e81e0da1 100644 --- a/util/cpt_upgraders/arm-miscreg-teehbr.py +++ b/util/cpt_upgraders/arm-miscreg-teehbr.py @@ -1,15 +1,18 @@ # Add the ARM MISCREG TEEHBR def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re + # Search for all ISA sections - if re.search('.*sys.*\.cpu.*\.isa$', sec): - mr = cpt.get(sec, 'miscRegs').split() + if re.search(".*sys.*\.cpu.*\.isa$", sec): + mr = cpt.get(sec, "miscRegs").split() if len(mr) == 161: print("MISCREG_TEEHBR already seems to be inserted.") else: - mr.insert(51,0); # Add dummy value for MISCREG_TEEHBR - cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr)) + mr.insert(51, 0) + # Add dummy value for MISCREG_TEEHBR + cpt.set(sec, "miscRegs", " ".join(str(x) for x in mr)) + legacy_version = 8 diff --git a/util/cpt_upgraders/arm-sve.py b/util/cpt_upgraders/arm-sve.py index 4ef28d0a6e..45d2949aa8 100644 --- a/util/cpt_upgraders/arm-sve.py +++ b/util/cpt_upgraders/arm-sve.py @@ -7,32 +7,40 @@ def upgrader(cpt): 2) Set isa.sveVL to 1 3) Add SVE misc registers in the checkpoint """ - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re + # Search for all ISA sections - if re.search('.*sys.*\.cpu.*\.isa$', sec): + if 
re.search(".*sys.*\.cpu.*\.isa$", sec): # haveSVE = false - cpt.set(sec, 'haveSVE', 'false') + cpt.set(sec, "haveSVE", "false") # sveVL (sve Vector Length in quadword) = 1 # (This is a dummy value since haveSVE is set to false) - cpt.set(sec, 'sveVL', '1') + cpt.set(sec, "sveVL", "1") # Updating SVE misc registers (dummy values) - mr = cpt.get(sec, 'miscRegs').split() + mr = cpt.get(sec, "miscRegs").split() if len(mr) == 820: - print("MISCREG_SVE registers already seems " - "to be inserted.") + print( + "MISCREG_SVE registers already seems " + "to be inserted." + ) else: # Replace MISCREG_FREESLOT_1 with MISCREG_ID_AA64ZFR0_EL1 - mr[-1] = 0; + mr[-1] = 0 + + mr.append(0) + # Add dummy value for MISCREG_ZCR_EL3 + mr.append(0) + # Add dummy value for MISCREG_ZCR_EL2 + mr.append(0) + # Add dummy value for MISCREG_ZCR_EL12 + mr.append(0) + # Add dummy value for MISCREG_ZCR_EL1 + cpt.set(sec, "miscRegs", " ".join(str(x) for x in mr)) - mr.append(0); # Add dummy value for MISCREG_ZCR_EL3 - mr.append(0); # Add dummy value for MISCREG_ZCR_EL2 - mr.append(0); # Add dummy value for MISCREG_ZCR_EL12 - mr.append(0); # Add dummy value for MISCREG_ZCR_EL1 - cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr)) legacy_version = 15 diff --git a/util/cpt_upgraders/arm-sysreg-mapping-ns.py b/util/cpt_upgraders/arm-sysreg-mapping-ns.py index e0418300e2..fd02062039 100644 --- a/util/cpt_upgraders/arm-sysreg-mapping-ns.py +++ b/util/cpt_upgraders/arm-sysreg-mapping-ns.py @@ -35,38 +35,39 @@ # reflect updated register mappings for ARM ISA def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re - # Search for all ISA sections - if re.search('.*sys.*\.cpu.*\.isa\d*$', sec): - mr = cpt.get(sec, 'miscRegs').split() - if int(mr[0]) & 16 == 0: # CPSR reg width; 0 for AArch64 - mr[112] = mr[111] # ACTLR_NS = ACTLR - mr[146] = mr[145] # ADFSR_NS = ADFSR - mr[149] = mr[148] # AIFSR_NS = AIFSR - 
mr[253] = mr[252] # AMAIR0_NS = AMAIR0 - mr[289] = mr[288] # CNTP_CTL_NS = CNTP_CTL - mr[313] = mr[312] # CNTP_CVAL_NS = CNTP_CVAL - mr[286] = mr[285] # CNTP_TVAL_NS = CNTP_TVAL - mr[271] = mr[270] # CONTEXTIDR_NS = CONTEXTIDR - mr[104] = mr[103] # CSSELR_NS = CSSELR - mr[137] = mr[136] # DACR_NS = DACR - mr[155] = mr[154] # DFAR_NS = DFAR - mr[158] = mr[157] # IFAR_NS = IFAR - mr[143] = mr[142] # IFSR_NS = IFSR - mr[247] = mr[246] # NMRR_NS = NMRR - mr[166] = mr[165] # PAR_NS = PAR - mr[241] = mr[240] # PRRR_NS = PRRR - mr[ 4] = mr[424] # SPSR_SVC = SPSR_EL1 - mr[ 7] = mr[435] # SPSR_HYP = SPSR_EL2 - mr[ 5] = mr[442] # SPSR_MON = SPSR_EL3 - mr[277] = mr[276] # TPIDRURO_NS = TPIDRURO - mr[280] = mr[279] # TPIDRPRW_NS = TPIDRPRW - mr[274] = mr[273] # TPIDRURW_NS = TPIDRURW - mr[132] = mr[131] # TTBCR_NS = TTBCR - mr[126] = mr[125] # TTBR0_NS = TTBR0 - mr[129] = mr[128] # TTBR1_NS = TTBR1 - mr[263] = mr[262] # VBAR_NS = VBAR - cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr)) + # Search for all ISA sections + if re.search(".*sys.*\.cpu.*\.isa\d*$", sec): + mr = cpt.get(sec, "miscRegs").split() + if int(mr[0]) & 16 == 0: # CPSR reg width; 0 for AArch64 + mr[112] = mr[111] # ACTLR_NS = ACTLR + mr[146] = mr[145] # ADFSR_NS = ADFSR + mr[149] = mr[148] # AIFSR_NS = AIFSR + mr[253] = mr[252] # AMAIR0_NS = AMAIR0 + mr[289] = mr[288] # CNTP_CTL_NS = CNTP_CTL + mr[313] = mr[312] # CNTP_CVAL_NS = CNTP_CVAL + mr[286] = mr[285] # CNTP_TVAL_NS = CNTP_TVAL + mr[271] = mr[270] # CONTEXTIDR_NS = CONTEXTIDR + mr[104] = mr[103] # CSSELR_NS = CSSELR + mr[137] = mr[136] # DACR_NS = DACR + mr[155] = mr[154] # DFAR_NS = DFAR + mr[158] = mr[157] # IFAR_NS = IFAR + mr[143] = mr[142] # IFSR_NS = IFSR + mr[247] = mr[246] # NMRR_NS = NMRR + mr[166] = mr[165] # PAR_NS = PAR + mr[241] = mr[240] # PRRR_NS = PRRR + mr[4] = mr[424] # SPSR_SVC = SPSR_EL1 + mr[7] = mr[435] # SPSR_HYP = SPSR_EL2 + mr[5] = mr[442] # SPSR_MON = SPSR_EL3 + mr[277] = mr[276] # TPIDRURO_NS = TPIDRURO + mr[280] = 
mr[279] # TPIDRPRW_NS = TPIDRPRW + mr[274] = mr[273] # TPIDRURW_NS = TPIDRURW + mr[132] = mr[131] # TTBCR_NS = TTBCR + mr[126] = mr[125] # TTBR0_NS = TTBR0 + mr[129] = mr[128] # TTBR1_NS = TTBR1 + mr[263] = mr[262] # VBAR_NS = VBAR + + cpt.set(sec, "miscRegs", " ".join(str(x) for x in mr)) diff --git a/util/cpt_upgraders/armv8.py b/util/cpt_upgraders/armv8.py index 4390aa1a3a..6679beb88a 100644 --- a/util/cpt_upgraders/armv8.py +++ b/util/cpt_upgraders/armv8.py @@ -1,20 +1,23 @@ # Add all ARMv8 state def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') != 'arm': + if cpt.get("root", "isa", fallback="") != "arm": return import re - print("Warning: The size of the FP register file has changed. " - "To get similar results you need to adjust the number of " - "physical registers in the CPU you're restoring into by " - "NNNN.") + + print( + "Warning: The size of the FP register file has changed. " + "To get similar results you need to adjust the number of " + "physical registers in the CPU you're restoring into by " + "NNNN." + ) # Find the CPU context's and upgrade their registers for sec in cpt.sections(): - re_xc_match = re.match('^.*?sys.*?\.cpu(\d+)*\.xc\.*', sec) + re_xc_match = re.match("^.*?sys.*?\.cpu(\d+)*\.xc\.*", sec) if not re_xc_match: continue # Update floating point regs - fpr = cpt.get(sec, 'floatRegs.i').split() + fpr = cpt.get(sec, "floatRegs.i").split() # v8 has 128 normal fp and 32 special fp regs compared # to v7's 64 normal fp and 8 special fp regs. 
# Insert the extra normal fp registers at end of v7 normal fp regs @@ -23,15 +26,15 @@ def upgrader(cpt): # Append the extra special registers for x in range(24): fpr.append("0") - cpt.set(sec, 'floatRegs.i', ' '.join(str(x) for x in fpr)) + cpt.set(sec, "floatRegs.i", " ".join(str(x) for x in fpr)) - ir = cpt.get(sec, 'intRegs').split() + ir = cpt.get(sec, "intRegs").split() # Add in v8 int reg state # Splice in R13_HYP ir.insert(20, "0") # Splice in INTREG_DUMMY and SP0 - SP3 ir.extend(["0", "0", "0", "0", "0"]) - cpt.set(sec, 'intRegs', ' '.join(str(x) for x in ir)) + cpt.set(sec, "intRegs", " ".join(str(x) for x in ir)) # Update the cpu interrupt field for sec in cpt.sections(): @@ -42,7 +45,7 @@ def upgrader(cpt): irqs = cpt.get(sec, "interrupts").split() irqs.append("false") irqs.append("false") - cpt.set(sec, "interrupts", ' '.join(str(x) for x in irqs)) + cpt.set(sec, "interrupts", " ".join(str(x) for x in irqs)) # Update the per cpu interrupt structure for sec in cpt.sections(): @@ -53,7 +56,7 @@ def upgrader(cpt): irqs = cpt.get(sec, "interrupts").split() irqs.append("false") irqs.append("false") - cpt.set(sec, "interrupts", ' '.join(str(x) for x in irqs)) + cpt.set(sec, "interrupts", " ".join(str(x) for x in irqs)) # Update the misc regs and add in new isa specific fields for sec in cpt.sections(): @@ -61,254 +64,257 @@ def upgrader(cpt): if not re_isa_match: continue - cpt.set(sec, 'haveSecurity', 'false') - cpt.set(sec, 'haveLPAE', 'false') - cpt.set(sec, 'haveVirtualization', 'false') - cpt.set(sec, 'haveLargeAsid64', 'false') - cpt.set(sec, 'physAddrRange64', '40') + cpt.set(sec, "haveSecurity", "false") + cpt.set(sec, "haveLPAE", "false") + cpt.set(sec, "haveVirtualization", "false") + cpt.set(sec, "haveLargeAsid64", "false") + cpt.set(sec, "physAddrRange64", "40") # splice in the new misc registers, ~200 -> 605 registers, # ordering does not remain consistent - mr_old = cpt.get(sec, 'miscRegs').split() - mr_new = [ '0' for x in range(605) ] + 
mr_old = cpt.get(sec, "miscRegs").split() + mr_new = ["0" for x in range(605)] # map old v7 miscRegs to new v8 miscRegs - mr_new[0] = mr_old[0] # CPSR - mr_new[16] = mr_old[1] # CPSR_Q - mr_new[1] = mr_old[2] # SPSR - mr_new[2] = mr_old[3] # SPSR_FIQ - mr_new[3] = mr_old[4] # SPSR_IRQ - mr_new[4] = mr_old[5] # SPSR_SVC - mr_new[5] = mr_old[6] # SPSR_MON - mr_new[8] = mr_old[7] # SPSR_UND - mr_new[6] = mr_old[8] # SPSR_ABT - mr_new[432] = mr_old[9] # FPSR - mr_new[10] = mr_old[10] # FPSID - mr_new[11] = mr_old[11] # FPSCR - mr_new[18] = mr_old[12] # FPSCR_QC - mr_new[17] = mr_old[13] # FPSCR_EXC - mr_new[14] = mr_old[14] # FPEXC - mr_new[13] = mr_old[15] # MVFR0 - mr_new[12] = mr_old[16] # MVFR1 - mr_new[28] = mr_old[17] # SCTLR_RST, - mr_new[29] = mr_old[18] # SEV_MAILBOX, - mr_new[30] = mr_old[19] # DBGDIDR - mr_new[31] = mr_old[20] # DBGDSCR_INT, - mr_new[33] = mr_old[21] # DBGDTRRX_INT, - mr_new[34] = mr_old[22] # DBGTRTX_INT, - mr_new[35] = mr_old[23] # DBGWFAR, - mr_new[36] = mr_old[24] # DBGVCR, - #mr_new[] = mr_old[25] # DBGECR -> UNUSED, - #mr_new[] = mr_old[26] # DBGDSCCR -> UNUSED, - #mr_new[] = mr_old[27] # DBGSMCR -> UNUSED, - mr_new[37] = mr_old[28] # DBGDTRRX_EXT, - mr_new[38] = mr_old[29] # DBGDSCR_EXT, - mr_new[39] = mr_old[30] # DBGDTRTX_EXT, - #mr_new[] = mr_old[31] # DBGDRCR -> UNUSED, - mr_new[41] = mr_old[32] # DBGBVR, - mr_new[47] = mr_old[33] # DBGBCR, - #mr_new[] = mr_old[34] # DBGBVR_M -> UNUSED, - #mr_new[] = mr_old[35] # DBGBCR_M -> UNUSED, - mr_new[61] = mr_old[36] # DBGDRAR, - #mr_new[] = mr_old[37] # DBGBXVR_M -> UNUSED, - mr_new[64] = mr_old[38] # DBGOSLAR, - #mr_new[] = mr_old[39] # DBGOSSRR -> UNUSED, - mr_new[66] = mr_old[40] # DBGOSDLR, - mr_new[67] = mr_old[41] # DBGPRCR, - #mr_new[] = mr_old[42] # DBGPRSR -> UNUSED, - mr_new[68] = mr_old[43] # DBGDSAR, - #mr_new[] = mr_old[44] # DBGITCTRL -> UNUSED, - mr_new[69] = mr_old[45] # DBGCLAIMSET, - mr_new[70] = mr_old[46] # DBGCLAIMCLR, - mr_new[71] = mr_old[47] # DBGAUTHSTATUS, - 
mr_new[72] = mr_old[48] # DBGDEVID2, - mr_new[73] = mr_old[49] # DBGDEVID1, - mr_new[74] = mr_old[50] # DBGDEVID, - mr_new[77] = mr_old[51] # TEEHBR, - mr_new[109] = mr_old[52] # v7 SCTLR -> aarc32 SCTLR_NS - mr_new[189] = mr_old[53] # DCCISW, - mr_new[188] = mr_old[54] # DCCIMVAC, - mr_new[183] = mr_old[55] # DCCMVAC, - mr_new[271] = mr_old[56] # v7 CONTEXTIDR -> aarch32 CONTEXTIDR_NS, - mr_new[274] = mr_old[57] # v7 TPIDRURW -> aarch32 TPIDRURW_NS, - mr_new[277] = mr_old[58] # v7 TPIDRURO -> aarch32 TPIDRURO_NS, - mr_new[280] = mr_old[59] # v7 TPIDRPRW -> aarch32 TPIDRPRW_NS, - mr_new[170] = mr_old[60] # CP15ISB, - mr_new[185] = mr_old[61] # CP15DSB, - mr_new[186] = mr_old[62] # CP15DMB, - mr_new[114] = mr_old[63] # CPACR, - mr_new[101] = mr_old[64] # CLIDR, - mr_new[100] = mr_old[65] # CCSIDR, - mr_new[104] = mr_old[66] # v7 CSSELR -> aarch32 CSSELR_NS, - mr_new[163] = mr_old[67] # ICIALLUIS, - mr_new[168] = mr_old[68] # ICIALLU, - mr_new[169] = mr_old[69] # ICIMVAU, - mr_new[172] = mr_old[70] # BPIMVA, - mr_new[164] = mr_old[71] # BPIALLIS, - mr_new[171] = mr_old[72] # BPIALL, - mr_new[80] = mr_old[73] # MIDR, - mr_new[126] = mr_old[74] # v7 TTBR0 -> aarch32 TTBR0_NS, - mr_new[129] = mr_old[75] # v7 TTBR1 -> aarch32 TTBR1_NS, - mr_new[83] = mr_old[76] # TLBTR, - mr_new[137] = mr_old[77] # v7 DACR -> aarch32 DACR_NS, - mr_new[192] = mr_old[78] # TLBIALLIS, - mr_new[193] = mr_old[79] # TLBIMVAIS, - mr_new[194] = mr_old[80] # TLBIASIDIS, - mr_new[195] = mr_old[81] # TLBIMVAAIS, - mr_new[198] = mr_old[82] # ITLBIALL, - mr_new[199] = mr_old[83] # ITLBIMVA, - mr_new[200] = mr_old[84] # ITLBIASID, - mr_new[201] = mr_old[85] # DTLBIALL, - mr_new[202] = mr_old[86] # DTLBIMVA, - mr_new[203] = mr_old[87] # DTLBIASID, - mr_new[204] = mr_old[88] # TLBIALL, - mr_new[205] = mr_old[89] # TLBIMVA, - mr_new[206] = mr_old[90] # TLBIASID, - mr_new[207] = mr_old[91] # TLBIMVAA, - mr_new[140] = mr_old[92] # v7 DFSR -> aarch32 DFSR_NS, - mr_new[143] = mr_old[93] # v7 IFSR -> aarch32 
IFSR_NS, - mr_new[155] = mr_old[94] # v7 DFAR -> aarch32 DFAR_NS, - mr_new[158] = mr_old[95] # v7 IFAR -> aarch32 IFAR_NS, - mr_new[84] = mr_old[96] # MPIDR, - mr_new[241] = mr_old[97] # v7 PRRR -> aarch32 PRRR_NS, - mr_new[247] = mr_old[98] # v7 NMRR -> aarch32 NMRR_NS, - mr_new[131] = mr_old[99] # TTBCR, - mr_new[86] = mr_old[100] # ID_PFR0, - mr_new[81] = mr_old[101] # CTR, - mr_new[115] = mr_old[102] # SCR, + mr_new[0] = mr_old[0] # CPSR + mr_new[16] = mr_old[1] # CPSR_Q + mr_new[1] = mr_old[2] # SPSR + mr_new[2] = mr_old[3] # SPSR_FIQ + mr_new[3] = mr_old[4] # SPSR_IRQ + mr_new[4] = mr_old[5] # SPSR_SVC + mr_new[5] = mr_old[6] # SPSR_MON + mr_new[8] = mr_old[7] # SPSR_UND + mr_new[6] = mr_old[8] # SPSR_ABT + mr_new[432] = mr_old[9] # FPSR + mr_new[10] = mr_old[10] # FPSID + mr_new[11] = mr_old[11] # FPSCR + mr_new[18] = mr_old[12] # FPSCR_QC + mr_new[17] = mr_old[13] # FPSCR_EXC + mr_new[14] = mr_old[14] # FPEXC + mr_new[13] = mr_old[15] # MVFR0 + mr_new[12] = mr_old[16] # MVFR1 + mr_new[28] = mr_old[17] # SCTLR_RST, + mr_new[29] = mr_old[18] # SEV_MAILBOX, + mr_new[30] = mr_old[19] # DBGDIDR + mr_new[31] = mr_old[20] # DBGDSCR_INT, + mr_new[33] = mr_old[21] # DBGDTRRX_INT, + mr_new[34] = mr_old[22] # DBGTRTX_INT, + mr_new[35] = mr_old[23] # DBGWFAR, + mr_new[36] = mr_old[24] # DBGVCR, + # mr_new[] = mr_old[25] # DBGECR -> UNUSED, + # mr_new[] = mr_old[26] # DBGDSCCR -> UNUSED, + # mr_new[] = mr_old[27] # DBGSMCR -> UNUSED, + mr_new[37] = mr_old[28] # DBGDTRRX_EXT, + mr_new[38] = mr_old[29] # DBGDSCR_EXT, + mr_new[39] = mr_old[30] # DBGDTRTX_EXT, + # mr_new[] = mr_old[31] # DBGDRCR -> UNUSED, + mr_new[41] = mr_old[32] # DBGBVR, + mr_new[47] = mr_old[33] # DBGBCR, + # mr_new[] = mr_old[34] # DBGBVR_M -> UNUSED, + # mr_new[] = mr_old[35] # DBGBCR_M -> UNUSED, + mr_new[61] = mr_old[36] # DBGDRAR, + # mr_new[] = mr_old[37] # DBGBXVR_M -> UNUSED, + mr_new[64] = mr_old[38] # DBGOSLAR, + # mr_new[] = mr_old[39] # DBGOSSRR -> UNUSED, + mr_new[66] = mr_old[40] # 
DBGOSDLR, + mr_new[67] = mr_old[41] # DBGPRCR, + # mr_new[] = mr_old[42] # DBGPRSR -> UNUSED, + mr_new[68] = mr_old[43] # DBGDSAR, + # mr_new[] = mr_old[44] # DBGITCTRL -> UNUSED, + mr_new[69] = mr_old[45] # DBGCLAIMSET, + mr_new[70] = mr_old[46] # DBGCLAIMCLR, + mr_new[71] = mr_old[47] # DBGAUTHSTATUS, + mr_new[72] = mr_old[48] # DBGDEVID2, + mr_new[73] = mr_old[49] # DBGDEVID1, + mr_new[74] = mr_old[50] # DBGDEVID, + mr_new[77] = mr_old[51] # TEEHBR, + mr_new[109] = mr_old[52] # v7 SCTLR -> aarc32 SCTLR_NS + mr_new[189] = mr_old[53] # DCCISW, + mr_new[188] = mr_old[54] # DCCIMVAC, + mr_new[183] = mr_old[55] # DCCMVAC, + mr_new[271] = mr_old[56] # v7 CONTEXTIDR -> aarch32 CONTEXTIDR_NS, + mr_new[274] = mr_old[57] # v7 TPIDRURW -> aarch32 TPIDRURW_NS, + mr_new[277] = mr_old[58] # v7 TPIDRURO -> aarch32 TPIDRURO_NS, + mr_new[280] = mr_old[59] # v7 TPIDRPRW -> aarch32 TPIDRPRW_NS, + mr_new[170] = mr_old[60] # CP15ISB, + mr_new[185] = mr_old[61] # CP15DSB, + mr_new[186] = mr_old[62] # CP15DMB, + mr_new[114] = mr_old[63] # CPACR, + mr_new[101] = mr_old[64] # CLIDR, + mr_new[100] = mr_old[65] # CCSIDR, + mr_new[104] = mr_old[66] # v7 CSSELR -> aarch32 CSSELR_NS, + mr_new[163] = mr_old[67] # ICIALLUIS, + mr_new[168] = mr_old[68] # ICIALLU, + mr_new[169] = mr_old[69] # ICIMVAU, + mr_new[172] = mr_old[70] # BPIMVA, + mr_new[164] = mr_old[71] # BPIALLIS, + mr_new[171] = mr_old[72] # BPIALL, + mr_new[80] = mr_old[73] # MIDR, + mr_new[126] = mr_old[74] # v7 TTBR0 -> aarch32 TTBR0_NS, + mr_new[129] = mr_old[75] # v7 TTBR1 -> aarch32 TTBR1_NS, + mr_new[83] = mr_old[76] # TLBTR, + mr_new[137] = mr_old[77] # v7 DACR -> aarch32 DACR_NS, + mr_new[192] = mr_old[78] # TLBIALLIS, + mr_new[193] = mr_old[79] # TLBIMVAIS, + mr_new[194] = mr_old[80] # TLBIASIDIS, + mr_new[195] = mr_old[81] # TLBIMVAAIS, + mr_new[198] = mr_old[82] # ITLBIALL, + mr_new[199] = mr_old[83] # ITLBIMVA, + mr_new[200] = mr_old[84] # ITLBIASID, + mr_new[201] = mr_old[85] # DTLBIALL, + mr_new[202] = mr_old[86] # 
DTLBIMVA, + mr_new[203] = mr_old[87] # DTLBIASID, + mr_new[204] = mr_old[88] # TLBIALL, + mr_new[205] = mr_old[89] # TLBIMVA, + mr_new[206] = mr_old[90] # TLBIASID, + mr_new[207] = mr_old[91] # TLBIMVAA, + mr_new[140] = mr_old[92] # v7 DFSR -> aarch32 DFSR_NS, + mr_new[143] = mr_old[93] # v7 IFSR -> aarch32 IFSR_NS, + mr_new[155] = mr_old[94] # v7 DFAR -> aarch32 DFAR_NS, + mr_new[158] = mr_old[95] # v7 IFAR -> aarch32 IFAR_NS, + mr_new[84] = mr_old[96] # MPIDR, + mr_new[241] = mr_old[97] # v7 PRRR -> aarch32 PRRR_NS, + mr_new[247] = mr_old[98] # v7 NMRR -> aarch32 NMRR_NS, + mr_new[131] = mr_old[99] # TTBCR, + mr_new[86] = mr_old[100] # ID_PFR0, + mr_new[81] = mr_old[101] # CTR, + mr_new[115] = mr_old[102] # SCR, # Set the non-secure bit scr = int(mr_new[115]) scr = scr | 0x1 mr_new[115] = str(scr) ### - mr_new[116] = mr_old[103] # SDER, - mr_new[165] = mr_old[104] # PAR, - mr_new[175] = mr_old[105] # V2PCWPR -> ATS1CPR, - mr_new[176] = mr_old[106] # V2PCWPW -> ATS1CPW, - mr_new[177] = mr_old[107] # V2PCWUR -> ATS1CUR, - mr_new[178] = mr_old[108] # V2PCWUW -> ATS1CUW, - mr_new[179] = mr_old[109] # V2POWPR -> ATS12NSOPR, - mr_new[180] = mr_old[110] # V2POWPW -> ATS12NSOPW, - mr_new[181] = mr_old[111] # V2POWUR -> ATS12NSOUR, - mr_new[182] = mr_old[112] # V2POWUW -> ATS12NWOUW, - mr_new[90] = mr_old[113] # ID_MMFR0, - mr_new[92] = mr_old[114] # ID_MMFR2, - mr_new[93] = mr_old[115] # ID_MMFR3, - mr_new[112] = mr_old[116] # v7 ACTLR -> aarch32 ACTLR_NS - mr_new[222] = mr_old[117] # PMCR, - mr_new[230] = mr_old[118] # PMCCNTR, - mr_new[223] = mr_old[119] # PMCNTENSET, - mr_new[224] = mr_old[120] # PMCNTENCLR, - mr_new[225] = mr_old[121] # PMOVSR, - mr_new[226] = mr_old[122] # PMSWINC, - mr_new[227] = mr_old[123] # PMSELR, - mr_new[228] = mr_old[124] # PMCEID0, - mr_new[229] = mr_old[125] # PMCEID1, - mr_new[231] = mr_old[126] # PMXEVTYPER, - mr_new[233] = mr_old[127] # PMXEVCNTR, - mr_new[234] = mr_old[128] # PMUSERENR, - mr_new[235] = mr_old[129] # PMINTENSET, - 
mr_new[236] = mr_old[130] # PMINTENCLR, - mr_new[94] = mr_old[131] # ID_ISAR0, - mr_new[95] = mr_old[132] # ID_ISAR1, - mr_new[96] = mr_old[133] # ID_ISAR2, - mr_new[97] = mr_old[134] # ID_ISAR3, - mr_new[98] = mr_old[135] # ID_ISAR4, - mr_new[99] = mr_old[136] # ID_ISAR5, - mr_new[20] = mr_old[137] # LOCKFLAG, - mr_new[19] = mr_old[138] # LOCKADDR, - mr_new[87] = mr_old[139] # ID_PFR1, + mr_new[116] = mr_old[103] # SDER, + mr_new[165] = mr_old[104] # PAR, + mr_new[175] = mr_old[105] # V2PCWPR -> ATS1CPR, + mr_new[176] = mr_old[106] # V2PCWPW -> ATS1CPW, + mr_new[177] = mr_old[107] # V2PCWUR -> ATS1CUR, + mr_new[178] = mr_old[108] # V2PCWUW -> ATS1CUW, + mr_new[179] = mr_old[109] # V2POWPR -> ATS12NSOPR, + mr_new[180] = mr_old[110] # V2POWPW -> ATS12NSOPW, + mr_new[181] = mr_old[111] # V2POWUR -> ATS12NSOUR, + mr_new[182] = mr_old[112] # V2POWUW -> ATS12NWOUW, + mr_new[90] = mr_old[113] # ID_MMFR0, + mr_new[92] = mr_old[114] # ID_MMFR2, + mr_new[93] = mr_old[115] # ID_MMFR3, + mr_new[112] = mr_old[116] # v7 ACTLR -> aarch32 ACTLR_NS + mr_new[222] = mr_old[117] # PMCR, + mr_new[230] = mr_old[118] # PMCCNTR, + mr_new[223] = mr_old[119] # PMCNTENSET, + mr_new[224] = mr_old[120] # PMCNTENCLR, + mr_new[225] = mr_old[121] # PMOVSR, + mr_new[226] = mr_old[122] # PMSWINC, + mr_new[227] = mr_old[123] # PMSELR, + mr_new[228] = mr_old[124] # PMCEID0, + mr_new[229] = mr_old[125] # PMCEID1, + mr_new[231] = mr_old[126] # PMXEVTYPER, + mr_new[233] = mr_old[127] # PMXEVCNTR, + mr_new[234] = mr_old[128] # PMUSERENR, + mr_new[235] = mr_old[129] # PMINTENSET, + mr_new[236] = mr_old[130] # PMINTENCLR, + mr_new[94] = mr_old[131] # ID_ISAR0, + mr_new[95] = mr_old[132] # ID_ISAR1, + mr_new[96] = mr_old[133] # ID_ISAR2, + mr_new[97] = mr_old[134] # ID_ISAR3, + mr_new[98] = mr_old[135] # ID_ISAR4, + mr_new[99] = mr_old[136] # ID_ISAR5, + mr_new[20] = mr_old[137] # LOCKFLAG, + mr_new[19] = mr_old[138] # LOCKADDR, + mr_new[87] = mr_old[139] # ID_PFR1, # Set up the processor features register 
pfr = int(mr_new[87]) pfr = pfr | 0x1011 mr_new[87] = str(pfr) ### - mr_new[238] = mr_old[140] # L2CTLR, - mr_new[82] = mr_old[141] # TCMTR - mr_new[88] = mr_old[142] # ID_DFR0, - mr_new[89] = mr_old[143] # ID_AFR0, - mr_new[91] = mr_old[144] # ID_MMFR1, - mr_new[102] = mr_old[145] # AIDR, - mr_new[146] = mr_old[146] # v7 ADFSR -> aarch32 ADFSR_NS, - mr_new[148] = mr_old[147] # AIFSR, - mr_new[173] = mr_old[148] # DCIMVAC, - mr_new[174] = mr_old[149] # DCISW, - mr_new[184] = mr_old[150] # MCCSW -> DCCSW, - mr_new[187] = mr_old[151] # DCCMVAU, - mr_new[117] = mr_old[152] # NSACR, - mr_new[262] = mr_old[153] # VBAR, - mr_new[265] = mr_old[154] # MVBAR, - mr_new[267] = mr_old[155] # ISR, - mr_new[269] = mr_old[156] # FCEIDR -> FCSEIDR, - #mr_new[] = mr_old[157] # L2LATENCY -> UNUSED, - #mr_new[] = mr_old[158] # CRN15 -> UNUSED, - mr_new[599] = mr_old[159] # NOP - mr_new[600] = mr_old[160] # RAZ, + mr_new[238] = mr_old[140] # L2CTLR, + mr_new[82] = mr_old[141] # TCMTR + mr_new[88] = mr_old[142] # ID_DFR0, + mr_new[89] = mr_old[143] # ID_AFR0, + mr_new[91] = mr_old[144] # ID_MMFR1, + mr_new[102] = mr_old[145] # AIDR, + mr_new[146] = mr_old[146] # v7 ADFSR -> aarch32 ADFSR_NS, + mr_new[148] = mr_old[147] # AIFSR, + mr_new[173] = mr_old[148] # DCIMVAC, + mr_new[174] = mr_old[149] # DCISW, + mr_new[184] = mr_old[150] # MCCSW -> DCCSW, + mr_new[187] = mr_old[151] # DCCMVAU, + mr_new[117] = mr_old[152] # NSACR, + mr_new[262] = mr_old[153] # VBAR, + mr_new[265] = mr_old[154] # MVBAR, + mr_new[267] = mr_old[155] # ISR, + mr_new[269] = mr_old[156] # FCEIDR -> FCSEIDR, + # mr_new[] = mr_old[157] # L2LATENCY -> UNUSED, + # mr_new[] = mr_old[158] # CRN15 -> UNUSED, + mr_new[599] = mr_old[159] # NOP + mr_new[600] = mr_old[160] # RAZ, # Set the new miscRegs structure - cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr_new)) + cpt.set(sec, "miscRegs", " ".join(str(x) for x in mr_new)) cpu_prefix = {} # Add in state for ITB/DTB for sec in cpt.sections(): - re_tlb_match = 
re.match('(^.*?sys.*?\.cpu(\d+)*)\.(dtb|itb)$', sec) + re_tlb_match = re.match("(^.*?sys.*?\.cpu(\d+)*)\.(dtb|itb)$", sec) if not re_tlb_match: continue - cpu_prefix[re_tlb_match.group(1)] = True # Save off prefix to add + cpu_prefix[re_tlb_match.group(1)] = True # Save off prefix to add # Set the non-secure bit (bit 9) to 1 for attributes - attr = int(cpt.get(sec, '_attr')) + attr = int(cpt.get(sec, "_attr")) attr = attr | 0x200 - cpt.set(sec, '_attr', str(attr)) - cpt.set(sec, 'haveLPAE', 'false') - cpt.set(sec, 'directToStage2', 'false') - cpt.set(sec, 'stage2Req', 'false') - cpt.set(sec, 'bootUncacheability', 'true') + cpt.set(sec, "_attr", str(attr)) + cpt.set(sec, "haveLPAE", "false") + cpt.set(sec, "directToStage2", "false") + cpt.set(sec, "stage2Req", "false") + cpt.set(sec, "bootUncacheability", "true") # Add in extra state for the new TLB Entries for sec in cpt.sections(): - re_tlbentry_match = re.match('(^.*?sys.*?\.cpu(\d+)*)\.(dtb|itb).TlbEntry\d+$', sec) + re_tlbentry_match = re.match( + "(^.*?sys.*?\.cpu(\d+)*)\.(dtb|itb).TlbEntry\d+$", sec + ) if not re_tlbentry_match: continue # Add in the new entries - cpt.set(sec, 'longDescFormat', 'false') - cpt.set(sec, 'vmid', '0') - cpt.set(sec, 'isHyp', 'false') - valid = cpt.get(sec, 'valid') - if valid == 'true': - cpt.set(sec, 'ns', 'true') - cpt.set(sec, 'nstid', 'true') - cpt.set(sec, 'pxn', 'true') - cpt.set(sec, 'hap', '3') + cpt.set(sec, "longDescFormat", "false") + cpt.set(sec, "vmid", "0") + cpt.set(sec, "isHyp", "false") + valid = cpt.get(sec, "valid") + if valid == "true": + cpt.set(sec, "ns", "true") + cpt.set(sec, "nstid", "true") + cpt.set(sec, "pxn", "true") + cpt.set(sec, "hap", "3") # All v7 code used 2 level page tables - cpt.set(sec, 'lookupLevel', '2') - attr = int(cpt.get(sec, 'attributes')) + cpt.set(sec, "lookupLevel", "2") + attr = int(cpt.get(sec, "attributes")) # set the non-secure bit (bit 9) to 1 # as no previous v7 code used secure code attr = attr | 0x200 - cpt.set(sec, 
'attributes', str(attr)) + cpt.set(sec, "attributes", str(attr)) else: - cpt.set(sec, 'ns', 'false') - cpt.set(sec, 'nstid', 'false') - cpt.set(sec, 'pxn', 'false') - cpt.set(sec, 'hap', '0') - cpt.set(sec, 'lookupLevel', '0') - cpt.set(sec, 'outerShareable', 'false') + cpt.set(sec, "ns", "false") + cpt.set(sec, "nstid", "false") + cpt.set(sec, "pxn", "false") + cpt.set(sec, "hap", "0") + cpt.set(sec, "lookupLevel", "0") + cpt.set(sec, "outerShareable", "false") # Add d/istage2_mmu and d/istage2_mmu.stage2_tlb for key in cpu_prefix: - for suffix in ['.istage2_mmu', '.dstage2_mmu']: + for suffix in [".istage2_mmu", ".dstage2_mmu"]: new_sec = key + suffix cpt.add_section(new_sec) new_sec = key + suffix + ".stage2_tlb" cpt.add_section(new_sec) # Fill in tlb info with some defaults - cpt.set(new_sec, '_attr', '0') - cpt.set(new_sec, 'haveLPAE', 'false') - cpt.set(new_sec, 'directToStage2', 'false') - cpt.set(new_sec, 'stage2Req', 'false') - cpt.set(new_sec, 'bootUncacheability', 'false') - cpt.set(new_sec, 'num_entries', '0') + cpt.set(new_sec, "_attr", "0") + cpt.set(new_sec, "haveLPAE", "false") + cpt.set(new_sec, "directToStage2", "false") + cpt.set(new_sec, "stage2Req", "false") + cpt.set(new_sec, "bootUncacheability", "false") + cpt.set(new_sec, "num_entries", "0") + legacy_version = 9 diff --git a/util/cpt_upgraders/cpu-pid.py b/util/cpt_upgraders/cpu-pid.py index 4daf3742a8..232b9ea491 100644 --- a/util/cpt_upgraders/cpu-pid.py +++ b/util/cpt_upgraders/cpu-pid.py @@ -1,12 +1,14 @@ def upgrader(cpt): for sec in cpt.sections(): import re + # Search for a CPUs - if re.search('.*sys.*cpu', sec): + if re.search(".*sys.*cpu", sec): try: - junk = cpt.get(sec, 'instCnt') - cpt.set(sec, '_pid', '0') + junk = cpt.get(sec, "instCnt") + cpt.set(sec, "_pid", "0") except ConfigParser.NoOptionError: pass + legacy_version = 3 diff --git a/util/cpt_upgraders/dvfs-perflevel.py b/util/cpt_upgraders/dvfs-perflevel.py index 0dc15f351c..e90f4401cc 100644 --- 
a/util/cpt_upgraders/dvfs-perflevel.py +++ b/util/cpt_upgraders/dvfs-perflevel.py @@ -3,13 +3,14 @@ def upgrader(cpt): for sec in cpt.sections(): import re - if re.match('^.*sys.*[._]clk_domain$', sec): + if re.match("^.*sys.*[._]clk_domain$", sec): # Make _perfLevel equal to 0 which means best performance - cpt.set(sec, '_perfLevel', ' '.join('0')) - elif re.match('^.*sys.*[._]voltage_domain$', sec): + cpt.set(sec, "_perfLevel", " ".join("0")) + elif re.match("^.*sys.*[._]voltage_domain$", sec): # Make _perfLevel equal to 0 which means best performance - cpt.set(sec, '_perfLevel', ' '.join('0')) + cpt.set(sec, "_perfLevel", " ".join("0")) else: continue + legacy_version = 11 diff --git a/util/cpt_upgraders/etherswitch.py b/util/cpt_upgraders/etherswitch.py index 9cd82a2d0f..e10fa3601c 100644 --- a/util/cpt_upgraders/etherswitch.py +++ b/util/cpt_upgraders/etherswitch.py @@ -3,14 +3,15 @@ def upgrader(cpt): if sec == "system": options = cpt.items(sec) for it in options: - opt_split = it[0].split('.') - if len(opt_split) < 2: continue + opt_split = it[0].split(".") + if len(opt_split) < 2: + continue new_sec_name = opt_split[1] old_opt_name = opt_split[len(opt_split) - 1] if "outputFifo" in new_sec_name: new_sec_name = new_sec_name.rstrip("outputFifo") new_sec_name += ".outputFifo" - new_sec_name = "system.system.%s" %(new_sec_name) + new_sec_name = "system.system.%s" % (new_sec_name) if not cpt.has_section(new_sec_name): cpt.add_section(new_sec_name) if old_opt_name == "size": diff --git a/util/cpt_upgraders/globals-to-root.py b/util/cpt_upgraders/globals-to-root.py index 3452def4da..73c1f0fc9f 100644 --- a/util/cpt_upgraders/globals-to-root.py +++ b/util/cpt_upgraders/globals-to-root.py @@ -1,13 +1,15 @@ # This upgrader renames section "Globals" as "root.globals". 
def upgrader(cpt): import re + for sec in cpt.sections(): - if re.match('Globals', sec): + if re.match("Globals", sec): # rename the section items = cpt.items(sec) - cpt.add_section('root.globals') + cpt.add_section("root.globals") for item in items: - cpt.set('root.globals', item[0], item[1]) + cpt.set("root.globals", item[0], item[1]) cpt.remove_section(sec) + legacy_version = 16 diff --git a/util/cpt_upgraders/ide-dma-abort.py b/util/cpt_upgraders/ide-dma-abort.py index 773e3dbe0a..86a26446d1 100644 --- a/util/cpt_upgraders/ide-dma-abort.py +++ b/util/cpt_upgraders/ide-dma-abort.py @@ -5,4 +5,5 @@ def upgrader(cpt): if cpt.has_option(sec, "curSector"): cpt.set(sec, "dmaAborted", "false") + legacy_version = 7 diff --git a/util/cpt_upgraders/isa-is-simobject.py b/util/cpt_upgraders/isa-is-simobject.py index f6aa63d35b..3f0132ce36 100644 --- a/util/cpt_upgraders/isa-is-simobject.py +++ b/util/cpt_upgraders/isa-is-simobject.py @@ -1,32 +1,66 @@ # The ISA is now a separate SimObject, which means that we serialize # it in a separate section instead of as a part of the ThreadContext. 
def upgrader(cpt): - isa = cpt.get('root', 'isa', fallback='') - if isa == '': + isa = cpt.get("root", "isa", fallback="") + if isa == "": return isa_fields = { - "arm" : ( "miscRegs" ), - "sparc" : ( "asi", "tick", "fprs", "gsr", "softint", "tick_cmpr", - "stick", "stick_cmpr", "tpc", "tnpc", "tstate", "tt", - "tba", "pstate", "tl", "pil", "cwp", "gl", "hpstate", - "htstate", "hintp", "htba", "hstick_cmpr", - "strandStatusReg", "fsr", "priContext", "secContext", - "partId", "lsuCtrlReg", "scratchPad", - "cpu_mondo_head", "cpu_mondo_tail", - "dev_mondo_head", "dev_mondo_tail", - "res_error_head", "res_error_tail", - "nres_error_head", "nres_error_tail", - "tick_intr_sched", - "cpu", "tc_num", "tick_cmp", "stick_cmp", "hstick_cmp"), - "x86" : ( "regVal" ), - } + "arm": ("miscRegs"), + "sparc": ( + "asi", + "tick", + "fprs", + "gsr", + "softint", + "tick_cmpr", + "stick", + "stick_cmpr", + "tpc", + "tnpc", + "tstate", + "tt", + "tba", + "pstate", + "tl", + "pil", + "cwp", + "gl", + "hpstate", + "htstate", + "hintp", + "htba", + "hstick_cmpr", + "strandStatusReg", + "fsr", + "priContext", + "secContext", + "partId", + "lsuCtrlReg", + "scratchPad", + "cpu_mondo_head", + "cpu_mondo_tail", + "dev_mondo_head", + "dev_mondo_tail", + "res_error_head", + "res_error_tail", + "nres_error_head", + "nres_error_tail", + "tick_intr_sched", + "cpu", + "tc_num", + "tick_cmp", + "stick_cmp", + "hstick_cmp", + ), + "x86": ("regVal"), + } isa_fields = isa_fields.get(isa, []) isa_sections = [] for sec in cpt.sections(): import re - re_cpu_match = re.match('^(.*sys.*\.cpu[^.]*)\.xc\.(.+)$', sec) + re_cpu_match = re.match("^(.*sys.*\.cpu[^.]*)\.xc\.(.+)$", sec) # Search for all the execution contexts if not re_cpu_match: continue @@ -34,8 +68,10 @@ def upgrader(cpt): if re_cpu_match.group(2) != "0": # This shouldn't happen as we didn't support checkpointing # of in-order and O3 CPUs. 
- raise ValueError("Don't know how to migrate multi-threaded CPUs " - "from version 1") + raise ValueError( + "Don't know how to migrate multi-threaded CPUs " + "from version 1" + ) isa_section = [] for fspec in isa_fields: @@ -57,10 +93,12 @@ def upgrader(cpt): cpt.add_section(sec) else: if cpt.items(sec): - raise ValueError("Unexpected populated ISA section in old " - "checkpoint") + raise ValueError( + "Unexpected populated ISA section in old " "checkpoint" + ) for (key, value) in options: cpt.set(sec, key, value) + legacy_version = 4 diff --git a/util/cpt_upgraders/memory-per-range.py b/util/cpt_upgraders/memory-per-range.py index 8ad78577c3..d75a4acf8c 100644 --- a/util/cpt_upgraders/memory-per-range.py +++ b/util/cpt_upgraders/memory-per-range.py @@ -4,29 +4,31 @@ def upgrader(cpt): for sec in cpt.sections(): import re + # Search for a physical memory - if re.search('.*sys.*\.physmem$', sec): + if re.search(".*sys.*\.physmem$", sec): # Add the number of stores attribute to the global physmem - cpt.set(sec, 'nbr_of_stores', '1') + cpt.set(sec, "nbr_of_stores", "1") # Get the filename and size as this is moving to the # specific backing store - mem_filename = cpt.get(sec, 'filename') - mem_size = cpt.get(sec, '_size') - cpt.remove_option(sec, 'filename') - cpt.remove_option(sec, '_size') + mem_filename = cpt.get(sec, "filename") + mem_size = cpt.get(sec, "_size") + cpt.remove_option(sec, "filename") + cpt.remove_option(sec, "_size") # Get the name so that we can create the new section - system_name = str(sec).split('.')[0] - section_name = system_name + '.physmem.store0' + system_name = str(sec).split(".")[0] + section_name = system_name + ".physmem.store0" cpt.add_section(section_name) - cpt.set(section_name, 'store_id', '0') - cpt.set(section_name, 'range_size', mem_size) - cpt.set(section_name, 'filename', mem_filename) - elif re.search('.*sys.*\.\w*mem$', sec): + cpt.set(section_name, "store_id", "0") + cpt.set(section_name, "range_size", mem_size) + 
cpt.set(section_name, "filename", mem_filename) + elif re.search(".*sys.*\.\w*mem$", sec): # Due to the lack of information about a start address, # this migration only works if there is a single memory in # the system, thus starting at 0 raise ValueError("more than one memory detected (" + sec + ")") + legacy_version = 2 diff --git a/util/cpt_upgraders/mempool-sections.py b/util/cpt_upgraders/mempool-sections.py index 8b0fd2a56b..dec2e02799 100644 --- a/util/cpt_upgraders/mempool-sections.py +++ b/util/cpt_upgraders/mempool-sections.py @@ -5,8 +5,8 @@ def upgrader(cpt): systems = {} for sec in cpt.sections(): - ptrs = cpt.get(sec, 'ptrs', fallback=None) - limits = cpt.get(sec, 'limits', fallback=None) + ptrs = cpt.get(sec, "ptrs", fallback=None) + limits = cpt.get(sec, "limits", fallback=None) if ptrs and limits: systems[sec] = ptrs, limits @@ -18,22 +18,23 @@ def upgrader(cpt): if len(ptrs) != len(limits): print( - f"'{sec}ptrs' and '{limits}limits' were not the same length!") + f"'{sec}ptrs' and '{limits}limits' were not the same length!" + ) - cpt.set(sec, 'num_mem_pools', str(len(ptrs))) + cpt.set(sec, "num_mem_pools", str(len(ptrs))) - cpt.remove_option(sec, 'ptrs') - cpt.remove_option(sec, 'limits') + cpt.remove_option(sec, "ptrs") + cpt.remove_option(sec, "limits") # Assume the page shift is 12, for a 4KiB page. page_shift = 12 for idx, (ptr, limit) in enumerate(zip(ptrs, limits)): - new_sec = f'{sec}.memPool{idx}' + new_sec = f"{sec}.memPool{idx}" cpt.add_section(new_sec) - cpt.set(new_sec, 'page_shift', str(page_shift)) + cpt.set(new_sec, "page_shift", str(page_shift)) # Since there's no way to tell where the pool actually started, # just assume it started wherever it is right now. 
- cpt.set(new_sec, 'start_page', str(ptr >> page_shift)) - cpt.set(new_sec, 'free_page_num', str(ptr >> page_shift)) - cpt.set(new_sec, 'total_pages', str((limit - ptr) >> page_shift)) + cpt.set(new_sec, "start_page", str(ptr >> page_shift)) + cpt.set(new_sec, "free_page_num", str(ptr >> page_shift)) + cpt.set(new_sec, "total_pages", str((limit - ptr) >> page_shift)) diff --git a/util/cpt_upgraders/mempool-to-seworkload.py b/util/cpt_upgraders/mempool-to-seworkload.py index c4ff50854a..0fc449104e 100644 --- a/util/cpt_upgraders/mempool-to-seworkload.py +++ b/util/cpt_upgraders/mempool-to-seworkload.py @@ -6,18 +6,18 @@ def upgrader(cpt): # Find sections with 'num_mem_pools' options, and assume those are system # objects which host MemPools. for sec in cpt.sections(): - num_mem_pools = cpt.get(sec, 'num_mem_pools', fallback=None) + num_mem_pools = cpt.get(sec, "num_mem_pools", fallback=None) if num_mem_pools is not None: systems[sec] = num_mem_pools for sec, num_mem_pools in systems.items(): # Transfer num_mem_pools to the new location. - cpt.remove_option(sec, 'num_mem_pools') - cpt.set(f'{sec}.workload', 'num_mem_pools', num_mem_pools) + cpt.remove_option(sec, "num_mem_pools") + cpt.set(f"{sec}.workload", "num_mem_pools", num_mem_pools) for idx in range(int(num_mem_pools)): - old_name = f'{sec}.memPool{idx}' - new_name = f'{sec}.workload.memPool{idx}' + old_name = f"{sec}.memPool{idx}" + new_name = f"{sec}.workload.memPool{idx}" # Create the new section. cpt.add_section(new_name) @@ -29,4 +29,5 @@ def upgrader(cpt): # Delete the old section. 
cpt.remove_section(old_name) -depends = 'mempool-sections' + +depends = "mempool-sections" diff --git a/util/cpt_upgraders/multiple-event-queues.py b/util/cpt_upgraders/multiple-event-queues.py index 5d542ca415..5f270cae0d 100644 --- a/util/cpt_upgraders/multiple-event-queues.py +++ b/util/cpt_upgraders/multiple-event-queues.py @@ -1,5 +1,6 @@ # Add support for multiple event queues def upgrader(cpt): - cpt.set('Globals', 'numMainEventQueues', '1') + cpt.set("Globals", "numMainEventQueues", "1") + legacy_version = 12 diff --git a/util/cpt_upgraders/process-fdmap-rename.py b/util/cpt_upgraders/process-fdmap-rename.py index 6bbbd5459a..dfd23bd5e0 100644 --- a/util/cpt_upgraders/process-fdmap-rename.py +++ b/util/cpt_upgraders/process-fdmap-rename.py @@ -7,19 +7,21 @@ def rename_section(cp, section_from, section_to): cp.set(section_to, item[0], item[1]) cp.remove_section(section_from) + # Checkpoint version F renames an internal member of Process class. def upgrader(cpt): import re + for sec in cpt.sections(): - fdm = 'FdMap' - fde = 'FDEntry' - if re.match('.*\.%s.*' % fdm, sec): + fdm = "FdMap" + fde = "FDEntry" + if re.match(".*\.%s.*" % fdm, sec): rename = re.sub(fdm, fde, sec) split = re.split(fde, rename) # rename the section and add the 'mode' field rename_section(cpt, sec, rename) - cpt.set(rename, 'mode', "0") # no proper value to set :( + cpt.set(rename, "mode", "0") # no proper value to set :( # add in entries 257 to 1023 if split[1] == "0": @@ -27,6 +29,7 @@ def upgrader(cpt): seq = (split[0], fde, "%s" % x) section = "".join(seq) cpt.add_section(section) - cpt.set(section, 'fd', '-1') + cpt.set(section, "fd", "-1") + legacy_version = 15 diff --git a/util/cpt_upgraders/register-files.py b/util/cpt_upgraders/register-files.py new file mode 100644 index 0000000000..81698b7d2d --- /dev/null +++ b/util/cpt_upgraders/register-files.py @@ -0,0 +1,81 @@ +# Rename register files to their new systematic names. 
+def upgrader(cpt): + is_arm = cpt.get("root", "isa", fallback="") == "arm" + + import re + + is_cpu = lambda sec: "intRegs" in cpt[sec] + cpu_sections = filter(is_cpu, cpt.sections()) + + for sec in cpu_sections: + items = cpt[sec] + + # Almost all registers are 64 bits, except vectors and predicate + # vectors in ARM. + regval_bits = 64 + arm_vec_bits = 2048 + + byte_bits = 8 + byte_mask = (0x1 << byte_bits) - 1 + + # If there's vecRegs, create regs.vector_element from it. + vec_regs = items.get("vecRegs") + if vec_regs is not None: + reg_vals = vec_regs.split() + if is_arm: + full_bits = arm_vec_bits + else: + full_bits = regval_bits + reg_vals = ["0"] + elem_bits = 32 + elem_mask = (0x1 << elem_bits) - 1 + + bytes = [] + for full in reg_vals: + full = int(full) + for idx in range(full_bits // elem_bits): + # Extract one element. + elem = full & elem_mask + full = full >> elem_bits + + # Treat the element as a RegVal value, even if it's + # fewer bits in the vector registers. + for chunk in range(regval_bits // byte_bits): + bytes.append(f"{elem & byte_mask}") + elem = elem >> byte_bits + + items["regs.vector_element"] = " ".join(bytes) + + name_map = { + "floatRegs.i": "regs.floating_point", + "vecRegs": "regs.vector", + "vecPredRegs": "regs.vector_predicate", + "intRegs": "regs.integer", + "ccRegs": "regs.condition_code", + } + + for old, new in name_map.items(): + if old in items: + if is_arm and old in ("vecRegs", "vecPredRegs"): + reg_bits = 2048 + else: + reg_bits = regval_bits + + reg_vals = items[old].split() + if not is_arm and old in ("vecRegs", "vecPredRegs"): + reg_vals = ["0"] + + bytes = [] + for reg in reg_vals: + reg = int(reg) + for chunk in range(reg_bits // byte_bits): + bytes.append(f"{reg & byte_mask}") + reg = reg >> byte_bits + + items[new] = " ".join(bytes) + del items[old] + + items.setdefault("regs.condition_code", "") + + +legacy_version = 16 diff --git a/util/cpt_upgraders/remove-arm-cpsr-mode-miscreg.py 
b/util/cpt_upgraders/remove-arm-cpsr-mode-miscreg.py index 73256e1e94..8eba866f1a 100644 --- a/util/cpt_upgraders/remove-arm-cpsr-mode-miscreg.py +++ b/util/cpt_upgraders/remove-arm-cpsr-mode-miscreg.py @@ -1,13 +1,15 @@ # Remove the MISCREG_CPSR_MODE register from the ARM register file def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'arm': + if cpt.get("root", "isa", fallback="") == "arm": for sec in cpt.sections(): import re + # Search for all ISA sections - if re.search('.*sys.*\.cpu.*\.isa$', sec): - mr = cpt.get(sec, 'miscRegs').split() + if re.search(".*sys.*\.cpu.*\.isa$", sec): + mr = cpt.get(sec, "miscRegs").split() # Remove MISCREG_CPSR_MODE del mr[137] - cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr)) + cpt.set(sec, "miscRegs", " ".join(str(x) for x in mr)) + legacy_version = 5 diff --git a/util/cpt_upgraders/ruby-block-size-bytes.py b/util/cpt_upgraders/ruby-block-size-bytes.py index 337bd5d035..daa4707abb 100644 --- a/util/cpt_upgraders/ruby-block-size-bytes.py +++ b/util/cpt_upgraders/ruby-block-size-bytes.py @@ -1,9 +1,10 @@ # Add block_size_bytes to system.ruby def upgrader(cpt): for sec in cpt.sections(): - if sec == 'system.ruby': + if sec == "system.ruby": # Use Gem5's default of 64; this should be changed if the to be # upgraded checkpoints were not taken with block-size 64! 
- cpt.set(sec, 'block_size_bytes', '64') + cpt.set(sec, "block_size_bytes", "64") + legacy_version = 10 diff --git a/util/cpt_upgraders/smt-interrupts.py b/util/cpt_upgraders/smt-interrupts.py index 2c7109c04a..d8366c2aa4 100644 --- a/util/cpt_upgraders/smt-interrupts.py +++ b/util/cpt_upgraders/smt-interrupts.py @@ -5,15 +5,15 @@ def upgrader(cpt): for sec in cpt.sections(): import re - re_cpu_match = re.match('^(.*sys.*\.cpu[^._]*)$', sec) + re_cpu_match = re.match("^(.*sys.*\.cpu[^._]*)$", sec) if re_cpu_match != None: - interrupts = cpt.get(sec, 'interrupts') - intStatus = cpt.get(sec, 'intStatus') + interrupts = cpt.get(sec, "interrupts") + intStatus = cpt.get(sec, "intStatus") cpu_name = re_cpu_match.group(1) - cpt.set(cpu_name + ".xc.0", 'interrupts', interrupts) - cpt.set(cpu_name + ".xc.0", 'intStatus', intStatus) + cpt.set(cpu_name + ".xc.0", "interrupts", interrupts) + cpt.set(cpu_name + ".xc.0", "intStatus", intStatus) - cpt.remove_option(sec, 'interrupts') - cpt.remove_option(sec, 'intStatus') + cpt.remove_option(sec, "interrupts") + cpt.remove_option(sec, "intStatus") diff --git a/util/cpt_upgraders/x86-add-tlb.py b/util/cpt_upgraders/x86-add-tlb.py index 0109f5d35a..5b6778bcbf 100644 --- a/util/cpt_upgraders/x86-add-tlb.py +++ b/util/cpt_upgraders/x86-add-tlb.py @@ -1,17 +1,19 @@ # Add TLB to x86 checkpoints def upgrader(cpt): - if cpt.get('root', 'isa', fallback='') == 'x86': + if cpt.get("root", "isa", fallback="") == "x86": for sec in cpt.sections(): import re - # Search for all ISA sections - if re.search('.*sys.*\.cpu.*\.dtb$', sec): - cpt.set(sec, '_size', '0') - cpt.set(sec, 'lruSeq', '0') - if re.search('.*sys.*\.cpu.*\.itb$', sec): - cpt.set(sec, '_size', '0') - cpt.set(sec, 'lruSeq', '0') + # Search for all ISA sections + if re.search(".*sys.*\.cpu.*\.dtb$", sec): + cpt.set(sec, "_size", "0") + cpt.set(sec, "lruSeq", "0") + + if re.search(".*sys.*\.cpu.*\.itb$", sec): + cpt.set(sec, "_size", "0") + cpt.set(sec, "lruSeq", "0") else: 
print("ISA is not x86") + legacy_version = 6 diff --git a/util/cscope-index.py b/util/cscope-index.py index 45b63c7341..6a2497bacf 100755 --- a/util/cscope-index.py +++ b/util/cscope-index.py @@ -32,10 +32,11 @@ import os # absolute paths to skip -skipdirs = [ 'src/unittest', 'src/doxygen' ] +skipdirs = ["src/unittest", "src/doxygen"] # suffixes of files to index -suffixes = [ '.cc', '.hh', '.c', '.h' ] +suffixes = [".cc", ".hh", ".c", ".h"] + def oksuffix(f): for s in suffixes: @@ -43,13 +44,14 @@ def oksuffix(f): return True return False -file_list = file('cscope.files', 'w') + +file_list = file("cscope.files", "w") cwd = os.getcwd() -for dirpath,subdirs,files in os.walk(os.path.join(cwd, 'src')): +for dirpath, subdirs, files in os.walk(os.path.join(cwd, "src")): # filter out undesirable subdirectories - for i,dir in enumerate(subdirs): - if dir == 'SCCS': + for i, dir in enumerate(subdirs): + if dir == "SCCS": del subdirs[i] break @@ -61,8 +63,10 @@ for dirpath,subdirs,files in os.walk(os.path.join(cwd, 'src')): # find C/C++ sources okfiles = [f for f in files if oksuffix(f)] if okfiles: - print('\n'.join([os.path.join(dirpath, f) for f in okfiles]), - file=file_list) + print( + "\n".join([os.path.join(dirpath, f) for f in okfiles]), + file=file_list, + ) file_list.close() diff --git a/util/decode_inst_dep_trace.py b/util/decode_inst_dep_trace.py index 92a6bfea8e..2a43f52d54 100755 --- a/util/decode_inst_dep_trace.py +++ b/util/decode_inst_dep_trace.py @@ -97,15 +97,24 @@ try: except: print("Did not find proto definition, attempting to generate") from subprocess import call - error = call(['protoc', '--python_out=util', '--proto_path=src/proto', - 'src/proto/inst_dep_record.proto']) + + error = call( + [ + "protoc", + "--python_out=util", + "--proto_path=src/proto", + "src/proto/inst_dep_record.proto", + ] + ) if not error: import inst_dep_record_pb2 + print("Generated proto definitions for instruction dependency record") else: print("Failed to import proto 
definitions") exit(-1) + def main(): if len(sys.argv) != 3: print("Usage: ", sys.argv[0], " ") @@ -115,7 +124,7 @@ def main(): proto_in = protolib.openFileRd(sys.argv[1]) try: - ascii_out = open(sys.argv[2], 'w') + ascii_out = open(sys.argv[2], "w") except IOError: print("Failed to open ", sys.argv[2], " for writing") exit(-1) @@ -142,7 +151,7 @@ def main(): enumNames = {} desc = inst_dep_record_pb2.InstDepRecord.DESCRIPTOR for namestr, valdesc in list(desc.enum_values_by_name.items()): - print('\t', valdesc.number, namestr) + print("\t", valdesc.number, namestr) enumNames[valdesc.number] = namestr num_packets = 0 @@ -155,52 +164,54 @@ def main(): num_packets += 1 # Write to file the seq num - ascii_out.write('%s' % (packet.seq_num)) + ascii_out.write("%s" % (packet.seq_num)) # Write to file the pc of the instruction, default is 0 - if packet.HasField('pc'): - ascii_out.write(',%s' % (packet.pc)) + if packet.HasField("pc"): + ascii_out.write(",%s" % (packet.pc)) else: - ascii_out.write(',0') + ascii_out.write(",0") # Write to file the weight, default is 1 - if packet.HasField('weight'): - ascii_out.write(',%s' % (packet.weight)) + if packet.HasField("weight"): + ascii_out.write(",%s" % (packet.weight)) else: - ascii_out.write(',1') + ascii_out.write(",1") # Write to file the type of the record try: - ascii_out.write(',%s' % enumNames[packet.type]) + ascii_out.write(",%s" % enumNames[packet.type]) except KeyError: - print("Seq. num", packet.seq_num, "has unsupported type", \ - packet.type) + print( + "Seq. 
num", packet.seq_num, "has unsupported type", packet.type + ) exit(-1) - # Write to file if it has the optional fields physical addr, size, # flags - if packet.HasField('p_addr'): - ascii_out.write(',%s' % (packet.p_addr)) - if packet.HasField('size'): - ascii_out.write(',%s' % (packet.size)) - if packet.HasField('flags'): - ascii_out.write(',%s' % (packet.flags)) + if packet.HasField("p_addr"): + ascii_out.write(",%s" % (packet.p_addr)) + if packet.HasField("size"): + ascii_out.write(",%s" % (packet.size)) + if packet.HasField("flags"): + ascii_out.write(",%s" % (packet.flags)) # Write to file the comp delay - ascii_out.write(',%s' % (packet.comp_delay)) + ascii_out.write(",%s" % (packet.comp_delay)) # Write to file the repeated field order dependency - ascii_out.write(':') + ascii_out.write(":") if packet.rob_dep: num_robdeps += 1 for dep in packet.rob_dep: - ascii_out.write(',%s' % dep) + ascii_out.write(",%s" % dep) # Write to file the repeated field register dependency - ascii_out.write(':') + ascii_out.write(":") if packet.reg_dep: - num_regdeps += 1 # No. of packets with atleast 1 register dependency + num_regdeps += ( + 1 # No. 
of packets with atleast 1 register dependency + ) for dep in packet.reg_dep: - ascii_out.write(',%s' % dep) + ascii_out.write(",%s" % dep) # New line - ascii_out.write('\n') + ascii_out.write("\n") print("Parsed packets:", num_packets) print("Packets with at least 1 reg dep:", num_regdeps) @@ -210,5 +221,6 @@ def main(): ascii_out.close() proto_in.close() + if __name__ == "__main__": main() diff --git a/util/decode_inst_trace.py b/util/decode_inst_trace.py index 1334d69f36..8e59f6955d 100755 --- a/util/decode_inst_trace.py +++ b/util/decode_inst_trace.py @@ -51,8 +51,15 @@ try: except: print("Did not find protobuf inst definitions, attempting to generate") from subprocess import call - error = call(['protoc', '--python_out=util', '--proto_path=src/proto', - 'src/proto/inst.proto']) + + error = call( + [ + "protoc", + "--python_out=util", + "--proto_path=src/proto", + "src/proto/inst.proto", + ] + ) if not error: print("Generated inst proto definitions") @@ -67,6 +74,7 @@ except: print("Failed to import inst proto definitions") exit(-1) + def main(): if len(sys.argv) != 3: print("Usage: ", sys.argv[0], " ") @@ -76,7 +84,7 @@ def main(): proto_in = protolib.openFileRd(sys.argv[1]) try: - ascii_out = open(sys.argv[2], 'w') + ascii_out = open(sys.argv[2], "w") except IOError: print("Failed to open ", sys.argv[2], " for writing") exit(-1) @@ -102,40 +110,53 @@ def main(): print("Warning: file version newer than decoder:", header.ver) print("This decoder may not understand how to decode this file") - print("Parsing instructions") num_insts = 0 inst = inst_pb2.Inst() # Decode the inst messages until we hit the end of the file - optional_fields = ('tick', 'type', 'inst_flags', 'addr', 'size', 'mem_flags') - while protolib.decodeMessage(proto_in, inst): + optional_fields = ( + "tick", + "type", + "inst_flags", + "addr", + "size", + "mem_flags", + ) + while protolib.decodeMessage(proto_in, inst): # If we have a tick use it, otherwise count instructions - if 
inst.HasField('tick'): + if inst.HasField("tick"): tick = inst.tick else: tick = num_insts - if inst.HasField('nodeid'): + if inst.HasField("nodeid"): node_id = inst.nodeid else: - node_id = 0; - if inst.HasField('cpuid'): + node_id = 0 + if inst.HasField("cpuid"): cpu_id = inst.cpuid else: - cpu_id = 0; + cpu_id = 0 - ascii_out.write('%-20d: (%03d/%03d) %#010x @ %#016x ' % (tick, node_id, cpu_id, - inst.inst, inst.pc)) + ascii_out.write( + "%-20d: (%03d/%03d) %#010x @ %#016x " + % (tick, node_id, cpu_id, inst.inst, inst.pc) + ) - if inst.HasField('type'): - ascii_out.write(' : %10s' % inst_pb2._INST_INSTTYPE.values_by_number[inst.type].name) + if inst.HasField("type"): + ascii_out.write( + " : %10s" + % inst_pb2._INST_INSTTYPE.values_by_number[inst.type].name + ) for mem_acc in inst.mem_access: - ascii_out.write(" %#x-%#x;" % (mem_acc.addr, mem_acc.addr + mem_acc.size)) + ascii_out.write( + " %#x-%#x;" % (mem_acc.addr, mem_acc.addr + mem_acc.size) + ) - ascii_out.write('\n') + ascii_out.write("\n") num_insts += 1 print("Parsed instructions:", num_insts) @@ -144,5 +165,6 @@ def main(): ascii_out.close() proto_in.close() + if __name__ == "__main__": main() diff --git a/util/decode_packet_trace.py b/util/decode_packet_trace.py index 21d7f9a157..798a824ecb 100755 --- a/util/decode_packet_trace.py +++ b/util/decode_packet_trace.py @@ -45,9 +45,10 @@ import sys util_dir = os.path.dirname(os.path.realpath(__file__)) # Make sure the proto definitions are up to date. 
-subprocess.check_call(['make', '--quiet', '-C', util_dir, 'packet_pb2.py']) +subprocess.check_call(["make", "--quiet", "-C", util_dir, "packet_pb2.py"]) import packet_pb2 + def main(): if len(sys.argv) != 3: print("Usage: ", sys.argv[0], " ") @@ -57,7 +58,7 @@ def main(): proto_in = protolib.openFileRd(sys.argv[1]) try: - ascii_out = open(sys.argv[2], 'w') + ascii_out = open(sys.argv[2], "w") except IOError: print("Failed to open ", sys.argv[2], " for writing") exit(-1) @@ -79,7 +80,7 @@ def main(): print("Tick frequency:", header.tick_freq) for id_string in header.id_strings: - print('Master id %d: %s' % (id_string.key, id_string.value)) + print("Master id %d: %s" % (id_string.key, id_string.value)) print("Parsing packets") @@ -90,19 +91,22 @@ def main(): while protolib.decodeMessage(proto_in, packet): num_packets += 1 # ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum - cmd = 'r' if packet.cmd == 1 else ('w' if packet.cmd == 4 else 'u') - if packet.HasField('pkt_id'): - ascii_out.write('%s,' % (packet.pkt_id)) - if packet.HasField('flags'): - ascii_out.write('%s,%s,%s,%s,%s' % (cmd, packet.addr, packet.size, - packet.flags, packet.tick)) + cmd = "r" if packet.cmd == 1 else ("w" if packet.cmd == 4 else "u") + if packet.HasField("pkt_id"): + ascii_out.write("%s," % (packet.pkt_id)) + if packet.HasField("flags"): + ascii_out.write( + "%s,%s,%s,%s,%s" + % (cmd, packet.addr, packet.size, packet.flags, packet.tick) + ) else: - ascii_out.write('%s,%s,%s,%s' % (cmd, packet.addr, packet.size, - packet.tick)) - if packet.HasField('pc'): - ascii_out.write(',%s\n' % (packet.pc)) + ascii_out.write( + "%s,%s,%s,%s" % (cmd, packet.addr, packet.size, packet.tick) + ) + if packet.HasField("pc"): + ascii_out.write(",%s\n" % (packet.pc)) else: - ascii_out.write('\n') + ascii_out.write("\n") print("Parsed packets:", num_packets) @@ -110,5 +114,6 @@ def main(): ascii_out.close() proto_in.close() + if __name__ == "__main__": main() diff --git a/util/dist/gem5-dist.sh 
b/util/dist/gem5-dist.sh index 2969b349d9..c4649751b7 100755 --- a/util/dist/gem5-dist.sh +++ b/util/dist/gem5-dist.sh @@ -1,7 +1,7 @@ #! /bin/bash # -# Copyright (c) 2015 ARM Limited +# Copyright (c) 2015, 2022 Arm Limited # All rights reserved # # The license below extends only to copyright in the software and shall @@ -319,12 +319,18 @@ SW_PID=$! # block here till switch process starts connected $RUN_DIR/log.switch "tcp_iface listening on port" "switch" $SW_PID -LINE=$(grep -r "tcp_iface listening on port" $RUN_DIR/log.switch) -IFS=' ' read -ra ADDR <<< "$LINE" # actual port that switch is listening on may be different # from what we specified if the port was busy -SW_PORT=${ADDR[5]} +PORT_REGEX="tcp_iface listening on port ([0-9]+)" +SW_FILE=$(cat $RUN_DIR/log.switch) + +if [[ $SW_FILE =~ $PORT_REGEX ]]; then + SW_PORT="${BASH_REMATCH[1]}" +else + echo "Unable to find port info from $RUN_DIR/log.switch" + abort_func +fi # Now launch all the gem5 processes with ssh. echo "START $(date)" diff --git a/util/dist/test/test-2nodes-AArch64.sh b/util/dist/test/test-2nodes-AArch64.sh old mode 100644 new mode 100755 index cafcf1cdec..79d03c279b --- a/util/dist/test/test-2nodes-AArch64.sh +++ b/util/dist/test/test-2nodes-AArch64.sh @@ -1,7 +1,7 @@ #! /bin/bash # -# Copyright (c) 2015 ARM Limited +# Copyright (c) 2015, 2022 Arm Limited # All rights reserved # # The license below extends only to copyright in the software and shall @@ -44,11 +44,10 @@ GEM5_DIR=$(pwd)/$(dirname $0)/../../.. 
-IMG=$M5_PATH/disks/aarch64-ubuntu-trusty-headless.img -VMLINUX=$M5_PATH/binaries/vmlinux.aarch64.20140821 -DTB=$M5_PATH/binaries/vexpress.aarch64.20140821.dtb +IMG=$M5_PATH/disks/ubuntu-18.04-arm64-docker.img +VMLINUX=$M5_PATH/binaries/vmlinux.arm64 -FS_CONFIG=$GEM5_DIR/configs/example/fs.py +FS_CONFIG=$GEM5_DIR/configs/example/arm/dist_bigLITTLE.py SW_CONFIG=$GEM5_DIR/configs/dist/sw.py GEM5_EXE=$GEM5_DIR/build/ARM/gem5.opt @@ -60,20 +59,19 @@ DEBUG_FLAGS="--debug-flags=DistEthernet" NNODES=2 -$GEM5_DIST_SH -n $NNODES \ - -x $GEM5_EXE \ - -s $SW_CONFIG \ - -f $FS_CONFIG \ - --m5-args \ - $DEBUG_FLAGS \ - --fs-args \ - --cpu-type=atomic \ - --num-cpus=1 \ - --machine-type=VExpress_EMM64 \ - --disk-image=$IMG \ - --kernel=$VMLINUX \ - --dtb-filename=$DTB \ - --script=$BOOT_SCRIPT \ - --cf-args \ +$GEM5_DIST_SH -n $NNODES \ + -x $GEM5_EXE \ + -s $SW_CONFIG \ + -f $FS_CONFIG \ + --m5-args \ + $DEBUG_FLAGS \ + --fs-args \ + --cpu-type=atomic \ + --little-cpus=1 \ + --big-cpus=1 \ + --machine-type=VExpress_GEM5_Foundation \ + --disk=$IMG \ + --kernel=$VMLINUX \ + --bootscript=$BOOT_SCRIPT \ + --cf-args \ $CHKPT_RESTORE - diff --git a/util/dockerfiles/docker-compose.yaml b/util/dockerfiles/docker-compose.yaml index 103c221dc0..004052283a 100644 --- a/util/dockerfiles/docker-compose.yaml +++ b/util/dockerfiles/docker-compose.yaml @@ -5,123 +5,154 @@ services: build: context: gcn-gpu dockerfile: Dockerfile - image: gcr.io/gem5-test/gcn-gpu + image: gcr.io/gem5-test/gcn-gpu:v22-1 gpu-fs: build: context: gpu-fs dockerfile: Dockerfile - image: gcr.io/gem5-test/gpu-fs + image: gcr.io/gem5-test/gpu-fs:v22-1 sst: build: context: sst-11.1.0 dockerfile: Dockerfile - image: gcr.io/gem5-test/sst-env + image: gcr.io/gem5-test/sst-env:v22-1 systemc: build: context: systemc-2.3.3 dockerfile: Dockerfile - image: gcr.io/gem5-test/systemc-env + image: gcr.io/gem5-test/systemc-env:v22-1 ubuntu-18.04_all-dependencies: build: context: ubuntu-18.04_all-dependencies dockerfile: Dockerfile - 
image: gcr.io/gem5-test/ubuntu-18.04_all-dependencies + image: gcr.io/gem5-test/ubuntu-18.04_all-dependencies:v22-1 ubuntu-20.04_all-dependencies: build: context: ubuntu-20.04_all-dependencies dockerfile: Dockerfile - image: gcr.io/gem5-test/ubuntu-20.04_all-dependencies - ubuntu-20.04_min-dependencies: + image: gcr.io/gem5-test/ubuntu-20.04_all-dependencies:v22-1 + ubuntu-22.04_all-dependencies: build: - context: ubuntu-20.04_min-dependencies + context: ubuntu-22.04_all-dependencies dockerfile: Dockerfile - image: gcr.io/gem5-test/ubuntu-20.04_min-dependencies + image: gcr.io/gem5-test/ubuntu-22.04_all-dependencies:v22-1 + ubuntu-22.04_min-dependencies: + build: + context: ubuntu-22.04_min-dependencies + dockerfile: Dockerfile + image: gcr.io/gem5-test/ubuntu-22.04_min-dependencies:v22-1 gcc-7: build: context: ubuntu-18.04_gcc-version dockerfile: Dockerfile args: - version=7 - image: gcr.io/gem5-test/gcc-version-7 + image: gcr.io/gem5-test/gcc-version-7:v22-1 gcc-8: build: context: ubuntu-18.04_gcc-version dockerfile: Dockerfile args: - version=8 - image: gcr.io/gem5-test/gcc-version-8 + image: gcr.io/gem5-test/gcc-version-8:v22-1 gcc-9: build: context: ubuntu-20.04_gcc-version dockerfile: Dockerfile args: - version=9 - image: gcr.io/gem5-test/gcc-version-9 + image: gcr.io/gem5-test/gcc-version-9:v22-1 gcc-10: build: context: ubuntu-20.04_gcc-version dockerfile: Dockerfile args: - version=10 - image: gcr.io/gem5-test/gcc-version-10 + image: gcr.io/gem5-test/gcc-version-10:v22-1 gcc-11: build: - context: ubuntu-20.04_gcc-version-11 + context: ubuntu-22.04_gcc-version dockerfile: Dockerfile args: - version=11 - image: gcr.io/gem5-test/gcc-version-11 + image: gcr.io/gem5-test/gcc-version-11:v22-1 + gcc-12: + build: + context: ubuntu-22.04_gcc-version + dockerfile: Dockerfile + args: + - version=12 + image: gcr.io/gem5-test/gcc-version-12:v22-1 clang-6: build: context: ubuntu-18.04_clang-version dockerfile: Dockerfile args: - version=6.0 - image: 
gcr.io/gem5-test/clang-version-6.0 + image: gcr.io/gem5-test/clang-version-6.0:v22-1 clang-7: build: context: ubuntu-18.04_clang-version dockerfile: Dockerfile args: - version=7 - image: gcr.io/gem5-test/clang-version-7 + image: gcr.io/gem5-test/clang-version-7:v22-1 clang-8: build: context: ubuntu-18.04_clang-version dockerfile: Dockerfile args: - version=8 - image: gcr.io/gem5-test/clang-version-8 + image: gcr.io/gem5-test/clang-version-8:v22-1 clang-9: build: context: ubuntu-18.04_clang-version dockerfile: Dockerfile args: - version=9 - image: gcr.io/gem5-test/clang-version-9 + image: gcr.io/gem5-test/clang-version-9:v22-1 clang-10: build: context: ubuntu-20.04_clang-version dockerfile: Dockerfile args: - version=10 - image: gcr.io/gem5-test/clang-version-10 + image: gcr.io/gem5-test/clang-version-10:v22-1 clang-11: build: context: ubuntu-20.04_clang-version dockerfile: Dockerfile args: - version=11 - image: gcr.io/gem5-test/clang-version-11 + image: gcr.io/gem5-test/clang-version-11:v22-1 clang-12: build: context: ubuntu-20.04_clang-version dockerfile: Dockerfile args: - version=12 - image: gcr.io/gem5-test/clang-version-12 + image: gcr.io/gem5-test/clang-version-12:v22-1 + clang-13: + build: + context: ubuntu-22.04_clang-version + dockerfile: Dockerfile + args: + - version=13 + image: gcr.io/gem5-test/clang-version-13:v22-1 + clang-14: + build: + context: ubuntu-22.04_clang-version + dockerfile: Dockerfile + args: + - version=14 + image: gcr.io/gem5-test/clang-version-14:v22-1 llvm-gnu-cross-compiler-riscv64: build: context: llvm-gnu-cross-compiler-riscv64 dockerfile: Dockerfile - image: gcr.io/gem5-test/llvm-gnu-cross-compiler-riscv64 + image: gcr.io/gem5-test/llvm-gnu-cross-compiler-riscv64:v22-1 + gem5-all-min-dependencies: + build: + context: gem5-all-min-dependencies + dockerfile: Dockerfile + image: gcr.io/gem5-test/gem5-all-min-dependencies:v22-1 diff --git a/util/dockerfiles/gcn-gpu/Dockerfile b/util/dockerfiles/gcn-gpu/Dockerfile index 
be5851402d..dfff455079 100644 --- a/util/dockerfiles/gcn-gpu/Dockerfile +++ b/util/dockerfiles/gcn-gpu/Dockerfile @@ -69,7 +69,7 @@ RUN git clone -b rocm-4.0.0 \ WORKDIR /ROCclr # The patch allows us to avoid building blit kernels on-the-fly in gem5 -RUN wget -q -O - dist.gem5.org/dist/v22-0/rocm_patches/ROCclr.patch | git apply -v +RUN wget -q -O - dist.gem5.org/dist/v22-1/rocm_patches/ROCclr.patch | git apply -v WORKDIR /ROCclr/build RUN cmake -DOPENCL_DIR="/ROCm-OpenCL-Runtime" \ diff --git a/util/dockerfiles/gem5-all-min-dependencies/Dockerfile b/util/dockerfiles/gem5-all-min-dependencies/Dockerfile new file mode 100644 index 0000000000..da5613e1a1 --- /dev/null +++ b/util/dockerfiles/gem5-all-min-dependencies/Dockerfile @@ -0,0 +1,36 @@ +# Copyright (c) 2022 The Regents of the University of California +# All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +FROM gcr.io/gem5-test/ubuntu-22.04_min-dependencies:latest as source +RUN apt -y update && apt -y install git +RUN git clone -b develop https://gem5.googlesource.com/public/gem5 /gem5 +WORKDIR /gem5 +RUN scons -j`nproc` build/ALL/gem5.fast + +FROM gcr.io/gem5-test/ubuntu-22.04_min-dependencies:latest +COPY --from=source /gem5/build/ALL/gem5.fast /usr/local/bin/gem5 + +ENTRYPOINT [ "/usr/local/bin/gem5" ] diff --git a/util/dockerfiles/llvm-gnu-cross-compiler-riscv64/Dockerfile b/util/dockerfiles/llvm-gnu-cross-compiler-riscv64/Dockerfile index df7a58cd97..0f01e7931d 100644 --- a/util/dockerfiles/llvm-gnu-cross-compiler-riscv64/Dockerfile +++ b/util/dockerfiles/llvm-gnu-cross-compiler-riscv64/Dockerfile @@ -70,4 +70,3 @@ FROM stage1 RUN mkdir -p /riscv/ COPY --from=stage2 /riscv/_install/ /riscv/_install ENV PATH=/riscv/_install/bin:$PATH - diff --git a/util/dockerfiles/sst-11.1.0/Dockerfile b/util/dockerfiles/sst-11.1.0/Dockerfile index c9853746c7..970e6979b4 100644 --- a/util/dockerfiles/sst-11.1.0/Dockerfile +++ b/util/dockerfiles/sst-11.1.0/Dockerfile @@ -62,4 +62,3 @@ RUN ./configure --prefix=$SST_CORE_HOME --with-python=/usr/bin/python3-config \ # Setting the environmental variables ENV PATH=$PATH:$SST_CORE_HOME/bin ENV PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$SST_CORE_HOME/lib/pkgconfig/ - diff --git a/util/dockerfiles/ubuntu-18.04_all-dependencies/Dockerfile b/util/dockerfiles/ubuntu-18.04_all-dependencies/Dockerfile index 
a05e0fefd5..629fc5d614 100644 --- a/util/dockerfiles/ubuntu-18.04_all-dependencies/Dockerfile +++ b/util/dockerfiles/ubuntu-18.04_all-dependencies/Dockerfile @@ -33,7 +33,7 @@ RUN apt -y update && apt -y upgrade && \ libhdf5-serial-dev python3-pydot libpng-dev libelf-dev pkg-config \ python3-pip python3-venv -RUN pip3 install mypy +RUN pip3 install black mypy pre-commit RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10 RUN update-alternatives --install /usr/bin/python python /usr/bin/python2 1 diff --git a/util/dockerfiles/ubuntu-20.04_all-dependencies/Dockerfile b/util/dockerfiles/ubuntu-20.04_all-dependencies/Dockerfile index 27a63822a1..c838a06dda 100644 --- a/util/dockerfiles/ubuntu-20.04_all-dependencies/Dockerfile +++ b/util/dockerfiles/ubuntu-20.04_all-dependencies/Dockerfile @@ -32,6 +32,6 @@ RUN apt -y update && apt -y upgrade && \ libprotobuf-dev protobuf-compiler libprotoc-dev libgoogle-perftools-dev \ python3-dev python-is-python3 doxygen libboost-all-dev \ libhdf5-serial-dev python3-pydot libpng-dev libelf-dev pkg-config pip \ - python3-venv + python3-venv black -RUN pip install mypy +RUN pip install mypy pre-commit diff --git a/util/dockerfiles/ubuntu-22.04_all-dependencies/Dockerfile b/util/dockerfiles/ubuntu-22.04_all-dependencies/Dockerfile new file mode 100644 index 0000000000..e5afc63be6 --- /dev/null +++ b/util/dockerfiles/ubuntu-22.04_all-dependencies/Dockerfile @@ -0,0 +1,36 @@ +# Copyright (c) 2022 The Regents of the University of California +# All Rights Reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +FROM ubuntu:22.04 + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt -y update && apt -y upgrade && \ + apt -y install build-essential git m4 scons zlib1g zlib1g-dev \ + libprotobuf-dev protobuf-compiler libprotoc-dev libgoogle-perftools-dev \ + python3-dev doxygen libboost-all-dev libhdf5-serial-dev python3-pydot \ + libpng-dev libelf-dev pkg-config pip python3-venv black + +RUN pip install mypy pre-commit diff --git a/util/dockerfiles/ubuntu-20.04_gcc-version-11/Dockerfile b/util/dockerfiles/ubuntu-22.04_clang-version/Dockerfile similarity index 73% rename from util/dockerfiles/ubuntu-20.04_gcc-version-11/Dockerfile rename to util/dockerfiles/ubuntu-22.04_clang-version/Dockerfile index f01479d526..148b71dea3 100644 --- a/util/dockerfiles/ubuntu-20.04_gcc-version-11/Dockerfile +++ b/util/dockerfiles/ubuntu-22.04_clang-version/Dockerfile @@ -1,4 +1,4 @@ -# Copyright (c) 2021 The Regents of the University of California +# Copyright (c) 2022 The Regents of the University of California # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without @@ -23,29 +23,26 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -FROM ubuntu:20.04 +FROM ubuntu:22.04 -# At the time of this Dockerfile's creation, Ubuntu 20.04 APT does not -# distribute gcc-11 by default. A special APT repository is needed. We hope -# this Dockerfile will merge with ubuntu-20.04_gcc-version once GCC-11 can be -# installed via APT more easily. 
+# Valid version values: +# 13 +ARG version ENV DEBIAN_FRONTEND=noninteractive RUN apt -y update && apt -y upgrade && \ apt -y install git m4 scons zlib1g zlib1g-dev libprotobuf-dev \ protobuf-compiler libprotoc-dev libgoogle-perftools-dev python3-dev \ python-is-python3 doxygen libboost-all-dev libhdf5-serial-dev \ - python3-pydot libpng-dev make software-properties-common + python3-pydot libpng-dev clang-${version} make -RUN add-apt-repository \ - 'deb http://mirrors.kernel.org/ubuntu hirsute main universe' -RUN apt -y install gcc-11 g++-11 +RUN apt-get --purge -y remove gcc RUN update-alternatives --install \ - /usr/bin/g++ g++ /usr/bin/g++-11 100 + /usr/bin/clang++ clang++ /usr/bin/clang++-${version} 100 RUN update-alternatives --install \ - /usr/bin/gcc gcc /usr/bin/gcc-11 100 + /usr/bin/clang clang /usr/bin/clang-${version} 100 RUN update-alternatives --install \ - /usr/bin/c++ c++ /usr/bin/g++-11 100 + /usr/bin/c++ c++ /usr/bin/clang++-${version} 100 RUN update-alternatives --install \ - /usr/bin/cc cc /usr/bin/gcc-11 100 + /usr/bin/cc cc /usr/bin/clang-${version} 100 diff --git a/util/dockerfiles/ubuntu-22.04_gcc-version/Dockerfile b/util/dockerfiles/ubuntu-22.04_gcc-version/Dockerfile new file mode 100644 index 0000000000..fcf909cec2 --- /dev/null +++ b/util/dockerfiles/ubuntu-22.04_gcc-version/Dockerfile @@ -0,0 +1,48 @@ +# Copyright (c) 2022 The Regents of the University of California +# All Rights Reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+FROM ubuntu:22.04 + +# Valid version values: +# 11 +# 12 + +ARG version + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt -y update && apt -y upgrade && \ + apt -y install git m4 scons zlib1g zlib1g-dev libprotobuf-dev \ + protobuf-compiler libprotoc-dev libgoogle-perftools-dev python3-dev \ + doxygen libboost-all-dev libhdf5-serial-dev python3-pydot libpng-dev \ + gcc-${version} g++-${version} make + +RUN update-alternatives --install \ + /usr/bin/g++ g++ /usr/bin/g++-${version} 100 +RUN update-alternatives --install \ + /usr/bin/gcc gcc /usr/bin/gcc-${version} 100 +RUN update-alternatives --install \ + /usr/bin/c++ c++ /usr/bin/g++-${version} 100 +RUN update-alternatives --install \ + /usr/bin/cc cc /usr/bin/gcc-${version} 100 diff --git a/util/dockerfiles/ubuntu-20.04_min-dependencies/Dockerfile b/util/dockerfiles/ubuntu-22.04_min-dependencies/Dockerfile similarity index 90% rename from util/dockerfiles/ubuntu-20.04_min-dependencies/Dockerfile rename to util/dockerfiles/ubuntu-22.04_min-dependencies/Dockerfile index 4b65146803..978e2c6af5 100644 --- a/util/dockerfiles/ubuntu-20.04_min-dependencies/Dockerfile +++ b/util/dockerfiles/ubuntu-22.04_min-dependencies/Dockerfile @@ -1,4 +1,4 @@ -# Copyright (c) 2020 The Regents of the University of California +# Copyright (c) 2022 The Regents of the University of California # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without @@ -24,8 +24,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-FROM ubuntu:20.04 +FROM ubuntu:22.04 ENV DEBIAN_FRONTEND=noninteractive RUN apt -y update && apt -y upgrade && \ - apt -y install build-essential m4 scons python3-dev python-is-python3 + apt -y install build-essential m4 scons python3-dev diff --git a/util/encode_inst_dep_trace.py b/util/encode_inst_dep_trace.py index 8c37ffbc24..9ab95bd7ed 100755 --- a/util/encode_inst_dep_trace.py +++ b/util/encode_inst_dep_trace.py @@ -97,10 +97,18 @@ try: except: print("Did not find proto definition, attempting to generate") from subprocess import call - error = call(['protoc', '--python_out=util', '--proto_path=src/proto', - 'src/proto/inst_dep_record.proto']) + + error = call( + [ + "protoc", + "--python_out=util", + "--proto_path=src/proto", + "src/proto/inst_dep_record.proto", + ] + ) if not error: import inst_dep_record_pb2 + print("Generated proto definitions for instruction dependency record") else: print("Failed to import proto definitions") @@ -108,17 +116,18 @@ except: DepRecord = inst_dep_record_pb2.InstDepRecord + def main(): if len(sys.argv) != 3: print("Usage: ", sys.argv[0], " ") exit(-1) # Open the file in write mode - proto_out = open(sys.argv[2], 'wb') + proto_out = open(sys.argv[2], "wb") # Open the file in read mode try: - ascii_in = open(sys.argv[1], 'r') + ascii_in = open(sys.argv[1], "r") except IOError: print("Failed to open ", sys.argv[1], " for reading") exit(-1) @@ -138,15 +147,15 @@ def main(): print("Creating enum name,value lookup from proto") enumValues = {} for namestr, valdesc in DepRecord.DESCRIPTOR.enum_values_by_name.items(): - print('\t', namestr, valdesc.number) + print("\t", namestr, valdesc.number) enumValues[namestr] = valdesc.number num_records = 0 # For each line in the ASCII trace, create a packet message and # write it to the encoded output for line in ascii_in: - inst_info_str, rob_dep_str, reg_dep_str = (line.strip()).split(':') - inst_info_list = inst_info_str.split(',') + inst_info_str, rob_dep_str, reg_dep_str = 
(line.strip()).split(":") + inst_info_list = inst_info_str.split(",") dep_record = DepRecord() dep_record.seq_num = int(inst_info_list[0]) @@ -156,8 +165,12 @@ def main(): try: dep_record.type = enumValues[inst_info_list[3]] except KeyError: - print("Seq. num", dep_record.seq_num, "has unsupported type", \ - inst_info_list[3]) + print( + "Seq. num", + dep_record.seq_num, + "has unsupported type", + inst_info_list[3], + ) exit(-1) if dep_record.type == DepRecord.INVALID: @@ -178,7 +191,7 @@ def main(): # Parse the register and order dependencies both of which are # repeated fields. An empty list is valid. - rob_deps = rob_dep_str.strip().split(',') + rob_deps = rob_dep_str.strip().split(",") for a_dep in rob_deps: # if the string is empty, split(',') returns 1 item: '' # if the string is ",4", split(',') returns 2 items: '', '4' @@ -186,7 +199,7 @@ def main(): if a_dep: dep_record.rob_dep.append(int(a_dep)) - reg_deps = reg_dep_str.split(',') + reg_deps = reg_dep_str.split(",") for a_dep in reg_deps: if a_dep: dep_record.reg_dep.append(int(a_dep)) @@ -199,5 +212,6 @@ def main(): ascii_in.close() proto_out.close() + if __name__ == "__main__": main() diff --git a/util/encode_packet_trace.py b/util/encode_packet_trace.py index 52908aac11..bdf1c3db06 100755 --- a/util/encode_packet_trace.py +++ b/util/encode_packet_trace.py @@ -62,8 +62,15 @@ try: except: print("Did not find packet proto definitions, attempting to generate") from subprocess import call - error = call(['protoc', '--python_out=util', '--proto_path=src/proto', - 'src/proto/packet.proto']) + + error = call( + [ + "protoc", + "--python_out=util", + "--proto_path=src/proto", + "src/proto/packet.proto", + ] + ) if not error: print("Generated packet proto definitions") @@ -78,19 +85,20 @@ except: print("Failed to import packet proto definitions") exit(-1) + def main(): if len(sys.argv) != 3: print("Usage: ", sys.argv[0], " ") exit(-1) try: - ascii_in = open(sys.argv[1], 'r') + ascii_in = open(sys.argv[1], "r") 
except IOError: print("Failed to open ", sys.argv[1], " for reading") exit(-1) try: - proto_out = open(sys.argv[2], 'wb') + proto_out = open(sys.argv[2], "wb") except IOError: print("Failed to open ", sys.argv[2], " for writing") exit(-1) @@ -109,11 +117,11 @@ def main(): # For each line in the ASCII trace, create a packet message and # write it to the encoded output for line in ascii_in: - cmd, addr, size, tick = line.split(',') + cmd, addr, size, tick = line.split(",") packet = packet_pb2.Packet() packet.tick = int(tick) # ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum - packet.cmd = 1 if cmd == 'r' else 4 + packet.cmd = 1 if cmd == "r" else 4 packet.addr = int(addr) packet.size = int(size) protolib.encodeMessage(proto_out, packet) @@ -122,5 +130,6 @@ def main(): ascii_in.close() proto_out.close() + if __name__ == "__main__": main() diff --git a/util/find_copyrights.py b/util/find_copyrights.py index 440ddf4c00..6fbb10cb61 100644 --- a/util/find_copyrights.py +++ b/util/find_copyrights.py @@ -6,17 +6,19 @@ import sys from file_types import lang_type, find_files -mode_line = re.compile('(-\*- *mode:.* *-\*-)') -shell_comment = re.compile(r'^\s*#') -lisp_comment = re.compile(r';') -cpp_comment = re.compile(r'//') -c_comment_start = re.compile(r'/\*') -c_comment_end = re.compile(r'\*/') +mode_line = re.compile("(-\*- *mode:.* *-\*-)") +shell_comment = re.compile(r"^\s*#") +lisp_comment = re.compile(r";") +cpp_comment = re.compile(r"//") +c_comment_start = re.compile(r"/\*") +c_comment_end = re.compile(r"\*/") + + def find_copyright_block(lines, lang_type): start = None - if lang_type in ('python', 'make', 'shell', 'perl', 'scons'): - for i,line in enumerate(lines): - if i == 0 and (line.startswith('#!') or mode_line.search(line)): + if lang_type in ("python", "make", "shell", "perl", "scons"): + for i, line in enumerate(lines): + if i == 0 and (line.startswith("#!") or mode_line.search(line)): continue if shell_comment.search(line): @@ -26,11 
+28,11 @@ def find_copyright_block(lines, lang_type): if line.strip(): return else: - yield start, i-1 + yield start, i - 1 start = None - elif lang_type in ('lisp', ): - for i,line in enumerate(lines): + elif lang_type in ("lisp",): + for i, line in enumerate(lines): if i == 0 and mode_line.search(line): continue @@ -41,18 +43,26 @@ def find_copyright_block(lines, lang_type): if line.strip(): return else: - yield start, i-1 + yield start, i - 1 start = None - elif lang_type in ('C', 'C++', 'swig', 'isa', 'asm', 'slicc', - 'lex', 'yacc'): + elif lang_type in ( + "C", + "C++", + "swig", + "isa", + "asm", + "slicc", + "lex", + "yacc", + ): mode = None - for i,line in enumerate(lines): + for i, line in enumerate(lines): if i == 0 and mode_line.search(line): continue - if mode == 'C': - assert start is not None, 'on line %d' % (i + 1) + if mode == "C": + assert start is not None, "on line %d" % (i + 1) match = c_comment_end.search(line) if match: yield start, i @@ -63,30 +73,30 @@ def find_copyright_block(lines, lang_type): c_match = c_comment_start.search(line) if cpp_match: - assert not c_match, 'on line %d' % (i + 1) - if line[:cpp_match.start()].strip(): + assert not c_match, "on line %d" % (i + 1) + if line[: cpp_match.start()].strip(): return if mode is None: - mode = 'CPP' + mode = "CPP" start = i else: - text = line[cpp_match.end():].lstrip() + text = line[cpp_match.end() :].lstrip() if text.startswith("Copyright") > 0: - yield start, i-1 + yield start, i - 1 start = i continue - elif mode == 'CPP': - assert start is not None, 'on line %d' % (i + 1) + elif mode == "CPP": + assert start is not None, "on line %d" % (i + 1) if not line.strip(): continue - yield start, i-1 + yield start, i - 1 mode = None if not c_match: return if c_match: - assert mode is None, 'on line %d' % (i + 1) - mode = 'C' + assert mode is None, "on line %d" % (i + 1) + mode = "C" start = i if mode is None and line.strip(): @@ -95,16 +105,19 @@ def find_copyright_block(lines, lang_type): 
else: raise AttributeError("Could not handle language %s" % lang_type) -date_range_re = re.compile(r'([0-9]{4})\s*-\s*([0-9]{4})') + +date_range_re = re.compile(r"([0-9]{4})\s*-\s*([0-9]{4})") + + def process_dates(dates): - dates = [ d.strip() for d in dates.split(',') ] + dates = [d.strip() for d in dates.split(",")] output = set() for date in dates: match = date_range_re.match(date) if match: - f,l = [ int(d) for d in match.groups() ] - for i in range(f, l+1): + f, l = [int(d) for d in match.groups()] + for i in range(f, l + 1): output.add(i) else: try: @@ -115,24 +128,27 @@ def process_dates(dates): return output -copyright_re = \ - re.compile(r'Copyright (\([cC]\)) ([-, 0-9]+)[\s*#/]*([A-z-,. ]+)', - re.DOTALL) -authors_re = re.compile(r'^[\s*#/]*Authors:\s*([A-z .]+)\s*$') -more_authors_re = re.compile(r'^[\s*#/]*([A-z .]+)\s*$') +copyright_re = re.compile( + r"Copyright (\([cC]\)) ([-, 0-9]+)[\s*#/]*([A-z-,. ]+)", re.DOTALL +) + +authors_re = re.compile(r"^[\s*#/]*Authors:\s*([A-z .]+)\s*$") +more_authors_re = re.compile(r"^[\s*#/]*([A-z .]+)\s*$") all_owners = set() + + def get_data(lang_type, lines): data = [] last = None - for start,end in find_copyright_block(lines, lang_type): - joined = ''.join(lines[start:end+1]) + for start, end in find_copyright_block(lines, lang_type): + joined = "".join(lines[start : end + 1]) match = copyright_re.search(joined) if not match: continue - c,dates,owner = match.groups() + c, dates, owner = match.groups() dates = dates.strip() owner = owner.strip() @@ -145,7 +161,7 @@ def get_data(lang_type, lines): raise authors = [] - for i in range(start,end+1): + for i in range(start, end + 1): line = lines[i] if not authors: match = authors_re.search(line) @@ -154,12 +170,12 @@ def get_data(lang_type, lines): else: match = more_authors_re.search(line) if not match: - for j in range(i, end+1): + for j in range(i, end + 1): line = lines[j].strip() if not line: end = j break - if line.startswith('//'): + if line.startswith("//"): 
line = line[2:].lstrip() if line: end = j - 1 @@ -172,16 +188,18 @@ def get_data(lang_type, lines): return data + def datestr(dates): dates = list(dates) dates.sort() output = [] + def add_output(first, second): if first == second: - output.append('%d' % (first)) + output.append("%d" % (first)) else: - output.append('%d-%d' % (first, second)) + output.append("%d-%d" % (first, second)) first = dates.pop(0) second = first @@ -196,17 +214,20 @@ def datestr(dates): add_output(first, second) - return ','.join(output) + return ",".join(output) + usage_str = """usage: %s [-v] """ + def usage(exitcode): print(usage_str % sys.argv[0]) if exitcode is not None: sys.exit(exitcode) -if __name__ == '__main__': + +if __name__ == "__main__": import getopt show_counts = False @@ -217,34 +238,34 @@ if __name__ == '__main__': except getopt.GetoptError: usage(1) - for o,a in opts: - if o == '-c': + for o, a in opts: + if o == "-c": show_counts = True - if o == '-i': + if o == "-i": ignore.add(a) - if o == '-v': + if o == "-v": verbose = True files = [] for base in args: if os.path.isfile(base): - files += [ (base, lang_type(base)) ] + files += [(base, lang_type(base))] elif os.path.isdir(base): files += find_files(base) else: - raise AttributeError("can't access '%s'" % base) + raise AttributeError("can't access '%s'" % base) copyrights = {} counts = {} for filename, lang in files: - f = file(filename, 'r') + f = file(filename, "r") lines = f.readlines() if not lines: continue - lines = [ line.rstrip('\r\n') for line in lines ] + lines = [line.rstrip("\r\n") for line in lines] lt = lang_type(filename, lines[0]) try: @@ -252,7 +273,7 @@ if __name__ == '__main__': except Exception as e: if verbose: if len(e.args) == 1: - e.args = ('%s (%s))' % (e, filename), ) + e.args = ("%s (%s))" % (e, filename),) print("could not parse %s: %s" % (filename, e)) continue @@ -265,9 +286,9 @@ if __name__ == '__main__': copyrights[owner] |= dates counts[owner] += 1 - info = [ (counts[o], d, o) for o,d in 
list(copyrights.items()) ] + info = [(counts[o], d, o) for o, d in list(copyrights.items())] - for count,dates,owner in sorted(info, reverse=True): + for count, dates, owner in sorted(info, reverse=True): if show_counts: - owner = '%s (%s files)' % (owner, count) - print('Copyright (c) %s %s' % (datestr(dates), owner)) + owner = "%s (%s files)" % (owner, count) + print("Copyright (c) %s %s" % (datestr(dates), owner)) diff --git a/util/gem5art/artifact/gem5art/artifact/_artifactdb.py b/util/gem5art/artifact/gem5art/artifact/_artifactdb.py index 6714c9c209..c1b9a69f5b 100644 --- a/util/gem5art/artifact/gem5art/artifact/_artifactdb.py +++ b/util/gem5art/artifact/gem5art/artifact/_artifactdb.py @@ -388,7 +388,7 @@ class ArtifactFileDB(ArtifactDB): if count >= limit: return for artifact in self._uuid_artifact_map.values(): - #https://docs.python.org/3/library/stdtypes.html#frozenset.issubset + # https://docs.python.org/3/library/stdtypes.html#frozenset.issubset if attr.items() <= artifact.items(): yield artifact diff --git a/util/gem5art/artifact/gem5art/artifact/artifact.py b/util/gem5art/artifact/gem5art/artifact/artifact.py index 91ffc64e50..46664e82fb 100644 --- a/util/gem5art/artifact/gem5art/artifact/artifact.py +++ b/util/gem5art/artifact/gem5art/artifact/artifact.py @@ -158,7 +158,7 @@ class Artifact: documentation: str, inputs: List["Artifact"] = [], architecture: str = "", - size: int = None, + size: Optional[int] = None, is_zipped: bool = False, md5sum: str = "", url: str = "", diff --git a/util/gem5art/artifact/mypy.ini b/util/gem5art/artifact/mypy.ini index 2fb5fc6b14..9d68ba8f5a 100644 --- a/util/gem5art/artifact/mypy.ini +++ b/util/gem5art/artifact/mypy.ini @@ -1,3 +1,3 @@ [mypy] namespace_packages = True -warn_unreachable = True \ No newline at end of file +warn_unreachable = True diff --git a/util/gem5art/run/mypy.ini b/util/gem5art/run/mypy.ini index e0b81f832b..46ef145454 100644 --- a/util/gem5art/run/mypy.ini +++ b/util/gem5art/run/mypy.ini @@ -1,4 
+1,4 @@ [mypy] namespace_packages = True warn_unreachable = True -mypy_path = ../artifact \ No newline at end of file +mypy_path = ../artifact diff --git a/util/gem5art/run/setup.py b/util/gem5art/run/setup.py index 98ff180386..d17124bd1f 100755 --- a/util/gem5art/run/setup.py +++ b/util/gem5art/run/setup.py @@ -60,7 +60,5 @@ setup( "Source": "https://gem5.googlesource.com/", "Documentation": "https://www.gem5.org/documentation/gem5art", }, - scripts=[ - "bin/gem5art-getruns", - ], + scripts=["bin/gem5art-getruns"], ) diff --git a/util/gem5art/tasks/mypy.ini b/util/gem5art/tasks/mypy.ini index e0b81f832b..46ef145454 100644 --- a/util/gem5art/tasks/mypy.ini +++ b/util/gem5art/tasks/mypy.ini @@ -1,4 +1,4 @@ [mypy] namespace_packages = True warn_unreachable = True -mypy_path = ../artifact \ No newline at end of file +mypy_path = ../artifact diff --git a/util/gem5art/tasks/setup.py b/util/gem5art/tasks/setup.py index 7cf1ce8f01..7bcfc642ae 100755 --- a/util/gem5art/tasks/setup.py +++ b/util/gem5art/tasks/setup.py @@ -54,9 +54,7 @@ setup( keywords="simulation architecture gem5", packages=find_namespace_packages(include=["gem5art.*"]), install_requires=["celery"], - extras_require={ - "flower": ["flower"], - }, + extras_require={"flower": ["flower"]}, python_requires=">=3.6", project_urls={ "Bug Reports": "https://gem5.atlassian.net/", diff --git a/util/gem5img.py b/util/gem5img.py index 3e5fabd9d9..9b32b6c1a5 100755 --- a/util/gem5img.py +++ b/util/gem5img.py @@ -60,7 +60,7 @@ BlockSize = 512 MB = 1024 * 1024 # Setup PATH to look in the sbins. -env['PATH'] += ':/sbin:/usr/sbin' +env["PATH"] += ":/sbin:/usr/sbin" # Whether to print debug output. debug = False @@ -69,7 +69,7 @@ debug = False def chsFromSize(sizeInBlocks): if sizeInBlocks >= MaxLBABlocks: sizeInMBs = (sizeInBlocks * BlockSize) / MB - print('%d MB is too big for LBA, truncating file.' % sizeInMBs) + print("%d MB is too big for LBA, truncating file." 
% sizeInMBs) return (MaxLBACylinders, MaxLBAHeads, MaxLBASectors) sectors = sizeInBlocks @@ -88,85 +88,90 @@ def chsFromSize(sizeInBlocks): # Figure out if we should use sudo. def needSudo(): - if not hasattr(needSudo, 'notRoot'): - needSudo.notRoot = (os.geteuid() != 0) + if not hasattr(needSudo, "notRoot"): + needSudo.notRoot = os.geteuid() != 0 if needSudo.notRoot: - print('You are not root. Using sudo.') + print("You are not root. Using sudo.") return needSudo.notRoot + # Run an external command. -def runCommand(command, inputVal=''): - print("%>", ' '.join(command)) +def runCommand(command, inputVal=""): + print("%>", " ".join(command)) proc = Popen(command, stdin=PIPE) proc.communicate(inputVal.encode()) return proc.returncode + # Run an external command and capture its output. This is intended to be # used with non-interactive commands where the output is for internal use. -def getOutput(command, inputVal=''): +def getOutput(command, inputVal=""): global debug if debug: - print("%>", ' '.join(command)) - proc = Popen(command, stderr=STDOUT, - stdin=PIPE, stdout=PIPE) + print("%>", " ".join(command)) + proc = Popen(command, stderr=STDOUT, stdin=PIPE, stdout=PIPE) (out, err) = proc.communicate(inputVal) return (out.decode(), proc.returncode) + # Run a command as root, using sudo if necessary. -def runPriv(command, inputVal=''): +def runPriv(command, inputVal=""): realCommand = command if needSudo(): - realCommand = [findProg('sudo')] + command + realCommand = [findProg("sudo")] + command return runCommand(realCommand, inputVal) -def privOutput(command, inputVal=''): + +def privOutput(command, inputVal=""): realCommand = command if needSudo(): - realCommand = [findProg('sudo')] + command + realCommand = [findProg("sudo")] + command return getOutput(realCommand, inputVal) + # Find the path to a program. 
def findProg(program, cleanupDev=None): - (out, returncode) = getOutput(['which', program]) + (out, returncode) = getOutput(["which", program]) if returncode != 0: if cleanupDev: cleanupDev.destroy() exit("Unable to find program %s, check your PATH variable." % program) return out.strip() + class LoopbackDevice(object): def __init__(self, devFile=None): self.devFile = devFile + def __str__(self): return str(self.devFile) def setup(self, fileName, offset=False): assert not self.devFile - (out, returncode) = privOutput([findProg('losetup'), '-f']) + (out, returncode) = privOutput([findProg("losetup"), "-f"]) if returncode != 0: print(out) return returncode self.devFile = out.strip() - command = [findProg('losetup'), self.devFile, fileName] + command = [findProg("losetup"), self.devFile, fileName] if offset: off = findPartOffset(self.devFile, fileName, 0) - command = command[:1] + \ - ["-o", "%d" % off] + \ - command[1:] + command = command[:1] + ["-o", "%d" % off] + command[1:] return runPriv(command) def destroy(self): assert self.devFile - returncode = runPriv([findProg('losetup'), '-d', self.devFile]) + returncode = runPriv([findProg("losetup"), "-d", self.devFile]) self.devFile = None return returncode + def findPartOffset(devFile, fileName, partition): # Attach a loopback device to the file so we can use sfdisk on it. dev = LoopbackDevice() dev.setup(fileName) # Dump the partition information. - command = [findProg('sfdisk'), '-d', dev.devFile] + command = [findProg("sfdisk"), "-d", dev.devFile] (out, returncode) = privOutput(command) if returncode != 0: print(out) @@ -175,16 +180,16 @@ def findPartOffset(devFile, fileName, partition): # Parse each line of the sfdisk output looking for the first # partition description. 
SFDISK_PARTITION_INFO_RE = re.compile( - r"^\s*" # Start of line - r"(?P\S+)" # Name - r"\s*:\s*" # Separator - r"start=\s*(?P\d+),\s*" # Partition start record - r"size=\s*(?P\d+),\s*" # Partition size record - r"type=(?P\d+)" # Partition type record - r"\s*$" # End of line + r"^\s*" # Start of line + r"(?P\S+)" # Name + r"\s*:\s*" # Separator + r"start=\s*(?P\d+),\s*" # Partition start record + r"size=\s*(?P\d+),\s*" # Partition size record + r"type=(?P\d+)" # Partition type record + r"\s*$" # End of line ) lines = out.splitlines() - for line in lines : + for line in lines: match = SFDISK_PARTITION_INFO_RE.match(line) if match: sectors = int(match.group("start")) @@ -200,8 +205,9 @@ def findPartOffset(devFile, fileName, partition): dev.destroy() return sectors * BlockSize + def mountPointToDev(mountPoint): - (mountTable, returncode) = getOutput([findProg('mount')]) + (mountTable, returncode) = getOutput([findProg("mount")]) if returncode != 0: print(mountTable) exit(returncode) @@ -220,6 +226,7 @@ def mountPointToDev(mountPoint): commands = {} commandOrder = [] + class Command(object): def addArgument(self, *args, **kargs): self.parser.add_argument(*args, **kargs) @@ -231,46 +238,64 @@ class Command(object): self.posArgs = posArgs commands[self.name] = self commandOrder.append(self.name) - usage = '%(prog)s [options]' - posUsage = '' + usage = "%(prog)s [options]" + posUsage = "" for posArg in posArgs: (argName, argDesc) = posArg - usage += ' %s' % argName - posUsage += '\n %s: %s' % posArg + usage += " %s" % argName + posUsage += "\n %s: %s" % posArg usage += posUsage self.parser = ArgumentParser(usage=usage, description=description) - self.addArgument('-d', '--debug', dest='debug', action='store_true', - help='Verbose output.') - self.addArgument('pos', nargs='*') + self.addArgument( + "-d", + "--debug", + dest="debug", + action="store_true", + help="Verbose output.", + ) + self.addArgument("pos", nargs="*") def parseArgs(self, argv): self.options = 
self.parser.parse_args(argv[2:]) self.args = self.options.pos if len(self.args) != len(self.posArgs): - self.parser.error('Incorrect number of arguments') + self.parser.error("Incorrect number of arguments") global debug if self.options.debug: debug = True def runCom(self): if not self.func: - exit('Unimplemented command %s!' % self.name) + exit("Unimplemented command %s!" % self.name) self.func(self.options, self.args) # A command which prepares an image with an partition table and an empty file # system. -initCom = Command('init', 'Create an image with an empty file system.', - [('file', 'Name of the image file.'), - ('mb', 'Size of the file in MB.')]) -initCom.addArgument('-t', '--type', dest='fstype', action='store', - default='ext2', - help='Type of file system to use. Appended to mkfs.') +initCom = Command( + "init", + "Create an image with an empty file system.", + [("file", "Name of the image file."), ("mb", "Size of the file in MB.")], +) +initCom.addArgument( + "-t", + "--type", + dest="fstype", + action="store", + default="ext2", + help="Type of file system to use. Appended to mkfs.", +) # A command to mount the first partition in the image. -mountCom = Command('mount', 'Mount the first partition in the disk image.', - [('file', 'Name of the image file.'), - ('mount point', 'Where to mount the image.')]) +mountCom = Command( + "mount", + "Mount the first partition in the disk image.", + [ + ("file", "Name of the image file."), + ("mount point", "Where to mount the image."), + ], +) + def mountComFunc(options, args): (path, mountPoint) = args @@ -281,15 +306,20 @@ def mountComFunc(options, args): if dev.setup(path, offset=True) != 0: exit(1) - if runPriv([findProg('mount'), str(dev), mountPoint]) != 0: + if runPriv([findProg("mount"), str(dev), mountPoint]) != 0: dev.destroy() exit(1) + mountCom.func = mountComFunc # A command to unmount the first partition in the image. 
-umountCom = Command('umount', 'Unmount the disk image mounted at mount_point.', - [('mount_point', 'What mount point to unmount.')]) +umountCom = Command( + "umount", + "Unmount the disk image mounted at mount_point.", + [("mount_point", "What mount point to unmount.")], +) + def umountComFunc(options, args): (mountPoint,) = args @@ -302,19 +332,23 @@ def umountComFunc(options, args): print("Unable to find mount information for %s." % mountPoint) # Unmount the loopback device. - if runPriv([findProg('umount'), mountPoint]) != 0: + if runPriv([findProg("umount"), mountPoint]) != 0: exit(1) # Destroy the loopback device. dev.destroy() + umountCom.func = umountComFunc # A command to create an empty file to hold the image. -newCom = Command('new', 'File creation part of "init".', - [('file', 'Name of the image file.'), - ('mb', 'Size of the file in MB.')]) +newCom = Command( + "new", + 'File creation part of "init".', + [("file", "Name of the image file."), ("mb", "Size of the file in MB.")], +) + def newImage(file, mb): (cylinders, heads, sectors) = chsFromSize((mb * MB) / BlockSize) @@ -325,7 +359,8 @@ def newImage(file, mb): # store to disk and which is defined to read as zero. fd = os.open(file, os.O_WRONLY | os.O_CREAT) os.lseek(fd, size - 1, os.SEEK_SET) - os.write(fd, b'\0') + os.write(fd, b"\0") + def newComFunc(options, args): (file, mb) = args @@ -336,16 +371,23 @@ def newComFunc(options, args): newCom.func = newComFunc # A command to partition the image file like a raw disk device. 
-partitionCom = Command('partition', 'Partition part of "init".', - [('file', 'Name of the image file.')]) +partitionCom = Command( + "partition", + 'Partition part of "init".', + [("file", "Name of the image file.")], +) + def partition(dev, cylinders, heads, sectors): # Use sfdisk to partition the device # The specified options are intended to work with both new and old # versions of sfdisk (see https://askubuntu.com/a/819614) - comStr = ';' - return runPriv([findProg('sfdisk'), '--no-reread', '-u', 'S', '-L', \ - str(dev)], inputVal=comStr) + comStr = ";" + return runPriv( + [findProg("sfdisk"), "--no-reread", "-u", "S", "-L", str(dev)], + inputVal=comStr, + ) + def partitionComFunc(options, args): (path,) = args @@ -362,17 +404,28 @@ def partitionComFunc(options, args): dev.destroy() + partitionCom.func = partitionComFunc # A command to format the first partition in the image. -formatCom = Command('format', 'Formatting part of "init".', - [('file', 'Name of the image file.')]) -formatCom.addArgument('-t', '--type', dest='fstype', action='store', - default='ext2', - help='Type of file system to use. Appended to mkfs.') +formatCom = Command( + "format", + 'Formatting part of "init".', + [("file", "Name of the image file.")], +) +formatCom.addArgument( + "-t", + "--type", + dest="fstype", + action="store", + default="ext2", + help="Type of file system to use. Appended to mkfs.", +) + def formatImage(dev, fsType): - return runPriv([findProg('mkfs.%s' % fsType, dev), str(dev)]) + return runPriv([findProg("mkfs.%s" % fsType, dev), str(dev)]) + def formatComFunc(options, args): (path,) = args @@ -388,8 +441,10 @@ def formatComFunc(options, args): dev.destroy() + formatCom.func = formatComFunc + def initComFunc(options, args): (path, mb) = args mb = int(mb) @@ -409,19 +464,20 @@ def initComFunc(options, args): exit(1) dev.destroy() + initCom.func = initComFunc # Figure out what command was requested and execute it. 
if len(argv) < 2 or argv[1] not in commands: - print('Usage: %s [command] ') - print('where [command] is one of ') + print("Usage: %s [command] ") + print("where [command] is one of ") for name in commandOrder: command = commands[name] - print(' %s: %s' % (command.name, command.description)) - print('Watch for orphaned loopback devices and delete them with') - print('losetup -d. Mounted images will belong to root, so you may need') - print('to use sudo to modify their contents.') + print(" %s: %s" % (command.name, command.description)) + print("Watch for orphaned loopback devices and delete them with") + print("losetup -d. Mounted images will belong to root, so you may need") + print("to use sudo to modify their contents.") exit(1) command = commands[argv[1]] diff --git a/util/gen_arm_fs_files.py b/util/gen_arm_fs_files.py index 9c29c8f347..548abe819f 100755 --- a/util/gen_arm_fs_files.py +++ b/util/gen_arm_fs_files.py @@ -47,78 +47,124 @@ from glob import glob import sys import os -def run_cmd(explanation, working_dir, cmd, stdout = None): + +def run_cmd(explanation, working_dir, cmd, stdout=None): print("Running phase '%s'" % explanation) sys.stdout.flush() # some of the commands need $PWD to be properly set env = os.environ.copy() - env['PWD'] = working_dir + env["PWD"] = working_dir - return_code = call(cmd, cwd = working_dir, stdout = stdout, - env = env) + return_code = call(cmd, cwd=working_dir, stdout=stdout, env=env) if return_code == 0: return - print("Error running phase %s. Returncode: %d" % (explanation, return_code)) + print( + "Error running phase %s. 
Returncode: %d" % (explanation, return_code) + ) sys.exit(1) + def linux_clone(): kernel_vexpress_gem5_dir = os.path.join( - args.dest_dir, "linux-kernel-vexpress_gem5") + args.dest_dir, "linux-kernel-vexpress_gem5" + ) - run_cmd("clone linux kernel for VExpress_GEM5_V1 platform", + run_cmd( + "clone linux kernel for VExpress_GEM5_V1 platform", args.dest_dir, - ["git", "clone", "https://gem5.googlesource.com/arm/linux", - kernel_vexpress_gem5_dir]) + [ + "git", + "clone", + "https://gem5.googlesource.com/arm/linux", + kernel_vexpress_gem5_dir, + ], + ) + def linux64(): kernel_vexpress_gem5_dir = os.path.join( - args.dest_dir, "linux-kernel-vexpress_gem5") + args.dest_dir, "linux-kernel-vexpress_gem5" + ) - linux_bin = os.path.join( - binaries_dir, "vmlinux.vexpress_gem5_v1_64") + linux_bin = os.path.join(binaries_dir, "vmlinux.vexpress_gem5_v1_64") with open(revisions_dir + "/linux", "w+") as rev_file: - run_cmd("write revision of linux-kernel-vexpress_gem5 repo", + run_cmd( + "write revision of linux-kernel-vexpress_gem5 repo", kernel_vexpress_gem5_dir, ["git", "rev-parse", "--short", "HEAD"], - rev_file) + rev_file, + ) - run_cmd("configure kernel for arm64", + run_cmd( + "configure kernel for arm64", kernel_vexpress_gem5_dir, - ["make", "ARCH=arm64", "CROSS_COMPILE=aarch64-linux-gnu-", - "gem5_defconfig", make_jobs_str]) - run_cmd("compile kernel for arm64", + [ + "make", + "ARCH=arm64", + "CROSS_COMPILE=aarch64-linux-gnu-", + "gem5_defconfig", + make_jobs_str, + ], + ) + run_cmd( + "compile kernel for arm64", kernel_vexpress_gem5_dir, - ["make", "ARCH=arm64", "CROSS_COMPILE=aarch64-linux-gnu-", - make_jobs_str]) - run_cmd("copy arm64 vmlinux", + [ + "make", + "ARCH=arm64", + "CROSS_COMPILE=aarch64-linux-gnu-", + make_jobs_str, + ], + ) + run_cmd( + "copy arm64 vmlinux", kernel_vexpress_gem5_dir, - ["cp", "vmlinux", linux_bin]) - run_cmd("cleanup arm64 kernel compilation", + ["cp", "vmlinux", linux_bin], + ) + run_cmd( + "cleanup arm64 kernel compilation", 
kernel_vexpress_gem5_dir, - ["make", "distclean"]) + ["make", "distclean"], + ) + def linux32(): kernel_vexpress_gem5_dir = os.path.join( - args.dest_dir, "linux-kernel-vexpress_gem5") + args.dest_dir, "linux-kernel-vexpress_gem5" + ) - linux_bin = os.path.join( - binaries_dir, "vmlinux.vexpress_gem5_v1") + linux_bin = os.path.join(binaries_dir, "vmlinux.vexpress_gem5_v1") - run_cmd("configure kernel for arm", + run_cmd( + "configure kernel for arm", kernel_vexpress_gem5_dir, - ["make", "ARCH=arm", "CROSS_COMPILE=arm-linux-gnueabihf-", - "gem5_defconfig"]) - run_cmd("compile kernel for arm", + [ + "make", + "ARCH=arm", + "CROSS_COMPILE=arm-linux-gnueabihf-", + "gem5_defconfig", + ], + ) + run_cmd( + "compile kernel for arm", kernel_vexpress_gem5_dir, - ["make", "ARCH=arm", "CROSS_COMPILE=arm-linux-gnueabihf-", - make_jobs_str]) - run_cmd("copy arm vmlinux", + [ + "make", + "ARCH=arm", + "CROSS_COMPILE=arm-linux-gnueabihf-", + make_jobs_str, + ], + ) + run_cmd( + "copy arm vmlinux", kernel_vexpress_gem5_dir, - ["cp", "vmlinux", linux_bin]) + ["cp", "vmlinux", linux_bin], + ) + def linux(): """ @@ -128,17 +174,21 @@ def linux(): linux64() linux32() + def dtbs(): """ Build DTBs for VExpress_GEM5_V1 """ dt_dir = gem5_dir + "/system/arm/dt" - run_cmd("compile DTBs for VExpress_GEM5_V1 platform", + run_cmd( + "compile DTBs for VExpress_GEM5_V1 platform", dt_dir, - ["make", make_jobs_str]) - run_cmd("copy DTBs", - dt_dir, - ["cp"] + glob(dt_dir + "/*dtb") + [binaries_dir]) + ["make", make_jobs_str], + ) + run_cmd( + "copy DTBs", dt_dir, ["cp"] + glob(dt_dir + "/*dtb") + [binaries_dir] + ) + def bootloaders(): """ @@ -146,41 +196,37 @@ def bootloaders(): """ bootloader_arm64_dir = gem5_dir + "/system/arm/bootloader/arm64" - run_cmd("compile arm64 bootloader", + run_cmd("compile arm64 bootloader", bootloader_arm64_dir, ["make"]) + run_cmd( + "copy arm64 bootloader", bootloader_arm64_dir, - ["make"]) - run_cmd("copy arm64 bootloader", - bootloader_arm64_dir, - ["cp", 
"boot.arm64", "boot_emm.arm64", "boot_v2.arm64", binaries_dir]) + ["cp", "boot.arm64", "boot_emm.arm64", "boot_v2.arm64", binaries_dir], + ) bootloader_arm_dir = gem5_dir + "/system/arm/bootloader/arm" - run_cmd("compile arm bootloader", + run_cmd("compile arm bootloader", bootloader_arm_dir, ["make"]) + run_cmd( + "copy arm bootloaders", bootloader_arm_dir, - ["make"]) - run_cmd("copy arm bootloaders", - bootloader_arm_dir, - ["cp", "boot.arm", "boot_emm.arm", binaries_dir]) + ["cp", "boot.arm", "boot_emm.arm", binaries_dir], + ) + def m5(): """ Build m5 binaries """ m5_dir = gem5_dir + "/util/m5" - run_cmd("compile arm64 m5", - m5_dir, - ["make", "-f", "Makefile.aarch64"]) - run_cmd("copy arm64 m5", - m5_dir, - ["cp", "m5", binaries_dir + "/m5.aarch64"]) - run_cmd("clean arm64 m5", - m5_dir, - ["make", "clean", "-f", "Makefile.aarch64"]) - run_cmd("compile arm m5", - m5_dir, - ["make", "-f", "Makefile.arm"]) - run_cmd("copy arm m5", - m5_dir, - ["cp", "m5", binaries_dir + "/m5.aarch32"]) + run_cmd("compile arm64 m5", m5_dir, ["make", "-f", "Makefile.aarch64"]) + run_cmd( + "copy arm64 m5", m5_dir, ["cp", "m5", binaries_dir + "/m5.aarch64"] + ) + run_cmd( + "clean arm64 m5", m5_dir, ["make", "clean", "-f", "Makefile.aarch64"] + ) + run_cmd("compile arm m5", m5_dir, ["make", "-f", "Makefile.arm"]) + run_cmd("copy arm m5", m5_dir, ["cp", "m5", binaries_dir + "/m5.aarch32"]) + def xen(): """ @@ -189,24 +235,32 @@ def xen(): xen_dir = os.path.join(args.dest_dir, "xen") bootwrapper_dir = os.path.join(args.dest_dir, "bootwrapper") linux_cmdline = "console=hvc0 root=/dev/vda rw mem=1G" - xen_cmdline = "dtuart=/uart@1c090000 console=dtuart no-bootscrub " + \ - "dom0_mem=1G loglvl=all guest_loglvl=all" + xen_cmdline = ( + "dtuart=/uart@1c090000 console=dtuart no-bootscrub " + + "dom0_mem=1G loglvl=all guest_loglvl=all" + ) - run_cmd("clone Xen", + run_cmd( + "clone Xen", args.dest_dir, - ["git", "clone", "git://xenbits.xen.org/xen.git", - xen_dir]) + ["git", "clone", 
"git://xenbits.xen.org/xen.git", xen_dir], + ) - run_cmd("clone boot-wrapper-aarch64", + run_cmd( + "clone boot-wrapper-aarch64", args.dest_dir, - ["git", "clone", "git://git.kernel.org/pub/" + - "scm/linux/kernel/git/mark/boot-wrapper-aarch64.git", - bootwrapper_dir]) + [ + "git", + "clone", + "git://git.kernel.org/pub/" + + "scm/linux/kernel/git/mark/boot-wrapper-aarch64.git", + bootwrapper_dir, + ], + ) # Need to compile arm64 Linux linux_dir = os.path.join(args.dest_dir, "linux-kernel-vexpress_gem5") - linux_bin = os.path.join(linux_dir, - "arch", "arm64", "boot", "Image") + linux_bin = os.path.join(linux_dir, "arch", "arm64", "boot", "Image") if not os.path.exists(linux_bin): linux_clone() linux64() @@ -217,17 +271,30 @@ def xen(): dtbs() # Building Xen - run_cmd("building xen for aarch64", + run_cmd( + "building xen for aarch64", xen_dir, - ["make", "dist-xen", "XEN_TARGET_ARCH=arm64", - "CROSS_COMPILE=aarch64-linux-gnu-", - "CONFIG_EARLY_PRINTK=vexpress", make_jobs_str]) + [ + "make", + "dist-xen", + "XEN_TARGET_ARCH=arm64", + "CROSS_COMPILE=aarch64-linux-gnu-", + "CONFIG_EARLY_PRINTK=vexpress", + make_jobs_str, + ], + ) # Building boot-wrapper-aarch64 - run_cmd("autoreconf boot-wrapper-aarch64", - bootwrapper_dir, ["autoreconf", "-i"]) - run_cmd("configure boot-wrapper-aarch64", - bootwrapper_dir, ["./configure", + run_cmd( + "autoreconf boot-wrapper-aarch64", + bootwrapper_dir, + ["autoreconf", "-i"], + ) + run_cmd( + "configure boot-wrapper-aarch64", + bootwrapper_dir, + [ + "./configure", "--host=aarch64-linux-gnu", "--with-kernel-dir={}".format(linux_dir), "--with-dtb={}".format(dtb_bin), @@ -235,49 +302,72 @@ def xen(): "--with-xen-cmdline='{}'".format(xen_cmdline), "--with-xen={}".format(os.path.join(xen_dir, "xen", "xen")), "--enable-psci", - "--enable-gicv3"]) - run_cmd("build boot-wrapper-aarch64", - bootwrapper_dir, ["make"]) + "--enable-gicv3", + ], + ) + run_cmd("build boot-wrapper-aarch64", bootwrapper_dir, ["make"]) # Copying the final binary 
- run_cmd("copy xen binary", - bootwrapper_dir, ["cp", "xen-system.axf", binaries_dir]) + run_cmd( + "copy xen binary", + bootwrapper_dir, + ["cp", "xen-system.axf", binaries_dir], + ) with open(os.path.join(revisions_dir, "xen"), "w+") as rev_file: - run_cmd("write revision of xen repo", + run_cmd( + "write revision of xen repo", xen_dir, ["git", "rev-parse", "--short", "HEAD"], - rev_file) + rev_file, + ) + script_dir = os.path.dirname(os.path.abspath(sys.argv[0])) gem5_dir = os.path.dirname(script_dir) all_binaries = { - "linux" : linux, - "dtbs" : dtbs, - "bootloaders" : bootloaders, - "m5" : m5, - "xen" : xen, + "linux": linux, + "dtbs": dtbs, + "bootloaders": bootloaders, + "m5": m5, + "xen": xen, } parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) -parser.add_argument("--gem5-dir", default = gem5_dir, - metavar = "GEM5_DIR", - help = "gem5 root directory to be used for bootloader and " - "VExpress_GEM5_V1 DTB sources. The default value is the gem5 root " - "directory of the executed script") -parser.add_argument("--dest-dir", default = "/tmp", - metavar = "DEST_DIR", - help = "Directory to use for checking out the different kernel " - "repositories. Generated files will be copied to " - "DEST_DIR/binaries (which must not exist)") -parser.add_argument("-j", "--make-jobs", type = int, default = 1, - metavar = "MAKE_JOBS", - help = "Number of jobs to use with the 'make' commands.") -parser.add_argument("-b", "--fs-binaries", action="append", - choices=list(all_binaries.keys()), default=[], - help = "List of FS files to be generated. Defaulting to all") +parser.add_argument( + "--gem5-dir", + default=gem5_dir, + metavar="GEM5_DIR", + help="gem5 root directory to be used for bootloader and " + "VExpress_GEM5_V1 DTB sources. 
The default value is the gem5 root " + "directory of the executed script", +) +parser.add_argument( + "--dest-dir", + default="/tmp", + metavar="DEST_DIR", + help="Directory to use for checking out the different kernel " + "repositories. Generated files will be copied to " + "DEST_DIR/binaries (which must not exist)", +) +parser.add_argument( + "-j", + "--make-jobs", + type=int, + default=1, + metavar="MAKE_JOBS", + help="Number of jobs to use with the 'make' commands.", +) +parser.add_argument( + "-b", + "--fs-binaries", + action="append", + choices=list(all_binaries.keys()), + default=[], + help="List of FS files to be generated. Defaulting to all", +) args = parser.parse_args() @@ -302,19 +392,21 @@ if os.path.exists(binaries_dir): revisions_dir = args.dest_dir + "/revisions" if os.path.exists(revisions_dir): - print("Error: %s already exists." %revisions_dir) + print("Error: %s already exists." % revisions_dir) sys.exit(1) -os.mkdir(binaries_dir); -os.mkdir(revisions_dir); +os.mkdir(binaries_dir) +os.mkdir(revisions_dir) make_jobs_str = "-j" + str(args.make_jobs) rev_file = open(revisions_dir + "/gem5", "w+") -run_cmd("write revision of gem5 repo", +run_cmd( + "write revision of gem5 repo", gem5_dir, ["git", "rev-parse", "--short", "HEAD"], - rev_file) + rev_file, +) rev_file.close() binaries = args.fs_binaries if args.fs_binaries else list(all_binaries.keys()) @@ -324,4 +416,3 @@ for fs_binary in binaries: print("Done! 
All the generated files can be found in %s" % binaries_dir) sys.exit(0) - diff --git a/util/gerrit-bot/README.md b/util/gerrit-bot/README.md index 19eb26a8ab..0f806d9cbe 100644 --- a/util/gerrit-bot/README.md +++ b/util/gerrit-bot/README.md @@ -68,4 +68,4 @@ To run the Gerrit bot every 30 minutes, add the following line to the crontable, ```python */1 * * * * cd /path/to/gerrit/bot/directory && ./bot.py -``` \ No newline at end of file +``` diff --git a/util/gerrit-bot/bot.py b/util/gerrit-bot/bot.py index 6f6b018d7e..f6b9469d9b 100755 --- a/util/gerrit-bot/bot.py +++ b/util/gerrit-bot/bot.py @@ -34,13 +34,15 @@ import json import time import sys -sys.path.append('..') + +sys.path.append("..") import maint.lib.maintainers class GerritBotConfig: - def __init__(self, config = {}): + def __init__(self, config={}): self.__dict__.update(config) + @staticmethod def DefaultConfig(): default_config = GerritBotConfig() @@ -55,8 +57,9 @@ class GerritBotConfig: # path to the file containing the map each maintainer email address # to the one account id (ie, the "_account_id" field of ReviewerInfo) - default_config.maintainer_account_ids_file_path = \ + default_config.maintainer_account_ids_file_path = ( ".data/maintainer_ids.json" + ) # query changes made within 2 days if prev_query_time is not specified default_config.default_query_age = "2d" @@ -67,10 +70,11 @@ class GerritBotConfig: default_config.api_entry_point = "https://gem5-review.googlesource.com" default_config.projects_prefix = "public/gem5" - default_config.query_limit = 1000 # at most 1000 new changes per query - default_config.request_timeout = 10 # seconds + default_config.query_limit = 1000 # at most 1000 new changes per query + default_config.request_timeout = 10 # seconds return default_config + class GerritBot: def __init__(self, config): self.config = config @@ -78,15 +82,18 @@ class GerritBot: self.auth = self.__read_auth_file(self.config.auth_file_path) # Initalize the Gerrit API Object - self.gerrit_api 
= GerritRestAPI(self.auth, - self.config.api_entry_point, - self.config.request_timeout) + self.gerrit_api = GerritRestAPI( + self.auth, self.config.api_entry_point, self.config.request_timeout + ) self.account_id = self.__get_bot_account_id() self.maintainers = maint.lib.maintainers.Maintainers.from_file( - self.config.maintainers_file_path) + self.config.maintainers_file_path + ) self.maintainer_account_ids = self.__read_maintainer_account_id_file( - self.maintainers, self.config.maintainer_account_ids_file_path) + self.maintainers, self.config.maintainer_account_ids_file_path + ) + def __read_auth_file(self, auth_file_path): username = "" password = "" @@ -104,19 +111,25 @@ class GerritBot: lines = f.readlines() prev_query_time = int(float(lines[0].strip())) except FileNotFoundError: - print(f"warning: cannot find the time tracker file at " - f"`{file_path}`. Previous query time is set to 0.") + print( + f"warning: cannot find the time tracker file at " + f"`{file_path}`. Previous query time is set to 0." + ) except IndexError: - print(f"warning: cannot find the content of the time tracker file " - f"at `{file_path}`. Previous query time is set 0.") + print( + f"warning: cannot find the content of the time tracker file " + f"at `{file_path}`. Previous query time is set 0." + ) return prev_query_time def __update_time_tracker_file(self, file_path, prev_query_time): with open(file_path, "w") as f: f.write(f"{prev_query_time}\n") - f.write(f"# The above time is the result of calling time.time() " - f"in Python.") + f.write( + f"# The above time is the result of calling time.time() " + f"in Python." 
+ ) def __read_maintainer_account_id_file(self, maintainers, file_path): account_ids = {} @@ -127,8 +140,9 @@ class GerritBot: # create a placeholder file with open(file_path, "w") as f: json.dump(account_ids, f) - account_ids = self.__update_maintainer_account_id_file(file_path, - maintainers) + account_ids = self.__update_maintainer_account_id_file( + file_path, maintainers + ) return account_ids def __update_maintainer_account_id_file(self, file_path, maintainers): @@ -157,8 +171,10 @@ class GerritBot: response = self.gerrit_api.query_account(query, 1) accounts = Parser.get_json_content(response) if len(accounts) == 0: - print(f"warn: unable to obtain the account id of " - f"\"{email_address}\"") + print( + f"warn: unable to obtain the account id of " + f'"{email_address}"' + ) print(vars(response)) return None return accounts[0]["_account_id"] @@ -168,11 +184,14 @@ class GerritBot: return account_info._account_id def __query_new_changes(self, query_age): - query = (f"projects:{self.config.projects_prefix} " - f"status:open -is:wip -age:{query_age}") + query = ( + f"projects:{self.config.projects_prefix} " + f"status:open -is:wip -age:{query_age}" + ) response = self.gerrit_api.query_changes( - query, self.config.query_limit, - ["CURRENT_REVISION", "REVIEWER_UPDATES", "DETAILED_ACCOUNTS"] + query, + self.config.query_limit, + ["CURRENT_REVISION", "REVIEWER_UPDATES", "DETAILED_ACCOUNTS"], ) if response.status_code >= 300: @@ -185,35 +204,40 @@ class GerritBot: return new_changes def _pre_run(self): - self.prev_query_time = \ - self.__read_time_tracker_file(self.config.time_tracker_file_path) + self.prev_query_time = self.__read_time_tracker_file( + self.config.time_tracker_file_path + ) self.curr_time = time.time() if self.prev_query_time > 0: # adding 10 seconds to the query age to make sure that # we won't miss any new changes - self.query_age = \ - convert_time_in_seconds( - int(self.curr_time - self.prev_query_time + 10)) + self.query_age = 
convert_time_in_seconds( + int(self.curr_time - self.prev_query_time + 10) + ) else: self.query_age = self.config.default_query_age def _run(self): new_changes = self.__query_new_changes(self.query_age) for new_change in new_changes: - add_maintainers_to_change(new_change, - self.maintainers, - self.maintainer_account_ids, - self.gerrit_api) + add_maintainers_to_change( + new_change, + self.maintainers, + self.maintainer_account_ids, + self.gerrit_api, + ) def _post_run(self): - self.__update_time_tracker_file(self.config.time_tracker_file_path, - self.curr_time) + self.__update_time_tracker_file( + self.config.time_tracker_file_path, self.curr_time + ) def run(self): self._pre_run() self._run() self._post_run() + if __name__ == "__main__": default_config = GerritBotConfig.DefaultConfig() gerrit_bot = GerritBot(default_config) diff --git a/util/gerrit-bot/extract_gitcookies.py b/util/gerrit-bot/extract_gitcookies.py index fbe9c80b6a..24f2ca0afa 100755 --- a/util/gerrit-bot/extract_gitcookies.py +++ b/util/gerrit-bot/extract_gitcookies.py @@ -26,12 +26,13 @@ import argparse + def parse_gitcookies_line(raw): # if this is a line from .gitcookies, the delimiter is `\t` - auth_info = raw.strip().split('\t') + auth_info = raw.strip().split("\t") if len(auth_info) < 7: # if this is a line from auth script, the delimiter is `,` - auth_info = raw.strip().split(',') + auth_info = raw.strip().split(",") if len(auth_info) != 7: return None, None auth_info = auth_info[-1] @@ -40,6 +41,7 @@ def parse_gitcookies_line(raw): password = auth_info[1] return username, password + def parse_gitcookies(input_path): username_password_dict = {} with open(input_path, "r") as input_stream: @@ -50,13 +52,18 @@ def parse_gitcookies(input_path): username_password_dict[username] = password return username_password_dict + if __name__ == "__main__": parser = argparse.ArgumentParser( - description=("Extract username and password from .gitcookies" - "or from the script used to write .gitcookies 
file")) - parser.add_argument("input", - help = ("Path to a .gitcookies file or a file with " - "a similar format")) + description=( + "Extract username and password from .gitcookies" + "or from the script used to write .gitcookies file" + ) + ) + parser.add_argument( + "input", + help=("Path to a .gitcookies file or a file with " "a similar format"), + ) parser.add_argument("output", help="Path to the output file") args = parser.parse_args() username_password_dict = parse_gitcookies(args.input) diff --git a/util/gerrit-bot/gerrit.py b/util/gerrit-bot/gerrit.py index 7dde34befd..2e68a70645 100644 --- a/util/gerrit-bot/gerrit.py +++ b/util/gerrit-bot/gerrit.py @@ -30,10 +30,11 @@ import requests from types import SimpleNamespace from urllib.parse import urljoin + class GerritResponseParser: @staticmethod def get_json_content(response): - assert(isinstance(response, requests.Response)) + assert isinstance(response, requests.Response) # If the status code is not in the 200s range, it doesn't have content. 
if response.status_code >= 300: @@ -64,31 +65,36 @@ class GerritRestAPI: self.timeout = timeout # helper methods for sending GET and POST requests - def _get(self, endpoint, params = None): + def _get(self, endpoint, params=None): request_url = urljoin(self.api_entry_point, endpoint) - return requests.get(request_url, - params = params, - timeout = self.timeout, - auth = (self.username, self.password)) + return requests.get( + request_url, + params=params, + timeout=self.timeout, + auth=(self.username, self.password), + ) + def _post(self, endpoint, json_content): request_url = urljoin(self.api_entry_point, endpoint) - return requests.post(request_url, - json = json_content, - timeout = self.timeout, - auth = (self.username, self.password)) + return requests.post( + request_url, + json=json_content, + timeout=self.timeout, + auth=(self.username, self.password), + ) # --------------- Account Endpoints --------------- # https://gerrit-review.googlesource.com/Documentation/ # rest-api-accounts.html#get-account def get_account(self, account_id="self"): - """ get an account detail from an account_id """ + """get an account detail from an account_id""" return self._get(f"/accounts/{account_id}") # https://gerrit-review.googlesource.com/Documentation/ # rest-api-accounts.html#query-account - def query_account(self, query, limit = None): - """ get accounts based on the query """ - params = { "q": query } + def query_account(self, query, limit=None): + """get accounts based on the query""" + params = {"q": query} if limit: params["n"] = str(limit) return self._get(f"/accounts/", params) @@ -97,9 +103,9 @@ class GerritRestAPI: # https://gerrit-review.googlesource.com/Documentation/ # rest-api-changes.html#list-changes def query_changes(self, query, limit=None, optional_field=None): - """ query changes with maximum limit returned queries """ + """query changes with maximum limit returned queries""" endpoint = f"/changes/" - params = { "q": query } + params = {"q": query} if 
limit: params["n"] = str(limit) if optional_field: @@ -110,9 +116,10 @@ class GerritRestAPI: # https://gerrit-review.googlesource.com/Documentation/ # rest-api-changes.html#list-reviewers def list_reviewers(self, change_id): - """ list reviewers of a change """ + """list reviewers of a change""" return self._get(f"/changes/{change_id}/reviewers") + def add_reviewer(self, change_id, reviewer_email): - """ add a reviewer using an email address """ + """add a reviewer using an email address""" data = {"reviewer": reviewer_email} return self._post(f"/changes/{change_id}/reviewers/", data) diff --git a/util/gerrit-bot/util.py b/util/gerrit-bot/util.py index 1d0037267f..b410858e14 100644 --- a/util/gerrit-bot/util.py +++ b/util/gerrit-bot/util.py @@ -27,7 +27,7 @@ # Utility functions def parse_commit_subject(subject): - parsed_subject = subject.split(":", maxsplit = 1) + parsed_subject = subject.split(":", maxsplit=1) # If the subject does not have a colon, it either does not have tags # or does not have a message. 
In this case, we assume that the subject @@ -35,11 +35,12 @@ def parse_commit_subject(subject): if len(parsed_subject) <= 1: return [], parsed_subject[0] - tags = [ tag.strip() for tag in parsed_subject[0].split(",") ] + tags = [tag.strip() for tag in parsed_subject[0].split(",")] message = parsed_subject[1] return tags, message + # Convert time in seconds to a plausible unit def convert_time_in_seconds(delta): time = int(delta) @@ -53,10 +54,13 @@ def convert_time_in_seconds(delta): return f"{time}{time_unit}" + # End of Utility functions -def add_maintainers_to_change(change, maintainers, maintainers_account_ids, - gerrit_api): + +def add_maintainers_to_change( + change, maintainers, maintainers_account_ids, gerrit_api +): tags, message = parse_commit_subject(change["subject"]) change_id = change["id"] maintainer_emails = set() @@ -73,8 +77,12 @@ def add_maintainers_to_change(change, maintainers, maintainers_account_ids, for name, email in maintainers[tag].maintainers: maintainer_emails.add(email) except KeyError: - print((f"warning: `change-{change_id}` has an unknown tag: " - f"`{tag}`")) + print( + ( + f"warning: `change-{change_id}` has an unknown tag: " + f"`{tag}`" + ) + ) for email in maintainer_emails: if email in avoid_emails: continue diff --git a/util/gerrit-commit-msg-hook b/util/gerrit-commit-msg-hook new file mode 100755 index 0000000000..5729239db2 --- /dev/null +++ b/util/gerrit-commit-msg-hook @@ -0,0 +1,104 @@ +#!/bin/sh +# From Gerrit Code Review 3.6.1-1565-g2ee3d30913 +# +# Part of Gerrit Code Review (https://www.gerritcodereview.com/) +# +# Copyright (C) 2009 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -u + +# avoid [[ which is not POSIX sh. +if test "$#" != 1 ; then + echo "$0 requires an argument." + exit 1 +fi + +if test ! -f "$1" ; then + echo "file does not exist: $1" + exit 1 +fi + +# Do not create a change id if requested +if test "false" = "$(git config --bool --get gerrit.createChangeId)" ; then + exit 0 +fi + +# Do not create a change id for squash commits. +if head -n1 "$1" | grep -q '^squash! '; then + exit 0 +fi + +if git rev-parse --verify HEAD >/dev/null 2>&1; then + refhash="$(git rev-parse HEAD)" +else + refhash="$(git hash-object -t tree /dev/null)" +fi + +random=$({ git var GIT_COMMITTER_IDENT ; echo "$refhash" ; cat "$1"; } | git hash-object --stdin) +dest="$1.tmp.${random}" + +trap 'rm -f "$dest" "$dest-2"' EXIT + +if ! git stripspace --strip-comments < "$1" > "${dest}" ; then + echo "cannot strip comments from $1" + exit 1 +fi + +if test ! -s "${dest}" ; then + echo "file is empty: $1" + exit 1 +fi + +reviewurl="$(git config --get gerrit.reviewUrl)" +if test -n "${reviewurl}" ; then + token="Link" + value="${reviewurl%/}/id/I$random" + pattern=".*/id/I[0-9a-f]\{40\}$" +else + token="Change-Id" + value="I$random" + pattern=".*" +fi + +if git interpret-trailers --parse < "$1" | grep -q "^$token: $pattern$" ; then + exit 0 +fi + +# There must be a Signed-off-by trailer for the code below to work. Insert a +# sentinel at the end to make sure there is one. +# Avoid the --in-place option which only appeared in Git 2.8 +if ! 
git interpret-trailers \ + --trailer "Signed-off-by: SENTINEL" < "$1" > "$dest-2" ; then + echo "cannot insert Signed-off-by sentinel line in $1" + exit 1 +fi + +# Make sure the trailer appears before any Signed-off-by trailers by inserting +# it as if it was a Signed-off-by trailer and then use sed to remove the +# Signed-off-by prefix and the Signed-off-by sentinel line. +# Avoid the --in-place option which only appeared in Git 2.8 +# Avoid the --where option which only appeared in Git 2.15 +if ! git -c trailer.where=before interpret-trailers \ + --trailer "Signed-off-by: $token: $value" < "$dest-2" | + sed -e "s/^Signed-off-by: \($token: \)/\1/" \ + -e "/^Signed-off-by: SENTINEL/d" > "$dest" ; then + echo "cannot insert $token line in $1" + exit 1 +fi + +if ! mv "${dest}" "$1" ; then + echo "cannot mv ${dest} to $1" + exit 1 +fi diff --git a/util/git-commit-msg.py b/util/git-commit-msg.py index b2ff164522..12baad8c19 100755 --- a/util/git-commit-msg.py +++ b/util/git-commit-msg.py @@ -36,27 +36,36 @@ from maint.lib import maintainers from style.repo import GitRepo + def _printErrorQuit(error_message): """ - Print an error message, followed my a help message and inform failure. + Print an error message, followed my a help message and inform failure. - @param error_message A message describing the error that caused the - failure. + @param error_message A message describing the error that caused the + failure. 
""" print(error_message) - print("The commit has been cancelled, but a copy of it can be found in " - + sys.argv[1] + " : ") + print( + "The commit has been cancelled, but a copy of it can be found in " + + sys.argv[1] + + " : " + ) - print(""" + print( + """ -------------------------------------------------------------------------- - """) + """ + ) print(open(sys.argv[1], "r").read()) - print(""" + print( + """ -------------------------------------------------------------------------- - """) + """ + ) - print(""" + print( + """ The first line of a commit must contain one or more gem5 tags separated by commas (see MAINTAINERS.yaml for the possible tags), followed by a colon and a commit title. There must be no leading nor trailing whitespaces. @@ -74,15 +83,17 @@ e.g.: mem,mem-cache: Improve packet class readability The packet class... -""") +""" + ) sys.exit(1) + def _validateTags(commit_header): """ - Check if all tags in the commit header belong to the list of valid - gem5 tags. + Check if all tags in the commit header belong to the list of valid + gem5 tags. - @param commit_header The first line of the commit message. + @param commit_header The first line of the commit message. 
""" # List of valid tags @@ -90,14 +101,15 @@ def _validateTags(commit_header): valid_tags = [tag for tag, _ in maintainer_dict] # Remove non-tag 'pmc' and add special tags not in MAINTAINERS.yaml - valid_tags.remove('pmc') - valid_tags.extend(['RFC', 'WIP']) + valid_tags.remove("pmc") + valid_tags.extend(["RFC", "WIP"]) - tags = ''.join(commit_header.split(':')[0].split()).split(',') - if (any(tag not in valid_tags for tag in tags)): + tags = "".join(commit_header.split(":")[0].split()).split(",") + if any(tag not in valid_tags for tag in tags): invalid_tag = next((tag for tag in tags if tag not in valid_tags)) _printErrorQuit("Invalid Gem5 tag: " + invalid_tag) + # Go to git directory os.chdir(GitRepo().repo_base()) @@ -108,9 +120,10 @@ commit_message = open(sys.argv[1]).read() # a commit title commit_message_lines = commit_message.splitlines() commit_header = commit_message_lines[0] -commit_header_match = \ - re.search("^(fixup! )?(\S[\w\-][,\s*[\w\-]+]*:.+\S$)", commit_header) -if ((commit_header_match is None)): +commit_header_match = re.search( + "^(fixup! )?(\S[\w\-][,\s*[\w\-]+]*:.+\S$)", commit_header +) +if commit_header_match is None: _printErrorQuit("Invalid commit header") if commit_header_match.group(1) == "fixup! ": sys.exit(0) @@ -119,21 +132,29 @@ _validateTags(commit_header_match.group(2)) # Make sure commit title does not exceed threshold. 
This line is limited to # a smaller number because version control systems may add a prefix, causing # line-wrapping for longer lines -commit_title = commit_header.split(':')[1] +commit_title = commit_header.split(":")[1] max_header_size = 65 -if (len(commit_header) > max_header_size): - _printErrorQuit("The commit header (tags + title) is too long (" + \ - str(len(commit_header)) + " > " + str(max_header_size) + ")") +if len(commit_header) > max_header_size: + _printErrorQuit( + "The commit header (tags + title) is too long (" + + str(len(commit_header)) + + " > " + + str(max_header_size) + + ")" + ) # Then there must be at least one empty line between the commit header and # the commit description -if (commit_message_lines[1] != ""): - _printErrorQuit("Please add an empty line between the commit title and " \ - "its description") +if commit_message_lines[1] != "": + _printErrorQuit( + "Please add an empty line between the commit title and " + "its description" + ) # Encourage providing descriptions -if (re.search("^(Signed-off-by|Change-Id|Reviewed-by):", - commit_message_lines[2])): +if re.search( + "^(Signed-off-by|Change-Id|Reviewed-by):", commit_message_lines[2] +): print("Warning: Commit does not have a description") sys.exit(0) diff --git a/util/git-pre-commit.py b/util/git-pre-commit.py index 82fcf39001..766013fe3c 100755 --- a/util/git-pre-commit.py +++ b/util/git-pre-commit.py @@ -36,7 +36,6 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- from tempfile import TemporaryFile import os import subprocess @@ -48,11 +47,11 @@ from style.style import StdioUI, check_ignores import argparse -parser = argparse.ArgumentParser( - description="gem5 git style checker hook") +parser = argparse.ArgumentParser(description="gem5 git style checker hook") -parser.add_argument("--verbose", "-v", action="store_true", - help="Produce verbose output") +parser.add_argument( + "--verbose", "-v", action="store_true", help="Produce verbose output" +) args = parser.parse_args() @@ -80,16 +79,22 @@ for status, fname in git.status(filter="MA", cached=True): try: status = git.file_from_index(fname) except UnicodeDecodeError: - print("Decoding '" + fname - + "' throws a UnicodeDecodeError.", file=sys.stderr) - print("Please check '" + fname - + "' exclusively uses utf-8 character encoding.", file=sys.stderr) + print( + "Decoding '" + fname + "' throws a UnicodeDecodeError.", + file=sys.stderr, + ) + print( + "Please check '" + + fname + + "' exclusively uses utf-8 character encoding.", + file=sys.stderr, + ) sys.exit(1) f = TemporaryFile() - f.write(status.encode('utf-8')) + f.write(status.encode("utf-8")) - verifiers = [ v(ui, opts, base=repo_base) for v in all_verifiers ] + verifiers = [v(ui, opts, base=repo_base) for v in all_verifiers] for v in verifiers: f.seek(0) # It is prefered that the first check is silent as it is in the @@ -113,14 +118,17 @@ if failing_files: "Please run the style checker manually to fix " "the offending files.\n" "To check your modifications, run: util/style.py -m", - file=sys.stderr) + file=sys.stderr, + ) print("\n", file=sys.stderr) if staged_mismatch: print( "It looks like you have forgotten to stage your " "fixes for commit in\n" - "the following files: ", file=sys.stderr) + "the following files: ", + file=sys.stderr, + ) for f in staged_mismatch: print("\t{}".format(f), file=sys.stderr) print("Please `git --add' them", file=sys.stderr) diff --git a/util/logroll.py b/util/logroll.py index 
216f77ed45..02ca309e87 100755 --- a/util/logroll.py +++ b/util/logroll.py @@ -33,32 +33,45 @@ import unittest import unittest.mock parser = argparse.ArgumentParser( - description='''Circular buffer for text output. + description="""Circular buffer for text output. To capture the rolling last 25 lines of output from command "command": command | logroll.py -n 25 While that's running, to see the most recent 25 lines of output without interrupting "command", send SIGUSR1 to the logroll.py process: - kill -s USR1 ${PID of logroll.py}''') -parser.add_argument('-n', '--lines', default=10, type=int, - help='Maximum number of lines to buffer at a time.') -parser.add_argument('file', nargs='?', default=sys.stdin, - type=argparse.FileType('r', encoding='UTF-8'), - help='File to read from, default is stdin') + kill -s USR1 ${PID of logroll.py}""" +) +parser.add_argument( + "-n", + "--lines", + default=10, + type=int, + help="Maximum number of lines to buffer at a time.", +) +parser.add_argument( + "file", + nargs="?", + default=sys.stdin, + type=argparse.FileType("r", encoding="UTF-8"), + help="File to read from, default is stdin", +) args = parser.parse_args() + def dump_lines(lines, idx): for line in lines[idx:]: - print(line, end='') + print(line, end="") for line in lines[:idx]: - print(line, end='') + print(line, end="") + def dump_and_exit(lines, idx): dump_lines(lines, idx) sys.exit(0) + def main(target, incoming): idx = 0 lines = [] @@ -80,34 +93,35 @@ def main(target, incoming): dump_lines(lines, idx) -if __name__ == '__main__': + +if __name__ == "__main__": main(target=args.lines, incoming=args.file) - # Unit tests # + class CopyingMock(unittest.mock.MagicMock): def __call__(self, *args, **kwargs): args = copy.deepcopy(args) kwargs = copy.deepcopy(kwargs) return super(CopyingMock, self).__call__(*args, **kwargs) + class TestLogroll(unittest.TestCase): # Test data. 
- lines2 = ['First line', - 'Second line'] - lines3 = ['First line', - 'Second line', - 'Third line'] - lines8 = ['First line', - 'Second line', - 'Third line', - 'Fourth line', - 'Fifth line', - 'Sixth line', - 'Seventh line', - 'Eigth line'] + lines2 = ["First line", "Second line"] + lines3 = ["First line", "Second line", "Third line"] + lines8 = [ + "First line", + "Second line", + "Third line", + "Fourth line", + "Fifth line", + "Sixth line", + "Seventh line", + "Eigth line", + ] # Generator which returns lines like a file object would. def line_gen(self, lines): @@ -133,113 +147,157 @@ class TestLogroll(unittest.TestCase): # Set up a mock of signal.signal to record handlers in a dict. def mock_signal_dict(self, mock): signal_dict = {} + def signal_signal(num, action): signal_dict[num] = action + mock.side_effect = signal_signal return signal_dict # Actual test methods. def test_filling_dump_lines(self): - with unittest.mock.patch('builtins.print') as mock_print: + with unittest.mock.patch("builtins.print") as mock_print: dump_lines(self.lines2, len(self.lines2)) - calls = list([ unittest.mock.call(line, end='') for - line in self.lines2 ]) + calls = list( + [unittest.mock.call(line, end="") for line in self.lines2] + ) mock_print.assert_has_calls(calls) def test_full_dump_lines(self): - with unittest.mock.patch('builtins.print') as mock_print: + with unittest.mock.patch("builtins.print") as mock_print: dump_lines(self.lines2, 0) - calls = list([ unittest.mock.call(line, end='') for - line in self.lines2 ]) + calls = list( + [unittest.mock.call(line, end="") for line in self.lines2] + ) mock_print.assert_has_calls(calls) def test_offset_dump_lines(self): - with unittest.mock.patch('builtins.print') as mock_print: + with unittest.mock.patch("builtins.print") as mock_print: dump_lines(self.lines3, 1) - calls = [ unittest.mock.call(self.lines3[1], end=''), - unittest.mock.call(self.lines3[2], end=''), - unittest.mock.call(self.lines3[0], end='') ] + calls = [ + 
unittest.mock.call(self.lines3[1], end=""), + unittest.mock.call(self.lines3[2], end=""), + unittest.mock.call(self.lines3[0], end=""), + ] mock_print.assert_has_calls(calls) def test_dump_and_exit(self): - with unittest.mock.patch('sys.exit') as mock_sys_exit, \ - unittest.mock.patch(__name__ + '.dump_lines', - new_callable=CopyingMock) as mock_dump_lines: + with unittest.mock.patch( + "sys.exit" + ) as mock_sys_exit, unittest.mock.patch( + __name__ + ".dump_lines", new_callable=CopyingMock + ) as mock_dump_lines: idx = 1 dump_and_exit(self.lines3, idx) mock_dump_lines.assert_called_with(self.lines3, idx) mock_sys_exit.assert_called_with(0) def test_filling_main(self): - with unittest.mock.patch('builtins.print') as mock_print: + with unittest.mock.patch("builtins.print") as mock_print: main(5, self.line_gen(self.lines3)) - calls = list([ unittest.mock.call(line, end='') for - line in self.lines3 ]) + calls = list( + [unittest.mock.call(line, end="") for line in self.lines3] + ) mock_print.assert_has_calls(calls) def test_full_main(self): - with unittest.mock.patch('builtins.print') as mock_print: + with unittest.mock.patch("builtins.print") as mock_print: main(5, self.line_gen(self.lines8)) - calls = list([ unittest.mock.call(line, end='') for - line in self.lines8[-5:] ]) + calls = list( + [unittest.mock.call(line, end="") for line in self.lines8[-5:]] + ) mock_print.assert_has_calls(calls) def test_sigusr1_filling_main(self): - with unittest.mock.patch('signal.signal') as mock_signal, \ - unittest.mock.patch(__name__ + '.dump_lines', - new_callable=CopyingMock) as mock_dump_lines: + with unittest.mock.patch( + "signal.signal" + ) as mock_signal, unittest.mock.patch( + __name__ + ".dump_lines", new_callable=CopyingMock + ) as mock_dump_lines: signal_dict = self.mock_signal_dict(mock_signal) - main(4, self.signal_line_gen( - self.lines8, 3, signal_dict, signal.SIGUSR1)) + main( + 4, + self.signal_line_gen( + self.lines8, 3, signal_dict, signal.SIGUSR1 + ), + ) - 
mock_dump_lines.assert_has_calls([ - unittest.mock.call(self.lines8[0:3], 3 % 4), - unittest.mock.call(self.lines8[-4:], len(self.lines8) % 4) - ]) + mock_dump_lines.assert_has_calls( + [ + unittest.mock.call(self.lines8[0:3], 3 % 4), + unittest.mock.call(self.lines8[-4:], len(self.lines8) % 4), + ] + ) def test_sigint_filling_main(self): - with unittest.mock.patch('signal.signal') as mock_signal, \ - unittest.mock.patch(__name__ + '.dump_lines', - new_callable=CopyingMock) as mock_dump_lines: + with unittest.mock.patch( + "signal.signal" + ) as mock_signal, unittest.mock.patch( + __name__ + ".dump_lines", new_callable=CopyingMock + ) as mock_dump_lines: signal_dict = self.mock_signal_dict(mock_signal) with self.assertRaises(SystemExit): - main(4, self.signal_line_gen( - self.lines8, 3, signal_dict, signal.SIGINT)) + main( + 4, + self.signal_line_gen( + self.lines8, 3, signal_dict, signal.SIGINT + ), + ) - mock_dump_lines.assert_has_calls([ - unittest.mock.call(self.lines8[0:3], 3 % 4), - ]) + mock_dump_lines.assert_has_calls( + [unittest.mock.call(self.lines8[0:3], 3 % 4)] + ) def test_sigusr1_full_main(self): - with unittest.mock.patch('signal.signal') as mock_signal, \ - unittest.mock.patch(__name__ + '.dump_lines', - new_callable=CopyingMock) as mock_dump_lines: + with unittest.mock.patch( + "signal.signal" + ) as mock_signal, unittest.mock.patch( + __name__ + ".dump_lines", new_callable=CopyingMock + ) as mock_dump_lines: signal_dict = self.mock_signal_dict(mock_signal) - main(4, self.signal_line_gen( - self.lines8, 5, signal_dict, signal.SIGUSR1)) + main( + 4, + self.signal_line_gen( + self.lines8, 5, signal_dict, signal.SIGUSR1 + ), + ) - mock_dump_lines.assert_has_calls([ - unittest.mock.call(self.lines8[4:5] + self.lines8[1:4], 5 % 4), - unittest.mock.call(self.lines8[-4:], len(self.lines8) % 4) - ]) + mock_dump_lines.assert_has_calls( + [ + unittest.mock.call( + self.lines8[4:5] + self.lines8[1:4], 5 % 4 + ), + unittest.mock.call(self.lines8[-4:], 
len(self.lines8) % 4), + ] + ) def test_sigint_full_main(self): - with unittest.mock.patch('signal.signal') as mock_signal, \ - unittest.mock.patch(__name__ + '.dump_lines', - new_callable=CopyingMock) as mock_dump_lines: + with unittest.mock.patch( + "signal.signal" + ) as mock_signal, unittest.mock.patch( + __name__ + ".dump_lines", new_callable=CopyingMock + ) as mock_dump_lines: signal_dict = self.mock_signal_dict(mock_signal) with self.assertRaises(SystemExit): - main(4, self.signal_line_gen( - self.lines8, 5, signal_dict, signal.SIGINT)) + main( + 4, + self.signal_line_gen( + self.lines8, 5, signal_dict, signal.SIGINT + ), + ) - mock_dump_lines.assert_has_calls([ - unittest.mock.call(self.lines8[4:5] + self.lines8[1:4], 5 % 4), - ]) + mock_dump_lines.assert_has_calls( + [ + unittest.mock.call( + self.lines8[4:5] + self.lines8[1:4], 5 % 4 + ) + ] + ) diff --git a/util/m5/README.md b/util/m5/README.md index 0257d133dc..2362eed51b 100644 --- a/util/m5/README.md +++ b/util/m5/README.md @@ -410,5 +410,3 @@ subtley broken, when used to target a different ABI. To build these objects correctly, we would need to use a proper cross build environment for their corresponding languages. Something like this could likely be set up using a tool like buildroot. 
- - diff --git a/util/m5/src/command/SConscript b/util/m5/src/command/SConscript index a9a59a1345..0ca53f3a4b 100644 --- a/util/m5/src/command/SConscript +++ b/util/m5/src/command/SConscript @@ -38,6 +38,8 @@ command_ccs = [ 'readfile.cc', 'resetstats.cc', 'writefile.cc', + 'workbegin.cc', + 'workend.cc' ] command_objs = list(map(env.StaticObject, command_ccs)) diff --git a/src/arch/mips/vecregs.hh b/util/m5/src/command/workbegin.cc similarity index 74% rename from src/arch/mips/vecregs.hh rename to util/m5/src/command/workbegin.cc index 546e4cf4ee..82bf38b16f 100644 --- a/src/arch/mips/vecregs.hh +++ b/util/m5/src/command/workbegin.cc @@ -1,6 +1,5 @@ /* - * Copyright (c) 2006 The Regents of The University of Michigan - * Copyright (c) 2007 MIPS Technologies, Inc. + * Copyright (c) 2022 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -27,23 +26,27 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __ARCH_MIPS_VECREGS_HH__ -#define __ARCH_MIPS_VECREGS_HH__ +#include "args.hh" +#include "command.hh" +#include "dispatch_table.hh" -#include "arch/generic/vec_pred_reg.hh" -#include "arch/generic/vec_reg.hh" - -namespace gem5 +namespace { -namespace MipsISA +bool +do_work_begin(const DispatchTable &dt, Args &args) { + uint64_t workid, threadid; + if (!args.pop(workid, 0) || !args.pop(threadid, 0)) + return false; -// Not applicable to MIPS -using VecRegContainer = ::gem5::DummyVecRegContainer; -using VecPredRegContainer = ::gem5::DummyVecPredRegContainer; + (*dt.m5_work_begin)(workid, threadid); -} // namespace MipsISA -} // namespace gem5 + return true; +} -#endif +Command workbegin = { + "workbegin", 0, 2, do_work_begin, "[workid][threadid]\n" + " Exit immediately" }; + +} // anonymous namespace diff --git a/src/arch/power/vecregs.hh b/util/m5/src/command/workend.cc similarity index 74% rename from src/arch/power/vecregs.hh rename to util/m5/src/command/workend.cc index 33ac377384..5134c807a3 100644 --- a/src/arch/power/vecregs.hh +++ b/util/m5/src/command/workend.cc @@ -1,6 +1,5 @@ /* - * Copyright (c) 2009 The University of Edinburgh - * Copyright (c) 2021 IBM Corporation + * Copyright (c) 2022 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -27,23 +26,27 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __ARCH_POWER_VECREGS_HH__ -#define __ARCH_POWER_VECREGS_HH__ +#include "args.hh" +#include "command.hh" +#include "dispatch_table.hh" -#include "arch/generic/vec_pred_reg.hh" -#include "arch/generic/vec_reg.hh" - -namespace gem5 +namespace { -namespace PowerISA +bool +do_work_end(const DispatchTable &dt, Args &args) { + uint64_t workid, threadid; + if (!args.pop(workid, 0) || !args.pop(threadid, 0)) + return false; -// Not applicable to Power -using VecRegContainer = ::gem5::DummyVecRegContainer; -using VecPredRegContainer = ::gem5::DummyVecPredRegContainer; + (*dt.m5_work_end)(workid, threadid); -} // namespace PowerISA -} // namespace gem5 + return true; +} -#endif // __ARCH_POWER_VECREGS_HH__ +Command workend = { + "workend", 0, 2, do_work_end, "[workid [threadid]]\n" + " Exit immediately" }; + +} // anonymous namespace diff --git a/util/m5/src/java/gem5/ops.cc b/util/m5/src/java/gem5/ops.cc index da21840766..ef524a0e8e 100644 --- a/util/m5/src/java/gem5/ops.cc +++ b/util/m5/src/java/gem5/ops.cc @@ -327,4 +327,3 @@ Java_gem5_Ops_work_1end(JNIEnv *env, jobject obj, { getDispatchTable(env, obj)->m5_work_end(j_workid, j_threadid); } - diff --git a/util/maint/lib/maintainers.py b/util/maint/lib/maintainers.py index 6dd8d268e8..93ea1a17bf 100644 --- a/util/maint/lib/maintainers.py +++ b/util/maint/lib/maintainers.py @@ -38,60 +38,76 @@ import email.utils import enum import os -from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, \ - TextIO, Tuple, Union +from typing import ( + Any, + Dict, + Iterator, + List, + Mapping, + Optional, + Sequence, + TextIO, + Tuple, + Union, +) import yaml PathOrFile = Union[TextIO, str] + class FileFormatException(Exception): pass + class MissingFieldException(FileFormatException): pass + class IllegalValueException(FileFormatException): pass + class Status(enum.Enum): MAINTAINED = enum.auto() ORPHANED = enum.auto() @classmethod - def from_str(cls, key: str) -> 'Status': - _status_dict = { - 
'maintained': cls.MAINTAINED, - 'orphaned': cls.ORPHANED, - } + def from_str(cls, key: str) -> "Status": + _status_dict = {"maintained": cls.MAINTAINED, "orphaned": cls.ORPHANED} return _status_dict[key] def __str__(self) -> str: - return { - Status.MAINTAINED: 'maintained', - Status.ORPHANED: 'orphaned', - }[self] + return {Status.MAINTAINED: "maintained", Status.ORPHANED: "orphaned"}[ + self + ] + class Subsystem(object): tag: str status: Status - maintainers: List[Tuple[str, str]] # Name, email + maintainers: List[Tuple[str, str]] # Name, email description: str - def __init__(self, tag: str, - maintainers: Optional[Sequence[Tuple[str, str]]], - description: str = '', - status: Status = Status.ORPHANED): + def __init__( + self, + tag: str, + maintainers: Optional[Sequence[Tuple[str, str]]], + description: str = "", + status: Status = Status.ORPHANED, + ): self.tag = tag self.status = status self.maintainers = list(maintainers) if maintainers is not None else [] - self.description = description if description is not None else '' + self.description = description if description is not None else "" + class Maintainers(object): - DEFAULT_MAINTAINERS = os.path.join(os.path.dirname(__file__), - '../../../MAINTAINERS.yaml') + DEFAULT_MAINTAINERS = os.path.join( + os.path.dirname(__file__), "../../../MAINTAINERS.yaml" + ) - _subsystems: Dict[str, Subsystem] # tag -> Subsystem + _subsystems: Dict[str, Subsystem] # tag -> Subsystem def __init__(self, ydict: Mapping[str, Any]): self._subsystems = {} @@ -99,8 +115,9 @@ class Maintainers(object): self._subsystems[tag] = Maintainers._parse_subsystem(tag, maint) @classmethod - def from_file(cls, path_or_file: Optional[PathOrFile] = None) \ - -> "Maintainers": + def from_file( + cls, path_or_file: Optional[PathOrFile] = None + ) -> "Maintainers": return cls(Maintainers._load_maintainers_file(path_or_file)) @@ -109,14 +126,14 @@ class Maintainers(object): return cls(yaml.load(yaml_str, Loader=yaml.SafeLoader)) @classmethod - def 
_load_maintainers_file(cls, - path_or_file: Optional[PathOrFile] = None) \ - -> Mapping[str, Any]: + def _load_maintainers_file( + cls, path_or_file: Optional[PathOrFile] = None + ) -> Mapping[str, Any]: if path_or_file is None: path_or_file = cls.DEFAULT_MAINTAINERS if isinstance(path_or_file, str): - with open(path_or_file, 'r') as fin: + with open(path_or_file, "r") as fin: return yaml.load(fin, Loader=yaml.SafeLoader) else: return yaml.load(path_or_file, Loader=yaml.SafeLoader) @@ -128,28 +145,36 @@ class Maintainers(object): return ydict[name] except KeyError: raise MissingFieldException( - f"{tag}: Required field '{name}' is missing") + f"{tag}: Required field '{name}' is missing" + ) maintainers: List[Tuple[str, str]] = [] - raw_maintainers = ydict.get('maintainers', []) + raw_maintainers = ydict.get("maintainers", []) if not isinstance(raw_maintainers, Sequence): raise IllegalValueException( - f"{tag}: Illegal field 'maintainers' isn't a list.") + f"{tag}: Illegal field 'maintainers' isn't a list." 
+ ) for maintainer in raw_maintainers: name, address = email.utils.parseaddr(maintainer) - if name == '' and address == '': + if name == "" and address == "": raise IllegalValueException( - f"{tag}: Illegal maintainer field: '{maintainer}'") + f"{tag}: Illegal maintainer field: '{maintainer}'" + ) maintainers.append((name, address)) try: - status = Status.from_str(required_field('status')) + status = Status.from_str(required_field("status")) except KeyError: raise IllegalValueException( - f"{tag}: Invalid status '{ydict['status']}'") + f"{tag}: Invalid status '{ydict['status']}'" + ) - return Subsystem(tag, maintainers=maintainers, status=status, - description=ydict.get('desc', '')) + return Subsystem( + tag, + maintainers=maintainers, + status=status, + description=ydict.get("desc", ""), + ) def __iter__(self) -> Iterator[Tuple[str, Subsystem]]: return iter(list(self._subsystems.items())) @@ -157,17 +182,19 @@ class Maintainers(object): def __getitem__(self, key: str) -> Subsystem: return self._subsystems[key] + def _main(): maintainers = Maintainers.from_file() for tag, subsys in maintainers: - print(f'{tag}: {subsys.description}') - print(f' Status: {subsys.status}') - print(f' Maintainers:') + print(f"{tag}: {subsys.description}") + print(f" Status: {subsys.status}") + print(f" Maintainers:") for maint in subsys.maintainers: - print(f' - {maint[0]} <{maint[1]}>') + print(f" - {maint[0]} <{maint[1]}>") print() -if __name__ == '__main__': + +if __name__ == "__main__": _main() __all__ = [ diff --git a/util/maint/lib/tests/maintainers.py b/util/maint/lib/tests/maintainers.py index cc71f21626..eeea6ed701 100644 --- a/util/maint/lib/tests/maintainers.py +++ b/util/maint/lib/tests/maintainers.py @@ -71,6 +71,7 @@ key: maintainers: """ + class StatusTestSuite(unittest.TestCase): """Test cases for maintainers.Status""" @@ -84,23 +85,27 @@ class StatusTestSuite(unittest.TestCase): assert value == Status.from_str(name) assert str(value) == name + class 
MaintainersTestSuite(unittest.TestCase): """Test cases for Maintainers""" def test_parser_valid(self): maint = Maintainers.from_yaml(YAML_VALID) - subsys = maint['maintained'] + subsys = maint["maintained"] self.assertEqual(subsys.status, Status.MAINTAINED) - self.assertEqual(subsys.description, '') - self.assertEqual(subsys.maintainers, [ - ('John Doe', 'john.doe@test.gem5.org'), - ('Jane Doe', 'jane.doe@test.gem5.org'), - ]) + self.assertEqual(subsys.description, "") + self.assertEqual( + subsys.maintainers, + [ + ("John Doe", "john.doe@test.gem5.org"), + ("Jane Doe", "jane.doe@test.gem5.org"), + ], + ) - subsys = maint['orphaned'] + subsys = maint["orphaned"] self.assertEqual(subsys.status, Status.ORPHANED) - self.assertEqual(subsys.description, 'Abandoned') + self.assertEqual(subsys.description, "Abandoned") self.assertEqual(subsys.maintainers, []) def test_parser_invalid(self): @@ -113,5 +118,6 @@ class MaintainersTestSuite(unittest.TestCase): with self.assertRaises(IllegalValueException): Maintainers.from_yaml(YAML_MAINTAINERS_NOT_LIST) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/util/maint/list_changes.py b/util/maint/list_changes.py index e366cc26c1..9ada2b52f0 100755 --- a/util/maint/list_changes.py +++ b/util/maint/list_changes.py @@ -40,6 +40,7 @@ import subprocess import re from functools import wraps + class Commit(object): _re_tag = re.compile(r"^((?:\w|-)+): (.*)$") @@ -49,7 +50,7 @@ class Commit(object): self._tags = None def _git(self, args): - return subprocess.check_output([ "git", ] + args).decode() + return subprocess.check_output(["git"] + args).decode() @property def log(self): @@ -58,9 +59,11 @@ class Commit(object): """ if self._log is None: - self._log = self._git( - ["show", "--format=%B", "--no-patch", str(self.rev) ] - ).rstrip("\n").split("\n") + self._log = ( + self._git(["show", "--format=%B", "--no-patch", str(self.rev)]) + .rstrip("\n") + .split("\n") + ) return self._log @property @@ 
-79,7 +82,7 @@ class Commit(object): try: tags[key].append(value) except KeyError: - tags[key] = [ value ] + tags[key] = [value] self._tags = tags return self._tags @@ -103,6 +106,7 @@ class Commit(object): def __str__(self): return "%s: %s" % (self.rev[0:8], self.log[0]) + def list_revs(branch, baseline=None, paths=[]): """Get a generator that lists git revisions that exist in 'branch'. If the optional parameter 'baseline' is specified, the generator @@ -118,7 +122,7 @@ def list_revs(branch, baseline=None, paths=[]): query = str(branch) changes = subprocess.check_output( - [ "git", "rev-list", query, '--'] + paths + ["git", "rev-list", query, "--"] + paths ).decode() if changes == "": @@ -128,53 +132,92 @@ def list_revs(branch, baseline=None, paths=[]): assert rev != "" yield Commit(rev) + def list_changes(upstream, feature, paths=[]): feature_revs = tuple(list_revs(upstream, feature, paths=paths)) upstream_revs = tuple(list_revs(feature, upstream, paths=paths)) - feature_cids = dict([ - (c.change_id, c) for c in feature_revs if c.change_id is not None ]) - upstream_cids = dict([ - (c.change_id, c) for c in upstream_revs if c.change_id is not None ]) + feature_cids = dict( + [(c.change_id, c) for c in feature_revs if c.change_id is not None] + ) + upstream_cids = dict( + [(c.change_id, c) for c in upstream_revs if c.change_id is not None] + ) - incoming = [r for r in reversed(upstream_revs) \ - if r.change_id and r.change_id not in feature_cids] - outgoing = [r for r in reversed(feature_revs) \ - if r.change_id and r.change_id not in upstream_cids] - common = [r for r in reversed(feature_revs) \ - if r.change_id in upstream_cids] - upstream_unknown = [r for r in reversed(upstream_revs) \ - if r.change_id is None] - feature_unknown = [r for r in reversed(feature_revs) \ - if r.change_id is None] + incoming = [ + r + for r in reversed(upstream_revs) + if r.change_id and r.change_id not in feature_cids + ] + outgoing = [ + r + for r in reversed(feature_revs) + if 
r.change_id and r.change_id not in upstream_cids + ] + common = [ + r for r in reversed(feature_revs) if r.change_id in upstream_cids + ] + upstream_unknown = [ + r for r in reversed(upstream_revs) if r.change_id is None + ] + feature_unknown = [ + r for r in reversed(feature_revs) if r.change_id is None + ] return incoming, outgoing, common, upstream_unknown, feature_unknown + def _main(): import argparse - parser = argparse.ArgumentParser( - description="List incoming and outgoing changes in a feature branch") - parser.add_argument("--upstream", "-u", type=str, default="origin/master", - help="Upstream branch for comparison. " \ - "Default: %(default)s") - parser.add_argument("--feature", "-f", type=str, default="HEAD", - help="Feature branch for comparison. " \ - "Default: %(default)s") - parser.add_argument("--show-unknown", action="store_true", - help="Print changes without Change-Id tags") - parser.add_argument("--show-common", action="store_true", - help="Print common changes") - parser.add_argument("--deep-search", action="store_true", - help="Use a deep search to find incorrectly " \ - "rebased changes") - parser.add_argument("paths", metavar="PATH", type=str, nargs="*", - help="Paths to list changes for") + parser = argparse.ArgumentParser( + description="List incoming and outgoing changes in a feature branch" + ) + + parser.add_argument( + "--upstream", + "-u", + type=str, + default="origin/master", + help="Upstream branch for comparison. " "Default: %(default)s", + ) + parser.add_argument( + "--feature", + "-f", + type=str, + default="HEAD", + help="Feature branch for comparison. 
" "Default: %(default)s", + ) + parser.add_argument( + "--show-unknown", + action="store_true", + help="Print changes without Change-Id tags", + ) + parser.add_argument( + "--show-common", action="store_true", help="Print common changes" + ) + parser.add_argument( + "--deep-search", + action="store_true", + help="Use a deep search to find incorrectly " "rebased changes", + ) + parser.add_argument( + "paths", + metavar="PATH", + type=str, + nargs="*", + help="Paths to list changes for", + ) args = parser.parse_args() - incoming, outgoing, common, upstream_unknown, feature_unknown = \ - list_changes(args.upstream, args.feature, paths=args.paths) + ( + incoming, + outgoing, + common, + upstream_unknown, + feature_unknown, + ) = list_changes(args.upstream, args.feature, paths=args.paths) if incoming: print("Incoming changes:") @@ -208,15 +251,19 @@ def _main(): if args.deep_search: print("Incorrectly rebased changes:") all_upstream_revs = list_revs(args.upstream, paths=args.paths) - all_upstream_cids = dict([ - (c.change_id, c) for c in all_upstream_revs \ - if c.change_id is not None ]) - incorrect_outgoing = [r for r in outgoing if r.change_id in all_upstream_cids] + all_upstream_cids = dict( + [ + (c.change_id, c) + for c in all_upstream_revs + if c.change_id is not None + ] + ) + incorrect_outgoing = [ + r for r in outgoing if r.change_id in all_upstream_cids + ] for rev in incorrect_outgoing: print(rev) - - if __name__ == "__main__": _main() diff --git a/util/maint/show_changes_by_file.py b/util/maint/show_changes_by_file.py index 3c08b2199d..be222620a0 100755 --- a/util/maint/show_changes_by_file.py +++ b/util/maint/show_changes_by_file.py @@ -35,11 +35,13 @@ import subprocess from collections import OrderedDict, defaultdict + class OrderedDefaultDict(OrderedDict, defaultdict): def __init__(self, default_factory=None, *args, **kwargs): super(OrderedDefaultDict, self).__init__(*args, **kwargs) self.default_factory = default_factory + def diff_files(upstream, 
feature, paths=[]): """Given two git branches and an optional parameter 'path', determine which files differ between the two branches. Afterwards, organize the @@ -48,12 +50,12 @@ def diff_files(upstream, feature, paths=[]): Returns: Dictionary of directories with their corresponding files """ - raw = subprocess.check_output( - [ "git", "diff", "--name-status", "%s..%s" % (upstream, feature), - "--" ] + paths + raw = subprocess.check_output( + ["git", "diff", "--name-status", "%s..%s" % (upstream, feature), "--"] + + paths ) - path = [line.split('\t')[1] for line in raw.splitlines()] + path = [line.split("\t")[1] for line in raw.splitlines()] odd = OrderedDefaultDict(list) for p in path: @@ -63,6 +65,7 @@ def diff_files(upstream, feature, paths=[]): return odd + def cl_hash(upstream, feature, path): """Given two git branches and full path, record the identifier hash for changesets which diff between the upstream branch and feature branch. @@ -73,26 +76,41 @@ def cl_hash(upstream, feature, path): """ raw = subprocess.check_output( - [ "git", "log", "--oneline", "%s..%s" % (upstream, feature), - "--", path ] + ["git", "log", "--oneline", "%s..%s" % (upstream, feature), "--", path] ) return [l.split()[0] for l in raw.splitlines()] + def _main(): import argparse - parser = argparse.ArgumentParser( - description="List all changes between an upstream branch and a " \ - "feature branch by filename(s) and changeset hash(es).") - parser.add_argument("--upstream", "-u", type=str, default="origin/master", - help="Upstream branch for comparison. " \ - "Default: %(default)s") - parser.add_argument("--feature", "-f", type=str, default="HEAD", - help="Feature branch for comparison. " \ - "Default: %(default)s") - parser.add_argument("paths", metavar="PATH", type=str, nargs="*", - help="Paths to list changes for") + parser = argparse.ArgumentParser( + description="List all changes between an upstream branch and a " + "feature branch by filename(s) and changeset hash(es)." 
+ ) + + parser.add_argument( + "--upstream", + "-u", + type=str, + default="origin/master", + help="Upstream branch for comparison. " "Default: %(default)s", + ) + parser.add_argument( + "--feature", + "-f", + type=str, + default="HEAD", + help="Feature branch for comparison. " "Default: %(default)s", + ) + parser.add_argument( + "paths", + metavar="PATH", + type=str, + nargs="*", + help="Paths to list changes for", + ) args = parser.parse_args() @@ -108,5 +126,6 @@ def _main(): print("\t%s" % s) print() + if __name__ == "__main__": _main() diff --git a/src/arch/riscv/O3CPU.py b/util/mem/Makefile similarity index 82% rename from src/arch/riscv/O3CPU.py rename to util/mem/Makefile index 74e658b0db..e6085e879c 100644 --- a/src/arch/riscv/O3CPU.py +++ b/util/mem/Makefile @@ -1,4 +1,4 @@ -# Copyright 2021 Google, Inc. +# Copyright 2022 Google, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -23,9 +23,21 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from m5.objects.RiscvCPU import RiscvO3CPU +.PHONY: all clean -O3CPU = RiscvO3CPU +CXXFLAGS ?= -g -O2 +CPPFLAGS ?= -MD -MP -# Deprecated -DerivO3CPU = O3CPU +SRCS = shared_memory_client_example.cc +EXES = $(SRCS:.cc=) +DEPS = $(SRCS:.cc=.d) + +all: $(EXES) + +clean: + rm -rf $(EXES) $(DEPS) + +$(EXES): %: %.cc + $(CXX) $(CPPFLAGS) $(CXXFLAGS) $(LDFLAGS) -o $@ $< $(LDLIBS) + +-include $(DEPS) diff --git a/util/mem/shared_memory_client.hh b/util/mem/shared_memory_client.hh new file mode 100644 index 0000000000..15a1a4fbd6 --- /dev/null +++ b/util/mem/shared_memory_client.hh @@ -0,0 +1,278 @@ +/* + * Copyright 2022 Google, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __UTIL_MEM_SHARED_MEMORY_CLIENT_HH__ +#define __UTIL_MEM_SHARED_MEMORY_CLIENT_HH__ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace gem5 +{ +namespace util +{ +namespace memory +{ + +class SharedMemoryClient +{ + public: + enum RequestType : int + { + kGetPhysRange = 0 + }; + + explicit SharedMemoryClient(const std::string& server_path); + + // Request to access the range [start, end] of physical memory from the + // viewpoint of the gem5 system providing the shared memory service. + // There is no guarantee that the physical memory range is not used by + // others. It is the user's responsibility to make sure not to break other + // services or IPs accessing the same range. For example, you might want to + // configure the kernel running in gem5 simulator to reserve such range. + void* MapMemory(uint64_t start, uint64_t end); + + // Unmap previous mapped region, no client is needed here. 
+ static bool UnmapMemory(void* mem); + + private: + using AllocRecordStorage = std::unordered_map; + + int GetConnection(); + bool SendGetPhysRangeRequest(int sock_fd, uint64_t start, uint64_t end); + bool RecvGetPhysRangeResponse(int sock_fd, int* ptr_fd, off_t* ptr_offset); + void* DoMap(int shm_fd, off_t shm_offset, size_t size); + + bool SendAll(int sock_fd, const void* buffer, size_t size); + + static AllocRecordStorage& GetAllocRecordStorage(); + + std::string server_path_; +}; + +inline SharedMemoryClient::SharedMemoryClient(const std::string& server_path) + : server_path_(server_path) +{ +} + + +inline void* +SharedMemoryClient::MapMemory(uint64_t start, uint64_t end) +{ + void* mem = nullptr; + int sock_fd = -1; + int shm_fd = -1; + off_t shm_offset; + + do { + if (start > end) { + warnx("invalid range %" PRIu64 "-%" PRIu64, start, end); + break; + } + sock_fd = GetConnection(); + if (sock_fd < 0) { + warnx("cannot connect to shared memory server"); + break; + } + if (!SendGetPhysRangeRequest(sock_fd, start, end)) { + warnx("cannot send request to shared memory server"); + break; + } + if (!RecvGetPhysRangeResponse(sock_fd, &shm_fd, &shm_offset)) { + warnx("failed to read shared memory server response"); + break; + } + mem = DoMap(shm_fd, shm_offset, end - start + 1); + if (mem == nullptr) { + warnx("failed to create memory mapping"); + break; + } + } while (false); + + if (sock_fd >= 0) { + close(sock_fd); + } + if (shm_fd >= 0) { + close(shm_fd); + } + + return mem; +} + +inline bool +SharedMemoryClient::UnmapMemory(void* mem) +{ + auto& storage = GetAllocRecordStorage(); + auto it = storage.find(mem); + if (it == storage.end()) { + return false; + } + if (munmap(mem, it->second) < 0) { + warn("munmap failed"); + return false; + } + storage.erase(it); + return true; +} + +inline int +SharedMemoryClient::GetConnection() +{ + int sock_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (sock_fd < 0) { + warn("create unix socket failed"); + return -1; + } + + 
sockaddr_un serv_addr; + memset(&serv_addr, 0, sizeof(serv_addr)); + serv_addr.sun_family = AF_UNIX; + strncpy(serv_addr.sun_path, server_path_.c_str(), + sizeof(serv_addr.sun_path) - 1); + if (strlen(serv_addr.sun_path) != server_path_.size()) { + warnx("server address truncated"); + close(sock_fd); + return -1; + } + if (connect(sock_fd, reinterpret_cast(&serv_addr), + sizeof(serv_addr)) < 0) { + warn("connect failed"); + close(sock_fd); + return -1; + } + return sock_fd; +} + +inline bool +SharedMemoryClient::SendGetPhysRangeRequest(int sock_fd, uint64_t start, + uint64_t end) +{ + int req_type = RequestType::kGetPhysRange; + struct + { + uint64_t start; + uint64_t end; + } request = {start, end}; + return SendAll(sock_fd, &req_type, sizeof(req_type)) && + SendAll(sock_fd, &request, sizeof(request)); +} + +inline bool +SharedMemoryClient::RecvGetPhysRangeResponse(int sock_fd, int* ptr_fd, + off_t* ptr_offset) +{ + if (!ptr_fd || !ptr_offset) { + return false; + } + + msghdr msg = {}; + // Setup ptr_offset as buffer. + iovec io = {.iov_base = ptr_offset, .iov_len = sizeof(*ptr_offset)}; + msg.msg_iov = &io; + msg.msg_iovlen = 1; + // Setup buffer for fd. + union + { + char buffer[CMSG_SPACE(sizeof(*ptr_fd))]; + struct cmsghdr align; + } cmsgs; + msg.msg_control = cmsgs.buffer; + msg.msg_controllen = sizeof(cmsgs.buffer); + cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(*ptr_fd)); + // Try receive the message. 
+ ssize_t retv = recvmsg(sock_fd, &msg, 0); + if (retv < 0) { + warn("recvmsg failed"); + return false; + } + if (retv != sizeof(*ptr_offset)) { + warnx("cannot receive all response"); + return false; + } + memcpy(ptr_fd, CMSG_DATA(cmsg), sizeof(*ptr_fd)); + return true; +} + +inline void* +SharedMemoryClient::DoMap(int shm_fd, off_t shm_offset, size_t size) +{ + void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, + shm_offset); + if (mem == MAP_FAILED) { + warn("mmap failed"); + return nullptr; + } + // If we cannot record a new mapping, our mapping are probably corrupted. + if (!GetAllocRecordStorage().emplace(mem, size).second) { + errx(EXIT_FAILURE, "cannot register memory mapping!"); + } + return mem; +} + +inline bool +SharedMemoryClient::SendAll(int sock_fd, const void* buffer, size_t size) +{ + const char* char_buffer = reinterpret_cast(buffer); + for (size_t offset = 0; offset < size;) { + ssize_t retv = send(sock_fd, char_buffer + offset, size - offset, 0); + if (retv >= 0) { + offset += retv; + } else if (errno != EINTR) { + warn("send failed"); + return false; + } + } + return true; +} + +inline SharedMemoryClient::AllocRecordStorage& +SharedMemoryClient::GetAllocRecordStorage() +{ + static auto storage = new SharedMemoryClient::AllocRecordStorage(); + return *storage; +} + +} // namespace memory +} // namespace util +} // namespace gem5 + +#endif // __UTIL_MEM_SHARED_MEMORY_CLIENT_HH__ diff --git a/util/mem/shared_memory_client_example.cc b/util/mem/shared_memory_client_example.cc new file mode 100644 index 0000000000..c1de4af999 --- /dev/null +++ b/util/mem/shared_memory_client_example.cc @@ -0,0 +1,117 @@ +/* + * Copyright 2022 Google, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include + +#include "shared_memory_client.hh" + +/** + * Consider that + * 1. you have a SimpleMemory that spans from 0x0 to 0x10000 + * 2. you have a SharedMemoryServer with server_path=ram.sock + * 3. 
The SHM server and the Mem is under the same System + * + * You should see a unix socket m5out/ram.sock, and you should be able to run + * our example with commands like: + * `./shared_memory_client_example m5out/ram.sock 0x1000 0x1010` + * + * The example will use the client to map the range 0x1000-0x1010 of the + * SimpleMemory into the address space of the example. As a result, the example + * will be able to access the backing store of the SimpleMemory with just a + * normal pointer. + */ + +static void PrintAsHexString(char *buffer, uint64_t size); + +int +main(int argc, char *argv[]) +{ + if (argc != 4) { + std::cerr << "Usage: " << argv[0] << " " + << std::endl; + return 1; + } + + // Each SharedMemoryServer in gem5 will create a unix socket at the + // location specified in its server_path parameter, and the socket can be + // used by the client to communicate with the server. + gem5::util::memory::SharedMemoryClient shm_client(argv[1]); + + // Before request access to the simulated physical memory in gem5, we need + // to first determine what's the range we'd like to access. + uint64_t start = std::stoull(argv[2], nullptr, 0); + uint64_t end = std::stoull(argv[3], nullptr, 0); + + // One thing important is that, non-align request is not supported now, so + // we'll need to ensure the start address is on page boundary. + long page_size = sysconf(_SC_PAGESIZE); + if (page_size < 0) { + std::cerr << "Cannot determine page size" << std::endl; + return 1; + } + if (start % page_size != 0) { + std::cerr << "Start address must be aligned" << std::endl; + return 1; + } + + // The Map request, if success, will return a void* pointer, which can then + // be used as a backdoor to the physical memory range in gem5. If there's + // any error, nullptr will be returned. 
+ char *mem = reinterpret_cast(shm_client.MapMemory(start, end)); + uint64_t size = end - start + 1; + if (mem == nullptr) { + std::cerr << "Unable to map memory" << std::endl; + return 1; + } + + // A simple use case that print and randomly fill the memory. + std::cout << "Content was: "; + PrintAsHexString(mem, size); + // Override content with random value. + std::random_device rand_dev; + std::uniform_int_distribution rand_dist(0, 255); + for (uint64_t i = 0; i < size; ++i) { + mem[i] = rand_dist(rand_dev); + } + std::cout << "Content is: "; + PrintAsHexString(mem, size); +} + +static void +PrintAsHexString(char *buffer, uint64_t size) +{ + for (uint64_t i = 0; i < size; ++i) { + std::cout << std::setw(2) << std::setfill('0') << std::hex + << (static_cast(buffer[i]) & 0xff); + } + std::cout << std::endl; +} diff --git a/util/memtest-soak.py b/util/memtest-soak.py index 8bea048006..524cfaec58 100755 --- a/util/memtest-soak.py +++ b/util/memtest-soak.py @@ -47,15 +47,21 @@ parser = argparse.ArgumentParser() # of ticks. Both the iteration count and the ticks for each run can be # set on the command line. 
-parser.add_argument('-c', '--count', type=int, default=100) -parser.add_argument('-t', '--ticks', type=int, default=100000000000) -parser.add_argument('binary') +parser.add_argument("-c", "--count", type=int, default=100) +parser.add_argument("-t", "--ticks", type=int, default=100000000000) +parser.add_argument("binary") args = parser.parse_args() for i in range(args.count): - status = subprocess.call([args.binary, 'configs/example/memtest.py', - '-r', '-m %d' % (args.ticks)]) + status = subprocess.call( + [ + args.binary, + "configs/example/memtest.py", + "-r", + "-m %d" % (args.ticks), + ] + ) if status != 0: print("Error: memtest run failed\n") sys.exit(1) diff --git a/util/minorview.py b/util/minorview.py index 313a6f7950..f7a53b1d26 100755 --- a/util/minorview.py +++ b/util/minorview.py @@ -51,23 +51,44 @@ from minorview.model import BlobModel from minorview.view import BlobView, BlobController, BlobWindow from minorview.point import Point -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Minor visualiser') +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Minor visualiser") - parser.add_argument('--picture', metavar='picture-file', - default=minorviewDir + '/minorview/minor.pic', - help='markup file containing blob information ' - + '(default: /minor.pic)') - parser.add_argument('--prefix', metavar='name', default='system.cpu', - help='name prefix in trace for CPU to be visualised (default: ' - + 'system.cpu)') - parser.add_argument('--start-time', metavar='time', type=int, default=0, - help='time of first event to load from file') - parser.add_argument('--end-time', metavar='time', type=int, default=None, - help='time of last event to load from file') - parser.add_argument('--mini-views', action='store_true', default=False, - help='show tiny views of the next 10 time steps') - parser.add_argument('eventFile', metavar='event-file', default='ev') + parser.add_argument( + "--picture", + 
metavar="picture-file", + default=minorviewDir + "/minorview/minor.pic", + help="markup file containing blob information " + + "(default: /minor.pic)", + ) + parser.add_argument( + "--prefix", + metavar="name", + default="system.cpu", + help="name prefix in trace for CPU to be visualised (default: " + + "system.cpu)", + ) + parser.add_argument( + "--start-time", + metavar="time", + type=int, + default=0, + help="time of first event to load from file", + ) + parser.add_argument( + "--end-time", + metavar="time", + type=int, + default=None, + help="time of last event to load from file", + ) + parser.add_argument( + "--mini-views", + action="store_true", + default=False, + help="show tiny views of the next 10 time steps", + ) + parser.add_argument("eventFile", metavar="event-file", default="ev") args = parser.parse_args(sys.argv[1:]) @@ -76,13 +97,16 @@ if __name__ == '__main__': if args.picture and os.access(args.picture, os.O_RDONLY): model.load_picture(args.picture) else: - parser.error('Can\'t read picture file: ' + args.picture) + parser.error("Can't read picture file: " + args.picture) # Make the key objects view = BlobView(model) - controller = BlobController(model, view, + controller = BlobController( + model, + view, defaultEventFile=args.eventFile, - defaultPictureFile=args.picture) + defaultPictureFile=args.picture, + ) window = BlobWindow(model, view, controller) window.add_control_bar(controller.bar) @@ -96,10 +120,11 @@ if __name__ == '__main__': if args.eventFile and os.access(args.eventFile, os.O_RDONLY): controller.startTime = args.start_time controller.endTime = args.end_time - model.load_events(args.eventFile, startTime=args.start_time, - endTime=args.end_time) + model.load_events( + args.eventFile, startTime=args.start_time, endTime=args.end_time + ) controller.set_time_index(0) else: - parser.error('Can\'t read event file: ' + args.eventFile) + parser.error("Can't read event file: " + args.eventFile) gtk.main() diff --git a/util/minorview/blobs.py 
b/util/minorview/blobs.py index af8b2982c8..51b28d0d1e 100644 --- a/util/minorview/blobs.py +++ b/util/minorview/blobs.py @@ -39,7 +39,8 @@ # import pygtk -pygtk.require('2.0') + +pygtk.require("2.0") import gtk import gobject import cairo @@ -52,6 +53,7 @@ from . import colours from .colours import backgroundColour, black from . import model + def centre_size_to_sides(centre, size): """Returns a 4-tuple of the relevant ordinates of the left, right, top and bottom sides of the described rectangle""" @@ -63,6 +65,7 @@ def centre_size_to_sides(centre, size): bottom = y + half_height return (left, right, top, bottom) + def box(cr, centre, size): """Draw a simple box""" (left, right, top, bottom) = centre_size_to_sides(centre, size) @@ -72,6 +75,7 @@ def box(cr, centre, size): cr.line_to(left, bottom) cr.close_path() + def stroke_and_fill(cr, colour): """Stroke with the current colour then fill the same path with the given colour""" @@ -84,6 +88,7 @@ def stroke_and_fill(cr, colour): cr.fill() cr.set_line_join(join) + def striped_box(cr, centre, size, colours): """Fill a rectangle (without outline) striped with the colours given""" num_colours = len(colours) @@ -100,7 +105,7 @@ def striped_box(cr, centre, size, colours): x_stripe_width = width / num_colours half_x_stripe_width = x_stripe_width / 2.0 # Left triangle - cr.move_to(left, bottom) + cr.move_to(left, bottom) cr.line_to(left + half_x_stripe_width, bottom) cr.line_to(left + x_stripe_width + half_x_stripe_width, top) cr.line_to(left, top) @@ -110,10 +115,12 @@ def striped_box(cr, centre, size, colours): xOffset = x_stripe_width * i cr.move_to(left + xOffset - half_x_stripe_width, bottom) cr.line_to(left + xOffset + half_x_stripe_width, bottom) - cr.line_to(left + xOffset + x_stripe_width + - half_x_stripe_width, top) - cr.line_to(left + xOffset + x_stripe_width - - half_x_stripe_width, top) + cr.line_to( + left + xOffset + x_stripe_width + half_x_stripe_width, top + ) + cr.line_to( + left + xOffset + 
x_stripe_width - half_x_stripe_width, top + ) stroke_and_fill(cr, colours[i]) # Right triangle cr.move_to((right - x_stripe_width) - half_x_stripe_width, bottom) @@ -122,25 +129,30 @@ def striped_box(cr, centre, size, colours): cr.line_to((right - x_stripe_width) + half_x_stripe_width, top) stroke_and_fill(cr, colours[num_colours - 1]) + def speech_bubble(cr, top_left, size, unit): """Draw a speech bubble with 'size'-sized internal space with its top left corner at Point(2.0 * unit, 2.0 * unit)""" + def local_arc(centre, angleFrom, angleTo): - cr.arc(centre.x, centre.y, unit, angleFrom * math.pi, - angleTo * math.pi) + cr.arc( + centre.x, centre.y, unit, angleFrom * math.pi, angleTo * math.pi + ) cr.move_to(*top_left.to_pair()) cr.rel_line_to(unit * 2.0, unit) cr.rel_line_to(size.x, 0.0) local_arc(top_left + Point(size.x + unit * 2.0, unit * 2.0), -0.5, 0.0) cr.rel_line_to(0.0, size.y) - local_arc(top_left + Point(size.x + unit * 2.0, size.y + unit * 2.0), - 0, 0.5) + local_arc( + top_left + Point(size.x + unit * 2.0, size.y + unit * 2.0), 0, 0.5 + ) cr.rel_line_to(-size.x, 0.0) local_arc(top_left + Point(unit * 2.0, size.y + unit * 2.0), 0.5, 1.0) cr.rel_line_to(0, -size.y) cr.close_path() + def open_bottom(cr, centre, size): """Draw a box with left, top and right sides""" (left, right, top, bottom) = centre_size_to_sides(centre, size) @@ -149,6 +161,7 @@ def open_bottom(cr, centre, size): cr.line_to(right, top) cr.line_to(right, bottom) + def fifo(cr, centre, size): """Draw just the vertical sides of a box""" (left, right, top, bottom) = centre_size_to_sides(centre, size) @@ -157,6 +170,7 @@ def fifo(cr, centre, size): cr.move_to(right, bottom) cr.line_to(right, top) + def cross(cr, centre, size): """Draw a cross parallel with the axes""" (left, right, top, bottom) = centre_size_to_sides(centre, size) @@ -166,13 +180,15 @@ def cross(cr, centre, size): cr.move_to(x, top) cr.line_to(x, bottom) + class Blob(object): """Blob super class""" - def __init__(self, 
picChar, unit, topLeft, colour, size = Point(1,1)): + + def __init__(self, picChar, unit, topLeft, colour, size=Point(1, 1)): self.picChar = picChar self.unit = unit self.displayName = unit - self.nameLoc = 'top' + self.nameLoc = "top" self.topLeft = topLeft self.colour = colour self.size = size @@ -187,43 +203,50 @@ class Blob(object): the canvas are within the blob""" return None + class Block(Blob): """Blocks are rectangular blogs colourable with a 2D grid of striped blocks. visualDecoder specifies how event data becomes this coloured grid""" - def __init__(self, picChar, unit, topLeft=Point(0,0), + + def __init__( + self, + picChar, + unit, + topLeft=Point(0, 0), colour=colours.black, - size=Point(1,1)): - super(Block,self).__init__(picChar, unit, topLeft, colour, - size = size) + size=Point(1, 1), + ): + super(Block, self).__init__(picChar, unit, topLeft, colour, size=size) # {horiz, vert} - self.stripDir = 'horiz' + self.stripDir = "horiz" # {LR, RL}: LR means the first strip will be on the left/top, # RL means the first strip will be on the right/bottom - self.stripOrd = 'LR' + self.stripOrd = "LR" # Number of blank strips if this is a frame self.blankStrips = 0 # {box, fifo, openBottom} - self.shape = 'box' + self.shape = "box" self.visualDecoder = None def render(self, cr, view, event, select, time): # Find the right event, visuals and sizes for things - if event is None or self.displayName.startswith('_'): + if event is None or self.displayName.startswith("_"): event = model.BlobEvent(self.unit, time) if self.picChar in event.visuals: strips = event.visuals[self.picChar].to_striped_block( - select & self.dataSelect) + select & self.dataSelect + ) else: strips = [[[colours.unknownColour]]] - if self.stripOrd == 'RL': + if self.stripOrd == "RL": strips.reverse() if len(strips) == 0: strips = [[colours.errorColour]] - print('Problem with the colour of event:', event) + print("Problem with the colour of event:", event) num_strips = len(strips) 
strip_proportion = 1.0 / num_strips @@ -240,11 +263,10 @@ class Block(Blob): cr.save() cr.scale(*view.pitch.to_pair()) cr.translate(*self.topLeft.to_pair()) - cr.translate(*(size - Point(1,1)).scale(0.5).to_pair()) + cr.translate(*(size - Point(1, 1)).scale(0.5).to_pair()) translated_centre = Point(*cr.user_to_device(0.0, 0.0)) - translated_size = \ - Point(*cr.user_to_device_distance(*size.to_pair())) + translated_size = Point(*cr.user_to_device_distance(*size.to_pair())) # The 2D grid is a grid of strips of blocks. Data [[1,2],[3]] # is 2 strips of 2 and 1 blocks respectively. @@ -255,20 +277,22 @@ class Block(Blob): # from left to right if stripOf == 'LR' or right to left if # stripOrd == 'RL'. - strip_is_horiz = self.stripDir == 'horiz' + strip_is_horiz = self.stripDir == "horiz" if strip_is_horiz: - strip_step_base = Point(1.0,0.0) - block_step_base = Point(0.0,1.0) + strip_step_base = Point(1.0, 0.0) + block_step_base = Point(0.0, 1.0) else: - strip_step_base = Point(0.0,1.0) - block_step_base = Point(1.0,0.0) + strip_step_base = Point(0.0, 1.0) + block_step_base = Point(1.0, 0.0) - strip_size = (box_size * (strip_step_base.scale(strip_proportion) + - block_step_base)) + strip_size = box_size * ( + strip_step_base.scale(strip_proportion) + block_step_base + ) strip_step = strip_size * strip_step_base - strip_centre = Point(0,0) - (strip_size * - strip_step_base.scale(first_strip_offset)) + strip_centre = Point(0, 0) - ( + strip_size * strip_step_base.scale(first_strip_offset) + ) cr.set_line_width(view.midLineWidth / view.pitch.x) @@ -278,66 +302,70 @@ class Block(Blob): block_proportion = 1.0 / num_blocks firstBlockOffset = (num_blocks / 2.0) - 0.5 - block_size = (strip_size * - (block_step_base.scale(block_proportion) + - strip_step_base)) + block_size = strip_size * ( + block_step_base.scale(block_proportion) + strip_step_base + ) block_step = block_size * block_step_base - block_centre = (strip_centre + strip_step.scale(strip_index) - - (block_size * 
block_step_base.scale(firstBlockOffset))) + block_centre = ( + strip_centre + + strip_step.scale(strip_index) + - (block_size * block_step_base.scale(firstBlockOffset)) + ) for block_index in range(num_blocks): - striped_box(cr, block_centre + - block_step.scale(block_index), block_size, - strips[strip_index][block_index]) + striped_box( + cr, + block_centre + block_step.scale(block_index), + block_size, + strips[strip_index][block_index], + ) cr.set_font_size(0.7) if self.border > 0.5: weight = cairo.FONT_WEIGHT_BOLD else: weight = cairo.FONT_WEIGHT_NORMAL - cr.select_font_face('Helvetica', cairo.FONT_SLANT_NORMAL, - weight) + cr.select_font_face("Helvetica", cairo.FONT_SLANT_NORMAL, weight) xb, yb, width, height, dx, dy = cr.text_extents(self.displayName) text_comfort_space = 0.15 - if self.nameLoc == 'left': + if self.nameLoc == "left": # Position text vertically along left side, top aligned cr.save() - cr.rotate(- (math.pi / 2.0)) + cr.rotate(-(math.pi / 2.0)) text_point = Point(size.y, size.x).scale(0.5) * Point(-1, -1) text_point += Point(max(0, size.y - width), 0) text_point += Point(-text_comfort_space, -text_comfort_space) - else: # Including top + else: # Including top # Position text above the top left hand corner - text_point = size.scale(0.5) * Point(-1,-1) + text_point = size.scale(0.5) * Point(-1, -1) text_point += Point(0.00, -text_comfort_space) - if (self.displayName != '' and - not self.displayName.startswith('_')): + if self.displayName != "" and not self.displayName.startswith("_"): cr.set_source_color(self.colour) cr.move_to(*text_point.to_pair()) cr.show_text(self.displayName) - if self.nameLoc == 'left': + if self.nameLoc == "left": cr.restore() # Draw the outline shape cr.save() if strip_is_horiz: - cr.rotate(- (math.pi / 2.0)) + cr.rotate(-(math.pi / 2.0)) box_size = Point(box_size.y, box_size.x) if self.stripOrd == "RL": cr.rotate(math.pi) - if self.shape == 'box': - box(cr, Point(0,0), box_size) - elif self.shape == 'openBottom': - 
open_bottom(cr, Point(0,0), box_size) - elif self.shape == 'fifo': - fifo(cr, Point(0,0), box_size) + if self.shape == "box": + box(cr, Point(0, 0), box_size) + elif self.shape == "openBottom": + open_bottom(cr, Point(0, 0), box_size) + elif self.shape == "fifo": + fifo(cr, Point(0, 0), box_size) cr.restore() # Restore scale and stroke the outline @@ -347,18 +375,21 @@ class Block(Blob): cr.stroke() # Return blob size/position - if self.unit == '_': + if self.unit == "_": return None else: return (translated_centre, translated_size) + class Key(Blob): """Draw a key to the special (and numeric colours) with swatches of the colours half as wide as the key""" - def __init__(self, picChar, unit, topLeft, colour=colours.black, - size=Point(1,1)): - super(Key,self).__init__(picChar, unit, topLeft, colour, size = size) - self.colours = 'BBBB' + + def __init__( + self, picChar, unit, topLeft, colour=colours.black, size=Point(1, 1) + ): + super(Key, self).__init__(picChar, unit, topLeft, colour, size=size) + self.colours = "BBBB" self.displayName = unit def render(self, cr, view, event, select, time): @@ -367,17 +398,20 @@ class Key(Blob): cr.translate(*self.topLeft.to_pair()) # cr.translate(*(self.size - Point(1,1)).scale(0.5).to_pair()) half_width = self.size.x / 2.0 - cr.translate(*(self.size - Point(1.0 + half_width,1.0)).scale(0.5). 
- to_pair()) + cr.translate( + *(self.size - Point(1.0 + half_width, 1.0)).scale(0.5).to_pair() + ) num_colours = len(self.colours) cr.set_line_width(view.midLineWidth / view.pitch.x) - blob_size = (Point(half_width,0.0) + - (self.size * Point(0.0,1.0 / num_colours))) - blob_step = Point(0.0,1.0) * blob_size - first_blob_centre = (Point(0.0,0.0) - - blob_step.scale((num_colours / 2.0) - 0.5)) + blob_size = Point(half_width, 0.0) + ( + self.size * Point(0.0, 1.0 / num_colours) + ) + blob_step = Point(0.0, 1.0) * blob_size + first_blob_centre = Point(0.0, 0.0) - blob_step.scale( + (num_colours / 2.0) - 0.5 + ) cr.set_source_color(self.colour) cr.set_line_width(view.thinLineWidth / view.pitch.x) @@ -387,8 +421,9 @@ class Key(Blob): real_blob_size = blob_size.scale(blob_proportion) cr.set_font_size(0.8 * blob_size.y * blob_proportion) - cr.select_font_face('Helvetica', cairo.FONT_SLANT_NORMAL, - cairo.FONT_WEIGHT_BOLD) + cr.select_font_face( + "Helvetica", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD + ) for i in range(num_colours): centre = first_blob_centre + blob_step.scale(i) @@ -396,9 +431,8 @@ class Key(Blob): colour_char = self.colours[i] if colour_char.isdigit(): - cr.set_source_color(colours.number_to_colour( - int(colour_char))) - label = '...' + colour_char + cr.set_source_color(colours.number_to_colour(int(colour_char))) + label = "..." 
+ colour_char else: cr.set_source_color(model.special_state_colours[colour_char]) label = model.special_state_names[colour_char] @@ -409,34 +443,46 @@ class Key(Blob): xb, yb, width, height, dx, dy = cr.text_extents(label) - text_left = (centre + (Point(0.5,0.0) * blob_size) + - Point(0.0, height / 2.0)) + text_left = ( + centre + + (Point(0.5, 0.0) * blob_size) + + Point(0.0, height / 2.0) + ) cr.move_to(*text_left.to_pair()) cr.show_text(label) + class Arrow(Blob): """Draw a left or right facing arrow""" - def __init__(self, unit, topLeft, colour=colours.black, - size=Point(1.0,1.0), direc='right'): - super(Arrow,self).__init__(unit, unit, topLeft, colour, size = size) + + def __init__( + self, + unit, + topLeft, + colour=colours.black, + size=Point(1.0, 1.0), + direc="right", + ): + super(Arrow, self).__init__(unit, unit, topLeft, colour, size=size) self.direc = direc def render(self, cr, view, event, select, time): cr.save() cr.scale(*view.pitch.to_pair()) cr.translate(*self.topLeft.to_pair()) - cr.translate(*(self.size - Point(1,1)).scale(0.5).to_pair()) + cr.translate(*(self.size - Point(1, 1)).scale(0.5).to_pair()) cr.scale(*self.size.to_pair()) - (blob_indent_x, blob_indent_y) = \ - (view.blobIndentFactor / self.size).to_pair() + (blob_indent_x, blob_indent_y) = ( + view.blobIndentFactor / self.size + ).to_pair() left = -0.5 - blob_indent_x right = 0.5 + blob_indent_x thickness = 0.2 flare = 0.2 - if self.direc == 'left': + if self.direc == "left": cr.rotate(math.pi) cr.move_to(left, -thickness) diff --git a/util/minorview/colours.py b/util/minorview/colours.py index d0887ec0bf..e29860f3de 100644 --- a/util/minorview/colours.py +++ b/util/minorview/colours.py @@ -36,18 +36,19 @@ import gtk # All the miscellaneous colours used in the interface -unknownColour = gtk.gdk.color_parse('magenta') -blockedColour = gtk.gdk.color_parse('grey') -bubbleColour = gtk.gdk.color_parse('bisque') -emptySlotColour = gtk.gdk.color_parse('grey90') -reservedSlotColour = 
gtk.gdk.color_parse('cyan') -errorColour = gtk.gdk.color_parse('blue') -backgroundColour = gtk.gdk.color_parse('white') -faultColour = gtk.gdk.color_parse('dark cyan') -readColour = gtk.gdk.color_parse('red') -writeColour = gtk.gdk.color_parse('white') +unknownColour = gtk.gdk.color_parse("magenta") +blockedColour = gtk.gdk.color_parse("grey") +bubbleColour = gtk.gdk.color_parse("bisque") +emptySlotColour = gtk.gdk.color_parse("grey90") +reservedSlotColour = gtk.gdk.color_parse("cyan") +errorColour = gtk.gdk.color_parse("blue") +backgroundColour = gtk.gdk.color_parse("white") +faultColour = gtk.gdk.color_parse("dark cyan") +readColour = gtk.gdk.color_parse("red") +writeColour = gtk.gdk.color_parse("white") + +black = gtk.gdk.color_parse("black") -black = gtk.gdk.color_parse('black') def name_to_colour(name): """Convert a colour name to a GdkColor""" @@ -57,8 +58,25 @@ def name_to_colour(name): ret = unknownColour return ret -number_colour_code = list(map(name_to_colour, ['black', 'brown', 'red', - 'orange', 'yellow', 'green', 'blue', 'violet', 'grey', 'white'])) + +number_colour_code = list( + map( + name_to_colour, + [ + "black", + "brown", + "red", + "orange", + "yellow", + "green", + "blue", + "violet", + "grey", + "white", + ], + ) +) + def number_to_colour(num): """Convert the last decimal digit of an integer into a resistor diff --git a/util/minorview/model.py b/util/minorview/model.py index bb66616fa5..86f49a3da0 100644 --- a/util/minorview/model.py +++ b/util/minorview/model.py @@ -47,8 +47,10 @@ id_parts = "TSPLFE" all_ids = set(id_parts) no_ids = set([]) + class BlobDataSelect(object): """Represents which data is displayed for Ided object""" + def __init__(self): # Copy all_ids self.ids = set(all_ids) @@ -59,8 +61,10 @@ class BlobDataSelect(object): ret.ids = self.ids.intersection(rhs.ids) return ret + class BlobVisualData(object): """Super class for block data colouring""" + def to_striped_block(self, select): """Return an array of colours to use for a 
striped block""" return unknownColour @@ -74,14 +78,17 @@ class BlobVisualData(object): return None def __repr__(self): - return self.__class__.__name__ + '().from_string(' + \ - self.__str__() + ')' + return ( + self.__class__.__name__ + "().from_string(" + self.__str__() + ")" + ) def __str__(self): - return '' + return "" + class Id(BlobVisualData): """A line or instruction id""" + def __init__(self): self.isFault = False self.threadId = 0 @@ -92,15 +99,22 @@ class Id(BlobVisualData): self.execSeqNum = 0 def as_list(self): - return [self.threadId, self.streamSeqNum, self.predictionSeqNum, - self.lineSeqNum, self.fetchSeqNum, self.execSeqNum] + return [ + self.threadId, + self.streamSeqNum, + self.predictionSeqNum, + self.lineSeqNum, + self.fetchSeqNum, + self.execSeqNum, + ] def __cmp__(self, right): return cmp(self.as_list(), right.as_list()) def from_string(self, string): - m = re.match('^(F;)?(\d+)/(\d+)\.(\d+)/(\d+)(/(\d+)(\.(\d+))?)?', - string) + m = re.match( + "^(F;)?(\d+)/(\d+)\.(\d+)/(\d+)(/(\d+)(\.(\d+))?)?", string + ) def seqnum_from_string(string): if string is None: @@ -109,7 +123,7 @@ class Id(BlobVisualData): return int(string) if m is None: - print('Invalid Id string', string) + print("Invalid Id string", string) else: elems = m.groups() @@ -138,12 +152,18 @@ class Id(BlobVisualData): def __str__(self): """Returns the usual id T/S.P/L/F.E string""" return ( - str(self.threadId) + '/' + - str(self.streamSeqNum) + '.' + - str(self.predictionSeqNum) + '/' + - str(self.lineSeqNum) + '/' + - str(self.fetchSeqNum) + '.' + - str(self.execSeqNum)) + str(self.threadId) + + "/" + + str(self.streamSeqNum) + + "." + + str(self.predictionSeqNum) + + "/" + + str(self.lineSeqNum) + + "/" + + str(self.fetchSeqNum) + + "." 
+ + str(self.execSeqNum) + ) def to_striped_block(self, select): ret = [] @@ -151,17 +171,17 @@ class Id(BlobVisualData): if self.isFault: ret.append(colours.faultColour) - if 'T' in select.ids: + if "T" in select.ids: ret.append(colours.number_to_colour(self.threadId)) - if 'S' in select.ids: + if "S" in select.ids: ret.append(colours.number_to_colour(self.streamSeqNum)) - if 'P' in select.ids: + if "P" in select.ids: ret.append(colours.number_to_colour(self.predictionSeqNum)) - if 'L' in select.ids: + if "L" in select.ids: ret.append(colours.number_to_colour(self.lineSeqNum)) - if self.fetchSeqNum != 0 and 'F' in select.ids: + if self.fetchSeqNum != 0 and "F" in select.ids: ret.append(colours.number_to_colour(self.fetchSeqNum)) - if self.execSeqNum != 0 and 'E' in select.ids: + if self.execSeqNum != 0 and "E" in select.ids: ret.append(colours.number_to_colour(self.execSeqNum)) if len(ret) == 0: @@ -172,9 +192,11 @@ class Id(BlobVisualData): return ret + class Branch(BlobVisualData): """Branch data new stream and prediction sequence numbers, a branch reason and a new PC""" + def __init__(self): self.newStreamSeqNum = 0 self.newPredictionSeqNum = 0 @@ -183,11 +205,16 @@ class Branch(BlobVisualData): self.id = Id() def from_string(self, string): - m = re.match('^(\w+);(\d+)\.(\d+);([0-9a-fA-Fx]+);(.*)$', string) + m = re.match("^(\w+);(\d+)\.(\d+);([0-9a-fA-Fx]+);(.*)$", string) if m is not None: - self.reason, newStreamSeqNum, newPredictionSeqNum, \ - newPC, id = m.groups() + ( + self.reason, + newStreamSeqNum, + newPredictionSeqNum, + newPC, + id, + ) = m.groups() self.newStreamSeqNum = int(newStreamSeqNum) self.newPredictionSeqNum = int(newPredictionSeqNum) @@ -199,39 +226,47 @@ class Branch(BlobVisualData): return self def to_striped_block(self, select): - return [colours.number_to_colour(self.newStreamSeqNum), + return [ + colours.number_to_colour(self.newStreamSeqNum), colours.number_to_colour(self.newPredictionSeqNum), - colours.number_to_colour(self.newPC)] 
+ colours.number_to_colour(self.newPC), + ] + class Counts(BlobVisualData): """Treat the input data as just a /-separated list of count values (or just a single value)""" + def __init__(self): self.counts = [] def from_string(self, string): - self.counts = list(map(int, re.split('/', string))) + self.counts = list(map(int, re.split("/", string))) return self def to_striped_block(self, select): return list(map(colours.number_to_colour, self.counts)) + class Colour(BlobVisualData): """A fixed colour block, used for special colour decoding""" + def __init__(self, colour): self.colour = colour def to_striped_block(self, select): return [self.colour] + class DcacheAccess(BlobVisualData): """Data cache accesses [RW];id""" + def __init__(self): - self.direc = 'R' + self.direc = "R" self.id = Id() def from_string(self, string): - self.direc, id = re.match('^([RW]);([^;]*);.*$', string).groups() + self.direc, id = re.match("^([RW]);([^;]*);.*$", string).groups() self.id.from_string(id) return self @@ -239,51 +274,65 @@ class DcacheAccess(BlobVisualData): return self.id def to_striped_block(self, select): - if self.direc == 'R': + if self.direc == "R": direc_colour = colours.readColour - elif self.direc == 'R': + elif self.direc == "R": direc_colour = colours.writeColour else: direc_colour = colours.errorColour return [direc_colour] + self.id.to_striped_block(select) + class ColourPattern(object): """Super class for decoders that make 2D grids rather than just single striped blocks""" + def elems(self): return [] def to_striped_block(self, select): return [[[colours.errorColour]]] + def special_view_decoder(class_): """Generate a decode function that checks for special character arguments first (and generates a fixed colour) before building a BlobVisualData of the given class""" + def decode(symbol): if symbol in special_state_colours: return Colour(special_state_colours[symbol]) else: return class_().from_string(symbol) + return decode + class TwoDColours(ColourPattern): 
"""A 2D grid pattern decoder""" + def __init__(self, blockss): self.blockss = blockss @classmethod def decoder(class_, elemClass, dataName): """Factory for making decoders for particular block types""" + def decode(pairs): if dataName not in pairs: - print('TwoDColours: no event data called:', \ - dataName, 'in:', pairs) + print( + "TwoDColours: no event data called:", + dataName, + "in:", + pairs, + ) return class_([[Colour(colours.errorColour)]]) else: parsed = parse.list_parser(pairs[dataName]) - return class_(parse.map2(special_view_decoder(elemClass), \ - parsed)) + return class_( + parse.map2(special_view_decoder(elemClass), parsed) + ) + return decode @classmethod @@ -294,34 +343,47 @@ class TwoDColours(ColourPattern): data on the decoder's picture file. This gives a 2D layout of the values with index 0 at strip=0, elem=0 and index 1 at strip=0, elem=1""" + def decode(pairs): if dataName not in pairs: - print('TwoDColours: no event data called:', \ - dataName, 'in:', pairs) + print( + "TwoDColours: no event data called:", + dataName, + "in:", + pairs, + ) return class_([[Colour(colours.errorColour)]]) else: - strips = int(picPairs['strips']) - strip_elems = int(picPairs['stripelems']) + strips = int(picPairs["strips"]) + strip_elems = int(picPairs["stripelems"]) raw_iv_pairs = pairs[dataName] parsed = parse.parse_indexed_list(raw_iv_pairs) - array = [[Colour(colours.emptySlotColour) - for i in range(0, strip_elems)] - for j in range(0, strips)] + array = [ + [ + Colour(colours.emptySlotColour) + for i in range(0, strip_elems) + ] + for j in range(0, strips) + ] for index, value in parsed: try: - array[index % strips][index / strips] = \ - special_view_decoder(elemClass)(value) + array[index % strips][ + index / strips + ] = special_view_decoder(elemClass)(value) except: - print("Element out of range strips: %d," \ - " stripelems %d, index: %d" % (strips, - strip_elems, index)) + print( + "Element out of range strips: %d," + " stripelems %d, index: %d" + % 
(strips, strip_elems, index) + ) # return class_(array) return class_(array) + return decode def elems(self): @@ -334,10 +396,12 @@ class TwoDColours(ColourPattern): def to_striped_block(self, select): return parse.map2(lambda d: d.to_striped_block(select), self.blockss) + class FrameColours(ColourPattern): """Decode to a 2D grid which has a single occupied row from the event data and some blank rows forming a frame with the occupied row as a 'title' coloured stripe""" + def __init__(self, block, numBlankSlots): self.numBlankSlots = numBlankSlots self.block = block @@ -345,78 +409,90 @@ class FrameColours(ColourPattern): @classmethod def decoder(class_, elemClass, numBlankSlots, dataName): """Factory for element type""" + def decode(pairs): if dataName not in pairs: - print('FrameColours: no event data called:', dataName, \ - 'in:', pairs) + print( + "FrameColours: no event data called:", + dataName, + "in:", + pairs, + ) return class_([Colour(colours.errorColour)]) else: parsed = parse.list_parser(pairs[dataName]) - return class_(special_view_decoder(elemClass) - (parsed[0][0]), numBlankSlots) + return class_( + special_view_decoder(elemClass)(parsed[0][0]), + numBlankSlots, + ) + return decode def elems(self): return [self.block] def to_striped_block(self, select): - return ([[self.block.to_striped_block(select)]] + - (self.numBlankSlots * [[[colours.backgroundColour]]])) + return [[self.block.to_striped_block(select)]] + ( + self.numBlankSlots * [[[colours.backgroundColour]]] + ) + special_state_colours = { - 'U': colours.unknownColour, - 'B': colours.blockedColour, - '-': colours.bubbleColour, - '': colours.emptySlotColour, - 'E': colours.emptySlotColour, - 'R': colours.reservedSlotColour, - 'X': colours.errorColour, - 'F': colours.faultColour, - 'r': colours.readColour, - 'w': colours.writeColour - } + "U": colours.unknownColour, + "B": colours.blockedColour, + "-": colours.bubbleColour, + "": colours.emptySlotColour, + "E": colours.emptySlotColour, + "R": 
colours.reservedSlotColour, + "X": colours.errorColour, + "F": colours.faultColour, + "r": colours.readColour, + "w": colours.writeColour, +} special_state_names = { - 'U': '(U)nknown', - 'B': '(B)locked', - '-': '(-)Bubble', - '': '()Empty', - 'E': '(E)mpty', - 'R': '(R)eserved', - 'X': '(X)Error', - 'F': '(F)ault', - 'r': '(r)ead', - 'w': '(w)rite' - } + "U": "(U)nknown", + "B": "(B)locked", + "-": "(-)Bubble", + "": "()Empty", + "E": "(E)mpty", + "R": "(R)eserved", + "X": "(X)Error", + "F": "(F)ault", + "r": "(r)ead", + "w": "(w)rite", +} special_state_chars = list(special_state_colours.keys()) # The complete set of available block data types decoder_element_classes = { - 'insts': Id, - 'lines': Id, - 'branch': Branch, - 'dcache': DcacheAccess, - 'counts': Counts - } + "insts": Id, + "lines": Id, + "branch": Branch, + "dcache": DcacheAccess, + "counts": Counts, +} + +indexed_decoder_element_classes = {"indexedCounts": Counts} -indexed_decoder_element_classes = { - 'indexedCounts' : Counts - } def find_colour_decoder(stripSpace, decoderName, dataName, picPairs): """Make a colour decoder from some picture file blob attributes""" - if decoderName == 'frame': + if decoderName == "frame": return FrameColours.decoder(Counts, stripSpace, dataName) elif decoderName in decoder_element_classes: - return TwoDColours.decoder(decoder_element_classes[decoderName], - dataName) + return TwoDColours.decoder( + decoder_element_classes[decoderName], dataName + ) elif decoderName in indexed_decoder_element_classes: return TwoDColours.indexed_decoder( - indexed_decoder_element_classes[decoderName], dataName, picPairs) + indexed_decoder_element_classes[decoderName], dataName, picPairs + ) else: return None + class IdedObj(object): """An object identified by an Id carrying paired data. The super class for Inst and Line""" @@ -435,15 +511,17 @@ class IdedObj(object): # FIXME, add a table column titles? 
def __repr__(self): - return ' '.join(self.table_line()) + return " ".join(self.table_line()) + class Inst(IdedObj): """A non-fault instruction""" + def __init__(self, id, disassembly, addr, pairs={}): - super(Inst,self).__init__(id, pairs) - if 'nextAddr' in pairs: - self.nextAddr = int(pairs['nextAddr'], 0) - del pairs['nextAddr'] + super(Inst, self).__init__(id, pairs) + if "nextAddr" in pairs: + self.nextAddr = int(pairs["nextAddr"], 0) + del pairs["nextAddr"] else: self.nextAddr = None self.disassembly = disassembly @@ -451,18 +529,20 @@ class Inst(IdedObj): def table_line(self): if self.nextAddr is not None: - addrStr = '0x%x->0x%x' % (self.addr, self.nextAddr) + addrStr = "0x%x->0x%x" % (self.addr, self.nextAddr) else: - addrStr = '0x%x' % self.addr + addrStr = "0x%x" % self.addr ret = [addrStr, self.disassembly] for name, value in self.pairs.items(): ret.append("%s=%s" % (name, str(value))) return ret + class InstFault(IdedObj): """A fault instruction""" + def __init__(self, id, fault, addr, pairs={}): - super(InstFault,self).__init__(id, pairs) + super(InstFault, self).__init__(id, pairs) self.fault = fault self.addr = addr @@ -472,10 +552,12 @@ class InstFault(IdedObj): ret.append("%s=%s", name, str(value)) return ret + class Line(IdedObj): """A fetched line""" + def __init__(self, id, vaddr, paddr, size, pairs={}): - super(Line,self).__init__(id, pairs) + super(Line, self).__init__(id, pairs) self.vaddr = vaddr self.paddr = paddr self.size = size @@ -486,10 +568,12 @@ class Line(IdedObj): ret.append("%s=%s", name, str(value)) return ret + class LineFault(IdedObj): """A faulting line""" + def __init__(self, id, fault, vaddr, pairs={}): - super(LineFault,self).__init__(id, pairs) + super(LineFault, self).__init__(id, pairs) self.vaddr = vaddr self.fault = fault @@ -499,9 +583,11 @@ class LineFault(IdedObj): ret.append("%s=%s", name, str(value)) return ret + class BlobEvent(object): """Time event for a single blob""" - def __init__(self, unit, time, pairs = 
{}): + + def __init__(self, unit, time, pairs={}): # blob's unit name self.unit = unit self.time = time @@ -518,6 +604,7 @@ class BlobEvent(object): ret = [] if picChar in self.visuals: blocks = self.visuals[picChar].elems() + def find_inst(data): instId = data.get_inst() lineId = data.get_line() @@ -532,17 +619,20 @@ class BlobEvent(object): line = model.find_line(lineId) if line is not None: ret.append(line) + list(map(find_inst, blocks)) return sorted(ret) + class BlobModel(object): """Model bringing together blob definitions and parsed events""" - def __init__(self, unitNamePrefix=''): + + def __init__(self, unitNamePrefix=""): self.blobs = [] self.unitNameToBlobs = {} self.unitEvents = {} self.clear_events() - self.picSize = Point(20,10) + self.picSize = Point(20, 10) self.lastTime = 0 self.unitNamePrefix = unitNamePrefix @@ -598,7 +688,7 @@ class BlobModel(object): time >= the current maximum time""" if event.unit in self.unitEvents: events = self.unitEvents[event.unit] - if len(events) > 0 and events[len(events)-1].time > event.time: + if len(events) > 0 and events[len(events) - 1].time > event.time: print("Bad event ordering") events.append(event) self.numEvents += 1 @@ -619,16 +709,17 @@ class BlobModel(object): key = id.lineSeqNum return self.lines.get(key, None) - def find_event_bisection(self, unit, time, events, - lower_index, upper_index): + def find_event_bisection( + self, unit, time, events, lower_index, upper_index + ): """Find an event by binary search on time indices""" while lower_index <= upper_index: pivot = (upper_index + lower_index) / 2 pivotEvent = events[pivot] - event_equal = (pivotEvent.time == time or - (pivotEvent.time < time and - (pivot == len(events) - 1 or - events[pivot + 1].time > time))) + event_equal = pivotEvent.time == time or ( + pivotEvent.time < time + and (pivot == len(events) - 1 or events[pivot + 1].time > time) + ) if event_equal: return pivotEvent @@ -650,8 +741,9 @@ class BlobModel(object): """Find the last event 
for the given unit at time <= time""" if unit in self.unitEvents: events = self.unitEvents[unit] - ret = self.find_event_bisection(unit, time, events, - 0, len(events)-1) + ret = self.find_event_bisection( + unit, time, events, 0, len(events) - 1 + ) return ret else: @@ -671,25 +763,24 @@ class BlobModel(object): pairs = parse.parse_pairs(rest) other_pairs = dict(pairs) - id = Id().from_string(pairs['id']) - del other_pairs['id'] + id = Id().from_string(pairs["id"]) + del other_pairs["id"] - addr = int(pairs['addr'], 0) - del other_pairs['addr'] + addr = int(pairs["addr"], 0) + del other_pairs["addr"] - if 'inst' in other_pairs: - del other_pairs['inst'] + if "inst" in other_pairs: + del other_pairs["inst"] # Collapse unnecessary spaces in disassembly - disassembly = re.sub(' *', ' ', - re.sub('^ *', '', pairs['inst'])) + disassembly = re.sub(" *", " ", re.sub("^ *", "", pairs["inst"])) inst = Inst(id, disassembly, addr, other_pairs) self.add_inst(inst) - elif 'fault' in other_pairs: - del other_pairs['fault'] + elif "fault" in other_pairs: + del other_pairs["fault"] - inst = InstFault(id, pairs['fault'], addr, other_pairs) + inst = InstFault(id, pairs["fault"], addr, other_pairs) self.add_inst(inst) @@ -698,27 +789,27 @@ class BlobModel(object): pairs = parse.parse_pairs(rest) other_pairs = dict(pairs) - id = Id().from_string(pairs['id']) - del other_pairs['id'] + id = Id().from_string(pairs["id"]) + del other_pairs["id"] - vaddr = int(pairs['vaddr'], 0) - del other_pairs['vaddr'] + vaddr = int(pairs["vaddr"], 0) + del other_pairs["vaddr"] - if 'paddr' in other_pairs: - del other_pairs['paddr'] - del other_pairs['size'] - paddr = int(pairs['paddr'], 0) - size = int(pairs['size'], 0) + if "paddr" in other_pairs: + del other_pairs["paddr"] + del other_pairs["size"] + paddr = int(pairs["paddr"], 0) + size = int(pairs["size"], 0) - self.add_line(Line(id, - vaddr, paddr, size, other_pairs)) - elif 'fault' in other_pairs: - del other_pairs['fault'] + 
self.add_line(Line(id, vaddr, paddr, size, other_pairs)) + elif "fault" in other_pairs: + del other_pairs["fault"] - self.add_line(LineFault(id, pairs['fault'], vaddr, other_pairs)) + self.add_line(LineFault(id, pairs["fault"], vaddr, other_pairs)) def load_events(self, file, startTime=0, endTime=None): """Load an event file and add everything to this model""" + def update_comments(comments, time): # Add a list of comments to an existing event, if there is one at # the given time, or create a new, correctly-timed, event from @@ -752,10 +843,10 @@ class BlobModel(object): next_progress_print_event_count = 1000 if not os.access(file, os.R_OK): - print('Can\'t open file', file) + print("Can't open file", file) exit(1) else: - print('Opening file', file) + print("Opening file", file) f = open(file) @@ -765,7 +856,7 @@ class BlobModel(object): still_skipping = True l = f.readline() while l and still_skipping: - match = re.match('^\s*(\d+):', l) + match = re.match("^\s*(\d+):", l) if match is not None: event_time = match.groups() if int(event_time[0]) >= startTime: @@ -776,7 +867,8 @@ class BlobModel(object): l = f.readline() match_line_re = re.compile( - '^\s*(\d+):\s*([\w\.]+):\s*(Minor\w+:)?\s*(.*)$') + "^\s*(\d+):\s*([\w\.]+):\s*(Minor\w+:)?\s*(.*)$" + ) # Parse each line of the events file, accumulating comments to be # attached to MinorTrace events when the time changes @@ -787,15 +879,15 @@ class BlobModel(object): event_time, unit, line_type, rest = match.groups() event_time = int(event_time) - unit = re.sub('^' + self.unitNamePrefix + '\.?(.*)$', - '\\1', unit) + unit = re.sub( + "^" + self.unitNamePrefix + "\.?(.*)$", "\\1", unit + ) # When the time changes, resolve comments if event_time != time: if self.numEvents > next_progress_print_event_count: - print(('Parsed to time: %d' % event_time)) - next_progress_print_event_count = ( - self.numEvents + 1000) + print(("Parsed to time: %d" % event_time)) + next_progress_print_event_count = self.numEvents + 1000 
update_comments(comments, time) comments = [] time = event_time @@ -803,7 +895,7 @@ class BlobModel(object): if line_type is None: # Treat this line as just a 'comment' comments.append((unit, rest)) - elif line_type == 'MinorTrace:': + elif line_type == "MinorTrace:": minor_trace_line_count += 1 # Only insert this event if it's not the same as @@ -817,14 +909,15 @@ class BlobModel(object): blobs = self.unitNameToBlobs.get(unit, []) for blob in blobs: if blob.visualDecoder is not None: - event.visuals[blob.picChar] = ( - blob.visualDecoder(pairs)) + event.visuals[ + blob.picChar + ] = blob.visualDecoder(pairs) self.add_unit_event(event) last_time_lines[unit] = rest - elif line_type == 'MinorInst:': + elif line_type == "MinorInst:": self.add_minor_inst(rest) - elif line_type == 'MinorLine:': + elif line_type == "MinorLine:": self.add_minor_line(rest) if endTime is not None and time > endTime: @@ -838,9 +931,13 @@ class BlobModel(object): end_wall_time = wall_time() - print('Total events:', minor_trace_line_count, 'unique events:', \ - self.numEvents) - print('Time to parse:', end_wall_time - start_wall_time) + print( + "Total events:", + minor_trace_line_count, + "unique events:", + self.numEvents, + ) + print("Time to parse:", end_wall_time - start_wall_time) def add_blob_picture(self, offset, pic, nameDict): """Add a parsed ASCII-art pipeline markup to the model""" @@ -865,14 +962,15 @@ class BlobModel(object): if y >= len(pic) or x >= len(pic[y]): return None else: - return pic[y][x:x + charsPerPixel] + return pic[y][x : x + charsPerPixel] def clear_pic_at(point): """Clear the chars at point so we don't trip over them again""" line = pic[point.y] x = point.x * charsPerPixel - pic[point.y] = line[0:x] + (' ' * charsPerPixel) + \ - line[x + charsPerPixel:] + pic[point.y] = ( + line[0:x] + (" " * charsPerPixel) + line[x + charsPerPixel :] + ) def skip_same_char(start, increment): """Skip characters which match pic_at(start)""" @@ -887,9 +985,9 @@ class 
BlobModel(object): start consisting of (at least) a -. shaped corner describing the top right corner of a rectangle of the same char""" char = pic_at(start) - hunt_x = skip_same_char(start, Point(1,0)) - hunt_y = skip_same_char(start, Point(0,1)) - off_bottom_right = (hunt_x * Point(1,0)) + (hunt_y * Point(0,1)) + hunt_x = skip_same_char(start, Point(1, 0)) + hunt_y = skip_same_char(start, Point(0, 1)) + off_bottom_right = (hunt_x * Point(1, 0)) + (hunt_y * Point(0, 1)) return off_bottom_right - start def point_return(point): @@ -909,33 +1007,40 @@ class BlobModel(object): if pic_at(arrow_point) == endChar: clear_pic_at(arrow_point) - self.add_blob(blobs.Arrow('_', start + offset, - direc = direc, - size = (Point(1, 1) + arrow_point - start))) + self.add_blob( + blobs.Arrow( + "_", + start + offset, + direc=direc, + size=(Point(1, 1) + arrow_point - start), + ) + ) else: - print('Bad arrow', start) + print("Bad arrow", start) char = pic_at(start) - if char == '-\\': - body('-/', ' :', 'right') - elif char == '/-': - body('\\-', ': ', 'left') + if char == "-\\": + body("-/", " :", "right") + elif char == "/-": + body("\\-", ": ", "left") - blank_chars = [' ', ' :', ': '] + blank_chars = [" ", " :", ": "] # Traverse the picture left to right, top to bottom to find blobs seen_dict = {} - point = Point(0,0) + point = Point(0, 0) while pic_at(point) is not None: while pic_at(point) is not None: char = pic_at(point) - if char == '->': - self.add_blob(blobs.Arrow('_', point + offset, - direc = 'right')) - elif char == '<-': - self.add_blob(blobs.Arrow('_', point + offset, - direc = 'left')) - elif char == '-\\' or char == '/-': + if char == "->": + self.add_blob( + blobs.Arrow("_", point + offset, direc="right") + ) + elif char == "<-": + self.add_blob( + blobs.Arrow("_", point + offset, direc="left") + ) + elif char == "-\\" or char == "/-": find_arrow(point) elif char in blank_chars: pass @@ -945,9 +1050,14 @@ class BlobModel(object): topLeft = point + offset if char 
not in nameDict: # Unnamed blobs - self.add_blob(blobs.Block(char, - nameDict.get(char, '_'), - topLeft, size = size)) + self.add_blob( + blobs.Block( + char, + nameDict.get(char, "_"), + topLeft, + size=size, + ) + ) else: # Named blobs, set visual info. blob = nameDict[char] @@ -955,11 +1065,12 @@ class BlobModel(object): blob.topLeft = topLeft self.add_blob(blob) seen_dict[char] = True - point = skip_same_char(point, Point(1,0)) + point = skip_same_char(point, Point(1, 0)) point = point_return(point) def load_picture(self, filename): """Load a picture file into the model""" + def parse_blob_description(char, unit, macros, pairsList): # Parse the name value pairs in a blob-describing line def expand_macros(pairs, newPairs): @@ -975,58 +1086,58 @@ class BlobModel(object): ret = None - typ = pairs.get('type', 'block') - colour = colours.name_to_colour(pairs.get('colour', 'black')) + typ = pairs.get("type", "block") + colour = colours.name_to_colour(pairs.get("colour", "black")) - if typ == 'key': - ret = blobs.Key(char, unit, Point(0,0), colour) - elif typ == 'block': - ret = blobs.Block(char, unit, Point(0,0), colour) + if typ == "key": + ret = blobs.Key(char, unit, Point(0, 0), colour) + elif typ == "block": + ret = blobs.Block(char, unit, Point(0, 0), colour) else: print("Bad picture blog type:", typ) - if 'hideId' in pairs: - hide = pairs['hideId'] + if "hideId" in pairs: + hide = pairs["hideId"] ret.dataSelect.ids -= set(hide) - if typ == 'block': - ret.displayName = pairs.get('name', unit) - ret.nameLoc = pairs.get('nameLoc', 'top') - ret.shape = pairs.get('shape', 'box') - ret.stripDir = pairs.get('stripDir', 'horiz') - ret.stripOrd = pairs.get('stripOrd', 'LR') - ret.blankStrips = int(pairs.get('blankStrips', '0')) - ret.shorten = int(pairs.get('shorten', '0')) + if typ == "block": + ret.displayName = pairs.get("name", unit) + ret.nameLoc = pairs.get("nameLoc", "top") + ret.shape = pairs.get("shape", "box") + ret.stripDir = pairs.get("stripDir", "horiz") + 
ret.stripOrd = pairs.get("stripOrd", "LR") + ret.blankStrips = int(pairs.get("blankStrips", "0")) + ret.shorten = int(pairs.get("shorten", "0")) - if 'decoder' in pairs: - decoderName = pairs['decoder'] - dataElement = pairs.get('dataElement', decoderName) + if "decoder" in pairs: + decoderName = pairs["decoder"] + dataElement = pairs.get("dataElement", decoderName) - decoder = find_colour_decoder(ret.blankStrips, - decoderName, dataElement, pairs) + decoder = find_colour_decoder( + ret.blankStrips, decoderName, dataElement, pairs + ) if decoder is not None: ret.visualDecoder = decoder else: - print('Bad visualDecoder requested:', decoderName) + print("Bad visualDecoder requested:", decoderName) - if 'border' in pairs: - border = pairs['border'] - if border == 'thin': + if "border" in pairs: + border = pairs["border"] + if border == "thin": ret.border = 0.2 - elif border == 'mid': + elif border == "mid": ret.border = 0.5 else: ret.border = 1.0 - elif typ == 'key': - ret.colours = pairs.get('colours', ret.colours) + elif typ == "key": + ret.colours = pairs.get("colours", ret.colours) return ret def line_is_comment(line): """Returns true if a line starts with #, returns False for lines which are None""" - return line is not None \ - and re.match('^\s*#', line) is not None + return line is not None and re.match("^\s*#", line) is not None def get_line(f): """Get a line from file f extending that line if it ends in @@ -1038,15 +1149,15 @@ class BlobModel(object): ret = f.readline() if ret is not None: - extend_match = re.match('^(.*)\\\\$', ret) + extend_match = re.match("^(.*)\\\\$", ret) while extend_match is not None: new_line = f.readline() if new_line is not None and not line_is_comment(new_line): - line_wo_backslash, = extend_match.groups() + (line_wo_backslash,) = extend_match.groups() ret = line_wo_backslash + new_line - extend_match = re.match('^(.*)\\\\$', ret) + extend_match = re.match("^(.*)\\\\$", ret) else: extend_match = None @@ -1056,10 +1167,10 @@ class 
BlobModel(object): macros = {} if not os.access(filename, os.R_OK): - print('Can\'t open file', filename) + print("Can't open file", filename) exit(1) else: - print('Opening file', filename) + print("Opening file", filename) f = open(filename) l = get_line(f) @@ -1073,35 +1184,37 @@ class BlobModel(object): in_picture = False while l: l = parse.remove_trailing_ws(l) - l = re.sub('#.*', '', l) + l = re.sub("#.*", "", l) if re.match("^\s*$", l) is not None: pass - elif l == '<<<': + elif l == "<<<": in_picture = True - elif l == '>>>': + elif l == ">>>": in_picture = False elif in_picture: - picture.append(re.sub('\s*$', '', l)) + picture.append(re.sub("\s*$", "", l)) else: line_match = re.match( - '^([a-zA-Z0-9][a-zA-Z0-9]):\s+([\w.]+)\s*(.*)', l) - macro_match = re.match('macro\s+(\w+):(.*)', l) + "^([a-zA-Z0-9][a-zA-Z0-9]):\s+([\w.]+)\s*(.*)", l + ) + macro_match = re.match("macro\s+(\w+):(.*)", l) if macro_match is not None: name, defn = macro_match.groups() macros[name] = parse.parse_pairs_list(defn) elif line_match is not None: char, unit, pairs = line_match.groups() - blob = parse_blob_description(char, unit, macros, - parse.parse_pairs_list(pairs)) + blob = parse_blob_description( + char, unit, macros, parse.parse_pairs_list(pairs) + ) blob_char_dict[char] = blob # Setup the events structure self.unitEvents[unit] = [] else: - print('Problem with Blob line:', l) + print("Problem with Blob line:", l) l = get_line(f) self.blobs = [] - self.add_blob_picture(Point(0,1), picture, blob_char_dict) + self.add_blob_picture(Point(0, 1), picture, blob_char_dict) diff --git a/util/minorview/parse.py b/util/minorview/parse.py index d888f13460..5b6bea0c79 100644 --- a/util/minorview/parse.py +++ b/util/minorview/parse.py @@ -35,22 +35,23 @@ import re + def list_parser(names): """Parse a list of elements, some of which might be one-level sublists within parentheses, into a a list of lists of those elements. 
For example: list_parser('(a,b),c') -> [['a', 'b'], 'c']""" - elems = re.split(',', names) + elems = re.split(",", names) ret = [] accum = [] for elem in elems: - if re.search('^\((.*)\)$', elem): - accum.append(re.sub('^\((.*)\)', '\\1', elem)) + if re.search("^\((.*)\)$", elem): + accum.append(re.sub("^\((.*)\)", "\\1", elem)) ret.append(accum) accum = [] - elif re.search('^\(', elem): - accum.append(re.sub('^\(', '', elem)) - elif re.search('\)$', elem): - accum.append(re.sub('\)$', '', elem)) + elif re.search("^\(", elem): + accum.append(re.sub("^\(", "", elem)) + elif re.search("\)$", elem): + accum.append(re.sub("\)$", "", elem)) ret.append(accum) accum = [] elif len(accum) != 0: @@ -59,20 +60,24 @@ def list_parser(names): ret.append([elem]) if len(accum) > 0: - print('Non matching brackets in', names) + print("Non matching brackets in", names) return ret + def map2(f, ls): """map to a depth of 2. That is, given a list of lists, apply - f to those innermost elements """ + f to those innermost elements""" return [list(map(f, l)) for l in ls] + def remove_trailing_ws(line): - return re.sub('\s*$', '', line) + return re.sub("\s*$", "", line) + def remove_leading_and_trailing_ws(line): - return re.sub('\s*$', '', re.sub('^\s*', '', line)) + return re.sub("\s*$", "", re.sub("^\s*", "", line)) + def parse_pairs_list(pairString): """parse a string like 'name=value name2=value2' into a @@ -82,12 +87,13 @@ def parse_pairs_list(pairString): for pair in pairs: name, rest, value = pair.groups() if value is not None: - value = re.sub('^"(.*)"$', '\\1', value) + value = re.sub('^"(.*)"$', "\\1", value) ret.append((name, value)) else: - ret.append((name, '')) + ret.append((name, "")) return ret + def parse_indexed_list(string): """parse a string of the form "(index,value),(index,value)..." 
into a list of index, value pairs""" @@ -101,7 +107,8 @@ def parse_indexed_list(string): return ret + def parse_pairs(pairString): """parse a string like 'name=value name2=value2' into a - dictionary of {'name': 'value', 'name2': 'value2'} """ + dictionary of {'name': 'value', 'name2': 'value2'}""" return dict(parse_pairs_list(pairString)) diff --git a/util/minorview/point.py b/util/minorview/point.py index 81836e3863..5df990998f 100644 --- a/util/minorview/point.py +++ b/util/minorview/point.py @@ -33,8 +33,10 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + class Point(object): """2D point coordinates/size type""" + def __init__(self, x, y): self.x = x self.y = y @@ -69,8 +71,9 @@ class Point(object): half_size = size.scale(0.5) top_left = centre - half_size bottom_right = centre + half_size - return (top_left.x < self.x and - top_left.y < self.y and - bottom_right.x > self.x and - bottom_right.y > self.y) - + return ( + top_left.x < self.x + and top_left.y < self.y + and bottom_right.x > self.x + and bottom_right.y > self.y + ) diff --git a/util/minorview/view.py b/util/minorview/view.py index 189443f5cc..7c1aef873f 100644 --- a/util/minorview/view.py +++ b/util/minorview/view.py @@ -34,7 +34,8 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import pygtk -pygtk.require('2.0') + +pygtk.require("2.0") import gtk import gobject import cairo @@ -47,8 +48,10 @@ from . import model from .model import Id, BlobModel, BlobDataSelect, special_state_chars from . import blobs + class BlobView(object): """The canvas view of the pipeline""" + def __init__(self, model): # A unit blob will appear at size blobSize inside a space of # size pitch. 
@@ -62,7 +65,7 @@ class BlobView(object): self.midLineWidth = 6.0 # The scale from the units of pitch to device units (nominally # pixels for 1.0 to 1.0 - self.masterScale = Point(1.0,1.0) + self.masterScale = Point(1.0, 1.0) self.model = model self.fillColour = colours.emptySlotColour self.timeIndex = 0 @@ -82,9 +85,11 @@ class BlobView(object): self.overlays = [] self.da = gtk.DrawingArea() + def draw(arg1, arg2): self.redraw() - self.da.connect('expose_event', draw) + + self.da.connect("expose_event", draw) # Handy offsets from the blob size self.blobIndent = (self.pitch - self.blobSize).scale(0.5) @@ -99,7 +104,8 @@ class BlobView(object): surface = cairo.ImageSurface( cairo.FORMAT_ARGB32, self.da.get_allocation().width, - self.da.get_allocation().height) + self.da.get_allocation().height, + ) cr = gtk.gdk.CairoContext(cairo.Context(surface)) self.draw_to_cr(cr) surface.write_to_png(filename) @@ -113,16 +119,16 @@ class BlobView(object): cr.scale(*self.masterScale.to_pair()) cr.translate(*self.origin.to_pair()) - positions = [] # {} + positions = [] # {} # Draw each blob for blob in self.model.blobs: blob_event = self.model.find_unit_event_by_time( - blob.unit, self.time) + blob.unit, self.time + ) cr.save() - pos = blob.render(cr, self, blob_event, self.dataSelect, - self.time) + pos = blob.render(cr, self, blob_event, self.dataSelect, self.time) cr.restore() if pos is not None: (centre, size) = pos @@ -141,7 +147,8 @@ class BlobView(object): buffer = cairo.ImageSurface( cairo.FORMAT_ARGB32, self.da.get_allocation().width, - self.da.get_allocation().height) + self.da.get_allocation().height, + ) cr = gtk.gdk.CairoContext(cairo.Context(buffer)) positions = self.draw_to_cr(cr) @@ -173,17 +180,21 @@ class BlobView(object): def get_pic_size(self): """Return the size of ASCII-art picture of the pipeline scaled by the blob pitch""" - return (self.origin + self.pitch * - (self.model.picSize + Point(1.0,1.0))) + return self.origin + self.pitch * ( + self.model.picSize 
+ Point(1.0, 1.0) + ) def set_da_size(self): """Set the DrawingArea size after scaling""" - self.da.set_size_request(10 , int(self.initialHeight)) + self.da.set_size_request(10, int(self.initialHeight)) + class BlobController(object): """The controller bar for the viewer""" - def __init__(self, model, view, - defaultEventFile="", defaultPictureFile=""): + + def __init__( + self, model, view, defaultEventFile="", defaultPictureFile="" + ): self.model = model self.view = view self.playTimer = None @@ -209,17 +220,17 @@ class BlobController(object): self.timeEntry = gtk.Entry() - t = gtk.ToggleButton('T') + t = gtk.ToggleButton("T") t.set_active(False) - s = gtk.ToggleButton('S') + s = gtk.ToggleButton("S") s.set_active(True) - p = gtk.ToggleButton('P') + p = gtk.ToggleButton("P") p.set_active(True) - l = gtk.ToggleButton('L') + l = gtk.ToggleButton("L") l.set_active(True) - f = gtk.ToggleButton('F') + f = gtk.ToggleButton("F") f.set_active(True) - e = gtk.ToggleButton('E') + e = gtk.ToggleButton("E") e.set_active(True) # Should really generate this from above @@ -228,35 +239,38 @@ class BlobController(object): self.bar = gtk.VBox() self.bar.set_homogeneous(False) - row1 = make_bar([ - (gtk.Button('Start'), 'clicked', self.time_start), - (gtk.Button('End'), 'clicked', self.time_end), - (gtk.Button('Back'), 'clicked', self.time_back), - (gtk.Button('Forward'), 'clicked', self.time_forward), - (gtk.Button('Play'), 'clicked', self.time_play), - (gtk.Button('Stop'), 'clicked', self.time_stop), - (self.timeEntry, 'activate', self.time_set), - (gtk.Label('Visible ids:'), None, None), - (t, 'clicked', self.toggle_id('T')), - (gtk.Label('/'), None, None), - (s, 'clicked', self.toggle_id('S')), - (gtk.Label('.'), None, None), - (p, 'clicked', self.toggle_id('P')), - (gtk.Label('/'), None, None), - (l, 'clicked', self.toggle_id('L')), - (gtk.Label('/'), None, None), - (f, 'clicked', self.toggle_id('F')), - (gtk.Label('.'), None, None), - (e, 'clicked', self.toggle_id('E')), - 
(self.filenameEntry, 'activate', self.load_events), - (gtk.Button('Reload'), 'clicked', self.load_events) - ]) + row1 = make_bar( + [ + (gtk.Button("Start"), "clicked", self.time_start), + (gtk.Button("End"), "clicked", self.time_end), + (gtk.Button("Back"), "clicked", self.time_back), + (gtk.Button("Forward"), "clicked", self.time_forward), + (gtk.Button("Play"), "clicked", self.time_play), + (gtk.Button("Stop"), "clicked", self.time_stop), + (self.timeEntry, "activate", self.time_set), + (gtk.Label("Visible ids:"), None, None), + (t, "clicked", self.toggle_id("T")), + (gtk.Label("/"), None, None), + (s, "clicked", self.toggle_id("S")), + (gtk.Label("."), None, None), + (p, "clicked", self.toggle_id("P")), + (gtk.Label("/"), None, None), + (l, "clicked", self.toggle_id("L")), + (gtk.Label("/"), None, None), + (f, "clicked", self.toggle_id("F")), + (gtk.Label("."), None, None), + (e, "clicked", self.toggle_id("E")), + (self.filenameEntry, "activate", self.load_events), + (gtk.Button("Reload"), "clicked", self.load_events), + ] + ) self.bar.pack_start(row1, False, True, 0) self.set_time_index(0) def toggle_id(self, id): """One of the sequence number selector buttons has been toggled""" + def toggle(button): if button.get_active(): self.view.dataSelect.ids.add(id) @@ -268,6 +282,7 @@ class BlobController(object): self.view.dataSelect.ids.add(id) button.set_active(True) self.view.redraw() + return toggle def set_time_index(self, time): @@ -292,8 +307,9 @@ class BlobController(object): def time_forward(self, button): """Step forward pressed""" - self.set_time_index(min(self.view.timeIndex + 1, - len(self.model.times) - 1)) + self.set_time_index( + min(self.view.timeIndex + 1, len(self.model.times) - 1) + ) self.view.redraw() gtk.gdk.flush() @@ -311,8 +327,10 @@ class BlobController(object): def time_step(self): """Time step while playing""" - if not self.playTimer \ - or self.view.timeIndex == len(self.model.times) - 1: + if ( + not self.playTimer + or 
self.view.timeIndex == len(self.model.times) - 1 + ): self.time_stop(None) return False else: @@ -332,14 +350,20 @@ class BlobController(object): def load_events(self, button): """Reload events file""" - self.model.load_events(self.filenameEntry.get_text(), - startTime=self.startTime, endTime=self.endTime) - self.set_time_index(min(len(self.model.times) - 1, - self.view.timeIndex)) + self.model.load_events( + self.filenameEntry.get_text(), + startTime=self.startTime, + endTime=self.endTime, + ) + self.set_time_index( + min(len(self.model.times) - 1, self.view.timeIndex) + ) self.view.redraw() + class Overlay(object): """An Overlay is a speech bubble explaining the data in a blob""" + def __init__(self, model, view, point, blob): self.model = model self.view = view @@ -348,8 +372,9 @@ class Overlay(object): def find_event(self): """Find the event for a changing time and a fixed blob""" - return self.model.find_unit_event_by_time(self.blob.unit, - self.view.time) + return self.model.find_unit_event_by_time( + self.blob.unit, self.view.time + ) def show(self, cr): """Draw the overlay""" @@ -358,12 +383,11 @@ class Overlay(object): if event is None: return - insts = event.find_ided_objects(self.model, self.blob.picChar, - False) + insts = event.find_ided_objects(self.model, self.blob.picChar, False) cr.set_line_width(self.view.thinLineWidth) - cr.translate(*(Point(0.0,0.0) - self.view.origin).to_pair()) - cr.scale(*(Point(1.0,1.0) / self.view.masterScale).to_pair()) + cr.translate(*(Point(0.0, 0.0) - self.view.origin).to_pair()) + cr.scale(*(Point(1.0, 1.0) / self.view.masterScale).to_pair()) # Get formatted data from the insts to format into a table lines = list(inst.table_line() for inst in insts) @@ -403,7 +427,7 @@ class Overlay(object): cr.set_source_color(colours.black) cr.stroke() - text_point += Point(1.0,1.0).scale(2.0 * text_size) + text_point += Point(1.0, 1.0).scale(2.0 * text_size) id_size = Point(id_width, text_size) @@ -412,8 +436,12 @@ class 
Overlay(object): row_point = text_point inst = insts[i] line = lines[i] - blobs.striped_box(cr, row_point + id_size.scale(0.5), - id_size, inst.id.to_striped_block(self.view.dataSelect)) + blobs.striped_box( + cr, + row_point + id_size.scale(0.5), + id_size, + inst.id.to_striped_block(self.view.dataSelect), + ) cr.set_source_color(colours.black) row_point += Point(1.0, 0.0).scale(id_width) @@ -427,8 +455,10 @@ class Overlay(object): text_point += text_step + class BlobWindow(object): """The top-level window and its mouse control""" + def __init__(self, model, view, controller): self.model = model self.view = view @@ -469,18 +499,17 @@ class BlobWindow(object): self.window.add(self.vbox) def show_event(picChar, event): - print('**** Comments for', event.unit, \ - 'at time', self.view.time) + print("**** Comments for", event.unit, "at time", self.view.time) for name, value in event.pairs.items(): - print(name, '=', value) + print(name, "=", value) for comment in event.comments: print(comment) if picChar in event.visuals: # blocks = event.visuals[picChar].elems() - print('**** Colour data') + print("**** Colour data") objs = event.find_ided_objects(self.model, picChar, True) for obj in objs: - print(' '.join(obj.table_line())) + print(" ".join(obj.table_line())) def clicked_da(da, b): point = Point(b.x, b.y) @@ -488,12 +517,14 @@ class BlobWindow(object): overlay = None for blob, centre, size in self.view.positions: if point.is_within_box((centre, size)): - event = self.model.find_unit_event_by_time(blob.unit, - self.view.time) + event = self.model.find_unit_event_by_time( + blob.unit, self.view.time + ) if event is not None: if overlay is None: - overlay = Overlay(self.model, self.view, point, - blob) + overlay = Overlay( + self.model, self.view, point, blob + ) show_event(blob.picChar, event) if overlay is not None: self.view.overlays = [overlay] @@ -505,8 +536,8 @@ class BlobWindow(object): # Set initial size and event callbacks self.view.set_da_size() 
self.view.da.add_events(gtk.gdk.BUTTON_PRESS_MASK) - self.view.da.connect('button-press-event', clicked_da) - self.window.connect('destroy', lambda widget: gtk.main_quit()) + self.view.da.connect("button-press-event", clicked_da) + self.window.connect("destroy", lambda widget: gtk.main_quit()) def resize(window, event): """Resize DrawingArea to match new window size""" @@ -517,6 +548,6 @@ class BlobWindow(object): self.view.masterScale = Point(daScale, daScale) self.view.overlays = [] - self.view.da.connect('configure-event', resize) + self.view.da.connect("configure-event", resize) self.window.show_all() diff --git a/util/o3-pipeview.py b/util/o3-pipeview.py index 588560995e..18f66129c8 100755 --- a/util/o3-pipeview.py +++ b/util/o3-pipeview.py @@ -49,32 +49,45 @@ import copy # It is assumed that the instructions are not out of order for more then # 'min_threshold' places - otherwise they will appear out of order. insts = { - 'queue': [] , # Instructions to print. - 'max_threshold':2000, # Instructions are sorted out and printed when - # their number reaches this threshold. - 'min_threshold':1000, # Printing stops when this number is reached. - 'sn_start':0, # The first instruction seq. number to be printed. - 'sn_stop':0, # The last instruction seq. number to be printed. - 'tick_start':0, # The first tick to be printed - 'tick_stop':0, # The last tick to be printed - 'tick_drift':2000, # Used to calculate the start and the end of main - # loop. We assume here that the instructions are not - # out of order for more then 2000 CPU ticks, - # otherwise the print may not start/stop - # at the time specified by tick_start/stop. - 'only_committed':0, # Set if only committed instructions are printed. + "queue": [], # Instructions to print. + "max_threshold": 2000, # Instructions are sorted out and printed when + # their number reaches this threshold. + "min_threshold": 1000, # Printing stops when this number is reached. + "sn_start": 0, # The first instruction seq. 
number to be printed. + "sn_stop": 0, # The last instruction seq. number to be printed. + "tick_start": 0, # The first tick to be printed + "tick_stop": 0, # The last tick to be printed + "tick_drift": 2000, # Used to calculate the start and the end of main + # loop. We assume here that the instructions are not + # out of order for more then 2000 CPU ticks, + # otherwise the print may not start/stop + # at the time specified by tick_start/stop. + "only_committed": 0, # Set if only committed instructions are printed. } -def process_trace(trace, outfile, cycle_time, width, color, timestamps, - committed_only, store_completions, start_tick, stop_tick, start_sn, stop_sn): + +def process_trace( + trace, + outfile, + cycle_time, + width, + color, + timestamps, + committed_only, + store_completions, + start_tick, + stop_tick, + start_sn, + stop_sn, +): global insts - insts['sn_start'] = start_sn - insts['sn_stop'] = stop_sn - insts['tick_start'] = start_tick - insts['tick_stop'] = stop_tick - insts['tick_drift'] = insts['tick_drift'] * cycle_time - insts['only_committed'] = committed_only + insts["sn_start"] = start_sn + insts["sn_stop"] = stop_sn + insts["tick_start"] = start_tick + insts["tick_stop"] = stop_tick + insts["tick_drift"] = insts["tick_drift"] * cycle_time + insts["only_committed"] = committed_only line = None fields = None @@ -82,294 +95,442 @@ def process_trace(trace, outfile, cycle_time, width, color, timestamps, if start_tick != 0: while True: line = trace.readline() - if not line: return - fields = line.split(':') - if fields[0] != 'O3PipeView': continue - if int(fields[2]) >= start_tick: break + if not line: + return + fields = line.split(":") + if fields[0] != "O3PipeView": + continue + if int(fields[2]) >= start_tick: + break elif start_sn != 0: while True: line = trace.readline() - if not line: return - fields = line.split(':') - if fields[0] != 'O3PipeView': continue - if fields[1] == 'fetch' and int(fields[5]) >= start_sn: break + if not line: + 
return + fields = line.split(":") + if fields[0] != "O3PipeView": + continue + if fields[1] == "fetch" and int(fields[5]) >= start_sn: + break else: line = trace.readline() - if not line: return - fields = line.split(':') + if not line: + return + fields = line.split(":") # Skip lines up to next instruction fetch - while fields[0] != 'O3PipeView' or fields[1] != 'fetch': + while fields[0] != "O3PipeView" or fields[1] != "fetch": line = trace.readline() - if not line: return - fields = line.split(':') + if not line: + return + fields = line.split(":") # Print header - outfile.write('// f = fetch, d = decode, n = rename, p = dispatch, ' - 'i = issue, c = complete, r = retire') + outfile.write( + "// f = fetch, d = decode, n = rename, p = dispatch, " + "i = issue, c = complete, r = retire" + ) if store_completions: - outfile.write(', s = store-complete') - outfile.write('\n\n') + outfile.write(", s = store-complete") + outfile.write("\n\n") - outfile.write(' ' + 'timeline'.center(width) + - ' ' + 'tick'.center(15) + - ' ' + 'pc.upc'.center(12) + - ' ' + 'disasm'.ljust(25) + - ' ' + 'seq_num'.center(10)) + outfile.write( + " " + + "timeline".center(width) + + " " + + "tick".center(15) + + " " + + "pc.upc".center(12) + + " " + + "disasm".ljust(25) + + " " + + "seq_num".center(10) + ) if timestamps: - outfile.write('timestamps'.center(25)) - outfile.write('\n') + outfile.write("timestamps".center(25)) + outfile.write("\n") # Region of interest curr_inst = {} while True: - if fields[0] == 'O3PipeView': + if fields[0] == "O3PipeView": curr_inst[fields[1]] = int(fields[2]) - if fields[1] == 'fetch': - if ((stop_tick > 0 and int(fields[2]) > stop_tick+insts['tick_drift']) or - (stop_sn > 0 and int(fields[5]) > (stop_sn+insts['max_threshold']))): - print_insts(outfile, cycle_time, width, color, timestamps, - store_completions, 0) + if fields[1] == "fetch": + if ( + stop_tick > 0 + and int(fields[2]) > stop_tick + insts["tick_drift"] + ) or ( + stop_sn > 0 + and int(fields[5]) 
> (stop_sn + insts["max_threshold"]) + ): + print_insts( + outfile, + cycle_time, + width, + color, + timestamps, + store_completions, + 0, + ) return - (curr_inst['pc'], curr_inst['upc']) = fields[3:5] - curr_inst['sn'] = int(fields[5]) - curr_inst['disasm'] = ' '.join(fields[6][:-1].split()) - elif fields[1] == 'retire': - if curr_inst['retire'] == 0: - curr_inst['disasm'] = '-----' + curr_inst['disasm'] + (curr_inst["pc"], curr_inst["upc"]) = fields[3:5] + curr_inst["sn"] = int(fields[5]) + curr_inst["disasm"] = " ".join(fields[6][:-1].split()) + elif fields[1] == "retire": + if curr_inst["retire"] == 0: + curr_inst["disasm"] = "-----" + curr_inst["disasm"] if store_completions: curr_inst[fields[3]] = int(fields[4]) - queue_inst(outfile, curr_inst, cycle_time, width, color, timestamps, store_completions) + queue_inst( + outfile, + curr_inst, + cycle_time, + width, + color, + timestamps, + store_completions, + ) line = trace.readline() if not line: - print_insts(outfile, cycle_time, width, color, timestamps, store_completions, 0) + print_insts( + outfile, + cycle_time, + width, + color, + timestamps, + store_completions, + 0, + ) return - fields = line.split(':') + fields = line.split(":") # Puts new instruction into the print queue. 
# Sorts out and prints instructions when their number reaches threshold value -def queue_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions): +def queue_inst( + outfile, inst, cycle_time, width, color, timestamps, store_completions +): global insts l_copy = copy.deepcopy(inst) - insts['queue'].append(l_copy) - if len(insts['queue']) > insts['max_threshold']: - print_insts(outfile, cycle_time, width, color, timestamps, store_completions, insts['min_threshold']) + insts["queue"].append(l_copy) + if len(insts["queue"]) > insts["max_threshold"]: + print_insts( + outfile, + cycle_time, + width, + color, + timestamps, + store_completions, + insts["min_threshold"], + ) + # Sorts out and prints instructions in print queue -def print_insts(outfile, cycle_time, width, color, timestamps, store_completions, lower_threshold): +def print_insts( + outfile, + cycle_time, + width, + color, + timestamps, + store_completions, + lower_threshold, +): global insts # sort the list of insts by sequence numbers - insts['queue'].sort(key=lambda inst: inst['sn']) - while len(insts['queue']) > lower_threshold: - print_item=insts['queue'].pop(0) + insts["queue"].sort(key=lambda inst: inst["sn"]) + while len(insts["queue"]) > lower_threshold: + print_item = insts["queue"].pop(0) # As the instructions are processed out of order the main loop starts # earlier then specified by start_sn/tick and finishes later then what # is defined in stop_sn/tick. # Therefore, here we have to filter out instructions that reside out of # the specified boundaries. 
- if (insts['sn_start'] > 0 and print_item['sn'] < insts['sn_start']): - continue; # earlier then the starting sequence number - if (insts['sn_stop'] > 0 and print_item['sn'] > insts['sn_stop']): - continue; # later then the ending sequence number - if (insts['tick_start'] > 0 and print_item['fetch'] < insts['tick_start']): - continue; # earlier then the starting tick number - if (insts['tick_stop'] > 0 and print_item['fetch'] > insts['tick_stop']): - continue; # later then the ending tick number + if insts["sn_start"] > 0 and print_item["sn"] < insts["sn_start"]: + continue + # earlier then the starting sequence number + if insts["sn_stop"] > 0 and print_item["sn"] > insts["sn_stop"]: + continue + # later then the ending sequence number + if ( + insts["tick_start"] > 0 + and print_item["fetch"] < insts["tick_start"] + ): + continue + # earlier then the starting tick number + if insts["tick_stop"] > 0 and print_item["fetch"] > insts["tick_stop"]: + continue + # later then the ending tick number + + if insts["only_committed"] != 0 and print_item["retire"] == 0: + continue + # retire is set to zero if it hasn't been completed + print_inst( + outfile, + print_item, + cycle_time, + width, + color, + timestamps, + store_completions, + ) - if (insts['only_committed'] != 0 and print_item['retire'] == 0): - continue; # retire is set to zero if it hasn't been completed - print_inst(outfile, print_item, cycle_time, width, color, timestamps, store_completions) # Prints a single instruction -def print_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions): +def print_inst( + outfile, inst, cycle_time, width, color, timestamps, store_completions +): if color: from m5.util.terminal import termcap else: from m5.util.terminal import no_termcap as termcap # Pipeline stages - stages = [{'name': 'fetch', - 'color': termcap.Blue + termcap.Reverse, - 'shorthand': 'f'}, - {'name': 'decode', - 'color': termcap.Yellow + termcap.Reverse, - 'shorthand': 'd'}, - 
{'name': 'rename', - 'color': termcap.Magenta + termcap.Reverse, - 'shorthand': 'n'}, - {'name': 'dispatch', - 'color': termcap.Green + termcap.Reverse, - 'shorthand': 'p'}, - {'name': 'issue', - 'color': termcap.Red + termcap.Reverse, - 'shorthand': 'i'}, - {'name': 'complete', - 'color': termcap.Cyan + termcap.Reverse, - 'shorthand': 'c'}, - {'name': 'retire', - 'color': termcap.Blue + termcap.Reverse, - 'shorthand': 'r'} - ] + stages = [ + { + "name": "fetch", + "color": termcap.Blue + termcap.Reverse, + "shorthand": "f", + }, + { + "name": "decode", + "color": termcap.Yellow + termcap.Reverse, + "shorthand": "d", + }, + { + "name": "rename", + "color": termcap.Magenta + termcap.Reverse, + "shorthand": "n", + }, + { + "name": "dispatch", + "color": termcap.Green + termcap.Reverse, + "shorthand": "p", + }, + { + "name": "issue", + "color": termcap.Red + termcap.Reverse, + "shorthand": "i", + }, + { + "name": "complete", + "color": termcap.Cyan + termcap.Reverse, + "shorthand": "c", + }, + { + "name": "retire", + "color": termcap.Blue + termcap.Reverse, + "shorthand": "r", + }, + ] if store_completions: stages.append( - {'name': 'store', - 'color': termcap.Yellow + termcap.Reverse, - 'shorthand': 's'}) + { + "name": "store", + "color": termcap.Yellow + termcap.Reverse, + "shorthand": "s", + } + ) # Print time_width = width * cycle_time - base_tick = (inst['fetch'] // time_width) * time_width + base_tick = (inst["fetch"] // time_width) * time_width # Find out the time of the last event - it may not # be 'retire' if the instruction is not comlpeted. 
- last_event_time = max(inst['fetch'], inst['decode'],inst['rename'], - inst['dispatch'],inst['issue'], inst['complete'], inst['retire']) + last_event_time = max( + inst["fetch"], + inst["decode"], + inst["rename"], + inst["dispatch"], + inst["issue"], + inst["complete"], + inst["retire"], + ) if store_completions: - last_event_time = max(last_event_time, inst['store']) + last_event_time = max(last_event_time, inst["store"]) # Timeline shorter then time_width is printed in compact form where # the print continues at the start of the same line. - if ((last_event_time - inst['fetch']) < time_width): - num_lines = 1 # compact form + if (last_event_time - inst["fetch"]) < time_width: + num_lines = 1 # compact form else: num_lines = ((last_event_time - base_tick) // time_width) + 1 curr_color = termcap.Normal # This will visually distinguish completed and abandoned intructions. - if inst['retire'] == 0: dot = '=' # abandoned instruction - else: dot = '.' # completed instruction + if inst["retire"] == 0: + dot = "=" # abandoned instruction + else: + dot = "." 
# completed instruction for i in range(num_lines): start_tick = base_tick + i * time_width end_tick = start_tick + time_width if num_lines == 1: # compact form - end_tick += (inst['fetch'] - base_tick) + end_tick += inst["fetch"] - base_tick events = [] for stage_idx in range(len(stages)): - tick = inst[stages[stage_idx]['name']] + tick = inst[stages[stage_idx]["name"]] if tick != 0: if tick >= start_tick and tick < end_tick: - events.append((tick % time_width, - stages[stage_idx]['name'], - stage_idx, tick)) + events.append( + ( + tick % time_width, + stages[stage_idx]["name"], + stage_idx, + tick, + ) + ) events.sort() - outfile.write('[') + outfile.write("[") pos = 0 if num_lines == 1 and events[0][2] != 0: # event is not fetch - curr_color = stages[events[0][2] - 1]['color'] + curr_color = stages[events[0][2] - 1]["color"] for event in events: - if (stages[event[2]]['name'] == 'dispatch' and - inst['dispatch'] == inst['issue']): + if ( + stages[event[2]]["name"] == "dispatch" + and inst["dispatch"] == inst["issue"] + ): continue outfile.write(curr_color + dot * ((event[0] // cycle_time) - pos)) - outfile.write(stages[event[2]]['color'] + - stages[event[2]]['shorthand']) + outfile.write( + stages[event[2]]["color"] + stages[event[2]]["shorthand"] + ) if event[3] != last_event_time: # event is not the last one - curr_color = stages[event[2]]['color'] + curr_color = stages[event[2]]["color"] else: curr_color = termcap.Normal pos = (event[0] // cycle_time) + 1 - outfile.write(curr_color + dot * (width - pos) + termcap.Normal + - ']-(' + str(base_tick + i * time_width).rjust(15) + ') ') + outfile.write( + curr_color + + dot * (width - pos) + + termcap.Normal + + "]-(" + + str(base_tick + i * time_width).rjust(15) + + ") " + ) if i == 0: - outfile.write('%s.%s %s [%s]' % ( - inst['pc'].rjust(10), - inst['upc'], - inst['disasm'].ljust(25), - str(inst['sn']).rjust(10))) + outfile.write( + "%s.%s %s [%s]" + % ( + inst["pc"].rjust(10), + inst["upc"], + 
inst["disasm"].ljust(25), + str(inst["sn"]).rjust(10), + ) + ) if timestamps: - outfile.write(' f=%s, r=%s' % (inst['fetch'], inst['retire'])) - outfile.write('\n') + outfile.write(" f=%s, r=%s" % (inst["fetch"], inst["retire"])) + outfile.write("\n") else: - outfile.write('...'.center(12) + '\n') + outfile.write("...".center(12) + "\n") def validate_range(my_range): - my_range = [int(i) for i in my_range.split(':')] - if (len(my_range) != 2 or - my_range[0] < 0 or - my_range[1] > 0 and my_range[0] >= my_range[1]): + my_range = [int(i) for i in my_range.split(":")] + if ( + len(my_range) != 2 + or my_range[0] < 0 + or my_range[1] > 0 + and my_range[0] >= my_range[1] + ): return None return my_range def main(): # Parse args - usage = ('%(prog)s [OPTION]... TRACE_FILE') + usage = "%(prog)s [OPTION]... TRACE_FILE" parser = argparse.ArgumentParser( - usage=usage, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + usage=usage, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) parser.add_argument( - '-o', - dest='outfile', - default=os.path.join(os.getcwd(), 'o3-pipeview.out'), - help="output file") + "-o", + dest="outfile", + default=os.path.join(os.getcwd(), "o3-pipeview.out"), + help="output file", + ) parser.add_argument( - '-t', - dest='tick_range', - default='0:-1', - help="tick range (-1 == inf.)") + "-t", dest="tick_range", default="0:-1", help="tick range (-1 == inf.)" + ) parser.add_argument( - '-i', - dest='inst_range', - default='0:-1', - help="instruction range (-1 == inf.)") + "-i", + dest="inst_range", + default="0:-1", + help="instruction range (-1 == inf.)", + ) parser.add_argument( - '-w', - dest='width', - type=int, default=80, - help="timeline width") + "-w", dest="width", type=int, default=80, help="timeline width" + ) parser.add_argument( - '--color', - action='store_true', default=False, - help="enable colored output") + "--color", + action="store_true", + default=False, + help="enable colored output", + ) parser.add_argument( - 
'-c', '--cycle-time', - type=int, default=1000, - help="CPU cycle time in ticks") + "-c", + "--cycle-time", + type=int, + default=1000, + help="CPU cycle time in ticks", + ) parser.add_argument( - '--timestamps', - action='store_true', default=False, - help="print fetch and retire timestamps") + "--timestamps", + action="store_true", + default=False, + help="print fetch and retire timestamps", + ) parser.add_argument( - '--only_committed', - action='store_true', default=False, - help="display only committed (completed) instructions") + "--only_committed", + action="store_true", + default=False, + help="display only committed (completed) instructions", + ) parser.add_argument( - '--store_completions', - action='store_true', default=False, - help="additionally display store completion ticks") - parser.add_argument( - 'tracefile') + "--store_completions", + action="store_true", + default=False, + help="additionally display store completion ticks", + ) + parser.add_argument("tracefile") args = parser.parse_args() tick_range = validate_range(args.tick_range) if not tick_range: - parser.error('invalid range') + parser.error("invalid range") sys.exit(1) inst_range = validate_range(args.inst_range) if not inst_range: - parser.error('invalid range') + parser.error("invalid range") sys.exit(1) # Process trace - print('Processing trace... ', end=' ') - with open(args.tracefile, 'r') as trace: - with open(args.outfile, 'w') as out: - process_trace(trace, out, args.cycle_time, args.width, - args.color, args.timestamps, - args.only_committed, args.store_completions, - *(tick_range + inst_range)) - print('done!') + print("Processing trace... 
", end=" ") + with open(args.tracefile, "r") as trace: + with open(args.outfile, "w") as out: + process_trace( + trace, + out, + args.cycle_time, + args.width, + args.color, + args.timestamps, + args.only_committed, + args.store_completions, + *(tick_range + inst_range) + ) + print("done!") -if __name__ == '__main__': - sys.path.append(os.path.join( - os.path.dirname(os.path.abspath(__file__)), - '..', 'src', 'python')) +if __name__ == "__main__": + sys.path.append( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "src", "python" + ) + ) main() diff --git a/util/on-chip-network-power-area.py b/util/on-chip-network-power-area.py index 18bb301da2..6a576bbdb3 100644 --- a/util/on-chip-network-power-area.py +++ b/util/on-chip-network-power-area.py @@ -33,19 +33,19 @@ import string, sys, subprocess, os print("Attempting compilation") from subprocess import call -src_dir = 'ext/dsent' -build_dir = 'build/ext/dsent' +src_dir = "ext/dsent" +build_dir = "build/ext/dsent" if not os.path.exists(build_dir): os.makedirs(build_dir) os.chdir(build_dir) -error = call(['cmake', '../../../%s' % src_dir]) +error = call(["cmake", "../../../%s" % src_dir]) if error: print("Failed to run cmake") exit(-1) -error = call(['make']) +error = call(["make"]) if error: print("Failed to run make") exit(-1) @@ -67,29 +67,41 @@ def parseConfig(config_file): print(("ERROR: Ruby network not found in '", config_file)) sys.exit(1) - if config.get("system.ruby.network", "type") != "GarnetNetwork_d" : + if config.get("system.ruby.network", "type") != "GarnetNetwork_d": print(("ERROR: Garnet network not used in '", config_file)) sys.exit(1) - number_of_virtual_networks = config.getint("system.ruby.network", - "number_of_virtual_networks") + number_of_virtual_networks = config.getint( + "system.ruby.network", "number_of_virtual_networks" + ) vcs_per_vnet = config.getint("system.ruby.network", "vcs_per_vnet") - buffers_per_data_vc = config.getint("system.ruby.network", - 
"buffers_per_data_vc") - buffers_per_control_vc = config.getint("system.ruby.network", - "buffers_per_ctrl_vc") + buffers_per_data_vc = config.getint( + "system.ruby.network", "buffers_per_data_vc" + ) + buffers_per_control_vc = config.getint( + "system.ruby.network", "buffers_per_ctrl_vc" + ) - ni_flit_size_bits = 8 * config.getint("system.ruby.network", - "ni_flit_size") + ni_flit_size_bits = 8 * config.getint( + "system.ruby.network", "ni_flit_size" + ) routers = config.get("system.ruby.network", "routers").split() int_links = config.get("system.ruby.network", "int_links").split() ext_links = config.get("system.ruby.network", "ext_links").split() - return (config, number_of_virtual_networks, vcs_per_vnet, - buffers_per_data_vc, buffers_per_control_vc, ni_flit_size_bits, - routers, int_links, ext_links) + return ( + config, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + buffers_per_control_vc, + ni_flit_size_bits, + routers, + int_links, + ext_links, + ) def getClock(obj, config): @@ -99,33 +111,48 @@ def getClock(obj, config): if config.get(obj, "type") == "DerivedClockDomain": source = config.get(obj, "clk_domain") divider = config.getint(obj, "clk_divider") - return getClock(source, config) / divider + return getClock(source, config) / divider source = config.get(obj, "clk_domain") return getClock(source, config) ## Compute the power consumed by the given router -def computeRouterPowerAndArea(router, stats_file, config, int_links, ext_links, - number_of_virtual_networks, vcs_per_vnet, - buffers_per_data_vc, buffers_per_control_vc, - ni_flit_size_bits): +def computeRouterPowerAndArea( + router, + stats_file, + config, + int_links, + ext_links, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + buffers_per_control_vc, + ni_flit_size_bits, +): frequency = getClock(router, config) num_ports = 0 for int_link in int_links: - if config.get(int_link, "node_a") == router or \ - config.get(int_link, "node_b") == router: - 
num_ports += 1 + if ( + config.get(int_link, "node_a") == router + or config.get(int_link, "node_b") == router + ): + num_ports += 1 for ext_link in ext_links: if config.get(ext_link, "int_node") == router: - num_ports += 1 + num_ports += 1 - power = dsent.computeRouterPowerAndArea(frequency, num_ports, num_ports, - number_of_virtual_networks, - vcs_per_vnet, buffers_per_data_vc, - ni_flit_size_bits) + power = dsent.computeRouterPowerAndArea( + frequency, + num_ports, + num_ports, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + ni_flit_size_bits, + ) print("%s Power: " % router, power) @@ -141,15 +168,25 @@ def computeLinkPower(link, stats_file, config, sim_seconds): print("%s.nls1 Power: " % link, power) -def parseStats(stats_file, config, router_config_file, link_config_file, - routers, int_links, ext_links, number_of_virtual_networks, - vcs_per_vnet, buffers_per_data_vc, buffers_per_control_vc, - ni_flit_size_bits): +def parseStats( + stats_file, + config, + router_config_file, + link_config_file, + routers, + int_links, + ext_links, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + buffers_per_control_vc, + ni_flit_size_bits, +): # Open the stats.txt file and parse it to for the required numbers # and the number of routers. 
try: - stats_handle = open(stats_file, 'r') + stats_handle = open(stats_file, "r") stats_handle.close() except IOError: print("Failed to open ", stats_file, " for reading") @@ -157,12 +194,13 @@ def parseStats(stats_file, config, router_config_file, link_config_file, # Now parse the stats pattern = "sim_seconds" - lines = string.split(subprocess.check_output( - ["grep", pattern, stats_file]), '\n', -1) + lines = string.split( + subprocess.check_output(["grep", pattern, stats_file]), "\n", -1 + ) assert len(lines) >= 1 ## Assume that the first line is the one required - [l1,l2,l3] = lines[0].partition(" ") + [l1, l2, l3] = lines[0].partition(" ") l4 = l3.strip().partition(" ") simulation_length_in_seconds = float(l4[0]) @@ -171,10 +209,18 @@ def parseStats(stats_file, config, router_config_file, link_config_file, # Compute the power consumed by the routers for router in routers: - computeRouterPowerAndArea(router, stats_file, config, int_links, - ext_links, number_of_virtual_networks, - vcs_per_vnet, buffers_per_data_vc, - buffers_per_control_vc, ni_flit_size_bits) + computeRouterPowerAndArea( + router, + stats_file, + config, + int_links, + ext_links, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + buffers_per_control_vc, + ni_flit_size_bits, + ) # Finalize DSENT dsent.finalize() @@ -184,34 +230,62 @@ def parseStats(stats_file, config, router_config_file, link_config_file, # Compute the power consumed by the links for link in int_links: - computeLinkPower(link, stats_file, config, - simulation_length_in_seconds) + computeLinkPower( + link, stats_file, config, simulation_length_in_seconds + ) for link in ext_links: - computeLinkPower(link, stats_file, config, - simulation_length_in_seconds) + computeLinkPower( + link, stats_file, config, simulation_length_in_seconds + ) # Finalize DSENT dsent.finalize() + # This script parses the config.ini and the stats.txt from a run and # generates the power and the area of the on-chip network using DSENT 
def main(): if len(sys.argv) != 5: - print("Usage: ", sys.argv[0], " " \ - " ") + print( + "Usage: ", + sys.argv[0], + " " + " ", + ) exit(-1) - print("WARNING: configuration files for DSENT and McPAT are separate. " \ - "Changes made to one are not reflected in the other.") + print( + "WARNING: configuration files for DSENT and McPAT are separate. " + "Changes made to one are not reflected in the other." + ) - (config, number_of_virtual_networks, vcs_per_vnet, buffers_per_data_vc, - buffers_per_control_vc, ni_flit_size_bits, routers, int_links, - ext_links) = parseConfig("%s/%s/config.ini" % (sys.argv[1], sys.argv[2])) + ( + config, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + buffers_per_control_vc, + ni_flit_size_bits, + routers, + int_links, + ext_links, + ) = parseConfig("%s/%s/config.ini" % (sys.argv[1], sys.argv[2])) + + parseStats( + "%s/%s/stats.txt" % (sys.argv[1], sys.argv[2]), + config, + sys.argv[3], + sys.argv[4], + routers, + int_links, + ext_links, + number_of_virtual_networks, + vcs_per_vnet, + buffers_per_data_vc, + buffers_per_control_vc, + ni_flit_size_bits, + ) - parseStats("%s/%s/stats.txt" % (sys.argv[1], sys.argv[2]), config, - sys.argv[3], sys.argv[4], routers, int_links, ext_links, - number_of_virtual_networks, vcs_per_vnet, buffers_per_data_vc, - buffers_per_control_vc, ni_flit_size_bits) if __name__ == "__main__": main() diff --git a/util/oprofile-top.py b/util/oprofile-top.py index 72bea650f5..b71c629f4e 100755 --- a/util/oprofile-top.py +++ b/util/oprofile-top.py @@ -33,7 +33,8 @@ import re import getopt from categories import * -def category(app,sym): + +def category(app, sym): if re.search("vmlinux-2.6", app): name = sym else: @@ -45,24 +46,25 @@ def category(app,sym): if regexp.match(name): return cat print("no match for symbol %s" % name) - return 'other' + return "other" + try: - (opts, files) = getopt.getopt(sys.argv[1:], 'i') + (opts, files) = getopt.getopt(sys.argv[1:], "i") except getopt.GetoptError: 
- print("usage", sys.argv[0], "[-i] ") - sys.exit(2) + print("usage", sys.argv[0], "[-i] ") + sys.exit(2) showidle = True -for o,v in opts: +for o, v in opts: if o == "-i": showidle = False print(files) f = open(files.pop()) total = 0 prof = {} -linenum = 0 +linenum = 0 for line in f.readlines(): line = re.sub("\(no symbols\)", "nosym", line) line = re.sub("anonymous.*", "nosym", line) @@ -70,23 +72,31 @@ for line in f.readlines(): if linenum < 4: continue (count, percent, app, sym) = line.split() - #total += int(count) - cat = category(app,sym) - if cat != 'idle' or showidle: - total += int(count) - prof[cat] = prof.get(cat,0) + int(count) + # total += int(count) + cat = category(app, sym) + if cat != "idle" or showidle: + total += int(count) + prof[cat] = prof.get(cat, 0) + int(count) -cats = ['other', 'user', 'copy', 'bufmgt', 'stack', 'driver', 'interrupt', 'alignment' ] +cats = [ + "other", + "user", + "copy", + "bufmgt", + "stack", + "driver", + "interrupt", + "alignment", +] if showidle: - cats.insert(0,'idle') + cats.insert(0, "idle") -#syms = [(i[1], i[0]) for i in prof.items()] -#syms.sort() -#for i in range(len(syms)): +# syms = [(i[1], i[0]) for i in prof.items()] +# syms.sort() +# for i in range(len(syms)): # print "%s -- %5.1f%% " % (prof[i][1], 100 * float(prof[i][0])/float(total)) for d in cats: if d in prof: - print("%s -- %5.1f%% " % (d, 100 * float(prof[d])/float(total))) - + print("%s -- %5.1f%% " % (d, 100 * float(prof[d]) / float(total))) diff --git a/util/plot_dram/PlotPowerStates.py b/util/plot_dram/PlotPowerStates.py index c2fb2f4011..7f0b326812 100755 --- a/util/plot_dram/PlotPowerStates.py +++ b/util/plot_dram/PlotPowerStates.py @@ -34,7 +34,8 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") import matplotlib.pyplot as plt from matplotlib.font_manager import FontProperties import numpy as np @@ -55,52 +56,56 @@ stackWidth = 18.0 barWidth = 0.5 plotFontSize = 18 -States = ['IDLE', 'ACT', 'REF', 'ACT_PDN', 'PRE_PDN', 'SREF'] +States = ["IDLE", "ACT", "REF", "ACT_PDN", "PRE_PDN", "SREF"] -EnergyStates = ['ACT_E', -'PRE_E', -'READ_E', -'REF_E', -'ACT_BACK_E', -'PRE_BACK_E', -'ACT_PDN_E', -'PRE_PDN_E', -'SREF_E'] +EnergyStates = [ + "ACT_E", + "PRE_E", + "READ_E", + "REF_E", + "ACT_BACK_E", + "PRE_BACK_E", + "ACT_PDN_E", + "PRE_PDN_E", + "SREF_E", +] StackColors = { -'IDLE' : 'black', # time spent in states -'ACT' : 'lightskyblue', -'REF' : 'limegreen', -'ACT_PDN' : 'crimson', -'PRE_PDN' : 'orange', -'SREF' : 'gold', -'ACT_E' : 'lightskyblue', # energy of states -'PRE_E' : 'black', -'READ_E' : 'white', -'REF_E' : 'limegreen', -'ACT_BACK_E' : 'lightgray', -'PRE_BACK_E' : 'gray', -'ACT_PDN_E' : 'crimson', -'PRE_PDN_E' : 'orange', -'SREF_E' : 'gold' + "IDLE": "black", # time spent in states + "ACT": "lightskyblue", + "REF": "limegreen", + "ACT_PDN": "crimson", + "PRE_PDN": "orange", + "SREF": "gold", + "ACT_E": "lightskyblue", # energy of states + "PRE_E": "black", + "READ_E": "white", + "REF_E": "limegreen", + "ACT_BACK_E": "lightgray", + "PRE_BACK_E": "gray", + "ACT_PDN_E": "crimson", + "PRE_PDN_E": "orange", + "SREF_E": "gold", } StatToKey = { -'system.mem_ctrls_0.actEnergy' : 'ACT_E', -'system.mem_ctrls_0.preEnergy' : 'PRE_E', -'system.mem_ctrls_0.readEnergy' : 'READ_E', -'system.mem_ctrls_0.refreshEnergy' : 'REF_E', -'system.mem_ctrls_0.actBackEnergy' : 'ACT_BACK_E', -'system.mem_ctrls_0.preBackEnergy' : 'PRE_BACK_E', -'system.mem_ctrls_0.actPowerDownEnergy' : 'ACT_PDN_E', -'system.mem_ctrls_0.prePowerDownEnergy' : 'PRE_PDN_E', -'system.mem_ctrls_0.selfRefreshEnergy' : 'SREF_E' + "system.mem_ctrls_0.actEnergy": "ACT_E", + "system.mem_ctrls_0.preEnergy": "PRE_E", + 
"system.mem_ctrls_0.readEnergy": "READ_E", + "system.mem_ctrls_0.refreshEnergy": "REF_E", + "system.mem_ctrls_0.actBackEnergy": "ACT_BACK_E", + "system.mem_ctrls_0.preBackEnergy": "PRE_BACK_E", + "system.mem_ctrls_0.actPowerDownEnergy": "ACT_PDN_E", + "system.mem_ctrls_0.prePowerDownEnergy": "PRE_PDN_E", + "system.mem_ctrls_0.selfRefreshEnergy": "SREF_E", } # Skipping write energy, the example script issues 100% reads by default # 'system.mem_ctrls_0.writeEnergy' : "WRITE" -def plotLowPStates(plot_dir, stats_fname, bank_util_list, seqbytes_list, - delay_list): + +def plotLowPStates( + plot_dir, stats_fname, bank_util_list, seqbytes_list, delay_list +): """ plotLowPStates generates plots by parsing statistics output by the DRAM sweep simulation described in the the configs/dram/low_power_sweep.py @@ -122,7 +127,7 @@ def plotLowPStates(plot_dir, stats_fname, bank_util_list, seqbytes_list, @param delay_list: list of itt max multipliers (e.g. [1, 20, 200]) """ - stats_file = open(stats_fname, 'r') + stats_file = open(stats_fname, "r") global bankUtilValues bankUtilValues = bank_util_list @@ -136,7 +141,7 @@ def plotLowPStates(plot_dir, stats_fname, bank_util_list, seqbytes_list, # throw away the first two lines of the stats file stats_file.readline() - stats_file.readline() # the 'Begin' line + stats_file.readline() # the 'Begin' line ####################################### # Parse stats file and gather results @@ -147,24 +152,25 @@ def plotLowPStates(plot_dir, stats_fname, bank_util_list, seqbytes_list, for seq_bytes in seqBytesValues: for line in stats_file: - if 'Begin' in line: + if "Begin" in line: break if len(line.strip()) == 0: continue #### state time values #### - if 'system.mem_ctrls_0.memoryStateTime' in line: + if "system.mem_ctrls_0.memoryStateTime" in line: # remove leading and trailing white spaces line = line.strip() # Example format: # 'system.mem_ctrls_0.memoryStateTime::ACT 1000000' statistic, stime = line.split()[0:2] # Now grab the state, i.e. 
'ACT' - state = statistic.split('::')[1] + state = statistic.split("::")[1] # store the value of the stat in the results dict - results[delay][bank_util][seq_bytes][state] = \ - int(stime) + results[delay][bank_util][seq_bytes][state] = int( + stime + ) #### state energy values #### elif line.strip().split()[0] in list(StatToKey.keys()): # Example format: @@ -177,15 +183,15 @@ def plotLowPStates(plot_dir, stats_fname, bank_util_list, seqbytes_list, # To add last traffic gen idle period stats to the results dict for line in stats_file: - if 'system.mem_ctrls_0.memoryStateTime' in line: - line = line.strip() # remove leading and trailing white spaces + if "system.mem_ctrls_0.memoryStateTime" in line: + line = line.strip() # remove leading and trailing white spaces # Example format: # 'system.mem_ctrls_0.memoryStateTime::ACT 1000000' statistic, stime = line.split()[0:2] # Now grab the state energy, .e.g 'ACT' - state = statistic.split('::')[1] + state = statistic.split("::")[1] idleResults[state] = int(stime) - if state == 'ACT_PDN': + if state == "ACT_PDN": break ######################################## @@ -193,15 +199,25 @@ def plotLowPStates(plot_dir, stats_fname, bank_util_list, seqbytes_list, ######################################## # one plot per delay value for delay in delayValues: - plot_path = plot_dir + delay + '-' + plot_path = plot_dir + delay + "-" - plotStackedStates(delay, States, 'IDLE', stateTimePlotName(plot_path), - 'Time (ps) spent in a power state') - plotStackedStates(delay, EnergyStates, 'ACT_E', - stateEnergyPlotName(plot_path), - 'Energy (pJ) of a power state') + plotStackedStates( + delay, + States, + "IDLE", + stateTimePlotName(plot_path), + "Time (ps) spent in a power state", + ) + plotStackedStates( + delay, + EnergyStates, + "ACT_E", + stateEnergyPlotName(plot_path), + "Energy (pJ) of a power state", + ) plotIdle(plot_dir) + def plotIdle(plot_dir): """ Create a bar chart for the time spent in power states during the idle phase @@ -213,15 
+229,16 @@ def plotIdle(plot_dir): ind = np.arange(len(States)) l1 = ax.bar(ind, [idleResults[x] for x in States], width) - ax.xaxis.set_ticks(ind + width/2) + ax.xaxis.set_ticks(ind + width / 2) ax.xaxis.set_ticklabels(States) - ax.set_ylabel('Time (ps) spent in a power state') + ax.set_ylabel("Time (ps) spent in a power state") fig.suptitle("Idle 50 us") print("saving plot:", idlePlotName(plot_dir)) - plt.savefig(idlePlotName(plot_dir), format='eps') + plt.savefig(idlePlotName(plot_dir), format="eps") plt.close(fig) + def plotStackedStates(delay, states_list, bottom_state, plot_name, ylabel_str): """ Create a stacked bar chart for the list that is passed in as arg, which @@ -237,7 +254,7 @@ def plotStackedStates(delay, states_list, bottom_state, plot_name, ylabel_str): fig.set_figheight(stackHeight) fig.set_figwidth(stackWidth) width = barWidth - plt.rcParams.update({'font.size': plotFontSize}) + plt.rcParams.update({"font.size": plotFontSize}) # Get the number of seq_bytes values N = len(seqBytesValues) @@ -251,50 +268,62 @@ def plotStackedStates(delay, states_list, bottom_state, plot_name, ylabel_str): # Must have a bottom of the stack first state = bottom_state - l_states[state] = [results[delay][bank_util][x][state] \ - for x in seqBytesValues] - p_states[state] = ax[sub_idx].bar(ind, l_states[state], width, - color=StackColors[state]) + l_states[state] = [ + results[delay][bank_util][x][state] for x in seqBytesValues + ] + p_states[state] = ax[sub_idx].bar( + ind, l_states[state], width, color=StackColors[state] + ) time_sum = l_states[state] for state in states_list[1:]: - l_states[state] = [results[delay][bank_util][x][state] \ - for x in seqBytesValues] + l_states[state] = [ + results[delay][bank_util][x][state] for x in seqBytesValues + ] # Now add on top of the bottom = sum of values up until now - p_states[state] = ax[sub_idx].bar(ind, l_states[state], width, - color=StackColors[state], - bottom=time_sum) + p_states[state] = ax[sub_idx].bar( + ind, + 
l_states[state], + width, + color=StackColors[state], + bottom=time_sum, + ) # Now add the bit of the stack that we just ploted to the bottom # resulting in a new bottom for the next iteration - time_sum = [prev_sum + new_s for prev_sum, new_s in \ - zip(time_sum, l_states[state])] + time_sum = [ + prev_sum + new_s + for prev_sum, new_s in zip(time_sum, l_states[state]) + ] - ax[sub_idx].set_title('Bank util %s' % bank_util) - ax[sub_idx].xaxis.set_ticks(ind + width/2.) + ax[sub_idx].set_title("Bank util %s" % bank_util) + ax[sub_idx].xaxis.set_ticks(ind + width / 2.0) ax[sub_idx].xaxis.set_ticklabels(seqBytesValues, rotation=45) - ax[sub_idx].set_xlabel('Seq. bytes') + ax[sub_idx].set_xlabel("Seq. bytes") if bank_util == bankUtilValues[0]: ax[sub_idx].set_ylabel(ylabel_str) - myFontSize='small' + myFontSize = "small" fontP = FontProperties() fontP.set_size(myFontSize) - fig.legend([p_states[x] for x in states_list], states_list, - prop=fontP) + fig.legend([p_states[x] for x in states_list], states_list, prop=fontP) - plt.savefig(plot_name, format='eps', bbox_inches='tight') + plt.savefig(plot_name, format="eps", bbox_inches="tight") print("saving plot:", plot_name) plt.close(fig) + # These plat name functions are also called in the main script def idlePlotName(plot_dir): - return (plot_dir + 'idle.eps') + return plot_dir + "idle.eps" + def stateTimePlotName(plot_dir): - return (plot_dir + 'state-time.eps') + return plot_dir + "state-time.eps" + def stateEnergyPlotName(plot_dir): - return (plot_dir + 'state-energy.eps') + return plot_dir + "state-energy.eps" + def initResults(): for delay in delayValues: diff --git a/util/plot_dram/dram_lat_mem_rd_plot.py b/util/plot_dram/dram_lat_mem_rd_plot.py index fac5340bba..0d0e8d052b 100755 --- a/util/plot_dram/dram_lat_mem_rd_plot.py +++ b/util/plot_dram/dram_lat_mem_rd_plot.py @@ -56,15 +56,15 @@ def main(): exit(-1) try: - stats = open(sys.argv[1] + '/stats.txt', 'r') + stats = open(sys.argv[1] + "/stats.txt", "r") except 
IOError: - print("Failed to open ", sys.argv[1] + '/stats.txt', " for reading") + print("Failed to open ", sys.argv[1] + "/stats.txt", " for reading") exit(-1) try: - simout = open(sys.argv[1] + '/simout', 'r') + simout = open(sys.argv[1] + "/simout", "r") except IOError: - print("Failed to open ", sys.argv[1] + '/simout', " for reading") + print("Failed to open ", sys.argv[1] + "/simout", " for reading") exit(-1) # Get the address ranges @@ -116,8 +116,10 @@ def main(): # Sanity check if not (len(ranges) == len(final_rd_lat)): - print("Address ranges (%d) and read latency (%d) do not match" % \ - (len(ranges), len(final_rd_lat))) + print( + "Address ranges (%d) and read latency (%d) do not match" + % (len(ranges), len(final_rd_lat)) + ) exit(-1) for (r, l) in zip(ranges, final_rd_lat): @@ -134,9 +136,9 @@ def main(): xticks_labels = [] for x in xticks_locations: if x < 1024: - xticks_labels.append('%d kB' % x) + xticks_labels.append("%d kB" % x) else: - xticks_labels.append('%d MB' % (x / 1024)) + xticks_labels.append("%d MB" % (x / 1024)) plt.xticks(xticks_locations, xticks_labels, rotation=-45) plt.minorticks_off() @@ -145,5 +147,6 @@ def main(): plt.grid(True) plt.show() + if __name__ == "__main__": main() diff --git a/util/plot_dram/dram_sweep_plot.py b/util/plot_dram/dram_sweep_plot.py index bb4f29cc28..8acb6ab681 100755 --- a/util/plot_dram/dram_sweep_plot.py +++ b/util/plot_dram/dram_sweep_plot.py @@ -57,10 +57,15 @@ def main(): print("Usage: ", sys.argv[0], "-u|p|e ") exit(-1) - if len(sys.argv[1]) != 2 or sys.argv[1][0] != '-' or \ - not sys.argv[1][1] in "upe": - print("Choose -u (utilisation), -p (total power), or -e " \ - "(power efficiency)") + if ( + len(sys.argv[1]) != 2 + or sys.argv[1][0] != "-" + or not sys.argv[1][1] in "upe" + ): + print( + "Choose -u (utilisation), -p (total power), or -e " + "(power efficiency)" + ) exit(-1) # Choose the appropriate mode, either utilisation, total power, or @@ -68,15 +73,15 @@ def main(): mode = 
sys.argv[1][1] try: - stats = open(sys.argv[2] + '/stats.txt', 'r') + stats = open(sys.argv[2] + "/stats.txt", "r") except IOError: - print("Failed to open ", sys.argv[2] + '/stats.txt', " for reading") + print("Failed to open ", sys.argv[2] + "/stats.txt", " for reading") exit(-1) try: - simout = open(sys.argv[2] + '/simout', 'r') + simout = open(sys.argv[2] + "/simout", "r") except IOError: - print("Failed to open ", sys.argv[2] + '/simout', " for reading") + print("Failed to open ", sys.argv[2] + "/simout", " for reading") exit(-1) # Get the burst size, number of banks and the maximum stride from @@ -84,8 +89,10 @@ def main(): got_sweep = False for line in simout: - match = re.match("DRAM sweep with " - "burst: (\d+), banks: (\d+), max stride: (\d+)", line) + match = re.match( + "DRAM sweep with " "burst: (\d+), banks: (\d+), max stride: (\d+)", + line, + ) if match: burst_size = int(match.groups(0)[0]) banks = int(match.groups(0)[1]) @@ -117,10 +124,11 @@ def main(): avg_pwr.append(float(match.groups(0)[0])) stats.close() - # Sanity check if not (len(peak_bw) == len(bus_util) and len(bus_util) == len(avg_pwr)): - print("Peak bandwidth, bus utilisation, and average power do not match") + print( + "Peak bandwidth, bus utilisation, and average power do not match" + ) exit(-1) # Collect the selected metric as our Z-axis, we do this in a 2D @@ -131,11 +139,11 @@ def main(): i = 0 for j in range(len(peak_bw)): - if mode == 'u': + if mode == "u": z.append(bus_util[j]) - elif mode == 'p': + elif mode == "p": z.append(avg_pwr[j]) - elif mode == 'e': + elif mode == "e": # avg_pwr is in mW, peak_bw in MiByte/s, bus_util in percent z.append(avg_pwr[j] / (bus_util[j] / 100.0 * peak_bw[j] / 1000.0)) else: @@ -156,7 +164,7 @@ def main(): exit(-1) fig = plt.figure() - ax = fig.gca(projection='3d') + ax = fig.gca(projection="3d") X = np.arange(burst_size, max_size + 1, burst_size) Y = np.arange(1, banks + 1, 1) X, Y = np.meshgrid(X, Y) @@ -165,27 +173,36 @@ def main(): # stride 
size in order Z = np.array(zs) - surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, - linewidth=0, antialiased=False) + surf = ax.plot_surface( + X, + Y, + Z, + rstride=1, + cstride=1, + cmap=cm.coolwarm, + linewidth=0, + antialiased=False, + ) # Change the tick frequency to 64 start, end = ax.get_xlim() ax.xaxis.set_ticks(np.arange(start, end + 1, 64)) - ax.set_xlabel('Bytes per activate') - ax.set_ylabel('Banks') + ax.set_xlabel("Bytes per activate") + ax.set_ylabel("Banks") - if mode == 'u': - ax.set_zlabel('Utilisation (%)') - elif mode == 'p': - ax.set_zlabel('Power (mW)') - elif mode == 'e': - ax.set_zlabel('Power efficiency (mW / GByte / s)') + if mode == "u": + ax.set_zlabel("Utilisation (%)") + elif mode == "p": + ax.set_zlabel("Power (mW)") + elif mode == "e": + ax.set_zlabel("Power efficiency (mW / GByte / s)") # Add a colorbar - fig.colorbar(surf, shrink=0.5, pad=.1, aspect=10) + fig.colorbar(surf, shrink=0.5, pad=0.1, aspect=10) plt.show() + if __name__ == "__main__": main() diff --git a/util/plot_dram/lowp_dram_sweep_plot.py b/util/plot_dram/lowp_dram_sweep_plot.py index 299d8cd5ea..03a47f9abd 100755 --- a/util/plot_dram/lowp_dram_sweep_plot.py +++ b/util/plot_dram/lowp_dram_sweep_plot.py @@ -40,56 +40,70 @@ import argparse import os from subprocess import call -parser = argparse.ArgumentParser(formatter_class= - argparse.ArgumentDefaultsHelpFormatter) +parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter +) parser.add_argument("--statsfile", required=True, help="stats file path") -parser.add_argument("--bankutils", default="b1 b2 b3", help="target bank " \ - "utilization values separated by space, e.g. \"1 4 8\"") +parser.add_argument( + "--bankutils", + default="b1 b2 b3", + help="target bank " 'utilization values separated by space, e.g. "1 4 8"', +) -parser.add_argument("--seqbytes", default="s1 s2 s3", help="no. of " \ - "sequential bytes requested by each traffic gen request." \ - " e.g. 
\"64 256 512\"") +parser.add_argument( + "--seqbytes", + default="s1 s2 s3", + help="no. of " + "sequential bytes requested by each traffic gen request." + ' e.g. "64 256 512"', +) -parser.add_argument("--delays", default="d1 d2 d3", help="string of delay" - " values separated by a space. e.g. \"1 20 100\"") +parser.add_argument( + "--delays", + default="d1 d2 d3", + help="string of delay" ' values separated by a space. e.g. "1 20 100"', +) -parser.add_argument("--outdir", help="directory to output plots", - default='plot_test') +parser.add_argument( + "--outdir", help="directory to output plots", default="plot_test" +) + +parser.add_argument("--pdf", action="store_true", help="output Latex and pdf") -parser.add_argument("--pdf", action='store_true', help="output Latex and pdf") def main(): args = parser.parse_args() if not os.path.isfile(args.statsfile): - exit('Error! File not found: %s' % args.statsfile) + exit("Error! File not found: %s" % args.statsfile) if not os.path.isdir(args.outdir): os.mkdir(args.outdir) bank_util_list = args.bankutils.strip().split() seqbyte_list = args.seqbytes.strip().split() delays = args.delays.strip().split() - plotter.plotLowPStates(args.outdir + '/', args.statsfile, bank_util_list, - seqbyte_list, delays) + plotter.plotLowPStates( + args.outdir + "/", args.statsfile, bank_util_list, seqbyte_list, delays + ) if args.pdf: - textwidth = '0.5' + textwidth = "0.5" ### Time and energy plots ### ############################# # place tex and pdf files in outdir os.chdir(args.outdir) - texfile_s = 'stacked_lowp_sweep.tex' + texfile_s = "stacked_lowp_sweep.tex" print("\t", texfile_s) - outfile = open(texfile_s, 'w') + outfile = open(texfile_s, "w") startDocText(outfile) outfile.write("\\begin{figure} \n\\centering\n") ## Time plots for all delay values for delay in delays: # Time - filename = plotter.stateTimePlotName(str(delay) + '-') + filename = plotter.stateTimePlotName(str(delay) + "-") outfile.write(wrapForGraphic(filename, 
textwidth)) outfile.write(getCaption(delay)) outfile.write("\end{figure}\n") @@ -98,7 +112,7 @@ def main(): outfile.write("\\begin{figure} \n\\centering\n") for delay in delays: # Energy - filename = plotter.stateEnergyPlotName(str(delay) + '-') + filename = plotter.stateEnergyPlotName(str(delay) + "-") outfile.write(wrapForGraphic(filename, textwidth)) outfile.write(getCaption(delay)) outfile.write("\\end{figure}\n") @@ -111,18 +125,19 @@ def main(): print("\tpdflatex ", texfile_s) # Run pdflatex to generate to pdf call(["pdflatex", texfile_s]) - call(["open", texfile_s.split('.')[0] + '.pdf']) + call(["open", texfile_s.split(".")[0] + ".pdf"]) def getCaption(delay): - return ('\\caption{' + - 'itt delay = ' + str(delay) + - '}\n') + return "\\caption{" + "itt delay = " + str(delay) + "}\n" -def wrapForGraphic(filename, width='1.0'): + +def wrapForGraphic(filename, width="1.0"): # \t is tab and needs to be escaped, therefore \\textwidth - return '\\includegraphics[width=' + width + \ - '\\textwidth]{' + filename + '}\n' + return ( + "\\includegraphics[width=" + width + "\\textwidth]{" + filename + "}\n" + ) + def startDocText(outfile): @@ -135,15 +150,17 @@ def startDocText(outfile): """ outfile.write(start_stuff) + def endDocText(outfile): - end_stuff = ''' + end_stuff = """ \\end{document} -''' +""" outfile.write(end_stuff) + # Call main -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/util/pre-commit-install.sh b/util/pre-commit-install.sh new file mode 100755 index 0000000000..2e98be8ae8 --- /dev/null +++ b/util/pre-commit-install.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright (c) 2022 The Regents of the University of California +# All Rights Reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +GEM5_ROOT="${DIR}/.." + +cd ${GEM5_ROOT} + +if ! command -v pre-commit &> /dev/null +then + echo "Cannot find 'pre-commit'. Please ensure all Python requirements are " + echo "installed. This can be done via 'pip install -r requirements.txt'." 
+ exit 1 +fi + +pre-commit install -t pre-commit -t commit-msg diff --git a/util/protolib.py b/util/protolib.py index ea0ff097f5..dcfb7aabb5 100644 --- a/util/protolib.py +++ b/util/protolib.py @@ -71,6 +71,7 @@ import gzip import struct + def openFileRd(in_file): """ This opens the file passed as argument for reading using an appropriate @@ -81,7 +82,7 @@ def openFileRd(in_file): # First see if this file is gzipped try: # Opening the file works even if it is not a gzip file - proto_in = gzip.open(in_file, 'rb') + proto_in = gzip.open(in_file, "rb") # Force a check of the magic number by seeking in the # file. If we do not do it here the error will occur when @@ -89,12 +90,13 @@ def openFileRd(in_file): proto_in.seek(1) proto_in.seek(0) except IOError: - proto_in = open(in_file, 'rb') + proto_in = open(in_file, "rb") except IOError: print("Failed to open ", in_file, " for reading") exit(-1) return proto_in + def _DecodeVarint32(in_file): """ The decoding of the Varint32 is copied from @@ -106,24 +108,25 @@ def _DecodeVarint32(in_file): shift = 0 pos = 0 # Use a 32-bit mask - mask = 0xffffffff + mask = 0xFFFFFFFF while 1: c = in_file.read(1) if len(c) == 0: return (0, 0) - b = struct.unpack(' 0x7fffffffffffffff: - result -= (1 << 64) + if result > 0x7FFFFFFFFFFFFFFF: + result -= 1 << 64 result |= ~mask else: result &= mask return (result, pos) shift += 7 if shift >= 64: - raise IOError('Too many bytes when decoding varint.') + raise IOError("Too many bytes when decoding varint.") + def decodeMessage(in_file, message): """ @@ -140,19 +143,21 @@ def decodeMessage(in_file, message): except IOError: return False + def _EncodeVarint32(out_file, value): - """ - The encoding of the Varint32 is copied from - google.protobuf.internal.encoder and is only repeated here to - avoid depending on the internal functions in the library. 
- """ - bits = value & 0x7f - value >>= 7 - while value: - out_file.write(struct.pack('>= 7 - out_file.write(struct.pack('>= 7 + out_file.write(struct.pack("> 7 - if (((x == 0) and ((b & 0x40) == 0)) or \ - ((x == -1) and ((b & 0x40) != 0))): + if ((x == 0) and ((b & 0x40) == 0)) or ( + (x == -1) and ((b & 0x40) != 0) + ): more = False else: b = b | 0x80 ret.append(b) return ret + # For historical reasons, 32/64-bit versions of functions are presevered def packed64(x): return packed32(x) + # variable length packed 4-byte signed value def unsigned_packed32(x): ret = [] - if ((x & 0xffffff80) == 0): - ret.append(x & 0x7f) - elif ((x & 0xffffc000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append((x >> 7) & 0x7f) - elif ((x & 0xffe00000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append((x >> 14) & 0x7f) - elif ((x & 0xf0000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append((x >> 21) & 0x7f) + if (x & 0xFFFFFF80) == 0: + ret.append(x & 0x7F) + elif (x & 0xFFFFC000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append((x >> 7) & 0x7F) + elif (x & 0xFFE00000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append((x >> 14) & 0x7F) + elif (x & 0xF0000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append((x >> 21) & 0x7F) else: - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append((x >> 28) & 0x0f) + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append((x >> 28) & 0x0F) return ret + # variable length packed 8-byte signed value def unsigned_packed64(x): ret = [] - if ((x & 0xffffffffffffff80) == 0): - ret.append(x & 
0x7f) - elif ((x & 0xffffffffffffc000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append((x >> 7) & 0x7f) - elif ((x & 0xffffffffffe00000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append((x >> 14) & 0x7f) - elif ((x & 0xfffffffff0000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append((x >> 21) & 0x7f) - elif ((x & 0xfffffff800000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append((x >> 28) & 0x7f) - elif ((x & 0xfffffc0000000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append(((x >> 28) | 0x80) & 0xff) - ret.append((x >> 35) & 0x7f) - elif ((x & 0xfffe000000000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append(((x >> 28) | 0x80) & 0xff) - ret.append(((x >> 35) | 0x80) & 0xff) - ret.append((x >> 42) & 0x7f) - elif ((x & 0xff00000000000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append(((x >> 28) | 0x80) & 0xff) - ret.append(((x >> 35) | 0x80) & 0xff) - ret.append(((x >> 42) | 0x80) & 0xff) - ret.append((x >> 49) & 0x7f) - elif ((x & 0x8000000000000000) == 0): - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append(((x >> 28) | 0x80) & 0xff) - ret.append(((x >> 35) | 0x80) & 0xff) - ret.append(((x >> 42) | 0x80) & 0xff) - ret.append(((x >> 49) | 0x80) & 0xff) - ret.append((x >> 56) & 0x7f) + if (x & 0xFFFFFFFFFFFFFF80) == 0: + ret.append(x & 
0x7F) + elif (x & 0xFFFFFFFFFFFFC000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append((x >> 7) & 0x7F) + elif (x & 0xFFFFFFFFFFE00000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append((x >> 14) & 0x7F) + elif (x & 0xFFFFFFFFF0000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append((x >> 21) & 0x7F) + elif (x & 0xFFFFFFF800000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append((x >> 28) & 0x7F) + elif (x & 0xFFFFFC0000000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append(((x >> 28) | 0x80) & 0xFF) + ret.append((x >> 35) & 0x7F) + elif (x & 0xFFFE000000000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append(((x >> 28) | 0x80) & 0xFF) + ret.append(((x >> 35) | 0x80) & 0xFF) + ret.append((x >> 42) & 0x7F) + elif (x & 0xFF00000000000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append(((x >> 28) | 0x80) & 0xFF) + ret.append(((x >> 35) | 0x80) & 0xFF) + ret.append(((x >> 42) | 0x80) & 0xFF) + ret.append((x >> 49) & 0x7F) + elif (x & 0x8000000000000000) == 0: + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append(((x >> 28) | 0x80) & 0xFF) + ret.append(((x >> 35) | 0x80) & 0xFF) + ret.append(((x >> 42) | 0x80) & 0xFF) + ret.append(((x >> 49) | 0x80) & 0xFF) + ret.append((x >> 56) & 0x7F) else: - ret.append((x | 0x80) & 0xff) - ret.append(((x >> 7) | 0x80) & 
0xff) - ret.append(((x >> 14) | 0x80) & 0xff) - ret.append(((x >> 21) | 0x80) & 0xff) - ret.append(((x >> 28) | 0x80) & 0xff) - ret.append(((x >> 35) | 0x80) & 0xff) - ret.append(((x >> 42) | 0x80) & 0xff) - ret.append(((x >> 49) | 0x80) & 0xff) - ret.append(((x >> 56) | 0x80) & 0xff) - ret.append((x >> 63) & 0x7f) + ret.append((x | 0x80) & 0xFF) + ret.append(((x >> 7) | 0x80) & 0xFF) + ret.append(((x >> 14) | 0x80) & 0xFF) + ret.append(((x >> 21) | 0x80) & 0xFF) + ret.append(((x >> 28) | 0x80) & 0xFF) + ret.append(((x >> 35) | 0x80) & 0xFF) + ret.append(((x >> 42) | 0x80) & 0xFF) + ret.append(((x >> 49) | 0x80) & 0xFF) + ret.append(((x >> 56) | 0x80) & 0xFF) + ret.append((x >> 63) & 0x7F) return ret + # 4-byte signed little endian def int32(x): ret = [] - ret.append(x & 0xff) - ret.append((x >> 8) & 0xff) - ret.append((x >> 16) & 0xff) - ret.append((x >> 24) & 0xff) + ret.append(x & 0xFF) + ret.append((x >> 8) & 0xFF) + ret.append((x >> 16) & 0xFF) + ret.append((x >> 24) & 0xFF) return ret + # 2-byte signed little endian def int16(x): ret = [] - ret.append(x & 0xff) - ret.append((x >> 8) & 0xff) + ret.append(x & 0xFF) + ret.append((x >> 8) & 0xFF) return ret + # a packed32 length followed by the specified number of characters def stringList(x): ret = [] @@ -332,12 +361,14 @@ def stringList(x): ret.append(i) return ret + def utf8StringList(x): ret = [] for i in x: ret.append(ord(i)) return ret + # packed64 time value in nanoseconds relative to the uptime from the # Summary message. 
def timestampList(x): @@ -349,14 +380,17 @@ def timestampList(x): # Write binary ############################################################ + def writeBinary(outfile, binary_list): for i in binary_list: outfile.write("%c" % i) + ############################################################ # APC Protocol Frame Types ############################################################ + def addFrameHeader(frame_type, body, core): ret = [] @@ -405,11 +439,13 @@ def summaryFrame(timestamp, uptime): ret = addFrameHeader(frame_type, body, 0) return ret + # Backtrace frame # - not implemented yet def backtraceFrame(): pass + # Cookie name message # - cookie: packed32 # - name: string @@ -420,6 +456,7 @@ def cookieNameFrame(cookie, name): ret = addFrameHeader(frame_type, body, 0) return ret + # Thread name message # - timestamp: timestamp # - thread id: packed32 @@ -427,11 +464,16 @@ def cookieNameFrame(cookie, name): def threadNameFrame(timestamp, thread_id, name): frame_type = "Name" packed_code = packed32(2) - body = packed_code + timestampList(timestamp) + \ - packed32(thread_id) + stringList(name) + body = ( + packed_code + + timestampList(timestamp) + + packed32(thread_id) + + stringList(name) + ) ret = addFrameHeader(frame_type, body, 0) return ret + # Core name message # - name: string # - core_id: packed32 @@ -443,6 +485,7 @@ def coreNameFrame(name, core_id, cpuid): ret = addFrameHeader(frame_type, body, 0) return ret + # IRQ Cookie name message # - cookie: packed32 # - name: string @@ -454,6 +497,7 @@ def irqCookieNameFrame(cookie, name, irq): ret = addFrameHeader(frame_type, body, 0) return ret + # Counter frame message # - timestamp: timestamp # - core: packed32 @@ -461,11 +505,16 @@ def irqCookieNameFrame(cookie, name, irq): # - value: packed64 def counterFrame(timestamp, core, key, value): frame_type = "Counter" - body = timestampList(timestamp) + packed32(core) + packed32(key) + \ - packed64(value) + body = ( + timestampList(timestamp) + + packed32(core) + + 
packed32(key) + + packed64(value) + ) ret = addFrameHeader(frame_type, body, core) return ret + # Block Counter frame message # - key: packed32 # - value: packed64 @@ -475,6 +524,7 @@ def blockCounterFrame(core, key, value): ret = addFrameHeader(frame_type, body, core) return ret + # Annotate frame messages # - core: packed32 # - tid: packed32 @@ -483,11 +533,17 @@ def blockCounterFrame(core, key, value): # - body def annotateFrame(core, tid, timestamp, size, userspace_body): frame_type = "Annotate" - body = packed32(core) + packed32(tid) + timestampList(timestamp) + \ - packed32(size) + userspace_body + body = ( + packed32(core) + + packed32(tid) + + timestampList(timestamp) + + packed32(size) + + userspace_body + ) ret = addFrameHeader(frame_type, body, core) return ret + # Scheduler Trace frame messages # Sched Switch # - Code: 1 @@ -498,11 +554,18 @@ def annotateFrame(core, tid, timestamp, size, userspace_body): # - state: packed32 def schedSwitchFrame(core, timestamp, pid, tid, cookie, state): frame_type = "Sched Trace" - body = packed32(1) + timestampList(timestamp) + packed32(pid) + \ - packed32(tid) + packed32(cookie) + packed32(state) + body = ( + packed32(1) + + timestampList(timestamp) + + packed32(pid) + + packed32(tid) + + packed32(cookie) + + packed32(state) + ) ret = addFrameHeader(frame_type, body, core) return ret + # Sched Thread Exit # - Code: 2 # - timestamp: timestamp @@ -513,11 +576,13 @@ def schedThreadExitFrame(core, timestamp, pid, tid, cookie, state): ret = addFrameHeader(frame_type, body, core) return ret + # GPU Trace frame messages # - Not implemented yet def gpuTraceFrame(): pass + # Idle frame messages # Enter Idle # - code: 1 @@ -529,6 +594,7 @@ def enterIdleFrame(timestamp, core): ret = addFrameHeader(frame_type, body, core) return ret + # Exit Idle # - code: 2 # - timestamp: timestamp @@ -557,7 +623,7 @@ def parseProcessInfo(task_file): for cpu in range(num_cpus): event_list.append([]) - uid = 1 # uid 0 is reserved for idle + uid = 
1 # uid 0 is reserved for idle # Dummy Tasks for frame buffers and system diagrams process = Task(uid, 9999, 9999, "framebuffer", True, 0) @@ -579,16 +645,18 @@ def parseProcessInfo(task_file): try: if ext == ".gz": - process_file = gzip.open(task_file, 'rb') + process_file = gzip.open(task_file, "rb") else: - process_file = open(task_file, 'rb') + process_file = open(task_file, "rb") except: print("ERROR opening task file:", task_file) print("Make sure context switch task dumping is enabled in gem5.") sys.exit(1) - process_re = re.compile("tick=(\d+)\s+(\d+)\s+cpu_id=(\d+)\s+" + - "next_pid=([-\d]+)\s+next_tgid=([-\d]+)\s+next_task=(.*)") + process_re = re.compile( + "tick=(\d+)\s+(\d+)\s+cpu_id=(\d+)\s+" + + "next_pid=([-\d]+)\s+next_tgid=([-\d]+)\s+next_task=(.*)" + ) task_name_failure_warned = False @@ -596,7 +664,7 @@ def parseProcessInfo(task_file): match = re.match(process_re, line) if match: tick = int(match.group(1)) - if (start_tick < 0): + if start_tick < 0: start_tick = tick cpu_id = int(match.group(3)) pid = int(match.group(4)) @@ -607,7 +675,9 @@ def parseProcessInfo(task_file): if task_name == "FailureIn_curTaskName": print("-------------------------------------------------") print("WARNING: Task name not set correctly!") - print("Process/Thread info will not be displayed correctly") + print( + "Process/Thread info will not be displayed correctly" + ) print("Perhaps forgot to apply m5struct.patch to kernel?") print("-------------------------------------------------") task_name_failure_warned = True @@ -629,8 +699,10 @@ def parseProcessInfo(task_file): idle_uid = 0 else: # parent process name not known yet - process = Task(uid, tgid, tgid, "_Unknown_", True, tick) - if tgid == -1: # kernel + process = Task( + uid, tgid, tgid, "_Unknown_", True, tick + ) + if tgid == -1: # kernel kernel_uid = 0 uid += 1 process_dict[tgid] = process @@ -639,16 +711,27 @@ def parseProcessInfo(task_file): if tgid == pid: if process_dict[tgid].task_name == "_Unknown_": if 
args.verbose: - print("new process", \ - process_dict[tgid].uid, pid, tgid, task_name) + print( + "new process", + process_dict[tgid].uid, + pid, + tgid, + task_name, + ) process_dict[tgid].task_name = task_name if process_dict[tgid].task_name != task_name and tgid != 0: process_dict[tgid].task_name = task_name if not pid in thread_dict: if args.verbose: - print("new thread", \ - uid, process_dict[tgid].uid, pid, tgid, task_name) + print( + "new thread", + uid, + process_dict[tgid].uid, + pid, + tgid, + task_name, + ) thread = Task(uid, pid, tgid, task_name, False, tick) uid += 1 thread_dict[pid] = thread @@ -671,15 +754,26 @@ def parseProcessInfo(task_file): print("Found %d events." % len(unified_event_list)) for process in process_list: - if process.pid > 9990: # fix up framebuffer ticks + if process.pid > 9990: # fix up framebuffer ticks process.tick = start_tick - print(process.uid, process.pid, process.tgid, \ - process.task_name, str(process.tick)) + print( + process.uid, + process.pid, + process.tgid, + process.task_name, + str(process.tick), + ) for thread in process.children: if thread.pid > 9990: thread.tick = start_tick - print("\t", thread.uid, thread.pid, thread.tgid, \ - thread.task_name, str(thread.tick)) + print( + "\t", + thread.uid, + thread.pid, + thread.tgid, + thread.task_name, + str(thread.tick), + ) end_tick = tick @@ -694,6 +788,7 @@ def initOutput(output_path): if not os.path.exists(output_path): os.mkdir(output_path) + def ticksToNs(tick): if ticks_in_ns < 0: print("ticks_in_ns not set properly!") @@ -701,6 +796,7 @@ def ticksToNs(tick): return tick / ticks_in_ns + def writeXmlFile(xml, filename): f = open(filename, "w") txt = ET.tostring(xml) @@ -765,19 +861,24 @@ class StatsEntry(object): self.per_cpu_name.append(per_cpu_name) print("\t", per_cpu_name) - self.per_cpu_regex_string.\ - append("^" + per_cpu_name + "\s+[\d\.]+") - self.per_cpu_regex.append(re.compile("^" + per_cpu_name + \ - "\s+([\d\.e\-]+)\s+# (.*)$", re.M)) + 
self.per_cpu_regex_string.append( + "^" + per_cpu_name + "\s+[\d\.]+" + ) + self.per_cpu_regex.append( + re.compile( + "^" + per_cpu_name + "\s+([\d\.e\-]+)\s+# (.*)$", re.M + ) + ) self.values.append([]) self.per_cpu_found.append(False) - def append_value(self, val, per_cpu_index = None): + def append_value(self, val, per_cpu_index=None): if self.per_cpu: self.values[per_cpu_index].append(str(val)) else: self.values.append(str(val)) + # Global stats object that contains the list of stats entries # and other utility functions class Stats(object): @@ -788,13 +889,14 @@ class Stats(object): def register(self, name, group, group_index, per_cpu): print("registering stat:", name, "group:", group, group_index) - self.stats_list.append(StatsEntry(name, group, group_index, per_cpu, \ - self.next_key)) + self.stats_list.append( + StatsEntry(name, group, group_index, per_cpu, self.next_key) + ) self.next_key += 1 # Union of all stats to accelerate parsing speed def createStatsRegex(self): - regex_strings = []; + regex_strings = [] print("\nnum entries in stats_list", len(self.stats_list)) for entry in self.stats_list: if entry.per_cpu: @@ -803,7 +905,7 @@ class Stats(object): else: regex_strings.append(entry.regex_string) - self.regex = re.compile('|'.join(regex_strings)) + self.regex = re.compile("|".join(regex_strings)) def registerStats(config_file): @@ -821,19 +923,19 @@ def registerStats(config_file): stats = Stats() - per_cpu_stat_groups = config.options('PER_CPU_STATS') + per_cpu_stat_groups = config.options("PER_CPU_STATS") for group in per_cpu_stat_groups: i = 0 - per_cpu_stats_list = config.get('PER_CPU_STATS', group).split('\n') + per_cpu_stats_list = config.get("PER_CPU_STATS", group).split("\n") for item in per_cpu_stats_list: if item: stats.register(item, group, i, True) i += 1 - per_l2_stat_groups = config.options('PER_L2_STATS') + per_l2_stat_groups = config.options("PER_L2_STATS") for group in per_l2_stat_groups: i = 0 - per_l2_stats_list = 
config.get('PER_L2_STATS', group).split('\n') + per_l2_stats_list = config.get("PER_L2_STATS", group).split("\n") for item in per_l2_stats_list: if item: for l2 in range(num_l2): @@ -844,10 +946,10 @@ def registerStats(config_file): stats.register(name, group, i, False) i += 1 - other_stat_groups = config.options('OTHER_STATS') + other_stat_groups = config.options("OTHER_STATS") for group in other_stat_groups: i = 0 - other_stats_list = config.get('OTHER_STATS', group).split('\n') + other_stats_list = config.get("OTHER_STATS", group).split("\n") for item in other_stats_list: if item: stats.register(item, group, i, False) @@ -857,6 +959,7 @@ def registerStats(config_file): return stats + # Parse and read in gem5 stats file # Streamline counters are organized per CPU def readGem5Stats(stats, gem5_stats_file): @@ -866,10 +969,12 @@ def readGem5Stats(stats, gem5_stats_file): print("===============================\n") ext = os.path.splitext(gem5_stats_file)[1] - window_start_regex = \ - re.compile("^---------- Begin Simulation Statistics ----------") - window_end_regex = \ - re.compile("^---------- End Simulation Statistics ----------") + window_start_regex = re.compile( + "^---------- Begin Simulation Statistics ----------" + ) + window_end_regex = re.compile( + "^---------- End Simulation Statistics ----------" + ) final_tick_regex = re.compile("^final_tick\s+(\d+)") global ticks_in_ns @@ -888,7 +993,7 @@ def readGem5Stats(stats, gem5_stats_file): stats_not_found_list = stats.stats_list[:] window_num = 0 - while (True): + while True: error = False try: line = f.readline() @@ -904,10 +1009,12 @@ def readGem5Stats(stats, gem5_stats_file): if sim_freq < 0: m = sim_freq_regex.match(line) if m: - sim_freq = int(m.group(1)) # ticks in 1 sec + sim_freq = int(m.group(1)) # ticks in 1 sec ticks_in_ns = int(sim_freq / 1e9) - print("Simulation frequency found! 1 tick == %e sec\n" \ - % (1.0 / sim_freq)) + print( + "Simulation frequency found! 
1 tick == %e sec\n" + % (1.0 / sim_freq) + ) # Final tick in gem5 stats: current absolute timestamp m = final_tick_regex.match(line) @@ -917,8 +1024,7 @@ def readGem5Stats(stats, gem5_stats_file): break stats.tick_list.append(tick) - - if (window_end_regex.match(line) or error): + if window_end_regex.match(line) or error: if args.verbose: print("new window") for stat in stats.stats_list: @@ -926,18 +1032,28 @@ def readGem5Stats(stats, gem5_stats_file): for i in range(num_cpus): if not stat.per_cpu_found[i]: if not stat.not_found_at_least_once: - print("WARNING: stat not found in window #", \ - window_num, ":", stat.per_cpu_name[i]) - print("suppressing further warnings for " + \ - "this stat") + print( + "WARNING: stat not found in window #", + window_num, + ":", + stat.per_cpu_name[i], + ) + print( + "suppressing further warnings for " + + "this stat" + ) stat.not_found_at_least_once = True stat.values[i].append(str(0)) stat.per_cpu_found[i] = False else: if not stat.found: if not stat.not_found_at_least_once: - print("WARNING: stat not found in window #", \ - window_num, ":", stat.name) + print( + "WARNING: stat not found in window #", + window_num, + ":", + stat.name, + ) print("suppressing further warnings for this stat") stat.not_found_at_least_once = True stat.values.append(str(0)) @@ -1043,6 +1159,7 @@ def doCapturedXML(output_path, stats): writeXmlFile(xml, captured_file) + # Writes out Streamline cookies (unique IDs per process/thread) def writeCookiesThreads(blob): thread_list = [] @@ -1056,12 +1173,22 @@ def writeCookiesThreads(blob): thread_list.append(thread) # Threads need to be sorted in timestamp order - thread_list.sort(key = lambda x: x.tick) + thread_list.sort(key=lambda x: x.tick) for thread in thread_list: - print("thread", thread.task_name, (ticksToNs(thread.tick)),\ - thread.tgid, thread.pid) - writeBinary(blob, threadNameFrame(ticksToNs(thread.tick),\ - thread.pid, thread.task_name)) + print( + "thread", + thread.task_name, + 
(ticksToNs(thread.tick)), + thread.tgid, + thread.pid, + ) + writeBinary( + blob, + threadNameFrame( + ticksToNs(thread.tick), thread.pid, thread.task_name + ), + ) + # Writes context switch info as Streamline scheduling events def writeSchedEvents(blob): @@ -1086,8 +1213,10 @@ def writeSchedEvents(blob): if args.verbose: print(cpu, timestamp, pid, tid, cookie) - writeBinary(blob,\ - schedSwitchFrame(cpu, timestamp, pid, tid, cookie, state)) + writeBinary( + blob, schedSwitchFrame(cpu, timestamp, pid, tid, cookie, state) + ) + # Writes selected gem5 statistics as Streamline counters def writeCounters(blob, stats): @@ -1107,11 +1236,26 @@ def writeCounters(blob, stats): for stat in stats.stats_list: if stat.per_cpu: for i in range(num_cpus): - writeBinary(blob, counterFrame(timestamp_list[n], i, \ - stat.key, int(float(stat.values[i][n])))) + writeBinary( + blob, + counterFrame( + timestamp_list[n], + i, + stat.key, + int(float(stat.values[i][n])), + ), + ) else: - writeBinary(blob, counterFrame(timestamp_list[n], 0, \ - stat.key, int(float(stat.values[n])))) + writeBinary( + blob, + counterFrame( + timestamp_list[n], + 0, + stat.key, + int(float(stat.values[n])), + ), + ) + # Streamline can display LCD frame buffer dumps (gzipped bmp) # This function converts the frame buffer dumps to the Streamline format @@ -1143,8 +1287,8 @@ def writeVisualAnnotations(blob, input_path, output_path): frame_count += 1 userspace_body = [] - userspace_body += packed32(0x1C) # escape code - userspace_body += packed32(0x04) # visual code + userspace_body += packed32(0x1C) # escape code + userspace_body += packed32(0x04) # visual code text_annotation = "image_" + str(ticksToNs(tick)) + ".bmp.gz" userspace_body += int16(len(text_annotation)) @@ -1160,8 +1304,16 @@ def writeVisualAnnotations(blob, input_path, output_path): userspace_body += int32(len(bytes_read)) userspace_body += bytes_read - writeBinary(blob, annotateFrame(0, annotate_pid, ticksToNs(tick), \ - len(userspace_body), 
userspace_body)) + writeBinary( + blob, + annotateFrame( + 0, + annotate_pid, + ticksToNs(tick), + len(userspace_body), + userspace_body, + ), + ) print("\nfound", frame_count, "frames for visual annotation.\n") @@ -1192,7 +1344,6 @@ def createApcProject(input_path, output_path, stats): blob.close() - ####################### # Main Routine @@ -1226,10 +1377,13 @@ stats = registerStats(stat_config_file) # Parse gem5 stats #### # Check if both stats.txt and stats.txt.gz exist and warn if both exist -if os.path.exists(input_path + "/stats.txt") and \ - os.path.exists(input_path + "/stats.txt.gz"): - print("WARNING: Both stats.txt.gz and stats.txt exist. \ - Using stats.txt.gz by default.") +if os.path.exists(input_path + "/stats.txt") and os.path.exists( + input_path + "/stats.txt.gz" +): + print( + "WARNING: Both stats.txt.gz and stats.txt exist. \ + Using stats.txt.gz by default." + ) gem5_stats_file = input_path + "/stats.txt.gz" if not os.path.exists(gem5_stats_file): diff --git a/util/style.py b/util/style.py index 07505c0e05..4007ff9f9e 100755 --- a/util/style.py +++ b/util/style.py @@ -45,34 +45,43 @@ from style.region import all_regions from style.style import StdioUI from style import repo -verifier_names = dict([ - (c.__name__, c) for c in style.verifiers.all_verifiers ]) +verifier_names = dict([(c.__name__, c) for c in style.verifiers.all_verifiers]) -def verify(filename, regions=all_regions, verbose=False, verifiers=None, - auto_fix=False): + +def verify( + filename, + regions=all_regions, + verbose=False, + verifiers=None, + auto_fix=False, +): ui = StdioUI() - opts = { - "fix_all" : auto_fix, - } + opts = {"fix_all": auto_fix} base = os.path.join(os.path.dirname(__file__), "..") if verifiers is None: verifiers = style.verifiers.all_verifiers if verbose: print("Verifying %s[%s]..." 
% (filename, regions)) - for verifier in [ v(ui, opts, base=base) for v in verifiers ]: + for verifier in [v(ui, opts, base=base) for v in verifiers]: if verbose: - print("Applying %s (%s)" % ( - verifier.test_name, verifier.__class__.__name__)) + print( + "Applying %s (%s)" + % (verifier.test_name, verifier.__class__.__name__) + ) if verifier.apply(filename, regions=regions): return False return True + def detect_repo(): repo_classes = repo.detect_repo() if not repo_classes: - print("Error: Failed to detect repository type, no " \ - "known repository type found.", file=sys.stderr) + print( + "Error: Failed to detect repository type, no " + "known repository type found.", + file=sys.stderr, + ) sys.exit(1) elif len(repo_classes) > 1: print("Error: Detected multiple repository types.", file=sys.stderr) @@ -80,55 +89,77 @@ def detect_repo(): else: return repo_classes[0]() -repo_types = { - "auto" : detect_repo, - "none" : lambda : None, - "git" : repo.GitRepo, -} -if __name__ == '__main__': +repo_types = {"auto": detect_repo, "none": lambda: None, "git": repo.GitRepo} + +if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( description="Check a file for gem5 style violations", epilog="""If no files are specified, the style checker tries to determine the list of modified and added files from the version - control system and checks those.""" + control system and checks those.""", ) - parser.add_argument("--verbose", "-v", action="count", - help="Produce verbose output") + parser.add_argument( + "--verbose", "-v", action="count", help="Produce verbose output" + ) - parser.add_argument("--fix", "-f", action="store_true", - help="Automatically fix style violations.") + parser.add_argument( + "--fix", + "-f", + action="store_true", + help="Automatically fix style violations.", + ) - parser.add_argument("--modifications", "-m", action="store_true", - help="""Apply the style checker to modified regions - instead of whole files""") + 
parser.add_argument( + "--modifications", + "-m", + action="store_true", + help="""Apply the style checker to modified regions + instead of whole files""", + ) - parser.add_argument("--repo-type", choices=repo_types, default="auto", - help="Repository type to use to detect changes") + parser.add_argument( + "--repo-type", + choices=repo_types, + default="auto", + help="Repository type to use to detect changes", + ) - parser.add_argument("--checker", "-c", choices=verifier_names, default=[], - action="append", - help="""Style checkers to run. Can be specified - multiple times.""") + parser.add_argument( + "--checker", + "-c", + choices=verifier_names, + default=[], + action="append", + help="""Style checkers to run. Can be specified + multiple times.""", + ) - parser.add_argument("files", metavar="FILE", nargs="*", - type=str, - help="Source file(s) to inspect") + parser.add_argument( + "files", + metavar="FILE", + nargs="*", + type=str, + help="Source file(s) to inspect", + ) args = parser.parse_args() repo = repo_types[args.repo_type]() - verifiers = [ verifier_names[name] for name in args.checker ] \ - if args.checker else None + verifiers = ( + [verifier_names[name] for name in args.checker] + if args.checker + else None + ) files = args.files if not files and repo: added, modified = repo.staged_files() - files = [ repo.file_path(f) for f in added + modified ] + files = [repo.file_path(f) for f in added + modified] for filename in files: if args.modifications and repo and repo.in_repo(filename): @@ -136,8 +167,11 @@ if __name__ == '__main__': else: regions = all_regions - if not verify(filename, regions=regions, - verbose=args.verbose, - verifiers=verifiers, - auto_fix=args.fix): + if not verify( + filename, + regions=regions, + verbose=args.verbose, + verifiers=verifiers, + auto_fix=args.fix, + ): sys.exit(1) diff --git a/util/style/file_types.py b/util/style/file_types.py index 463c408b0e..3a6b93098b 100644 --- a/util/style/file_types.py +++ 
b/util/style/file_types.py @@ -28,67 +28,64 @@ import os # lanuage type for each file extension lang_types = { - '.c' : "C", - '.cl' : "C", - '.h' : "C", - '.cc' : "C++", - '.hh' : "C++", - '.cxx' : "C++", - '.hxx' : "C++", - '.cpp' : "C++", - '.hpp' : "C++", - '.C' : "C++", - '.H' : "C++", - '.i' : "swig", - '.py' : "python", - '.pl' : "perl", - '.pm' : "perl", - '.s' : "asm", - '.S' : "asm", - '.l' : "lex", - '.ll' : "lex", - '.y' : "yacc", - '.yy' : "yacc", - '.isa' : "isa", - '.sh' : "shell", - '.slicc' : "slicc", - '.sm' : "slicc", - '.awk' : "awk", - '.el' : "lisp", - '.txt' : "text", - '.tex' : "tex", - '.mk' : "make", - '.dts' : "dts", - } + ".c": "C", + ".cl": "C", + ".h": "C", + ".cc": "C++", + ".hh": "C++", + ".cxx": "C++", + ".hxx": "C++", + ".cpp": "C++", + ".hpp": "C++", + ".C": "C++", + ".H": "C++", + ".i": "swig", + ".py": "python", + ".pl": "perl", + ".pm": "perl", + ".s": "asm", + ".S": "asm", + ".l": "lex", + ".ll": "lex", + ".y": "yacc", + ".yy": "yacc", + ".isa": "isa", + ".sh": "shell", + ".slicc": "slicc", + ".sm": "slicc", + ".awk": "awk", + ".el": "lisp", + ".txt": "text", + ".tex": "tex", + ".mk": "make", + ".dts": "dts", +} # languages based on file prefix lang_prefixes = ( - ('SCons', 'scons'), - ('Make', 'make'), - ('make', 'make'), - ('Doxyfile', 'doxygen'), - ) + ("SCons", "scons"), + ("Make", "make"), + ("make", "make"), + ("Doxyfile", "doxygen"), +) # languages based on #! 
line of first file -hash_bang = ( - ('python', 'python'), - ('perl', 'perl'), - ('sh', 'shell'), - ) +hash_bang = (("python", "python"), ("perl", "perl"), ("sh", "shell")) # the list of all languages that we detect all_languages = frozenset(lang_types.values()) -all_languages |= frozenset(lang for start,lang in lang_prefixes) -all_languages |= frozenset(lang for start,lang in hash_bang) +all_languages |= frozenset(lang for start, lang in lang_prefixes) +all_languages |= frozenset(lang for start, lang in hash_bang) + def lang_type(filename, firstline=None, openok=True): - '''identify the language of a given filename and potentially the + """identify the language of a given filename and potentially the firstline of the file. If the firstline of the file is not provided and openok is True, open the file and read the first line - if necessary''' + if necessary""" basename = os.path.basename(filename) - name,extension = os.path.splitext(basename) + name, extension = os.path.splitext(basename) # first try to detect language based on file extension try: @@ -97,49 +94,54 @@ def lang_type(filename, firstline=None, openok=True): pass # now try to detect language based on file prefix - for start,lang in lang_prefixes: + for start, lang in lang_prefixes: if basename.startswith(start): return lang # if a first line was not provided but the file is ok to open, # grab the first line of the file. if firstline is None and openok: - handle = open(filename, 'r') + handle = open(filename, "r") firstline = handle.readline() handle.close() # try to detect language based on #! 
in first line - if firstline and firstline.startswith('#!'): - for string,lang in hash_bang: + if firstline and firstline.startswith("#!"): + for string, lang in hash_bang: if firstline.find(string) > 0: return lang # sorry, we couldn't detect the language return None -# directories and files to ignore by default -default_dir_ignore = frozenset(('build', 'ext')) -default_file_ignore = frozenset(('parsetab.py', )) -def find_files(base, languages=all_languages, - dir_ignore=default_dir_ignore, - file_ignore=default_file_ignore): - '''find all files in a directory and its subdirectories based on a +# directories and files to ignore by default +default_dir_ignore = frozenset(("build", "ext")) +default_file_ignore = frozenset(("parsetab.py",)) + + +def find_files( + base, + languages=all_languages, + dir_ignore=default_dir_ignore, + file_ignore=default_file_ignore, +): + """find all files in a directory and its subdirectories based on a set of languages, ignore directories specified in dir_ignore and - files specified in file_ignore''' - if base[-1] != '/': - base += '/' + files specified in file_ignore""" + if base[-1] != "/": + base += "/" def update_dirs(dirs): - '''strip the ignored directories out of the provided list''' + """strip the ignored directories out of the provided list""" index = len(dirs) - 1 - for i,d in enumerate(reversed(dirs)): + for i, d in enumerate(reversed(dirs)): if d in dir_ignore: del dirs[index - i] # walk over base - for root,dirs,files in os.walk(base): - root = root.replace(base, '', 1) + for root, dirs, files in os.walk(base): + root = root.replace(base, "", 1) # strip ignored directories from the list update_dirs(dirs) @@ -158,11 +160,12 @@ def find_files(base, languages=all_languages, if language in languages: yield fullpath, language + def update_file(dst, src, language, mutator): - '''update a file of the specified language with the provided + """update a file of the specified language with the provided mutator generator. 
If inplace is provided, update the file in place and return the handle to the updated file. If inplace is - false, write the updated file to cStringIO''' + false, write the updated file to cStringIO""" # if the source and destination are the same, we're updating in place inplace = dst == src @@ -170,15 +173,15 @@ def update_file(dst, src, language, mutator): if isinstance(src, str): # if a filename was provided, open the file if inplace: - mode = 'r+' + mode = "r+" else: - mode = 'r' + mode = "r" src = open(src, mode) orig_lines = [] # grab all of the lines of the file and strip them of their line ending - old_lines = list(line.rstrip('\r\n') for line in src) + old_lines = list(line.rstrip("\r\n") for line in src) new_lines = list(mutator(old_lines, src.name, language)) for line in src: @@ -196,8 +199,8 @@ def update_file(dst, src, language, mutator): elif isinstance(dst, str): # if we're not updating in place and a destination file name # was provided, create a file object - dst = open(dst, 'w') + dst = open(dst, "w") for line in new_lines: dst.write(line) - dst.write('\n') + dst.write("\n") diff --git a/util/style/region.py b/util/style/region.py index 024743c855..39eaba50be 100644 --- a/util/style/region.py +++ b/util/style/region.py @@ -24,31 +24,67 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ class _neg_inf(object): - '''This object always compares less than any other object''' - def __repr__(self): return '' - def __lt__(self, other): return type(self) != type(other) - def __le__(self, other): return True - def __gt__(self, other): return False - def __ge__(self, other): return type(self) == type(other) - def __eq__(self, other): return type(self) == type(other) - def __ne__(self, other): return type(self) != type(other) + """This object always compares less than any other object""" + + def __repr__(self): + return "" + + def __lt__(self, other): + return type(self) != type(other) + + def __le__(self, other): + return True + + def __gt__(self, other): + return False + + def __ge__(self, other): + return type(self) == type(other) + + def __eq__(self, other): + return type(self) == type(other) + + def __ne__(self, other): + return type(self) != type(other) + + neg_inf = _neg_inf() + class _pos_inf(object): - '''This object always compares greater than any other object''' - def __repr__(self): return '' - def __lt__(self, other): return False - def __le__(self, other): return type(self) == type(other) - def __gt__(self, other): return type(self) != type(other) - def __ge__(self, other): return True - def __eq__(self, other): return type(self) == type(other) - def __ne__(self, other): return type(self) != type(other) + """This object always compares greater than any other object""" + + def __repr__(self): + return "" + + def __lt__(self, other): + return False + + def __le__(self, other): + return type(self) == type(other) + + def __gt__(self, other): + return type(self) != type(other) + + def __ge__(self, other): + return True + + def __eq__(self, other): + return type(self) == type(other) + + def __ne__(self, other): + return type(self) != type(other) + + pos_inf = _pos_inf() + class Region(tuple): - '''A region (range) of [start, end). - This includes utility functions to compare overlap of regions.''' + """A region (range) of [start, end). 
+ This includes utility functions to compare overlap of regions.""" + def __new__(cls, *args): if len(args) == 1: arg = args[0] @@ -58,12 +94,13 @@ class Region(tuple): if len(args) != 2: raise AttributeError( - "Only one or two arguments allowed, %d provided" % (alen, )) + "Only one or two arguments allowed, %d provided" % (alen,) + ) return tuple.__new__(cls, args) def __repr__(self): - return 'Region(%s, %s)' % (self[0], self[1]) + return "Region(%s, %s)" % (self[0], self[1]) @property def start(self): @@ -74,17 +111,17 @@ class Region(tuple): return self[1] def __contains__(self, other): - '''other is + """other is region: True if self and other is fully contained within self. - pos: True if other is within the region''' + pos: True if other is within the region""" if isinstance(other, tuple): return self[0] <= other[0] and self[1] >= other[1] return self[0] <= other and other < self[1] def __eq__(self, other): - '''other is + """other is region: True if self and other are identical. - pos: True if other is within the region''' + pos: True if other is within the region""" if isinstance(other, tuple): return self[0] == other[0] and self[1] == other[1] return self[0] <= other and other < self[1] @@ -93,9 +130,9 @@ class Region(tuple): # @param other is a region. # @return if self and other are not identical. def __ne__(self, other): - '''other is + """other is region: true if they are not identical - pos: True if other is not in the region''' + pos: True if other is not in the region""" if isinstance(other, tuple): return self[0] != other[0] or self[1] != other[1] return other < self[0] or self[1] <= other @@ -138,10 +175,12 @@ class Region(tuple): return self[1] >= other[1] return self[1] > other + class Regions(object): - '''A set of regions (ranges). Basically a region with holes. + """A set of regions (ranges). Basically a region with holes. 
Includes utility functions to merge regions and figure out if - something is in one of the regions.''' + something is in one of the regions.""" + def __init__(self, *args): self.regions = [] self.extend(*args) @@ -228,22 +267,23 @@ class Regions(object): return result def __repr__(self): - return 'Regions(%s)' % ([(r[0], r[1]) for r in self.regions], ) + return "Regions(%s)" % ([(r[0], r[1]) for r in self.regions],) + all_regions = Regions(Region(neg_inf, pos_inf)) -if __name__ == '__main__': - x = Regions(*((i, i + 1) for i in range(0,30,2))) - y = Regions(*((i, i + 4) for i in range(0,30,5))) - z = Region(6,7) - n = Region(9,10) +if __name__ == "__main__": + x = Regions(*((i, i + 1) for i in range(0, 30, 2))) + y = Regions(*((i, i + 4) for i in range(0, 30, 5))) + z = Region(6, 7) + n = Region(9, 10) def test(left, right): print("%s == %s: %s" % (left, right, left == right)) print("%s != %s: %s" % (left, right, left != right)) - print("%s < %s: %s" % (left, right, left < right)) + print("%s < %s: %s" % (left, right, left < right)) print("%s <= %s: %s" % (left, right, left <= right)) - print("%s > %s: %s" % (left, right, left > right)) + print("%s > %s: %s" % (left, right, left > right)) print("%s >= %s: %s" % (left, right, left >= right)) print("\n") diff --git a/util/style/repo.py b/util/style/repo.py index 68f16ebe42..20dfde4042 100644 --- a/util/style/repo.py +++ b/util/style/repo.py @@ -42,6 +42,7 @@ import subprocess from .region import * from .style import modified_regions + class AbstractRepo(object, metaclass=ABCMeta): def file_path(self, fname): """Get the absolute path to a file relative within the repository. 
The @@ -107,6 +108,7 @@ class AbstractRepo(object, metaclass=ABCMeta): """ pass + class GitRepo(AbstractRepo): def __init__(self): self.git = "git" @@ -115,9 +117,13 @@ class GitRepo(AbstractRepo): def repo_base(self): if self._repo_base is None: - self._repo_base = subprocess.check_output( - [ self.git, "rev-parse", "--show-toplevel" ]) \ - .decode().rstrip("\n") + self._repo_base = ( + subprocess.check_output( + [self.git, "rev-parse", "--show-toplevel"] + ) + .decode() + .rstrip("\n") + ) return self._repo_base @@ -133,7 +139,7 @@ class GitRepo(AbstractRepo): return added, modified def staged_regions(self, fname, context=0): - if self.file_status(fname, cached=True) in ("", "A", ): + if self.file_status(fname, cached=True) in ("", "A"): return all_regions old = self.file_from_head(self.repo_path(fname)).split("\n") @@ -142,7 +148,7 @@ class GitRepo(AbstractRepo): return modified_regions(old, new, context=context) def modified_regions(self, fname, context=0): - if self.file_status(fname) in ("", "A", ): + if self.file_status(fname) in ("", "A"): return all_regions old = self.file_from_head(self.repo_path(fname)).split("\n") @@ -150,15 +156,19 @@ class GitRepo(AbstractRepo): return modified_regions(old, new, context=context) - def head_revision(self): if self._head_revision is not None: return self._head_revision try: - self._head_revision = subprocess.check_output( - [ self.git, "rev-parse", "--verify", "HEAD" ], - stderr=subprocess.PIPE).decode().rstrip("\n") + self._head_revision = ( + subprocess.check_output( + [self.git, "rev-parse", "--verify", "HEAD"], + stderr=subprocess.PIPE, + ) + .decode() + .rstrip("\n") + ) except subprocess.CalledProcessError: # Assume that the repo is empty and use the semi-magic # empty tree revision if git rev-parse returned an error. 
@@ -178,27 +188,29 @@ class GitRepo(AbstractRepo): return "" def status(self, filter=None, files=[], cached=False): - cmd = [ self.git, "diff-index", "--name-status" ] + cmd = [self.git, "diff-index", "--name-status"] if cached: cmd.append("--cached") if filter: - cmd += [ "--diff-filter=%s" % filter ] - cmd += [ self.head_revision(), "--" ] + files - status = subprocess.check_output(cmd).decode('utf-8').rstrip("\n") + cmd += ["--diff-filter=%s" % filter] + cmd += [self.head_revision(), "--"] + files + status = subprocess.check_output(cmd).decode("utf-8").rstrip("\n") if status: - return [ f.split("\t") for f in status.split("\n") ] + return [f.split("\t") for f in status.split("\n")] else: return [] def file_from_index(self, name): return subprocess.check_output( - [ self.git, "show", ":%s" % (name, ) ]).decode('utf-8') + [self.git, "show", ":%s" % (name,)] + ).decode("utf-8") def file_from_head(self, name): return subprocess.check_output( - [ self.git, "show", "%s:%s" % (self.head_revision(), name) ]) \ - .decode('utf-8') + [self.git, "show", "%s:%s" % (self.head_revision(), name)] + ).decode("utf-8") + def detect_repo(path="."): """Auto-detect the revision control system used for a source code @@ -211,9 +223,7 @@ def detect_repo(path="."): """ - _repo_types = ( - (".git", GitRepo), - ) + _repo_types = ((".git", GitRepo),) repo_types = [] for repo_dir, repo_class in _repo_types: diff --git a/util/style/sort_includes.py b/util/style/sort_includes.py index 0e4216b1df..67e7ca9fbc 100644 --- a/util/style/sort_includes.py +++ b/util/style/sort_includes.py @@ -45,29 +45,31 @@ import sys from .file_types import * cpp_c_headers = { - 'assert.h' : 'cassert', - 'ctype.h' : 'cctype', - 'errno.h' : 'cerrno', - 'float.h' : 'cfloat', - 'limits.h' : 'climits', - 'locale.h' : 'clocale', - 'math.h' : 'cmath', - 'setjmp.h' : 'csetjmp', - 'signal.h' : 'csignal', - 'stdarg.h' : 'cstdarg', - 'stddef.h' : 'cstddef', - 'stdio.h' : 'cstdio', - 'stdlib.h' : 'cstdlib', - 'string.h' : 
'cstring', - 'time.h' : 'ctime', - 'wchar.h' : 'cwchar', - 'wctype.h' : 'cwctype', + "assert.h": "cassert", + "ctype.h": "cctype", + "errno.h": "cerrno", + "float.h": "cfloat", + "limits.h": "climits", + "locale.h": "clocale", + "math.h": "cmath", + "setjmp.h": "csetjmp", + "signal.h": "csignal", + "stdarg.h": "cstdarg", + "stddef.h": "cstddef", + "stdio.h": "cstdio", + "stdlib.h": "cstdlib", + "string.h": "cstring", + "time.h": "ctime", + "wchar.h": "cwchar", + "wctype.h": "cwctype", } include_re = re.compile(r'([#%])(include|import).*[<"](.*)[">]') + + def include_key(line): - '''Mark directories with a leading space so directories - are sorted before files''' + """Mark directories with a leading space so directories + are sorted before files""" match = include_re.match(line) assert match, line @@ -75,15 +77,15 @@ def include_key(line): include = match.group(3) # Everything but the file part needs to have a space prepended - parts = include.split('/') - if len(parts) == 2 and parts[0] == 'dnet': + parts = include.split("/") + if len(parts) == 2 and parts[0] == "dnet": # Don't sort the dnet includes with respect to each other, but # make them sorted with respect to non dnet includes. Python # guarantees that sorting is stable, so just clear the # basename part of the filename. 
- parts[1] = ' ' - parts[0:-1] = [ ' ' + s for s in parts[0:-1] ] - key = '/'.join(parts) + parts[1] = " " + parts[0:-1] = [" " + s for s in parts[0:-1]] + key = "/".join(parts) return key @@ -92,14 +94,15 @@ def _include_matcher(keyword="#include", delim="<>"): """Match an include statement and return a (keyword, file, extra) duple, or a touple of None values if there isn't a match.""" - rex = re.compile(r'^(%s)\s*%s(.*)%s(.*)$' % (keyword, delim[0], delim[1])) + rex = re.compile(r"^(%s)\s*%s(.*)%s(.*)$" % (keyword, delim[0], delim[1])) def matcher(context, line): m = rex.match(line) - return m.groups() if m else (None, ) * 3 + return m.groups() if m else (None,) * 3 return matcher + def _include_matcher_fname(fname, **kwargs): """Match an include of a specific file name. Any keyword arguments are forwarded to _include_matcher, which is used to match the @@ -113,7 +116,7 @@ def _include_matcher_fname(fname, **kwargs): if fname and rex.match(fname): return (keyword, fname, extra) else: - return (None, ) * 3 + return (None,) * 3 return matcher @@ -124,15 +127,12 @@ def _include_matcher_main(): base_matcher = _include_matcher(delim='""') rex = re.compile(r"^src/(.*)\.([^.]+)$") - header_map = { - "c" : "h", - "cc" : "hh", - "cpp" : "hh", - } + header_map = {"c": "h", "cc": "hh", "cpp": "hh"} + def matcher(context, line): m = rex.match(context["filename"]) if not m: - return (None, ) * 3 + return (None,) * 3 base, ext = m.groups() (keyword, fname, extra) = base_matcher(context, line) try: @@ -141,10 +141,11 @@ def _include_matcher_main(): except KeyError: pass - return (None, ) * 3 + return (None,) * 3 return matcher + class SortIncludes(object): # different types of includes for different sorting of headers # - Python header needs to be first if it exists @@ -153,32 +154,35 @@ class SortIncludes(object): # <*.(hh|hxx|hpp|H)> - C++ Headers (directories before files) # "*" - M5 headers (directories before files) includes_re = ( - ('main', '""', 
_include_matcher_main()), - ('python', '<>', _include_matcher_fname("^Python\.h$")), - ('pybind', '""', _include_matcher_fname("^pybind11/.*\.h$", - delim='""')), - ('m5shared', '<>', _include_matcher_fname("^gem5/")), - ('c', '<>', _include_matcher_fname("^.*\.h$")), - ('stl', '<>', _include_matcher_fname("^\w+$")), - ('cc', '<>', _include_matcher_fname("^.*\.(hh|hxx|hpp|H)$")), - ('m5header', '""', _include_matcher_fname("^.*\.h{1,2}$", delim='""')), - ('swig0', '<>', _include_matcher(keyword="%import")), - ('swig1', '<>', _include_matcher(keyword="%include")), - ('swig2', '""', _include_matcher(keyword="%import", delim='""')), - ('swig3', '""', _include_matcher(keyword="%include", delim='""')), - ) + ("main", '""', _include_matcher_main()), + ("python", "<>", _include_matcher_fname("^Python\.h$")), + ( + "pybind", + '""', + _include_matcher_fname("^pybind11/.*\.h$", delim='""'), + ), + ("m5shared", "<>", _include_matcher_fname("^gem5/")), + ("c", "<>", _include_matcher_fname("^.*\.h$")), + ("stl", "<>", _include_matcher_fname("^\w+$")), + ("cc", "<>", _include_matcher_fname("^.*\.(hh|hxx|hpp|H)$")), + ("m5header", '""', _include_matcher_fname("^.*\.h{1,2}$", delim='""')), + ("swig0", "<>", _include_matcher(keyword="%import")), + ("swig1", "<>", _include_matcher(keyword="%include")), + ("swig2", '""', _include_matcher(keyword="%import", delim='""')), + ("swig3", '""', _include_matcher(keyword="%include", delim='""')), + ) block_order = ( - ('python', ), - ('pybind', ), - ('main', ), - ('c', ), - ('stl', ), - ('cc', ), - ('m5shared', ), - ('m5header', ), - ('swig0', 'swig1', 'swig2', 'swig3', ), - ) + ("python",), + ("pybind",), + ("main",), + ("c",), + ("stl",), + ("cc",), + ("m5shared",), + ("m5header",), + ("swig0", "swig1", "swig2", "swig3"), + ) def __init__(self): self.block_priority = {} @@ -219,10 +223,7 @@ class SortIncludes(object): def __call__(self, lines, filename, language): self.reset() - context = { - "filename" : filename, - "language" : language, 
- } + context = {"filename": filename, "language": language} def match_line(line): if not line: @@ -233,14 +234,16 @@ class SortIncludes(object): if keyword: # if we've got a match, clean up the #include line, # fix up stl headers and store it in the proper category - if include_type == 'c' and language == 'C++': + if include_type == "c" and language == "C++": stl_inc = cpp_c_headers.get(include, None) if stl_inc: include = stl_inc - include_type = 'stl' + include_type = "stl" - return (include_type, - keyword + ' ' + ldelim + include + rdelim + extra) + return ( + include_type, + keyword + " " + ldelim + include + rdelim + extra, + ) return (None, line) @@ -251,7 +254,7 @@ class SortIncludes(object): try: self.includes[include_type].append(line) except KeyError: - self.includes[include_type] = [ line ] + self.includes[include_type] = [line] processing_includes = True elif processing_includes and not line.strip(): @@ -265,7 +268,7 @@ class SortIncludes(object): # current l. for include in self.dump_includes(): yield include - yield '' + yield "" yield line else: # We are not in an include block, so just emit the line @@ -276,47 +279,68 @@ class SortIncludes(object): for include in self.dump_includes(): yield include + # default language types to try to apply our sorting rules to -default_languages = frozenset(('C', 'C++', 'isa', 'python', 'scons', 'swig')) +default_languages = frozenset(("C", "C++", "isa", "python", "scons", "swig")) + def options(): import argparse + parser = argparse.ArgumentParser() parser.add_argument( - '-d', '--dir_ignore', metavar="DIR[,DIR]", type=str, - default=','.join(default_dir_ignore), - help="ignore directories") + "-d", + "--dir_ignore", + metavar="DIR[,DIR]", + type=str, + default=",".join(default_dir_ignore), + help="ignore directories", + ) parser.add_argument( - '-f', '--file_ignore', metavar="FILE[,FILE]", type=str, - default=','.join(default_file_ignore), - help="ignore files") + "-f", + "--file_ignore", + 
metavar="FILE[,FILE]", + type=str, + default=",".join(default_file_ignore), + help="ignore files", + ) parser.add_argument( - '-l', '--languages', metavar="LANG[,LANG]", type=str, - default=','.join(default_languages), - help="languages") + "-l", + "--languages", + metavar="LANG[,LANG]", + type=str, + default=",".join(default_languages), + help="languages", + ) parser.add_argument( - '-n', '--dry-run', action='store_true', - help="don't overwrite files") - parser.add_argument('bases', nargs='*') + "-n", "--dry-run", action="store_true", help="don't overwrite files" + ) + parser.add_argument("bases", nargs="*") return parser + def parse_args(parser): args = parser.parse_args() - args.dir_ignore = frozenset(args.dir_ignore.split(',')) - args.file_ignore = frozenset(args.file_ignore.split(',')) - args.languages = frozenset(args.languages.split(',')) + args.dir_ignore = frozenset(args.dir_ignore.split(",")) + args.file_ignore = frozenset(args.file_ignore.split(",")) + args.languages = frozenset(args.languages.split(",")) return args -if __name__ == '__main__': + +if __name__ == "__main__": parser = options() args = parse_args(parser) for base in args.bases: - for filename,language in find_files(base, languages=args.languages, - file_ignore=args.file_ignore, dir_ignore=args.dir_ignore): + for filename, language in find_files( + base, + languages=args.languages, + file_ignore=args.file_ignore, + dir_ignore=args.dir_ignore, + ): if args.dry_run: print("{}: {}".format(filename, language)) else: diff --git a/util/style/style.py b/util/style/style.py index d8afd099b8..1c6ed1cf96 100644 --- a/util/style/style.py +++ b/util/style/style.py @@ -47,9 +47,9 @@ import sys from .region import * tabsize = 8 -lead = re.compile(r'^([ \t]+)') -trail = re.compile(r'([ \t]+)$') -any_control = re.compile(r'\b(if|while|for)([ \t]*)\(') +lead = re.compile(r"^([ \t]+)") +trail = re.compile(r"([ \t]+)$") +any_control = re.compile(r"\b(if|while|for)([ \t]*)\(") class UserInterface(object, 
metaclass=ABCMeta): @@ -70,6 +70,7 @@ class UserInterface(object, metaclass=ABCMeta): def write(self, string): pass + class StdioUI(UserInterface): def _prompt(self, prompt, results, default): return input(prompt) or default @@ -77,24 +78,31 @@ class StdioUI(UserInterface): def write(self, string): sys.stdout.write(string) + def _re_ignore(expr): """Helper function to create regular expression ignore file matcher functions""" rex = re.compile(expr) + def match_re(fname): return rex.match(fname) + return match_re + def _re_only(expr): """Helper function to create regular expressions to only keep matcher functions""" rex = re.compile(expr) + def match_re(fname): return not rex.match(fname) + return match_re + # This list contains a list of functions that are called to determine # if a file should be excluded from the style matching rules or # not. The functions are called with the file name relative to the @@ -109,10 +117,13 @@ style_ignores = [ _re_ignore("^tests/test-progs/hello/bin/"), # Only include Scons files and those with extensions that suggest source # code - _re_only("^((.*\/)?(SConscript|SConstruct)|" - ".*\.(c|h|cc|hh|cpp|hpp|py|isa|proto))$") + _re_only( + "^((.*\/)?(SConscript|SConstruct)|" + ".*\.(c|h|cc|hh|cpp|hpp|isa|proto))$" + ), ] + def check_ignores(fname): """Check if a file name matches any of the ignore rules""" @@ -128,13 +139,14 @@ def normalized_len(line): count = 0 for c in line: - if c == '\t': + if c == "\t": count += tabsize - count % tabsize else: count += 1 return count + def modified_regions(old, new, context=0): regions = Regions() m = difflib.SequenceMatcher(a=old, b=new, autojunk=False) diff --git a/util/style/verifiers.py b/util/style/verifiers.py index 7ab7344636..4ccd35af48 100644 --- a/util/style/verifiers.py +++ b/util/style/verifiers.py @@ -54,21 +54,23 @@ from .file_types import lang_type def safefix(fix_func): - """ Decorator for the fix functions of the Verifier class. 
- This function wraps the fix function and creates a backup file - just in case there is an error. + """Decorator for the fix functions of the Verifier class. + This function wraps the fix function and creates a backup file + just in case there is an error. """ + def safefix_wrapper(*args, **kwargs): # Check to be sure that this is decorating a function we expect: # a class method with filename as the first argument (after self) - assert(os.path.exists(args[1])) + assert os.path.exists(args[1]) self = args[0] - assert(is_verifier(self.__class__)) + assert is_verifier(self.__class__) filename = args[1] # Now, Let's make a backup file. from shutil import copyfile - backup_name = filename+'.bak' + + backup_name = filename + ".bak" copyfile(filename, backup_name) # Try to apply the fix. If it fails, then we revert the file @@ -86,6 +88,7 @@ def safefix(fix_func): return safefix_wrapper + def _modified_regions(old, new): try: m = SequenceMatcher(a=old, b=new, autojunk=False) @@ -117,21 +120,20 @@ class Verifier(object, metaclass=ABCMeta): """ - def __init__(self, ui, opts, base=None): self.ui = ui self.base = base # opt_name must be defined as a class attribute of derived classes. # Check test-specific opts first as these have precedence. - self.opt_fix = opts.get('fix_' + self.opt_name, False) - self.opt_ignore = opts.get('ignore_' + self.opt_name, False) - self.opt_skip = opts.get('skip_' + self.opt_name, False) + self.opt_fix = opts.get("fix_" + self.opt_name, False) + self.opt_ignore = opts.get("ignore_" + self.opt_name, False) + self.opt_skip = opts.get("skip_" + self.opt_name, False) # If no test-specific opts were set, then set based on "-all" opts. 
if not (self.opt_fix or self.opt_ignore or self.opt_skip): - self.opt_fix = opts.get('fix_all', False) - self.opt_ignore = opts.get('ignore_all', False) - self.opt_skip = opts.get('skip_all', False) + self.opt_fix = opts.get("fix_all", False) + self.opt_ignore = opts.get("ignore_all", False) + self.opt_skip = opts.get("skip_all", False) def normalize_filename(self, name): abs_name = os.path.abspath(name) @@ -145,7 +147,7 @@ class Verifier(object, metaclass=ABCMeta): try: f = open(filename, mode) except OSError as msg: - print('could not open file {}: {}'.format(filename, msg)) + print("could not open file {}: {}".format(filename, msg)) return None return f @@ -177,12 +179,13 @@ class Verifier(object, metaclass=ABCMeta): if self.opt_fix: self.fix(filename, regions) else: - result = self.ui.prompt("(a)bort, (i)gnore, or (f)ix?", - 'aif', 'a') - if result == 'f': + result = self.ui.prompt( + "(a)bort, (i)gnore, or (f)ix?", "aif", "a" + ) + if result == "f": self.fix(filename, regions) - elif result == 'a': - return True # abort + elif result == "a": + return True # abort return False @@ -221,25 +224,28 @@ class Verifier(object, metaclass=ABCMeta): """ pass + class LineVerifier(Verifier): def check(self, filename, regions=all_regions, fobj=None, silent=False): close = False if fobj is None: - fobj = self.open(filename, 'rb') + fobj = self.open(filename, "rb") close = True lang = lang_type(filename) assert lang in self.languages errors = 0 - for num,line in enumerate(fobj): + for num, line in enumerate(fobj): if num not in regions: continue - s_line = line.decode('utf-8').rstrip('\n') + s_line = line.decode("utf-8").rstrip("\n") if not self.check_line(s_line, language=lang): if not silent: - self.ui.write("invalid %s in %s:%d\n" % \ - (self.test_name, filename, num + 1)) + self.ui.write( + "invalid %s in %s:%d\n" + % (self.test_name, filename, num + 1) + ) if self.ui.verbose: self.ui.write(">>%s<<\n" % s_line[:-1]) errors += 1 @@ -249,7 +255,7 @@ class 
LineVerifier(Verifier): @safefix def fix(self, filename, regions=all_regions): - f = self.open(filename, 'r+') + f = self.open(filename, "r+") lang = lang_type(filename) assert lang in self.languages @@ -259,8 +265,8 @@ class LineVerifier(Verifier): f.seek(0) f.truncate() - for i,line in enumerate(lines): - line = line.rstrip('\n') + for i, line in enumerate(lines): + line = line.rstrip("\n") if i in regions: line = self.fix_line(line, language=lang) @@ -277,6 +283,7 @@ class LineVerifier(Verifier): def fix_line(self, line, **kwargs): pass + class Whitespace(LineVerifier): """Check whitespace. @@ -285,16 +292,16 @@ class Whitespace(LineVerifier): - No trailing whitespace """ - languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons', - 'make', 'dts')) - trail_only = set(('make', 'dts')) + languages = set( + ("C", "C++", "swig", "python", "asm", "isa", "scons", "make", "dts") + ) + trail_only = set(("make", "dts")) - test_name = 'whitespace' - opt_name = 'white' - - _lead = re.compile(r'^([ \t]+)') - _trail = re.compile(r'([ \t]+)$') + test_name = "whitespace" + opt_name = "white" + _lead = re.compile(r"^([ \t]+)") + _trail = re.compile(r"([ \t]+)$") def skip_lead(self, language): return language in Whitespace.trail_only @@ -302,7 +309,7 @@ class Whitespace(LineVerifier): def check_line(self, line, language): if not self.skip_lead(language): match = Whitespace._lead.search(line) - if match and match.group(1).find('\t') != -1: + if match and match.group(1).find("\t") != -1: return False match = Whitespace._trail.search(line) @@ -313,13 +320,14 @@ class Whitespace(LineVerifier): def fix_line(self, line, language): if not self.skip_lead(language) and Whitespace._lead.search(line): - newline = '' - for i,c in enumerate(line): - if c == ' ': - newline += ' ' - elif c == '\t': - newline += ' ' * (style.tabsize - \ - len(newline) % style.tabsize) + newline = "" + for i, c in enumerate(line): + if c == " ": + newline += " " + elif c == "\t": + newline += " " * ( 
+ style.tabsize - len(newline) % style.tabsize + ) else: newline += line[i:] break @@ -333,8 +341,8 @@ class SortedIncludes(Verifier): """Check for proper sorting of include statements""" languages = sort_includes.default_languages - test_name = 'include file order' - opt_name = 'include' + test_name = "include file order" + opt_name = "include" def __init__(self, *args, **kwargs): super(SortedIncludes, self).__init__(*args, **kwargs) @@ -343,11 +351,11 @@ class SortedIncludes(Verifier): def check(self, filename, regions=all_regions, fobj=None, silent=False): close = False if fobj is None: - fobj = self.open(filename, 'rb') + fobj = self.open(filename, "rb") close = True norm_fname = self.normalize_filename(filename) - old = [ l.decode('utf-8').rstrip('\n') for l in fobj ] + old = [l.decode("utf-8").rstrip("\n") for l in fobj] if close: fobj.close() @@ -361,10 +369,11 @@ class SortedIncludes(Verifier): if modified: if not silent: - self.ui.write("invalid sorting of includes in %s. Note: If " - "there is more than one empty line under the " - "#include region, please reduce it to one.\n" - % (filename)) + self.ui.write( + "invalid sorting of includes in %s. 
Note: If " + "there is more than one empty line under the " + "#include region, please reduce it to one.\n" % (filename) + ) if self.ui.verbose: for start, end in modified.regions: self.ui.write("bad region [%d, %d)\n" % (start, end)) @@ -374,46 +383,46 @@ class SortedIncludes(Verifier): @safefix def fix(self, filename, regions=all_regions): - f = self.open(filename, 'r+') + f = self.open(filename, "r+") norm_fname = self.normalize_filename(filename) old = f.readlines() - lines = [ l.rstrip('\n') for l in old ] + lines = [l.rstrip("\n") for l in old] language = lang_type(filename, lines[0]) sort_lines = list(self.sort_includes(lines, norm_fname, language)) - new = ''.join(line + '\n' for line in sort_lines) + new = "".join(line + "\n" for line in sort_lines) f.seek(0) f.truncate() - for i,line in enumerate(sort_lines): + for i, line in enumerate(sort_lines): f.write(line) - f.write('\n') + f.write("\n") f.close() class ControlSpace(LineVerifier): """Check for exactly one space after if/while/for""" - languages = set(('C', 'C++')) - test_name = 'spacing after if/while/for' - opt_name = 'control' + languages = set(("C", "C++")) + test_name = "spacing after if/while/for" + opt_name = "control" - _any_control = re.compile(r'\b(if|while|for)([ \t]*)\(') + _any_control = re.compile(r"\b(if|while|for)([ \t]*)\(") def check_line(self, line, **kwargs): match = ControlSpace._any_control.search(line) return not (match and match.group(2) != " ") def fix_line(self, line, **kwargs): - new_line = ControlSpace._any_control.sub(r'\1 (', line) + new_line = ControlSpace._any_control.sub(r"\1 (", line) return new_line class LineLength(LineVerifier): - languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons')) - test_name = 'line length' - opt_name = 'length' + languages = set(("C", "C++", "swig", "python", "asm", "isa", "scons")) + test_name = "line length" + opt_name = "length" def check_line(self, line, **kwargs): return style.normalized_len(line) <= 79 @@ -424,26 
+433,29 @@ class LineLength(LineVerifier): def fix_line(self, line): pass -class ControlCharacters(LineVerifier): - languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons')) - test_name = 'control character' - opt_name = 'ascii' - invalid = "".join([chr(i) for i in range(0, 0x20) \ - if chr(i) not in ('\n', '\t')]) +class ControlCharacters(LineVerifier): + languages = set(("C", "C++", "swig", "python", "asm", "isa", "scons")) + test_name = "control character" + opt_name = "ascii" + + invalid = "".join( + [chr(i) for i in range(0, 0x20) if chr(i) not in ("\n", "\t")] + ) def check_line(self, line, **kwargs): return self.fix_line(line) == line def fix_line(self, line, **kwargs): - return ''.join(c for c in line if c not in ControlCharacters.invalid) + return "".join(c for c in line if c not in ControlCharacters.invalid) + class BoolCompare(LineVerifier): - languages = set(('C', 'C++', 'python')) - test_name = 'boolean comparison' - opt_name = 'boolcomp' + languages = set(("C", "C++", "python")) + test_name = "boolean comparison" + opt_name = "boolcomp" - regex = re.compile(r'\s*==\s*([Tt]rue|[Ff]alse)\b') + regex = re.compile(r"\s*==\s*([Tt]rue|[Ff]alse)\b") def check_line(self, line, **kwargs): return self.regex.search(line) == None @@ -451,81 +463,100 @@ class BoolCompare(LineVerifier): def fix_line(self, line, **kwargs): match = self.regex.search(line) if match: - if match.group(1) in ('true', 'True'): - line = self.regex.sub('', line) + if match.group(1) in ("true", "True"): + line = self.regex.sub("", line) else: - self.ui.write("Warning: cannot automatically fix " - "comparisons with false/False.\n") + self.ui.write( + "Warning: cannot automatically fix " + "comparisons with false/False.\n" + ) return line + class StructureBraces(LineVerifier): - """ Check if the opening braces of structures are not on the same line of - the structure name. This includes classes, structs, enums and unions. 
+ """Check if the opening braces of structures are not on the same line of + the structure name. This includes classes, structs, enums and unions. - This verifier matches lines starting in optional indent, followed by - an optional typedef and the structure's keyword, followed by any - character until the first opening brace is seen. Any extra characters - after the opening brace are saved for a recursive check, if needed. + This verifier matches lines starting in optional indent, followed by + an optional typedef and the structure's keyword, followed by any + character until the first opening brace is seen. Any extra characters + after the opening brace are saved for a recursive check, if needed. - This fixes, for example: - 1) "struct A {" - 2) "enum{" - 3) " class B { // This is a class" - 4) "union { struct C {" - to: - 1) "struct A\n{" - 2) "enum\n{" - 3) " class B\n {\n // This is a class" - 4) "union\n{\n struct C\n {" + This fixes, for example: + 1) "struct A {" + 2) "enum{" + 3) " class B { // This is a class" + 4) "union { struct C {" + to: + 1) "struct A\n{" + 2) "enum\n{" + 3) " class B\n {\n // This is a class" + 4) "union\n{\n struct C\n {" - @todo Make this work for multi-line structure declarations. e.g., + @todo Make this work for multi-line structure declarations. 
e.g., - class MultiLineClass - : public BaseClass { + class MultiLineClass + : public BaseClass { """ - languages = set(('C', 'C++')) - test_name = 'structure opening brace position' - opt_name = 'structurebrace' + languages = set(("C", "C++")) + test_name = "structure opening brace position" + opt_name = "structurebrace" # Matches the indentation of the line - regex_indentation = '(?P\s*)' + regex_indentation = "(?P\s*)" # Matches an optional "typedef" before the keyword - regex_typedef = '(?P(typedef\s+)?)' + regex_typedef = "(?P(typedef\s+)?)" # Matches the structure's keyword - regex_keyword = '(?Pclass|struct|enum|union)' + regex_keyword = "(?Pclass|struct|enum|union)" # A negative lookahead to avoid incorrect matches with variable's names # e.g., "classifications = {" should not be fixed here. - regex_avoid = '(?![^\{\s])' + regex_avoid = "(?![^\{\s])" # Matches anything after the keyword and before the opening brace. # e.g., structure name, base type, type of inheritance, etc - regex_name = '(?P[^\{]*)' + regex_name = "(?P[^\{]*)" # Matches anything after the opening brace, which should be # parsed recursively - regex_extra = '(?P.*)$' - regex = re.compile(r'^' + regex_indentation + regex_typedef + - regex_keyword + regex_avoid + regex_name + '\{' + regex_extra) + regex_extra = "(?P.*)$" + regex = re.compile( + r"^" + + regex_indentation + + regex_typedef + + regex_keyword + + regex_avoid + + regex_name + + "\{" + + regex_extra + ) def check_line(self, line, **kwargs): - return (self.regex.search(line) == None) or \ - (line.count('{') == line.count('};')) + return (self.regex.search(line) == None) or ( + line.count("{") == line.count("};") + ) def fix_line(self, line, **kwargs): match = self.regex.search(line) if match: # Move the opening brace to the next line - match_indentation = match.group('indentation') - match_typedef = match.group('typedef') - match_keyword = match.group('keyword') - match_name = match.group('name').rstrip() - match_extra = 
match.group('extra').lstrip() - line = match_indentation + match_typedef + match_keyword + \ - match_name + "\n" + match_indentation + "{" + match_indentation = match.group("indentation") + match_typedef = match.group("typedef") + match_keyword = match.group("keyword") + match_name = match.group("name").rstrip() + match_extra = match.group("extra").lstrip() + line = ( + match_indentation + + match_typedef + + match_keyword + + match_name + + "\n" + + match_indentation + + "{" + ) # The opening brace should be alone in its own line, so move any # extra contents to the next line - if match_extra != '': + if match_extra != "": # Check if the extra line obeys the opening brace rule # (in case there are nested declarations) line_extra = match_indentation + " " + match_extra @@ -535,12 +566,18 @@ class StructureBraces(LineVerifier): return line + def is_verifier(cls): """Determine if a class is a Verifier that can be instantiated""" - return inspect.isclass(cls) and issubclass(cls, Verifier) and \ - not inspect.isabstract(cls) + return ( + inspect.isclass(cls) + and issubclass(cls, Verifier) + and not inspect.isabstract(cls) + ) + # list of all verifier classes -all_verifiers = [ v for n, v in \ - inspect.getmembers(sys.modules[__name__], is_verifier) ] +all_verifiers = [ + v for n, v in inspect.getmembers(sys.modules[__name__], is_verifier) +] diff --git a/util/systemc/gem5_within_systemc/main.cc b/util/systemc/gem5_within_systemc/main.cc index c7f9dd65b7..2d453507e6 100644 --- a/util/systemc/gem5_within_systemc/main.cc +++ b/util/systemc/gem5_within_systemc/main.cc @@ -157,7 +157,7 @@ SimControl::SimControl(sc_core::sc_module_name name, usage(prog_name); /* Pass DPRINTF messages to SystemC */ - Trace::setDebugLogger(&logger); + trace::setDebugLogger(&logger); /* @todo need this as an option */ Gem5SystemC::setTickFrequency(); @@ -179,7 +179,7 @@ SimControl::SimControl(sc_core::sc_module_name name, statistics::initSimStats(); 
statistics::registerHandlers(CxxConfig::statsReset, CxxConfig::statsDump); - Trace::enable(); + trace::enable(); setDebugFlag("Terminal"); checkpoint_restore = false; diff --git a/util/systemc/gem5_within_systemc/sc_gem5_control.cc b/util/systemc/gem5_within_systemc/sc_gem5_control.cc index 568c0eb4de..a9b9fd5f83 100644 --- a/util/systemc/gem5_within_systemc/sc_gem5_control.cc +++ b/util/systemc/gem5_within_systemc/sc_gem5_control.cc @@ -174,12 +174,6 @@ Gem5Control::clearDebugFlag(const char *flag) ::gem5::clearDebugFlag(flag); } -void -Gem5Control::setRemoteGDBPort(unsigned int port) -{ - ::gem5::setRemoteGDBPort(port); -} - Gem5System * Gem5Control::makeSystem(const std::string &system_name, const std::string &instance_name) @@ -215,7 +209,7 @@ Gem5TopLevelModule::Gem5TopLevelModule(sc_core::sc_module_name name, SC_THREAD(run); /* Pass DPRINTF messages to SystemC */ - gem5::Trace::setDebugLogger(&logger); + gem5::trace::setDebugLogger(&logger); /* @todo need this as an option */ Gem5SystemC::setTickFrequency(); @@ -238,7 +232,7 @@ Gem5TopLevelModule::Gem5TopLevelModule(sc_core::sc_module_name name, gem5::statistics::registerHandlers(CxxConfig::statsReset, CxxConfig::statsDump); - gem5::Trace::enable(); + gem5::trace::enable(); config_file = new gem5::CxxIniFile(); diff --git a/util/systemc/gem5_within_systemc/sc_gem5_control.hh b/util/systemc/gem5_within_systemc/sc_gem5_control.hh index f74e001fc8..2959ba2e04 100644 --- a/util/systemc/gem5_within_systemc/sc_gem5_control.hh +++ b/util/systemc/gem5_within_systemc/sc_gem5_control.hh @@ -142,10 +142,6 @@ class Gem5Control virtual void setDebugFlag(const char *flag); virtual void clearDebugFlag(const char *flag); - /** Choose a base port number for GDB to connect to the model - * (0 disables connections) */ - virtual void setRemoteGDBPort(unsigned int port); - /* Register an action to happen at the end of elaboration */ virtual void registerEndOfElaboration(void (*func)()); diff --git 
a/util/systemc/gem5_within_systemc/sc_logger.cc b/util/systemc/gem5_within_systemc/sc_logger.cc index c833cc50f7..1b7553a4c6 100644 --- a/util/systemc/gem5_within_systemc/sc_logger.cc +++ b/util/systemc/gem5_within_systemc/sc_logger.cc @@ -60,9 +60,9 @@ class CuttingStreambuf : public std::streambuf std::ostringstream line; /** Logger to send complete lines to */ - gem5::Trace::Logger *logger; + gem5::trace::Logger *logger; - CuttingStreambuf(gem5::Trace::Logger *logger_) : logger(logger_) + CuttingStreambuf(gem5::trace::Logger *logger_) : logger(logger_) { } /** Accumulate to line up to \n and then emit */ diff --git a/util/systemc/gem5_within_systemc/sc_logger.hh b/util/systemc/gem5_within_systemc/sc_logger.hh index cbcea31ebd..01e700b427 100644 --- a/util/systemc/gem5_within_systemc/sc_logger.hh +++ b/util/systemc/gem5_within_systemc/sc_logger.hh @@ -53,7 +53,7 @@ namespace Gem5SystemC { /** sc_report logging class */ -class Logger : public gem5::Trace::Logger +class Logger : public gem5::trace::Logger { protected: /** Stream to offer getOstream. This will cut messages up newlines and diff --git a/util/systemc/systemc_within_gem5/systemc_gem5_tlm/SystemC_Example.py b/util/systemc/systemc_within_gem5/systemc_gem5_tlm/SystemC_Example.py index 0f1c9eda7f..f2bee1653a 100644 --- a/util/systemc/systemc_within_gem5/systemc_gem5_tlm/SystemC_Example.py +++ b/util/systemc/systemc_within_gem5/systemc_gem5_tlm/SystemC_Example.py @@ -34,8 +34,8 @@ from m5.objects.Tlm import TlmTargetSocket # This class is a subclass of sc_module, and all the special magic which makes # that work is handled in the base classes. 
class TLM_Target(SystemC_ScModule): - type = 'TLM_Target' - cxx_class = 'Target' - cxx_header = 'systemc_gem5_tlm/sc_tlm_target.hh' - tlm = TlmTargetSocket(32, 'TLM target socket') + type = "TLM_Target" + cxx_class = "Target" + cxx_header = "systemc_gem5_tlm/sc_tlm_target.hh" + tlm = TlmTargetSocket(32, "TLM target socket") system = Param.System(Parent.any, "system") diff --git a/util/systemc/systemc_within_gem5/systemc_gem5_tlm/config.py b/util/systemc/systemc_within_gem5/systemc_gem5_tlm/config.py index 4d6e260763..71529ba879 100755 --- a/util/systemc/systemc_within_gem5/systemc_gem5_tlm/config.py +++ b/util/systemc/systemc_within_gem5/systemc_gem5_tlm/config.py @@ -34,8 +34,8 @@ from m5.objects import * # Create a config to be used by the traffic generator cfg_file_name = "memcheck.cfg" -cfg_file_path = os.path.dirname(__file__) + "/" +cfg_file_name -cfg_file = open(cfg_file_path, 'w') +cfg_file_path = os.path.dirname(__file__) + "/" + cfg_file_name +cfg_file = open(cfg_file_path, "w") # Three states, with random, linear and idle behaviours. 
The random # and linear states access memory in the range [0 : 16 Mbyte] with 8 @@ -53,16 +53,18 @@ cfg_file.write("TRANSITION 2 1 0.5\n") cfg_file.close() system = System() -vd = VoltageDomain(voltage = '1V') +vd = VoltageDomain(voltage="1V") -system.mem_mode = 'timing' +system.mem_mode = "timing" -system.cpu = TrafficGen(config_file = cfg_file_path) +system.cpu = TrafficGen(config_file=cfg_file_path) system.target = TLM_Target() -system.physmem = SimpleMemory() # This must be instanciated, even if not needed -#system.mem.addr_ranges = [AddrRange('512MB')] +system.physmem = ( + SimpleMemory() +) # This must be instanciated, even if not needed +# system.mem.addr_ranges = [AddrRange('512MB')] system.transactor = Gem5ToTlmBridge32() -system.clk_domain = SrcClockDomain(clock = '1.5GHz', voltage_domain = vd) +system.clk_domain = SrcClockDomain(clock="1.5GHz", voltage_domain=vd) # Connect everything: system.transactor.gem5 = system.cpu.port diff --git a/util/systemc/systemc_within_gem5/systemc_sc_main/config.py b/util/systemc/systemc_within_gem5/systemc_sc_main/config.py index 0d20e9e0ff..454c3b5813 100755 --- a/util/systemc/systemc_within_gem5/systemc_sc_main/config.py +++ b/util/systemc/systemc_within_gem5/systemc_sc_main/config.py @@ -38,8 +38,12 @@ kernel = SystemC_Kernel() root = Root(full_system=True, systemc_kernel=kernel) parser = argparse.ArgumentParser() -parser.add_argument('--word', action="append", default=[], - help='Add a word to the list of words to print. Can be repeated.') +parser.add_argument( + "--word", + action="append", + default=[], + help="Add a word to the list of words to print. Can be repeated.", +) args = parser.parse_args() @@ -50,7 +54,7 @@ args = parser.parse_args() # # The arguements passed to this function will be treated as the argv values # passed to the c++ sc_main, with the argc value set appropriately. -m5.systemc.sc_main(*args.word); +m5.systemc.sc_main(*args.word) # Construct the SimObject hierarchy. 
Anything sc_main built has already been # constructed. diff --git a/util/systemc/systemc_within_gem5/systemc_simple_object/SystemC_Example.py b/util/systemc/systemc_within_gem5/systemc_simple_object/SystemC_Example.py index 9d3faf8f99..66b05bf79f 100644 --- a/util/systemc/systemc_within_gem5/systemc_simple_object/SystemC_Example.py +++ b/util/systemc/systemc_within_gem5/systemc_simple_object/SystemC_Example.py @@ -31,21 +31,22 @@ from m5.objects.SystemC import SystemC_ScModule # This class is a subclass of sc_module, and all the special magic which makes # that work is handled in the base classes. class SystemC_Printer(SystemC_ScModule): - type = 'SystemC_Printer' - cxx_class = 'Printer' - cxx_header = 'systemc_simple_object/printer.hh' + type = "SystemC_Printer" + cxx_class = "Printer" + cxx_header = "systemc_simple_object/printer.hh" # This parameter will be available in the SystemC_PrinterParams::create # function and can be passed to the c++ object's constructor, used to set # one of its member variables, as a parameter to one of its methods, etc. - prefix = Param.String('', 'Prefix for each word') + prefix = Param.String("", "Prefix for each word") + # This is a standard gem5 SimObject class with no special accomodation for the # fact that one of its parameters is a systemc object. class Gem5_Feeder(SimObject): - type = 'Gem5_Feeder' - cxx_class = 'Feeder' - cxx_header = 'systemc_simple_object/feeder.hh' + type = "Gem5_Feeder" + cxx_class = "Feeder" + cxx_header = "systemc_simple_object/feeder.hh" # This parameter will be a pointer to an instance of the class above. 
- printer = Param.SystemC_Printer('Printer for our words.') - delay = Param.Latency('1ns', 'Time to wait between each word.') - strings = VectorParam.String([], 'Words to print.') + printer = Param.SystemC_Printer("Printer for our words.") + delay = Param.Latency("1ns", "Time to wait between each word.") + strings = VectorParam.String([], "Words to print.") diff --git a/util/systemc/systemc_within_gem5/systemc_simple_object/config.py b/util/systemc/systemc_within_gem5/systemc_simple_object/config.py index a5d54b070b..8a86e1fb01 100755 --- a/util/systemc/systemc_within_gem5/systemc_simple_object/config.py +++ b/util/systemc/systemc_within_gem5/systemc_simple_object/config.py @@ -24,7 +24,6 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - import argparse import m5 import sys @@ -34,9 +33,9 @@ from m5.objects import SystemC_Kernel, Root, SystemC_Printer, Gem5_Feeder # pylint:disable=unused-variable parser = argparse.ArgumentParser() -parser.add_argument('--word', action="append", default=[]) -parser.add_argument('--delay', default='1ns') -parser.add_argument('--prefix', default='') +parser.add_argument("--word", action="append", default=[]) +parser.add_argument("--delay", default="1ns") +parser.add_argument("--prefix", default="") args = parser.parse_args() diff --git a/util/systemc/systemc_within_gem5/systemc_tlm/config.py b/util/systemc/systemc_within_gem5/systemc_tlm/config.py index 0d20e9e0ff..454c3b5813 100755 --- a/util/systemc/systemc_within_gem5/systemc_tlm/config.py +++ b/util/systemc/systemc_within_gem5/systemc_tlm/config.py @@ -38,8 +38,12 @@ kernel = SystemC_Kernel() root = Root(full_system=True, systemc_kernel=kernel) parser = argparse.ArgumentParser() -parser.add_argument('--word', action="append", default=[], - help='Add a word to the list of words to print. Can be repeated.') +parser.add_argument( + "--word", + action="append", + default=[], + help="Add a word to the list of words to print. 
Can be repeated.", +) args = parser.parse_args() @@ -50,7 +54,7 @@ args = parser.parse_args() # # The arguements passed to this function will be treated as the argv values # passed to the c++ sc_main, with the argc value set appropriately. -m5.systemc.sc_main(*args.word); +m5.systemc.sc_main(*args.word) # Construct the SimObject hierarchy. Anything sc_main built has already been # constructed. diff --git a/util/term/term.c b/util/term/term.c index ca88ad47e4..529712c870 100644 --- a/util/term/term.c +++ b/util/term/term.c @@ -302,8 +302,8 @@ raw_term() memcpy(&saved_ios, &ios, sizeof(struct termios)); ios.c_iflag &= ~(ISTRIP|ICRNL|IGNCR|ICRNL|IXOFF|IXON); - ios.c_oflag &= ~(OPOST); - ios.c_oflag &= (ONLCR); + ios.c_oflag |= OPOST; + ios.c_oflag |= ONLCR; ios.c_lflag &= ~(ISIG|ICANON|ECHO); ios.c_cc[VMIN] = 1; ios.c_cc[VTIME] = 0; diff --git a/util/tlm/conf/tgen.cfg b/util/tlm/conf/tgen.cfg index 8204959083..67830a8b28 100644 --- a/util/tlm/conf/tgen.cfg +++ b/util/tlm/conf/tgen.cfg @@ -1,21 +1,21 @@ # Copyright (c) 2015, University of Kaiserslautern # All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. -# +# # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. -# +# # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. 
-# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR diff --git a/util/tlm/conf/tlm_elastic_slave.py b/util/tlm/conf/tlm_elastic_slave.py index d9bbceb1d5..30a412b2f8 100644 --- a/util/tlm/conf/tlm_elastic_slave.py +++ b/util/tlm/conf/tlm_elastic_slave.py @@ -33,7 +33,7 @@ import m5 from m5.objects import * from m5.util import addToPath, fatal -addToPath('../../../configs/common/') +addToPath("../../../configs/common/") from Caches import * @@ -64,26 +64,30 @@ from Caches import * # Create a system with a Crossbar and an Elastic Trace Player as CPU: # Setup System: -system = System(cpu=TraceCPU(cpu_id=0), - mem_mode='timing', - mem_ranges = [AddrRange('512MB')], - cache_line_size = 64) +system = System( + cpu=TraceCPU(cpu_id=0), + mem_mode="timing", + mem_ranges=[AddrRange("512MB")], + cache_line_size=64, +) # Create a top-level voltage domain: system.voltage_domain = VoltageDomain() # Create a source clock for the system. This is used as the clock period for # xbar and memory: -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a CPU voltage domain: system.cpu_voltage_domain = VoltageDomain() # Create a separate clock domain for the CPUs. 
In case of Trace CPUs this clock # is actually used only by the caches connected to the CPU: -system.cpu_clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.cpu_voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.cpu_voltage_domain +) # Setup CPU and its L1 caches: system.cpu.createInterruptController() @@ -93,16 +97,18 @@ system.cpu.icache.cpu_side = system.cpu.icache_port system.cpu.dcache.cpu_side = system.cpu.dcache_port # Assign input trace files to the eTraceCPU: -system.cpu.instTraceFile="system.cpu.traceListener.inst.gz" -system.cpu.dataTraceFile="system.cpu.traceListener.data.gz" +system.cpu.instTraceFile = "system.cpu.traceListener.inst.gz" +system.cpu.dataTraceFile = "system.cpu.traceListener.data.gz" # Setting up L1 BUS: -system.membus = IOXBar(width = 16) -system.physmem = SimpleMemory() # This must be instantiated, even if not needed +system.membus = IOXBar(width=16) +system.physmem = ( + SimpleMemory() +) # This must be instantiated, even if not needed # Create a external TLM port: system.tlm = ExternalSlave() -system.tlm.addr_ranges = [AddrRange('512MB')] +system.tlm.addr_ranges = [AddrRange("512MB")] system.tlm.port_type = "tlm_slave" system.tlm.port_data = "transactor" @@ -114,7 +120,7 @@ system.cpu.dcache.mem_side = system.membus.slave system.membus.master = system.tlm.port # Start the simulation: -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() -m5.simulate() #Simulation time specified later on commandline +m5.simulate() # Simulation time specified later on commandline diff --git a/util/tlm/conf/tlm_master.py b/util/tlm/conf/tlm_master.py index fb570d337f..a3782a72cc 100644 --- a/util/tlm/conf/tlm_master.py +++ b/util/tlm/conf/tlm_master.py @@ -50,10 +50,11 @@ import os # Create a system with a Crossbar and a simple Memory: system = System() -system.membus 
= IOXBar(width = 16) -system.physmem = SimpleMemory(range = AddrRange('512MB')) -system.clk_domain = SrcClockDomain(clock = '1.5GHz', - voltage_domain = VoltageDomain(voltage = '1V')) +system.membus = IOXBar(width=16) +system.physmem = SimpleMemory(range=AddrRange("512MB")) +system.clk_domain = SrcClockDomain( + clock="1.5GHz", voltage_domain=VoltageDomain(voltage="1V") +) # Create a external TLM port: system.tlm = ExternalMaster() @@ -64,9 +65,9 @@ system.tlm.port_data = "transactor" system.system_port = system.membus.slave system.physmem.port = system.membus.master system.tlm.port = system.membus.slave -system.mem_mode = 'timing' +system.mem_mode = "timing" # Start the simulation: -root = Root(full_system = False, system = system) +root = Root(full_system=False, system=system) m5.instantiate() m5.simulate() diff --git a/util/tlm/conf/tlm_slave.py b/util/tlm/conf/tlm_slave.py index d5fd505941..1b2b679df1 100644 --- a/util/tlm/conf/tlm_slave.py +++ b/util/tlm/conf/tlm_slave.py @@ -52,15 +52,18 @@ from m5.objects import * # Create a system with a Crossbar and a TrafficGenerator as CPU: system = System() -system.membus = IOXBar(width = 16) -system.physmem = SimpleMemory() # This must be instanciated, even if not needed -system.cpu = TrafficGen(config_file = "conf/tgen.cfg") -system.clk_domain = SrcClockDomain(clock = '1.5GHz', - voltage_domain = VoltageDomain(voltage = '1V')) +system.membus = IOXBar(width=16) +system.physmem = ( + SimpleMemory() +) # This must be instanciated, even if not needed +system.cpu = TrafficGen(config_file="conf/tgen.cfg") +system.clk_domain = SrcClockDomain( + clock="1.5GHz", voltage_domain=VoltageDomain(voltage="1V") +) # Create a external TLM port: system.tlm = ExternalSlave() -system.tlm.addr_ranges = [AddrRange('512MB')] +system.tlm.addr_ranges = [AddrRange("512MB")] system.tlm.port_type = "tlm_slave" system.tlm.port_data = "transactor" @@ -70,7 +73,7 @@ system.system_port = system.membus.slave system.membus.master = system.tlm.port # 
Start the simulation: -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() -m5.simulate() #Simulation time specified later on commandline +m5.simulate() # Simulation time specified later on commandline diff --git a/util/tlm/examples/slave_port/sc_target.hh b/util/tlm/examples/slave_port/sc_target.hh index 5d93cfd184..e3624e1555 100644 --- a/util/tlm/examples/slave_port/sc_target.hh +++ b/util/tlm/examples/slave_port/sc_target.hh @@ -98,4 +98,3 @@ struct Target: sc_module }; #endif //__SIM_SC_TARGET_HH__ - diff --git a/util/tlm/examples/tlm_elastic_slave_with_l2.py b/util/tlm/examples/tlm_elastic_slave_with_l2.py index ff2bbde38f..c72bc8976c 100644 --- a/util/tlm/examples/tlm_elastic_slave_with_l2.py +++ b/util/tlm/examples/tlm_elastic_slave_with_l2.py @@ -33,7 +33,7 @@ import m5 from m5.objects import * from m5.util import addToPath, fatal -addToPath('../../../configs/common/') +addToPath("../../../configs/common/") from Caches import * @@ -71,26 +71,30 @@ from Caches import * # Create a system with a Crossbar and an Elastic Trace Player as CPU: # Setup System: -system = System(cpu=TraceCPU(cpu_id=0), - mem_mode='timing', - mem_ranges = [AddrRange('1024MB')], - cache_line_size = 64) +system = System( + cpu=TraceCPU(cpu_id=0), + mem_mode="timing", + mem_ranges=[AddrRange("1024MB")], + cache_line_size=64, +) # Create a top-level voltage domain: system.voltage_domain = VoltageDomain() # Create a source clock for the system. This is used as the clock period for # xbar and memory: -system.clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.voltage_domain) +system.clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.voltage_domain +) # Create a CPU voltage domain: system.cpu_voltage_domain = VoltageDomain() # Create a separate clock domain for the CPUs. 
In case of Trace CPUs this clock # is actually used only by the caches connected to the CPU: -system.cpu_clk_domain = SrcClockDomain(clock = '1GHz', - voltage_domain = system.cpu_voltage_domain) +system.cpu_clk_domain = SrcClockDomain( + clock="1GHz", voltage_domain=system.cpu_voltage_domain +) # Setup CPU and its L1 caches: system.cpu.createInterruptController() @@ -100,17 +104,19 @@ system.cpu.icache.cpu_side = system.cpu.icache_port system.cpu.dcache.cpu_side = system.cpu.dcache_port # Assign input trace files to the eTraceCPU: -system.cpu.instTraceFile="system.cpu.traceListener.inst.gz" -system.cpu.dataTraceFile="system.cpu.traceListener.data.gz" +system.cpu.instTraceFile = "system.cpu.traceListener.inst.gz" +system.cpu.dataTraceFile = "system.cpu.traceListener.data.gz" # Setting up L1 BUS: system.tol2bus = L2XBar() system.l2cache = L2Cache(size="1MB") -system.physmem = SimpleMemory() # This must be instantiated, even if not needed +system.physmem = ( + SimpleMemory() +) # This must be instantiated, even if not needed # Create a external TLM port: system.tlm = ExternalSlave() -system.tlm.addr_ranges = [AddrRange('4096MB')] +system.tlm.addr_ranges = [AddrRange("4096MB")] system.tlm.port_type = "tlm_slave" system.tlm.port_data = "transactor1" @@ -124,7 +130,7 @@ system.l2cache.mem_side = system.membus.slave system.membus.master = system.tlm.port # Start the simulation: -root = Root(full_system = False, system = system) -root.system.mem_mode = 'timing' +root = Root(full_system=False, system=system) +root.system.mem_mode = "timing" m5.instantiate() -m5.simulate() # Simulation time specified later on commandline +m5.simulate() # Simulation time specified later on commandline diff --git a/util/tlm/run_gem5_fs.sh b/util/tlm/run_gem5_fs.sh index 8f81be0f88..9065cbf9f5 100755 --- a/util/tlm/run_gem5_fs.sh +++ b/util/tlm/run_gem5_fs.sh @@ -1,22 +1,22 @@ #!/bin/bash # Copyright (c) 2015, University of Kaiserslautern # All rights reserved. 
-# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. -# +# # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. -# +# # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR diff --git a/util/tlm/src/sim_control.cc b/util/tlm/src/sim_control.cc index c706fd9b60..a8a3da4a3f 100644 --- a/util/tlm/src/sim_control.cc +++ b/util/tlm/src/sim_control.cc @@ -80,7 +80,7 @@ Gem5SimControl::Gem5SimControl(sc_core::sc_module_name name, gem5::ExternalMaster::registerHandler("tlm_master", new SCMasterPortHandler(*this)); - gem5::Trace::setDebugLogger(&logger); + gem5::trace::setDebugLogger(&logger); Gem5SystemC::setTickFrequency(); assert(sc_core::sc_get_time_resolution() @@ -93,7 +93,7 @@ Gem5SimControl::Gem5SimControl(sc_core::sc_module_name name, gem5::statistics::registerHandlers(CxxConfig::statsReset, CxxConfig::statsDump); - gem5::Trace::enable(); + gem5::trace::enable(); gem5::CxxConfigFileBase* conf = new gem5::CxxIniFile(); diff --git a/util/tracediff b/util/tracediff index ebe34d452b..89cef9ab98 100755 --- a/util/tracediff +++ b/util/tracediff @@ -146,6 +146,3 @@ $fullcmd = "$FindBin::Bin/rundiff '$cmd1' '$cmd2' 2>&1 > tracediff-$$.out"; print "Executing $fullcmd\n"; system($fullcmd); - - - diff --git 
a/util/update-copyright.py b/util/update-copyright.py index 7cb0a759f8..c22638c2eb 100755 --- a/util/update-copyright.py +++ b/util/update-copyright.py @@ -44,8 +44,8 @@ import git_filter_repo import update_copyright -parser = argparse.ArgumentParser(description= -"""Update copyright headers on files of a range of commits. +parser = argparse.ArgumentParser( + description="""Update copyright headers on files of a range of commits. This can be used to easily update copyright headers at once on an entire patchset before submitting. @@ -87,40 +87,41 @@ which is equivalent to the previous invocation. """, formatter_class=argparse.RawTextHelpFormatter, ) -parser.add_argument('start', - nargs='?', - help="The commit before the last commit to be modified") -parser.add_argument('org-string', - nargs='?', - help="Copyright holder name") -parser.add_argument('-o', '--org', choices=('arm',), - help="Alias for known organizations") +parser.add_argument( + "start", nargs="?", help="The commit before the last commit to be modified" +) +parser.add_argument("org-string", nargs="?", help="Copyright holder name") +parser.add_argument( + "-o", "--org", choices=("arm",), help="Alias for known organizations" +) args = parser.parse_args() + def error(msg): - print('error: ' + msg, file=sys.stderr) + print("error: " + msg, file=sys.stderr) sys.exit(1) + # The existing safety checks are too strict, so we just disable them # with force, and do our own checks to not overwrite uncommited changes # checks. # https://github.com/newren/git-filter-repo/issues/159 -if subprocess.call(['git', 'diff', '--staged', '--quiet']): +if subprocess.call(["git", "diff", "--staged", "--quiet"]): error("uncommitted changes") -if subprocess.call(['git', 'diff', '--quiet']): +if subprocess.call(["git", "diff", "--quiet"]): error("unstaged changes") # Handle CLI arguments. 
if args.start is None: error("the start argument must be given") -if args.org is None and getattr(args, 'org-string') is None: +if args.org is None and getattr(args, "org-string") is None: error("either --org or org-string must be given") -if args.org is not None and getattr(args, 'org-string') is not None: +if args.org is not None and getattr(args, "org-string") is not None: error("both --org and org-string given") if args.org is not None: org_bytes = update_copyright.org_alias_map[args.org] else: - org_bytes = getattr(args, 'org-string').encode() + org_bytes = getattr(args, "org-string").encode() # Call git_filter_repo. # Args deduced from: @@ -129,13 +130,17 @@ else: filter_repo_args = git_filter_repo.FilteringOptions.default_options() filter_repo_args.force = True filter_repo_args.partial = True -filter_repo_args.refs = ['{}..HEAD'.format(args.start)] -filter_repo_args.repack=False -filter_repo_args.replace_refs='update-no-add' +filter_repo_args.refs = ["{}..HEAD".format(args.start)] +filter_repo_args.repack = False +filter_repo_args.replace_refs = "update-no-add" + + def blob_callback(blob, callback_metadata, org_bytes): - blob.data = update_copyright.update_copyright(blob.data, - datetime.datetime.now().year, org_bytes) + blob.data = update_copyright.update_copyright( + blob.data, datetime.datetime.now().year, org_bytes + ) + + git_filter_repo.RepoFilter( - filter_repo_args, - blob_callback=lambda x, y: blob_callback( x, y, org_bytes) + filter_repo_args, blob_callback=lambda x, y: blob_callback(x, y, org_bytes) ).run() diff --git a/util/update_copyright/__init__.py b/util/update_copyright/__init__.py index 8046b58c3d..3b5a534696 100644 --- a/util/update_copyright/__init__.py +++ b/util/update_copyright/__init__.py @@ -40,14 +40,15 @@ Utilities to parse and modify copyright headers in gem5 source. 
import re org_alias_map = { - 'arm': b'ARM Limited', - 'uc': b'The Regents of the University of California', + "arm": b"ARM Limited", + "uc": b"The Regents of the University of California", } -_update_copyright_year_regexp = re.compile(b'(.*?)([0-9]+)$') +_update_copyright_year_regexp = re.compile(b"(.*?)([0-9]+)$") + def _update_copyright_years(m, cur_year, org_bytes): - ''' + """ Does e.g.: b'2016, 2018-2019' -> b'2016, 2018-2020'. :param m: match containing only the years part of the string @@ -56,7 +57,7 @@ def _update_copyright_years(m, cur_year, org_bytes): :type cur_year: int :return: the new years part of the string :rtype: bytes - ''' + """ global _update_copyright_year_regexp cur_year_bytes = str(cur_year).encode() m = _update_copyright_year_regexp.match(m.group(1)) @@ -66,19 +67,19 @@ def _update_copyright_years(m, cur_year, org_bytes): if old_year == cur_year: new_years_string = old_year_bytes elif old_year == cur_year - 1: - if len(years_prefix) > 0 and years_prefix[-1:] == b'-': + if len(years_prefix) > 0 and years_prefix[-1:] == b"-": new_years_string = cur_year_bytes else: - new_years_string = old_year_bytes + b'-' + cur_year_bytes + new_years_string = old_year_bytes + b"-" + cur_year_bytes else: - new_years_string = old_year_bytes + b', ' + cur_year_bytes + new_years_string = old_year_bytes + b", " + cur_year_bytes new_years_string = years_prefix + new_years_string - return b' Copyright (c) %b %b\n' % (new_years_string, org_bytes) + return b" Copyright (c) %b %b\n" % (new_years_string, org_bytes) + def update_copyright(data, cur_year, org_bytes): update_copyright_regexp = re.compile( - b' Copyright \\(c\\) ([0-9,\- ]+) ' + org_bytes + b'\n', - re.IGNORECASE + b" Copyright \\(c\\) ([0-9,\- ]+) " + org_bytes + b"\n", re.IGNORECASE ) return update_copyright_regexp.sub( lambda m: _update_copyright_years(m, cur_year, org_bytes), diff --git a/util/update_copyright/test/test_copyright.py b/util/update_copyright/test/test_copyright.py index 
1813ed0733..a94d0efc5f 100644 --- a/util/update_copyright/test/test_copyright.py +++ b/util/update_copyright/test/test_copyright.py @@ -37,50 +37,84 @@ import unittest import update_copyright + class TestUpdateCopyright(unittest.TestCase): def update_arm_copyright(self, data, cur_year): return update_copyright.update_copyright( - data, cur_year, - update_copyright.org_alias_map['arm']) + data, cur_year, update_copyright.org_alias_map["arm"] + ) + def update_uc_copyright(self, data, cur_year): return update_copyright.update_copyright( - data, cur_year, - update_copyright.org_alias_map['uc']) + data, cur_year, update_copyright.org_alias_map["uc"] + ) + def test_cpp(self): - self.assertEqual(self.update_arm_copyright( - b' * Copyright (c) 2019 ARM Limited\n', 2020), - b' * Copyright (c) 2019-2020 ARM Limited\n') - self.assertEqual(self.update_uc_copyright( -b' * Copyright (c) 2019 The Regents of the University of California\n', 2020), -b' * Copyright (c) 2019-2020 The Regents of the University of California\n') + self.assertEqual( + self.update_arm_copyright( + b" * Copyright (c) 2019 ARM Limited\n", 2020 + ), + b" * Copyright (c) 2019-2020 ARM Limited\n", + ) + self.assertEqual( + self.update_uc_copyright( + b" * Copyright (c) 2019 The Regents of the University of California\n", + 2020, + ), + b" * Copyright (c) 2019-2020 The Regents of the University of California\n", + ) + def test_python(self): - self.assertEqual(self.update_arm_copyright( - b'# Copyright (c) 2019 ARM Limited\n', 2020), - b'# Copyright (c) 2019-2020 ARM Limited\n') + self.assertEqual( + self.update_arm_copyright( + b"# Copyright (c) 2019 ARM Limited\n", 2020 + ), + b"# Copyright (c) 2019-2020 ARM Limited\n", + ) + def test_multiline(self): - self.assertEqual(self.update_arm_copyright( - b'''/* + self.assertEqual( + self.update_arm_copyright( + b"""/* * Copyright (c) 2019 ARM Limited * All rights reserved. 
-''', 2020), - b'''/* +""", + 2020, + ), + b"""/* * Copyright (c) 2019-2020 ARM Limited * All rights reserved. -''', - ) +""", + ) + def test_comma(self): - self.assertEqual(self.update_arm_copyright( - b'# Copyright (c) 2018 ARM Limited\n', 2020), - b'# Copyright (c) 2018, 2020 ARM Limited\n') + self.assertEqual( + self.update_arm_copyright( + b"# Copyright (c) 2018 ARM Limited\n", 2020 + ), + b"# Copyright (c) 2018, 2020 ARM Limited\n", + ) + def test_extend_dash(self): - self.assertEqual(self.update_arm_copyright( - b'# Copyright (c) 2018-2019 ARM Limited\n', 2020), - b'# Copyright (c) 2018-2020 ARM Limited\n') + self.assertEqual( + self.update_arm_copyright( + b"# Copyright (c) 2018-2019 ARM Limited\n", 2020 + ), + b"# Copyright (c) 2018-2020 ARM Limited\n", + ) + def test_comma_and_dash_extend(self): - self.assertEqual(self.update_arm_copyright( - b'# Copyright (c) 2016, 2018-2019 ARM Limited\n', 2020), - b'# Copyright (c) 2016, 2018-2020 ARM Limited\n') + self.assertEqual( + self.update_arm_copyright( + b"# Copyright (c) 2016, 2018-2019 ARM Limited\n", 2020 + ), + b"# Copyright (c) 2016, 2018-2020 ARM Limited\n", + ) + def test_standardize_case(self): - self.assertEqual(self.update_arm_copyright( - b'# Copyright (c) 2020 Arm Limited\n', 2020), - b'# Copyright (c) 2020 ARM Limited\n') + self.assertEqual( + self.update_arm_copyright( + b"# Copyright (c) 2020 Arm Limited\n", 2020 + ), + b"# Copyright (c) 2020 ARM Limited\n", + )