@Book{xie2015,
title = {Dynamic Documents with {R} and knitr},
author = {Yihui Xie},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2015},
edition = {2nd},
note = {ISBN 978-1498716963},
url = {http://yihui.name/knitr/},
}
@book{Pokropivny2007,
author = {Pokropivny, V. and Lõhmus, Rünno and Hussainova, Irina and Pokropivny, Alex and Vlassov, Sergei},
year = {2007},
month = jan,
title = {Introduction in nanomaterials and nanotechnology}
}
@article{liu_imaging_2017,
title = {The {Current} {Role} of {Image} {Compression} {Standards} in {Medical} {Imaging}},
volume = {8},
copyright = {http://creativecommons.org/licenses/by/3.0/},
url = {https://www.mdpi.com/2078-2489/8/4/131},
doi = {10.3390/info8040131},
abstract = {With the increasing utilization of medical imaging in clinical practice and the growing dimensions of data volumes generated by various medical imaging modalities, the distribution, storage, and management of digital medical image data sets requires data compression. Over the past few decades, several image compression standards have been proposed by international standardization organizations. This paper discusses the current status of these image compression standards in medical imaging applications together with some of the legal and regulatory issues surrounding the use of compression in medical settings.},
language = {en},
number = {4},
urldate = {2022-01-10},
journal = {Information},
author = {Liu, Feng and Hernandez-Cabronero, Miguel and Sanchez, Victor and Marcellin, Michael W. and Bilgin, Ali},
month = dec,
year = {2017},
note = {Number: 4. Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {DICOM, HEVC, image compression, JPEG, JPEG-LS, JPEG-XR, JPEG2000, medical imaging, standards},
pages = {131},
}
@misc{computerhope,
title = {What is {ASCII} ({American} {Standard} {Code} for {Information} {Interchange})?},
url = {https://www.computerhope.com/jargon/a/ascii.htm},
abstract = {Computer dictionary definition for what ASCII (American Standard Code for Information Interchange) means including related links, information, and terms.},
language = {en},
urldate = {2021-10-12}
}
@misc{explainthatstuff,
title = {How do logic gates work?},
url = {http://www.explainthatstuff.com/logicgates.html},
abstract = {An easy explanation of electronic logic gates, including AND, OR, NOT, and NOR.},
urldate = {2021-10-12},
journal = {Explain that Stuff}
}
@misc{Wikipedia_Silicon,
title = {Silicon},
journal = {Wikipedia},
url = {https://en.wikipedia.org/wiki/Silicon},
language = {en},
urldate = {2021-10-13}
}
@misc{IBM_data_security,
title = {What is {Data} {Security}? {Data} {Security} {Definition} and {Overview}},
shorttitle = {What is {Data} {Security}?},
url = {https://www.ibm.com/topics/data-security},
abstract = {Find out how data security helps protect digital information from unauthorized access, corruption, or theft throughout its entire lifecycle.},
language = {en-us},
urldate = {2021-11-11},
}
@misc{Forcepoint_2018,
title = {What is {Data} {Encryption}?},
url = {https://www.forcepoint.com/cyber-edu/data-encryption},
abstract = {Data encryption is a security method where information is encoded and can only be accessed or decrypted by a user with the correct encryption key. Encrypted data, also known as ciphertext, appears scrambled or unreadable to a person or entity accessing without permission.},
language = {en},
urldate = {2021-11-11},
journal = {Forcepoint},
month = dec,
year = {2018},
}
@misc{IBM_encryption,
title = {What is encryption? {Data} encryption defined},
shorttitle = {What is encryption?},
url = {https://www.ibm.com/topics/encryption},
abstract = {Encryption is the security method of encoding data from plaintext to ciphertext, which can only be decrypted by the user with the encryption key.},
language = {en-us},
urldate = {2021-11-11},
}
@misc{cyware_social_encryption,
title = {Exploring the {Differences} {Between} {Symmetric} and {Asymmetric} {Encryption} {\textbar} {Cyware} {Hacker} {News}},
url = {https://cyware.com/news/exploring-the-differences-between-symmetric-and-asymmetric-encryption-8de86e8a},
abstract = {Encryption is the process of scrambling data to make sure unintended recipients are not able to make any sense of the data. Two common encryption types are the symmetric and asymmetric encryption.},
language = {en},
urldate = {2021-11-11},
journal = {Cyware Labs},
author = {{Cyware Labs}}
}
@misc{CISA_Least_Privilege,
title = {Least {Privilege} {\textbar} {CISA}},
url = {https://us-cert.cisa.gov/bsi/articles/knowledge/principles/least-privilege#footnoteref1_jh2zxwu},
urldate = {2021-11-16}
}
@misc{Wikipedia_principle,
title = {Principle of least privilege},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Principle_of_least_privilege&oldid=1053221709},
abstract = {In information security, computer science, and other fields, the principle of least privilege (PoLP), also known as the principle of minimal privilege or the principle of least authority, requires that in a particular abstraction layer of a computing environment, every module (such as a process, a user, or a program, depending on the subject) must be able to access only the information and resources that are necessary for its legitimate purpose.},
language = {en},
urldate = {2021-11-16},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1053221709}
}
@misc{file-system_2021,
title = {File-system permissions},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=File-system_permissions&oldid=1035316630},
abstract = {Most file systems include attributes of files and directories that control the ability of users to read, change, navigate, and execute the contents of the file system. In some cases, menu options or functions may be made visible or hidden depending on a user's permission level; this kind of user interface is referred to as permission-driven.
Two types of permissions are very widely available: traditional Unix permissions and Access Control Lists (ACLs) which are capable of more specific control.},
language = {en},
urldate = {2021-11-16},
journal = {Wikipedia},
month = jul,
year = {2021},
note = {Page Version ID: 1035316630}
}
@misc{authentication,
title = {Understanding {Authentication}, {Authorization}, and {Encryption}: {TechWeb}: {Boston} {University}},
url = {https://www.bu.edu/tech/about/security-resources/bestpractice/auth/},
urldate = {2021-11-17}
}
@misc{digicert,
title = {What {Is} {SSL} ({Secure} {Sockets} {Layer})? {\textbar} {What} is an {SSL} {Certificate}? {\textbar} {DigiCert}},
shorttitle = {digicert.com},
url = {https://www.digicert.com/what-is-an-ssl-certificate},
abstract = {What is a TLS/SSL Certificate and how does it work? TLS/SSL Certificates are small data files that digitally bind a cryptographic key to a company, business or organization’s details. TLS certificates are what enable websites to move from HTTP to HTTPS, which is more secure.},
language = {en-US},
urldate = {2021-11-17}
}
@misc{chmod,
note = {Indiana University Knowledge Base},
title = {Manage file permissions on {Unix}-like systems},
url = {https://kb.iu.edu/d/abdb},
urldate = {2021-11-18}
}
@misc{Holland_2020,
title = {What's the difference between data deletion and data erasure?},
url = {https://www.ontrack.com/en-us/blog/whats-the-difference-between-data-deletion-and-data-erasure},
abstract = {Confusing the terms erasure and deletion can result in severe consequences. Make sure you're clear on the difference to avoid the risks of a data breach.},
language = {en-US},
urldate = {2021-11-18},
journal = {Ontrack},
author = {Holland, Tilly},
month = jan,
year = {2020}
}
@misc{wikipedia_erasure_2021,
title = {Data erasure},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Data_erasure&oldid=1055626662},
abstract = {Data erasure (sometimes referred to as data clearing, data wiping, or data destruction) is a software-based method of overwriting the data that aims to completely destroy all electronic data residing on a hard disk drive or other digital media by using zeros and ones to overwrite data onto all sectors of the device. By overwriting the data on the storage device, the data is rendered irrecoverable and achieves data sanitization.
Ideally, software designed for data erasure should:
Allow for selection of a specific standard, based on unique needs, and
Verify the overwriting method has been successful and removed data across the entire device. Permanent data erasure goes beyond basic file deletion commands, which only remove direct pointers to the data disk sectors and make the data recovery possible with common software tools. Unlike degaussing and physical destruction, which render the storage media unusable, data erasure removes all information while leaving the disk operable. New flash memory-based media implementations, such as solid-state drives or USB flash drives, can cause data erasure techniques to fail allowing remnant data to be recoverable. Software-based overwriting uses a software application to write a stream of zeros, ones or meaningless pseudorandom data onto all sectors of a hard disk drive. There are key differentiators between data erasure and other overwriting methods, which can leave data intact and raise the risk of data breach, identity theft or failure to achieve regulatory compliance. Many data eradication programs also provide multiple overwrites so that they support recognized government and industry standards, though a single-pass overwrite is widely considered to be sufficient for modern hard disk drives. Good software should provide verification of data removal, which is necessary for meeting certain standards.
To protect the data on lost or stolen media, some data erasure applications remotely destroy the data if the password is incorrectly entered. Data erasure tools can also target specific data on a disk for routine erasure, providing a hacking protection method that is less time-consuming than software encryption. Hardware/firmware encryption built into the drive itself or integrated controllers is a popular solution with no degradation in performance at all.},
language = {en},
urldate = {2021-11-18},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1055626662}
}
@misc{transistor_count,
title = {Transistor count},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Transistor_count&oldid=1059186358},
abstract = {The transistor count is the number of transistors in an electronic device. It typically refers to the number of MOSFETs (metal-oxide-semiconductor field-effect transistors, or MOS transistors) on an integrated circuit (IC) chip, as all modern ICs use MOSFETs. It is the most common measure of IC complexity (although the majority of transistors in modern microprocessors are contained in the cache memories, which consist mostly of the same memory cell circuits replicated many times). The rate at which MOS transistor counts have increased generally follows Moore's law, which observed that the transistor count doubles approximately every two years.
As of 2021, the largest transistor count in a commercially available microprocessor is 57 billion MOSFETs, in Apple's ARM-based M1 Max system on a chip, which is fabricated using TSMC's 5 nm semiconductor manufacturing process. As of November 2021, the highest transistor count GPU is AMD's Instinct MI250(X), built on TSMC's N6 process and totalling 59 billion MOSFETs across two dies. As of 2019, the highest transistor count in any IC chip was Samsung's 1 terabyte eUFS (3D-stacked) V-NAND flash memory chip, with 2 trillion floating-gate MOSFETs (4 bits per transistor). As of 2020, the highest transistor count in any IC chip is a deep learning engine called the Wafer Scale Engine 2 by Cerebras, using a special design to route around any non-functional core on the device; it has 2.6 trillion MOSFETs, manufactured using TSMC's 7 nm FinFET process.
In terms of computer systems that consist of numerous integrated circuits, the supercomputer with the highest transistor count as of 2016 is the Chinese-designed Sunway TaihuLight, which has for all CPUs/nodes combined "about 400 trillion transistors in the processing part of the hardware" and "the DRAM includes about 12 quadrillion transistors, and that's about 97 percent of all the transistors." To compare, the smallest computer, as of 2018 dwarfed by a grain of rice, has on the order of 100,000 transistors. Early experimental solid state computers had as few as 130 transistors, but used large amounts of diode logic. The first carbon nanotube computer has 178 transistors and is a 1-bit one-instruction set computer, while a later one is 16-bit (while the instruction set is 32-bit RISC-V).
In terms of the total number of transistors in existence, it has been estimated that a total of 13 sextillion (1.3×10^22) MOSFETs have been manufactured worldwide between 1960 and 2018. MOSFETs account for at least 99.9\% of all transistors, the majority of which have been used for NAND flash memory manufactured during the early 21st century. This makes the MOSFET the most widely manufactured device in history.},
language = {en},
urldate = {2021-12-09},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1059186358}
}
@misc{finfet_2021,
title = {{FinFET}},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=FinFET&oldid=1040651882},
abstract = {A fin field-effect transistor (FinFET) is a multigate device, a MOSFET (metal-oxide-semiconductor field-effect transistor) built on a substrate where the gate is placed on two, three, or four sides of the channel or wrapped around the channel, forming a double or even multi gate structure. These devices have been given the generic name "FinFETs" because the source/drain region forms fins on the silicon surface. The FinFET devices have significantly faster switching times and higher current density than planar CMOS (complementary metal-oxide-semiconductor) technology.
FinFET is a type of non-planar transistor, or "3D" transistor. It is the basis for modern nanoelectronic semiconductor device fabrication. Microchips utilizing FinFET gates first became commercialized in the first half of the 2010s, and became the dominant gate design at 14 nm, 10 nm and 7 nm process nodes.
It is common for a single FinFET transistor to contain several fins, arranged side by side and all covered by the same gate, that act electrically as one, to increase drive strength and performance.},
language = {en},
urldate = {2021-12-09},
journal = {Wikipedia},
month = aug,
year = {2021},
note = {Page Version ID: 1040651882}
}
@misc{ligand-gated_2021,
title = {Ligand-gated ion channel},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Ligand-gated_ion_channel&oldid=1047592762},
abstract = {Ligand-gated ion channels (LICs, LGIC), also commonly referred to as ionotropic receptors, are a group of transmembrane ion-channel proteins which open to allow ions such as Na+, K+, Ca2+, and/or Cl− to pass through the membrane in response to the binding of a chemical messenger (i.e. a ligand), such as a neurotransmitter. When a presynaptic neuron is excited, it releases a neurotransmitter from vesicles into the synaptic cleft. The neurotransmitter then binds to receptors located on the postsynaptic neuron. If these receptors are ligand-gated ion channels, a resulting conformational change opens the ion channels, which leads to a flow of ions across the cell membrane. This, in turn, results in either a depolarization, for an excitatory receptor response, or a hyperpolarization, for an inhibitory response.
These receptor proteins are typically composed of at least two different domains: a transmembrane domain which includes the ion pore, and an extracellular domain which includes the ligand binding location (an allosteric binding site). This modularity has enabled a 'divide and conquer' approach to finding the structure of the proteins (crystallising each domain separately). The function of such receptors located at synapses is to convert the chemical signal of presynaptically released neurotransmitter directly and very quickly into a postsynaptic electrical signal. Many LICs are additionally modulated by allosteric ligands, by channel blockers, ions, or the membrane potential. LICs are classified into three superfamilies which lack evolutionary relationship: cys-loop receptors, ionotropic glutamate receptors and ATP-gated channels.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = oct,
year = {2021},
note = {Page Version ID: 1047592762}
}
@article{ligand-gated_2011,
title = {Ligand-{Gated} {Ion} {Channels}},
volume = {164},
issn = {0007-1188},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3315629/},
doi = {10.1111/j.1476-5381.2011.01649_4.x},
number = {Suppl 1},
urldate = {2021-12-14},
journal = {British Journal of Pharmacology},
month = nov,
year = {2011},
pmcid = {PMC3315629},
pages = {S115--S135}
}
@misc{how_transistors_work,
title = {How does a transistor work?},
url = {https://www.physlink.com/education/askexperts/ae430.cfm},
urldate = {2021-12-14},
}
@misc{transistor_basics,
title = {Transistor {Basics}},
author = {Ryan, V.},
year = {2002},
url = {https://technologystudent.com/elec1/transis1.htm},
urldate = {2021-12-14},
}
@misc{Wikipedia_logic_gate_2021,
title = {Logic gate},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Logic_gate&oldid=1058563875},
abstract = {A logic gate is an idealized model of computation or physical electronic device implementing a Boolean function, a logical operation performed on one or more binary inputs that produces a single binary output. Depending on the context, the term may refer to an ideal logic gate, one that has for instance zero rise time and unlimited fan-out, or it may refer to a non-ideal physical device (see Ideal and real op-amps for comparison).
Logic gates are primarily implemented using diodes or transistors acting as electronic switches, but can also be constructed using vacuum tubes, electromagnetic relays (relay logic), fluidic logic, pneumatic logic, optics, molecules, or even mechanical elements. With amplification, logic gates can be cascaded in the same way that Boolean functions can be composed, allowing the construction of a physical model of all of Boolean logic, and therefore, all of the algorithms and mathematics that can be described with Boolean logic.
Logic circuits include such devices as multiplexers, registers, arithmetic logic units (ALUs), and computer memory, all the way up through complete microprocessors, which may contain more than 100 million gates. In modern practice, most gates are made from MOSFETs (metal–oxide–semiconductor field-effect transistors).
Compound logic gates AND-OR-Invert (AOI) and OR-AND-Invert (OAI) are often employed in circuit design because their construction using MOSFETs is simpler and more efficient than the sum of the individual gates. In reversible logic, Toffoli gates are used.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058563875}
}
@misc{tom_ward_2017,
title = {This {Could} {Mark} the {End} of the {Silicon} {Age}},
url = {https://futurism.com/could-mark-end-silicon-age},
urldate = {2021-12-14},
journal = {Futurism},
author = {Ward, Tom},
month = jun,
year = {2017}
}
@misc{Wikipedia_silicon_2021,
title = {Silicon {Valley}},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Silicon_Valley&oldid=1059899296},
abstract = {Silicon Valley is a region in Northern California that serves as a global center for high technology and innovation. Located in the southern part of the San Francisco Bay Area, it corresponds roughly to the geographical Santa Clara Valley. San Jose is Silicon Valley's largest city, the third-largest in California, and the tenth-largest in the United States; other major Silicon Valley cities include Sunnyvale, Santa Clara, Redwood City, Mountain View, Palo Alto, Menlo Park, and Cupertino. The San Jose Metropolitan Area has the third-highest GDP per capita in the world (after Zurich, Switzerland and Oslo, Norway), according to the Brookings Institution, and, as of June 2021, has the highest percentage in the country of homes valued at \$1 million or more. Silicon Valley is home to many of the world's largest high-tech corporations, including the headquarters of more than 30 businesses in the Fortune 1000, and thousands of startup companies. Silicon Valley also accounts for one-third of all of the venture capital investment in the United States, which has helped it to become a leading hub and startup ecosystem for high-tech innovation. It was in Silicon Valley that the silicon-based integrated circuit, the microprocessor, and the microcomputer, among other technologies, were developed. As of 2013, the region employed about a quarter of a million information technology workers. As more high-tech companies were established across San Jose and the Santa Clara Valley, and then north towards the Bay Area's two other major cities, San Francisco and Oakland, the term "Silicon Valley" came to have two definitions: a narrower geographic one, referring to Santa Clara County and southeastern San Mateo County, and a metonymical definition referring to high-tech businesses in the entire Bay Area. The term Silicon Valley is often used as a synecdoche for the American high-technology economic sector. The name also became a global synonym for leading high-tech research and enterprises, and thus inspired similarly named locations, as well as research parks and technology centers with comparable structures all around the world. Many headquarters of tech companies in Silicon Valley have become hotspots for tourism.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1059899296}
}
@misc{woodford_how_2007,
title = {How do transistors work?},
url = {http://www.explainthatstuff.com/howtransistorswork.html},
abstract = {An easy-to-understand introduction to transistors},
urldate = {2021-12-14},
journal = {Explain that Stuff},
author = {Woodford, Chris},
month = jan,
year = {2007},
}
@misc{Wikipedia_boolean_2021,
title = {Boolean algebra},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Boolean_algebra&oldid=1058093529},
abstract = {In mathematics and mathematical logic, Boolean algebra is the branch of algebra in which the values of the variables are the truth values true and false, usually denoted 1 and 0, respectively. Instead of elementary algebra, where the values of the variables are numbers and the prime operations are addition and multiplication, the main operations of Boolean algebra are the conjunction (and) denoted as ∧, the disjunction (or) denoted as ∨, and the negation (not) denoted as ¬. It is thus a formalism for describing logical operations, in the same way that elementary algebra describes numerical operations.
Boolean algebra was introduced by George Boole in his first book The Mathematical Analysis of Logic (1847), and set forth more fully in his An Investigation of the Laws of Thought (1854).
According to Huntington, the term "Boolean algebra" was first suggested by Sheffer in 1913, although Charles Sanders Peirce gave the title "A Boolean Algebra with One Constant" to the first chapter of his "The Simplest Mathematics" in 1880.
Boolean algebra has been fundamental in the development of digital electronics, and is provided for in all modern programming languages. It is also used in set theory and statistics.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058093529}
}
@misc{ComputerHope_64-bit,
title = {What is 64-bit ({WOW64} and {X64})?},
url = {https://www.computerhope.com/jargon/num/64bit.htm},
abstract = {Computer dictionary definition of what 64-bit / WOW64 / x64 means, including related links, information, and terms.},
language = {en},
urldate = {2021-12-14},
journal = {ComputerHope}
}
@misc{Wikipedia_AND_gate_and_2021,
title = {{AND} gate},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=AND_gate&oldid=1057207215},
abstract = {The AND gate is a basic digital logic gate that implements logical conjunction (∧) from mathematical logic – it behaves according to the truth table above. A HIGH output (1) results only if all the inputs to the AND gate are HIGH (1). If none or not all inputs to the AND gate are HIGH, LOW output results. The function can be extended to any number of inputs.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1057207215}
}
@misc{Wikipedia_OR_gate_2021,
title = {{OR} gate},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=OR_gate&oldid=1029393190},
abstract = {The OR gate is a digital logic gate that implements logical disjunction (∨) from mathematical logic – it behaves according to the truth table above. A HIGH output (1) results if one or both the inputs to the gate are HIGH (1). If neither input is high, a LOW output (0) results. In another sense, the function of OR effectively finds the maximum between two binary digits, just as the complementary AND function finds the minimum.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = jun,
year = {2021},
note = {Page Version ID: 1029393190}
}
@misc{Wikipedia_adder_2021,
title = {Adder (electronics)},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Adder_(electronics)&oldid=1054161978},
abstract = {An adder is a digital circuit that performs addition of numbers.
In many computers and other kinds of processors adders are used in the arithmetic logic units or ALU. They are also used in other parts of the processor, where they are used to calculate addresses, table indices, increment and decrement operators and similar operations.
Although adders can be constructed for many number representations, such as binary-coded decimal or excess-3, the most common adders operate on binary numbers.
In cases where two's complement or ones' complement is being used to represent negative numbers, it is trivial to modify an adder into an adder–subtractor.
Other signed number representations require more logic around the basic adder.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1054161978},
}
@misc{binary_calc,
title = {Binary {Calculator}},
url = {https://www.calculator.net/binary-calculator.html},
urldate = {2021-12-14},
}
@misc{Wikipedia_flip-flop_2021,
title = {Flip-flop (electronics)},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Flip-flop_(electronics)&oldid=1055073341},
abstract = {In electronics, a flip-flop or latch is a circuit that has two stable states and can be used to store state information – a bistable multivibrator. The circuit can be made to change state by signals applied to one or more control inputs and will have one or two outputs. It is the basic storage element in sequential logic. Flip-flops and latches are fundamental building blocks of digital electronics systems used in computers, communications, and many other types of systems.
Flip-flops and latches are used as data storage elements. A flip-flop is a device which stores a single bit (binary digit) of data; one of its two states represents a "one" and the other represents a "zero". Such data storage can be used for storage of state, and such a circuit is described as sequential logic in electronics. When used in a finite-state machine, the output and next state depend not only on its current input, but also on its current state (and hence, previous inputs). It can also be used for counting of pulses, and for synchronizing variably-timed input signals to some reference timing signal.
Flip-flops can be either level-triggered (asynchronous, transparent or opaque) or edge-triggered (synchronous, or clocked). The term flip-flop has historically referred generically to both level-triggered and edge-triggered circuits that store a single bit of data using gates. Recently, some authors reserve the term flip-flop exclusively for discussing clocked circuits; the simple ones are commonly called transparent latches. Using this terminology, a level-sensitive flip-flop is called a transparent latch, whereas an edge-triggered flip-flop is simply called a flip-flop. Using either terminology, the term "flip-flop" refers to a device that stores a single bit of data, but the term "latch" may also refer to a device that stores any number of bits of data using a single trigger. The terms "edge-triggered", and "level-triggered" may be used to avoid ambiguity. When a level-triggered latch is enabled it becomes transparent, but an edge-triggered flip-flop's output only changes on a single type (positive going or negative going) of clock edge.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1055073341},
}
@misc{Wikipedia_memory_2021,
title = {Memory cell (computing)},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Memory_cell_(computing)&oldid=1055073514},
abstract = {The memory cell is the fundamental building block of computer memory. The memory cell is an electronic circuit that stores one bit of binary information and it must be set to store a logic 1 (high voltage level) and reset to store a logic 0 (low voltage level). Its value is maintained/stored until it is changed by the set/reset process. The value in the memory cell can be accessed by reading it.
Over the history of computing, different memory cell architectures have been used, including core memory and bubble memory. Today, the most common memory cell architecture is MOS memory, which consists of metal–oxide–semiconductor (MOS) memory cells. Modern random-access memory (RAM) uses MOS field-effect transistors (MOSFETs) as flip-flops, along with MOS capacitors for certain types of RAM.
The SRAM (static RAM) memory cell is a type of flip-flop circuit, typically implemented using MOSFETs. These require very low power to keep the stored value when not being accessed. A second type, DRAM (dynamic RAM), is based around MOS capacitors. Charging and discharging a capacitor can store a '1' or a '0' in the cell. However, the charge in this capacitor will slowly leak away, and must be refreshed periodically. Because of this refresh process, DRAM uses more power. However, DRAM can achieve greater storage densities.
On the other hand, most non-volatile memory (NVM) is based on floating-gate memory cell architectures. Non-volatile memory technologies including EPROM, EEPROM and flash memory use floating-gate memory cells, which are based around floating-gate MOSFET transistors.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1055073514},
}
@misc{Wikipedia_register_2021,
title = {Hardware register},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Hardware_register&oldid=1024702439},
abstract = {In digital electronics, especially computing, hardware registers are circuits typically composed of flip flops, often with many characteristics similar to memory, such as:
The ability to read or write multiple bits at a time, and
Using an address to select a particular register in a manner similar to a memory address. Their distinguishing characteristic, however, is that they also have special hardware-related functions beyond those of ordinary memory. So, depending on the point of view, hardware registers are like memory with additional hardware-related functions; or, memory circuits are like hardware registers that just store data.
Hardware registers are used in the interface between software and peripherals. Software writes them to send information to the device, and reads them to get information from the device. Some hardware devices also include registers that are not visible to software, for their internal use.
Depending on their complexity, modern hardware devices can have many registers. Standard integrated circuits typically document their externally-exposed registers as part of their electronic component datasheet.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = may,
year = {2021},
note = {Page Version ID: 1024702439}
}
@misc{Wikipedia_ascii_2021,
title = {{ASCII}},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=ASCII&oldid=1058507634},
abstract = {ASCII (ASS-kee), abbreviated from American Standard Code for Information Interchange, is a character encoding standard for electronic communication. ASCII codes represent text in computers, telecommunications equipment, and other devices. Most modern character-encoding schemes are based on ASCII, although they support many additional characters.
The Internet Assigned Numbers Authority (IANA) prefers the name US-ASCII for this character encoding. ASCII is one of the IEEE milestones.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058507634},
}
@misc{Wikipedia_unicode_2021,
title = {Unicode},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Unicode&oldid=1060083479},
abstract = {Unicode, formally the Unicode Standard, is an information technology standard for the consistent encoding, representation, and handling of text expressed in most of the world's writing systems. The standard, which is maintained by the Unicode Consortium, defines 144,697 characters covering 159 modern and historic scripts, as well as symbols, emoji, and non-visual control and formatting codes.
The Unicode character repertoire is synchronized with ISO/IEC 10646, each being code-for-code identical with the other. The Unicode Standard, however, includes more than just the base code. Alongside the character encodings, the Consortium's official publication includes a wide variety of details about the scripts and how to display them: normalization rules, decomposition, collation, rendering, and bidirectional text display order for multilingual texts, and so on. The Standard also includes reference data files and visual charts to help developers and designers correctly implement the repertoire.
Unicode's success at unifying character sets has led to its widespread and predominant use in the internationalization and localization of computer software. The standard has been implemented in many recent technologies, including modern operating systems, XML, and most modern programming languages.
Unicode can be implemented by different character encodings. The Unicode standard defines Unicode Transformation Formats (UTF): UTF-8, UTF-16, and UTF-32, and several other encodings. The most commonly used encodings are UTF-8, UTF-16, and the obsolete UCS-2 (a precursor of UTF-16 without full support for Unicode); GB18030, while not an official Unicode standard, is standardized in China and implements Unicode fully.
UTF-8, the dominant encoding on the World Wide Web (used in over 95\% of websites as of 2020, and up to 100\% for some languages) and on most Unix-like operating systems, uses one byte (8 bits) for the first 128 code points, and up to 4 bytes for other characters. The first 128 Unicode code points represent the ASCII characters, which means that any ASCII text is also a UTF-8 text.
UCS-2 uses two bytes (16 bits) for each character but can only encode the first 65,536 code points, the so-called Basic Multilingual Plane (BMP). With 1,112,064 possible Unicode code points corresponding to characters (see below) on 17 planes, and with over 144,000 code points defined as of version 14.0, UCS-2 is only able to represent less than half of all encoded Unicode characters. Therefore, UCS-2 is obsolete, though still used in software. UTF-16 extends UCS-2, by using the same 16-bit encoding as UCS-2 for the Basic Multilingual Plane, and a 4-byte encoding for the other planes. As long as it contains no code points in the reserved range U+D800–U+DFFF, a UCS-2 text is valid UTF-16 text.
UTF-32 (also referred to as UCS-4) uses four bytes to encode any given code point, but not necessarily any given user-perceived character (loosely speaking, a grapheme), since a user-perceived character may be represented by a grapheme cluster (a sequence of multiple code points). Like UCS-2, the number of bytes per code point is fixed, facilitating code point indexing; but unlike UCS-2, UTF-32 is able to encode all Unicode code points. However, because each code point uses four bytes, UTF-32 takes significantly more space than other encodings, and is not widely used. Although UTF-32 has a fixed size for each code point, it is also variable-length with respect to user-perceived characters. Examples include: the Devanagari kshi, which is encoded by 4 code points, and national flag emojis, which are composed of two code points. All combining character sequences are graphemes, but there are other sequences of code points that are as well, for example {\textbackslash}r{\textbackslash}n.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1060083479},
}
@misc{Wikipedia_word_length_2021,
title = {Word (computer architecture)},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Word_(computer_architecture)&oldid=1058922992},
abstract = {In computing, a word is the natural unit of data used by a particular processor design. A word is a fixed-sized piece of data handled as a unit by the instruction set or the hardware of the processor. The number of bits in a word (the word size, word width, or word length) is an important characteristic of any specific processor design or computer architecture.
The size of a word is reflected in many aspects of a computer's structure and operation; the majority of the registers in a processor are usually word sized and the largest piece of data that can be transferred to and from the working memory in a single operation is a word in many (not all) architectures. The largest possible address size, used to designate a location in memory, is typically a hardware word (here, "hardware word" means the full-sized natural word of the processor, as opposed to any other definition used).
Documentation for computers with fixed word size commonly stated memory sizes in words rather than bytes or characters. Terms such as kilowords (KW) meaning 1024 words (2^10) and megawords (MW) meaning 1,048,576 words (2^20) were normally used. With standardization on 8-bit bytes and byte addressability, stating memory sizes in bytes, kilobytes, and megabytes has become the norm.
Several of the earliest computers (and a few modern as well) used binary-coded decimal rather than plain binary, typically having a word size of 10 or 12 decimal digits, and some early decimal computers had no fixed word length at all. Early binary systems tended to use word lengths that were some multiple of 6-bits, with the 36-bit word being especially common on mainframe computers. The introduction of ASCII led to the move to systems with word lengths that were a multiple of 8-bits, with 16-bit machines being popular in the 1970s before the move to modern processors with 32 or 64 bits. Special-purpose designs like digital signal processors may have any word length from 4 to 80 bits. The size of a word can sometimes differ from the expected due to backward compatibility with earlier computers. If multiple compatible variations or a family of processors share a common architecture and instruction set but differ in their word sizes, their documentation and software may become notationally complex to accommodate the difference (see Size families below).},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058922992},
}
@incollection{braunl_central_2008,
address = {Berlin, Heidelberg},
title = {Central {Processing} {Unit}},
isbn = {978-3-540-70534-5},
url = {https://doi.org/10.1007/978-3-540-70534-5_2},
abstract = {The CPU (central processing unit) is the heart of every embedded system and every personal computer. It comprises the ALU (arithmetic logic unit), responsible for the number crunching, and the CU (control unit), responsible for instruction sequencing and branching. Modern microprocessors and microcontrollers provide on a single chip the CPU and a varying degree of additional components, such as counters, timing coprocessors, watchdogs, SRAM (static RAM), and Flash-ROM (electrically erasable ROM). Hardware can be described on several different levels, from low-level transistor level to high-level hardware description languages (HDLs). The so-called register-transfer level is somewhat in-between, describing CPU components and their interaction on a relatively high level. We will use this level in this chapter to introduce gradually more complex components, which we will then use to construct a complete CPU. With the simulation system Retro [Chansavat Bräunl 1999], [Bräunl 2000], we will be able to actually program, run, and test our CPUs.},
language = {en},
urldate = {2021-12-14},
booktitle = {Embedded {Robotics}: {Mobile} {Robot} {Design} and {Applications} with {Embedded} {Systems}},
publisher = {Springer},
editor = {Bräunl, Thomas},
year = {2008},
doi = {10.1007/978-3-540-70534-5_2},
keywords = {Central Processing Unit, Function Unit, Memory Module, Output Line, Program Counter},
pages = {17--47},
}
@misc{CPU_redhat,
title = {The central processing unit ({CPU}): {Its} components and functionality},
shorttitle = {The central processing unit ({CPU})},
url = {https://www.redhat.com/sysadmin/cpu-components-functionality},
abstract = {An introduction to the CPU, what it does, how it works, and how it came to be.},
language = {en},
urldate = {2021-12-14},
journal = {Enable Sysadmin},
author = {Both, David},
note = {Publisher: Red Hat, Inc. Section: Enable Sysadmin},
}
@misc{Wikipedia_CPU_2021,
title = {Central processing unit},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Central_processing_unit&oldid=1059451491},
abstract = {A central processing unit (CPU), also called a central processor, main processor or just processor, is the electronic circuitry that executes instructions comprising a computer program. The CPU performs basic arithmetic, logic, controlling, and input/output (I/O) operations specified by the instructions in the program. This contrasts with external components such as main memory and I/O circuitry, and specialized processors such as graphics processing units (GPUs).
The form, design, and implementation of CPUs have changed over time, but their fundamental operation remains almost unchanged. Principal components of a CPU include the arithmetic–logic unit (ALU) that performs arithmetic and logic operations, processor registers that supply operands to the ALU and store the results of ALU operations, and a control unit that orchestrates the fetching (from memory), decoding and execution of instructions by directing the coordinated operations of the ALU, registers and other components.
Most modern CPUs are implemented on integrated circuit (IC) microprocessors, with one or more CPUs on a single metal-oxide-semiconductor (MOS) IC chip. Microprocessor chips with multiple CPUs are multi-core processors. The individual physical CPUs, processor cores, can also be multithreaded to create additional virtual or logical CPUs. An IC that contains a CPU may also contain memory, peripheral interfaces, and other components of a computer; such integrated devices are variously called microcontrollers or systems on a chip (SoC).
Array processors or vector processors have multiple processors that operate in parallel, with no unit considered central. Virtual CPUs are an abstraction of dynamical aggregated computational resources.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1059451491},
}
@misc{GPU,
title = {What {Is} a {GPU}? {Graphics} {Processing} {Units} {Defined}},
shorttitle = {What {Is} a {GPU}?},
url = {https://www.intel.com/content/www/us/en/products/docs/processors/what-is-a-gpu.html},
abstract = {Find out what a GPU is, how they work, and their uses for parallel processing with a definition and description of graphics processing units.},
language = {en},
urldate = {2021-12-14},
journal = {Intel},
}
@misc{Wikipedia_hyper-threading,
title = {Hyper-threading},
journal = {Wikipedia},
url = {https://en.wikipedia.org/wiki/Hyper-threading},
urldate = {2021-12-14},
}
@misc{hyperthreading,
title = {What {Is} {Hyper-threading} {\textbar} {HP}® {Tech} {Takes}},
url = {https://www.hp.com/us-en/shop/tech-takes/what-is-hyperthreading},
abstract = {Hyper-threading can increase CPU efficiency and improve your PC’s performance. What is hyper-threading? Discover more in our HP® Tech Takes article.},
language = {en-us},
urldate = {2021-12-14},
}
@misc{RAM_ComputerHope,
title = {What is {RAM} ({Random}-{Access} {Memory})?},
url = {https://www.computerhope.com/jargon/r/ram.htm},
abstract = {Computer dictionary definition of what RAM (random-access memory) means, including related links, information, and terms.},
language = {en},
urldate = {2021-12-14},
}
@misc{RAM_HowStuff_Works,
title = {How {RAM} {Works}},
url = {https://computer.howstuffworks.com/ram.htm},
abstract = {Random access memory (RAM) is as important to your computer's operation as the CPU, because it determines how quickly and efficiently your computer performs tasks. So how does it do that and can you upgrade it?},
language = {en},
urldate = {2021-12-14},
journal = {HowStuffWorks},
month = aug,
year = {2000},
}
@misc{SSD,
title = {What is {SSD} ({Solid}-{State} {Drive})?},
url = {https://www.computerhope.com/jargon/s/ssd.htm},
abstract = {Computer dictionary definition of what SSD (solid-state drive) means, including related links, information, and terms.},
language = {en},
urldate = {2021-12-14},
}
@misc{hard_drive,
title = {What is a {Hard} {Drive}?},
url = {https://www.computerhope.com/jargon/h/harddriv.htm},
abstract = {Computer dictionary definition of what hard drive means, including related links, information, and terms.},
language = {en},
urldate = {2021-12-14},
}
@misc{hard_drive_works,
title = {How a {Hard} {Drive} {Works}},
journal = {ExtremeTech},
url = {https://www.extremetech.com/computing/88078-how-a-hard-drive-works},
urldate = {2021-12-14},
}
@misc{Wikipedia_cache_2021,
title = {{CPU} cache},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=CPU_cache&oldid=1058670354},
abstract = {A CPU cache is a hardware cache used by the central processing unit (CPU) of a computer to reduce the average cost (time or energy) to access data from the main memory. A cache is a smaller, faster memory, located closer to a processor core, which stores copies of the data from frequently used main memory locations. Most CPUs have a hierarchy of multiple cache levels (L1, L2, often L3, and rarely even L4), with separate instruction-specific and data-specific caches at level 1.
Other types of caches exist (that are not counted towards the "cache size" of the most important caches mentioned above), such as the translation lookaside buffer (TLB) which is part of the memory management unit (MMU) which most CPUs have.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058670354},
}
@misc{Wikipedia_hardware_2021,
title = {Computer hardware},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://simple.wikipedia.org/w/index.php?title=Computer_hardware&oldid=7474128},
abstract = {Computer hardware or hardware means the physical parts that make up a computer.
Hardware needs software to tell it what to do. Without software, the hardware cannot be used.},
language = {en},
urldate = {2021-12-14},
journal = {Simple English Wikipedia, the free encyclopedia},
month = apr,
year = {2021},
note = {Page Version ID: 7474128},
}
@misc{Wikipedia_software_2021,
title = {Software},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://simple.wikipedia.org/w/index.php?title=Software&oldid=7816229},
abstract = {Computer software, also called software, is a set of instructions and documentation that tells a computer what to do or how to perform a task. Software includes all different programs on a computer, such as applications and the operating system. Applications are programs that are designed to perform a specific operation, such as a game or a word processor. The operating system (e.g. Mac OS, Microsoft Windows, Android and various Linux distributions) is a type of software that is used as a platform for running the applications, and controls all user interface tools including display and the keyboard.
The word software was first used in the late 1960s to emphasize on its difference from computer hardware, which can be physically observed by the user. Software is a set of instructions that the computer follows. Before compact discs (CDs) or development of the Internet age, software was used on various computer data storage media tools like paper punch cards, magnetic discs or magnetic tapes.
The word firmware is sometimes used to describe a style of software that is made specifically for a particular type of computer or an electronic device and is usually stored on a Flash memory or ROM chip in the computer. Firmware usually refers to a piece of software that directly controls a piece of hardware. The firmware for a CD drive or the firmware for a modem are examples of firmware implementation.
Today, software has become an important part of our lives. Software is used everywhere. Software engineers are responsible for producing fault-free software which has literally become an essential part of our daily lives. Changeability and conformity are two of the main properties of software design. There are also different processing models for designing software including Build and Fix, Waterfall and Agile software processing design methods.},
language = {en},
urldate = {2021-12-14},
journal = {Simple English Wikipedia, the free encyclopedia},
month = oct,
year = {2021},
note = {Page Version ID: 7816229},
}
@misc{Wikipedia_OS_2021,
title = {Operating system},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Operating_system&oldid=1059529136},
abstract = {An operating system (OS) is system software that manages computer hardware, software resources, and provides common services for computer programs.
Time-sharing operating systems schedule tasks for efficient use of the system and may also include accounting software for cost allocation of processor time, mass storage, printing, and other resources.
For hardware functions such as input and output and memory allocation, the operating system acts as an intermediary between programs and the computer hardware, although the application code is usually executed directly by the hardware and frequently makes system calls to an OS function or is interrupted by it. Operating systems are found on many devices that contain a computer – from cellular phones and video game consoles to web servers and supercomputers.
The dominant general-purpose personal computer operating system is Microsoft Windows with a market share of around 76.45\%. macOS by Apple Inc. is in second place (17.72\%), and the varieties of Linux are collectively in third place (1.73\%). In the mobile sector (including smartphones and tablets), Android's share was about 72\% in 2020. According to third-quarter 2016 data, Android was dominant on smartphones with an 87.5 percent share and an annual growth rate of 10.3 percent, followed by Apple's iOS with 12.1 percent and an annual decline in market share of 5.2 percent, while other operating systems amounted to just 0.3 percent. Linux distributions are dominant in the server and supercomputing sectors. Other specialized classes of operating systems (special-purpose operating systems), such as embedded and real-time systems, exist for many applications. Security-focused operating systems also exist. Some operating systems have low system requirements (e.g. lightweight Linux distributions), while others may have higher system requirements.
Some operating systems require installation or may come pre-installed with purchased computers (OEM-installation), whereas others may run directly from media (i.e. live CD) or flash memory (i.e. USB stick).},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1059529136},
}
@misc{OS_2017,
title = {Everything {You} {Need} {To} {Know} {About} – {Operating} {Systems}},
url = {https://www.deskdecode.com/operating-systems-os/},
abstract = {Operating Systems – More Complicated Than You Thought, HUH!! Operating Systems? An article on Operating Systems? A little passé, you say? Nope, not really, because we’re sure we can give you …},
language = {en},
urldate = {2021-12-14},
journal = {DESKDECODE.COM},
author = {Tj, Madhur},
month = apr,
year = {2017},
}
@misc{punched_card_2021,
title = {Punched card},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Punched_card&oldid=1058789890},
abstract = {A punched card (also punch card or punched-card) is a piece of stiff paper that holds digital data represented by the presence or absence of holes in predefined positions. Punched cards were once common in data processing applications or to directly control automated machinery.
Punched cards were widely used through much of the 20th century in the data processing industry, where specialized and increasingly complex unit record machines, organized into semiautomatic data processing systems, used punched cards for data input, output, and storage. The IBM 12-row/80-column punched card format came to dominate the industry. Many early digital computers used punched cards as the primary medium for input of both computer programs and data.
While punched cards are now obsolete as a storage medium, as of 2012, some voting machines still used punched cards to record votes. They also had a significant cultural impact.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058789890},
}
@misc{hardware_history_2021,
title = {History of computing hardware},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=History_of_computing_hardware&oldid=1059827280},
abstract = {The history of computing hardware covers the developments from early simple devices to aid calculation to modern-day computers. Before the 20th century, most calculations were done by humans. Early mechanical tools to help humans with digital calculations, like the abacus, were referred to as calculating machines or calculators (and other proprietary names). The machine operator was called the computer.
The first aids to computation were purely mechanical devices which required the operator to set up the initial values of an elementary arithmetic operation, then manipulate the device to obtain the result. Later, computers represented numbers in a continuous form (e.g. distance along a scale, rotation of a shaft, or a voltage). Numbers could also be represented in the form of digits, automatically manipulated by a mechanism. Although this approach generally required more complex mechanisms, it greatly increased the precision of results. The development of transistor technology and then the integrated circuit chip led to a series of breakthroughs, starting with transistor computers and then integrated circuit computers, causing digital computers to largely replace analog computers. Metal-oxide-semiconductor (MOS) large-scale integration (LSI) then enabled semiconductor memory and the microprocessor, leading to another key breakthrough, the miniaturized personal computer (PC), in the 1970s. The cost of computers gradually became so low that personal computers by the 1990s, and then mobile computers (smartphones and tablets) in the 2000s, became ubiquitous.},
language = {en},
urldate = {2021-12-14},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1059827280},
}
@misc{untold_2019,
title = {Untold {History} of {AI}: {Invisible} {Women} {Programmed} {America}'s {First} {Electronic} {Computer}},
author = {Schwartz, Oscar},
shorttitle = {Untold {History} of {AI}},
url = {https://spectrum.ieee.org/untold-history-of-ai-invisible-woman-programmed-americas-first-electronic-computer},
abstract = {The “human computers” who operated ENIAC have received little credit},
language = {en},
urldate = {2021-12-14},
journal = {IEEE Spectrum},
month = mar,
year = {2019},
note = {Section: History of Technology},
}
@article{hinkson_comprehensive_2017,
title = {A {Comprehensive} {Infrastructure} for {Big} {Data} in {Cancer} {Research}: {Accelerating} {Cancer} {Research} and {Precision} {Medicine}},
volume = {5},
issn = {2296-634X},
shorttitle = {A {Comprehensive} {Infrastructure} for {Big} {Data} in {Cancer} {Research}},
url = {https://www.frontiersin.org/article/10.3389/fcell.2017.00083},
doi = {10.3389/fcell.2017.00083},
abstract = {Advancements in next-generation sequencing and other -omics technologies are accelerating the detailed molecular characterization of individual patient tumors, and driving the evolution of precision medicine. Cancer is no longer considered a single disease, but rather, a diverse array of diseases wherein each patient has a unique collection of germline variants and somatic mutations. Molecular profiling of patient-derived samples has led to a data explosion that could help us understand the contributions of environment and germline to risk, therapeutic response, and outcome. To maximize the value of these data, an interdisciplinary approach is paramount. The National Cancer Institute (NCI) has initiated multiple projects to characterize tumor samples using multi-omic approaches. These projects harness the expertise of clinicians, biologists, computer scientists, and software engineers to investigate cancer biology and therapeutic response in multidisciplinary teams. Petabytes of cancer genomic, transcriptomic, epigenomic, proteomic, and imaging data have been generated by these projects. To address the data analysis challenges associated with these large datasets, the NCI has sponsored the development of the Genomic Data Commons (GDC) and three Cloud Resources. The GDC ensures data and metadata quality, ingests and harmonizes genomic data, and securely redistributes the data. During its pilot phase, the Cloud Resources tested multiple cloud-based approaches for enhancing data access, collaboration, computational scalability, resource democratization, and reproducibility. These NCI-led efforts are continuously being refined to better support open data practices and precision oncology, and to serve as building blocks of the NCI Cancer Research Data Commons.},
urldate = {2021-12-06},
journal = {Frontiers in Cell and Developmental Biology},
author = {Hinkson, Izumi V. and Davidsen, Tanja M. and Klemm, Juli D. and Chandramouliswaran, Ishwar and Kerlavage, Anthony R. and Kibbe, Warren A.},
year = {2017},
pages = {83},
}
@misc{visions_women_2017,
title = {Women pioneered computer programming. {Then} men took their industry over.},
url = {https://timeline.com/women-pioneered-computer-programming-then-men-took-their-industry-over-c2959b822523},
abstract = {How “computer girls” gave way to tech bros},
language = {en},
urldate = {2021-12-14},
journal = {Medium},
author = {{New Visions}},
month = aug,
year = {2017},
}
@misc{antonio_villas-boas_laptops_2019,
title = {Laptops {Vs}. {Desktops}: 5 {Reasons} {Why} {I} {Still} {Prefer} a {Desktop} in 2019},
url = {https://www.businessinsider.com/laptops-vs-desktops-2018-8#for-most-laptops-youre-stuck-with-their-specs-until-its-time-to-upgrade-to-a-new-model-desktops-can-be-upgraded-pretty-easily-3},
urldate = {2021-12-16},
author = {{Antonio Villas-Boas}},
year = {2019},
}
@misc{userbenchmark,
title = {{UserBenchmark}: {Core} i7-{11700K} {Build} {Comparisons}},
shorttitle = {{UserBenchmark}},
url = {https://www.userbenchmark.com/PCBuilder/Custom/S0-M1487712vsS0-M},
urldate = {2021-12-16},
}
@misc{clock_cycle,
title = {What is a {Clock} {Cycle}? - {Definition} from {Techopedia}},
shorttitle = {What is a {Clock} {Cycle}?},
url = {http://www.techopedia.com/definition/5498/clock-cycle},
abstract = {This definition explains the meaning of Clock Cycle and why it matters.},
language = {en},
urldate = {2021-12-16},
journal = {Techopedia.com},
}
@misc{clock_rate,
title = {Clock rate},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Clock_rate&oldid=1057355255},
abstract = {In computing, the clock rate or clock speed typically refers to the frequency at which the clock generator of a processor can generate pulses, which are used to synchronize the operations of its components, and is used as an indicator of the processor's speed. It is measured in clock cycles per second or its equivalent, the SI unit hertz (Hz).
The clock rate of the first generation of computers was measured in hertz or kilohertz (kHz), the first personal computers (PCs) to arrive throughout the 1970s and 1980s had clock rates measured in megahertz (MHz), and in the 21st century the speed of modern CPUs is commonly advertised in gigahertz (GHz). This metric is most useful when comparing processors within the same family, holding constant other features that may affect performance. Video card and CPU manufacturers commonly select their highest performing units from a manufacturing batch and set their maximum clock rate higher, fetching a higher price.},
language = {en},
urldate = {2021-12-16},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1057355255},
}
@misc{RAM_speed,
title = {How important is {RAM} speed? {\textbar} {Velocity} {Micro}},
shorttitle = {How important is {RAM} speed?},
url = {https://www.velocitymicro.com/blog/important-ram-speed/},
abstract = {How important is RAM speed for gaming and other applications? Does faster mean better? Don't choose RAM speed before reading this article first.},
language = {en-US},
urldate = {2021-12-16},
journal = {Custom Gaming \& Enthusiast PC Blog {\textbar} Velocity Micro},
author = {{Josh Covington}},
month = dec,
year = {2017},
}
@misc{mukherjee_ram_2019,
title = {Does {RAM} clock speed have an impact on gaming? {\textbar} {Digit}},
shorttitle = {Does {RAM} clock speed have an impact on gaming?},
url = {https://www.digit.in/features/gaming/does-ram-clock-speed-have-an-impact-on-gaming-45138.html},
abstract = {Getting RAM with higher clock speeds may not always translate to better gaming performance - it could even make things worse.},
language = {en},
urldate = {2021-12-16},
journal = {digit.in},
author = {Mukherjee, Arnab},
month = sep,
year = {2019},
}
@misc{scott_thornton_RAM,
title = {What is {DDR} ({Double} {Data} {Rate}) {Memory} and {SDRAM} {Memory}},
url = {https://www.microcontrollertips.com/understanding-ddr-sdram-faq/},
urldate = {2021-12-16},
author = {{Scott Thornton}},
year = {2021},
}
@misc{NIS,
title = {{NIS} {Database} {Documentation}},
url = {https://www.hcup-us.ahrq.gov/db/nation/nis/nisdbdocumentation.jsp},
urldate = {2021-12-17},
}
@misc{server_def,
title = {Server {Definition}},
url = {https://techterms.com/definition/server},
urldate = {2021-12-17},
}
@misc{server_2021,
title = {Server (computing)},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Server_(computing)&oldid=1058799313},
abstract = {In computing, a server is a piece of computer hardware or software (computer program) that provides functionality for other programs or devices, called "clients". This architecture is called the client–server model. Servers can provide various functionalities, often called "services", such as sharing data or resources among multiple clients, or performing computation for a client. A single server can serve multiple clients, and a single client can use multiple servers. A client process may run on the same device or may connect over a network to a server on a different device. Typical servers are database servers, file servers, mail servers, print servers, web servers, game servers, and application servers. Client–server systems are most frequently implemented by (and often identified with) the request–response model: a client sends a request to the server, which performs some action and sends a response back to the client, typically with a result or acknowledgment. Designating a computer as "server-class hardware" implies that it is specialized for running servers on it. This often implies that it is more powerful and reliable than standard personal computers, but alternatively, large computing clusters may be composed of many relatively simple, replaceable server components.},
language = {en},
urldate = {2021-12-17},
journal = {Wikipedia},
month = dec,
year = {2021},
note = {Page Version ID: 1058799313},
}
@misc{GUI,
title = {What is a {Graphical} {User} {Interface}? {Definition} and {FAQs} {\textbar} {OmniSci}},
shorttitle = {What is a {Graphical} {User} {Interface}?},
url = {https://www.omnisci.com/technical-glossary/graphical-user-interface},
abstract = {Learn the definition of Graphical User Interface, and get answers to FAQs regarding: How does a GUI work, Advantages of GUI, Best Programming Language for Graphical User Interfaces and more.},
urldate = {2022-01-10},
}
@misc{command-line_2022,
title = {Command-line interface},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Command-line_interface&oldid=1063715296},
abstract = {A command-line interface (CLI) processes commands to a computer program in the form of lines of text. The program which handles the interface is called a command-line interpreter or command-line processor. Operating systems implement a command-line interface in a shell for interactive access to operating system functions or services. Such access was primarily provided to users by computer terminals starting in the mid-1960s, and continued to be used throughout the 1970s and 1980s on VAX/VMS, Unix systems and personal computer systems including DOS, CP/M and Apple DOS.
Today, many users rely upon graphical user interfaces and menu-driven interactions. However, some programming and maintenance tasks may not have a graphical user interface and may still use a command line.
Alternatives to the command-line interface include text-based user interface menus (for example, IBM AIX SMIT), keyboard shortcuts, and various desktop metaphors centered on the pointer (usually controlled with a mouse). Examples of this include Microsoft Windows, DOS Shell, and Mouse Systems PowerPanel. Command-line interfaces are often implemented in terminal devices that are also capable of screen-oriented text-based user interfaces that use cursor addressing to place symbols on a display screen.
Programs with command-line interfaces are generally easier to automate via scripting.
Many software systems implement command-line interfaces for control and operation. This includes programming environments and utility programs.},
language = {en},
urldate = {2022-01-10},
journal = {Wikipedia},
month = jan,
year = {2022},
note = {Page Version ID: 1063715296},
}
@misc{computer_cluster_2022,
title = {Computer cluster},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Computer_cluster&oldid=1064333998},
abstract = {A computer cluster is a set of computers that work together so that they can be viewed as a single system. Unlike grid computers, computer clusters have each node set to perform the same task, controlled and scheduled by software.
The components of a cluster are usually connected to each other through fast local area networks, with each node (computer used as a server) running its own instance of an operating system. In most circumstances, all of the nodes use the same hardware and the same operating system, although in some setups (e.g. using Open Source Cluster Application Resources (OSCAR)), different operating systems or different hardware can be used on each computer. Clusters are usually deployed to improve performance and availability over that of a single computer, while typically being much more cost-effective than single computers of comparable speed or availability. Computer clusters emerged as a result of the convergence of a number of computing trends, including the availability of low-cost microprocessors, high-speed networks, and software for high-performance distributed computing. They have a wide range of applicability and deployment, ranging from small business clusters with a handful of nodes to some of the fastest supercomputers in the world, such as IBM's Sequoia. Prior to the advent of clusters, single-unit fault-tolerant mainframes with modular redundancy were employed; but the lower upfront cost of clusters and the increased speed of network fabric have favoured the adoption of clusters. In contrast to high-reliability mainframes, clusters are cheaper to scale out, but also have increased complexity in error handling, as in clusters error modes are not opaque to running programs.},
language = {en},
urldate = {2022-01-10},
journal = {Wikipedia},
month = jan,
year = {2022},
note = {Page Version ID: 1064333998},
}
@misc{parallel_2021,
title = {Parallel computing},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Parallel_computing&oldid=1055326361},
abstract = {Parallel computing is a type of computation in which many calculations or processes are carried out simultaneously. Large problems can often be divided into smaller ones, which can then be solved at the same time. There are several different forms of parallel computing: bit-level, instruction-level, data, and task parallelism. Parallelism has long been employed in high-performance computing, but has gained broader interest due to the physical constraints preventing frequency scaling. As power consumption (and consequently heat generation) by computers has become a concern in recent years, parallel computing has become the dominant paradigm in computer architecture, mainly in the form of multi-core processors. Parallel computing is closely related to concurrent computing—they are frequently used together, and often conflated, though the two are distinct: it is possible to have parallelism without concurrency (such as bit-level parallelism), and concurrency without parallelism (such as multitasking by time-sharing on a single-core CPU). In parallel computing, a computational task is typically broken down into several, often many, very similar sub-tasks that can be processed independently and whose results are combined afterwards, upon completion. In contrast, in concurrent computing, the various processes often do not address related tasks; when they do, as is typical in distributed computing, the separate tasks may have a varied nature and often require some inter-process communication during execution.
Parallel computers can be roughly classified according to the level at which the hardware supports parallelism, with multi-core and multi-processor computers having multiple processing elements within a single machine, while clusters, MPPs, and grids use multiple computers to work on the same task. Specialized parallel computer architectures are sometimes used alongside traditional processors, for accelerating specific tasks.
In some cases parallelism is transparent to the programmer, such as in bit-level or instruction-level parallelism, but explicitly parallel algorithms, particularly those that use concurrency, are more difficult to write than sequential ones, because concurrency introduces several new classes of potential software bugs, of which race conditions are the most common. Communication and synchronization between the different subtasks are typically some of the greatest obstacles to getting optimal parallel program performance.
A theoretical upper bound on the speed-up of a single program as a result of parallelization is given by Amdahl's law.},
language = {en},
urldate = {2022-01-10},
journal = {Wikipedia},
month = nov,
year = {2021},
note = {Page Version ID: 1055326361},
}
@misc{beowulf_2022,
title = {Beowulf cluster},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Beowulf_cluster&oldid=1064232827},
abstract = {A Beowulf cluster is a computer cluster of what are normally identical, commodity-grade computers networked into a small local area network with libraries and programs installed which allow processing to be shared among them. The result is a high-performance parallel computing cluster from inexpensive personal computer hardware.
The name Beowulf originally referred to a specific computer built in 1994 by Thomas Sterling and Donald Becker at NASA. The name "Beowulf" comes from the Old English epic poem of the same name. No particular piece of software defines a cluster as a Beowulf. Typically only free and open source software is used, both to save cost and to allow customisation. Most Beowulf clusters run a Unix-like operating system, such as BSD, Linux, or Solaris. Commonly used parallel processing libraries include Message Passing Interface (MPI) and Parallel Virtual Machine (PVM). Both of these permit the programmer to divide a task among a group of networked computers, and collect the results of processing. Examples of MPI software include Open MPI or MPICH. There are additional MPI implementations available.
Beowulf systems operate worldwide, chiefly in support of scientific computing. Since 2017, every system on the Top500 list of the world's fastest supercomputers has used Beowulf software methods and a Linux operating system. At this level, however, most are by no means just assemblages of commodity hardware; custom design work is often required for the nodes (often blade servers), the networking and the cooling systems.},
language = {en},
urldate = {2022-01-10},
journal = {Wikipedia},
month = jan,
year = {2022},
note = {Page Version ID: 1064232827},
}
@misc{de_doncker,
title = {Parallel \& {Cluster} {Computing}},
url = {https://cs.wmich.edu/~elise/courses/cs626/s09/hussein/Parallel_and_Cluster_Computing.pdf},
language = {en},
author = {De Doncker, Elise and Hussein, Lina},
}
@misc{grid,
title = {What is {Grid} {Computing}? {How} {It} {Works} with {Examples}},
shorttitle = {What is {Grid} {Computing}?},
url = {https://hazelcast.com/glossary/grid-computing/},
abstract = {Grid computing is the practice of leveraging multiple network computers, often geographically distributed, to work together to accomplish joint tasks.},
language = {en-US},
urldate = {2022-01-10},
journal = {Hazelcast},
}
@misc{lithmee_difference_2018,
title = {Difference {Between} {Cluster} and {Grid} {Computing}},
url = {https://pediaa.com/difference-between-cluster-and-grid-computing/},
abstract = {The main difference between cluster and grid computing is that cluster computing is a homogeneous network in which devices have the same hardware components and the same OS connected together in a cluster, while grid computing is a heterogeneous network in which devices have different...},
language = {en-US},
urldate = {2022-01-10},
journal = {Pediaa.Com},
author = {{Lithmee}},